// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
#ifdef NETIF_F_HW_VLAN_CTAG_TX
#include <linux/if_vlan.h>
#endif

#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_PCI_IOV
static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
					   unsigned int num_vfs)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct vf_macvlans *mv_list;
	int num_vf_macvlans, i;

	num_vf_macvlans = hw->mac.num_rar_entries -
			  (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
	if (!num_vf_macvlans)
		return;

	mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
			  GFP_KERNEL);
	if (mv_list) {
		/* Initialize list of VF macvlans */
		INIT_LIST_HEAD(&adapter->vf_mvs.l);
		for (i = 0; i < num_vf_macvlans; i++) {
			mv_list[i].vf = -1;
			mv_list[i].free = true;
			list_add(&mv_list[i].l, &adapter->vf_mvs.l);
		}
		adapter->mv_list = mv_list;
	}
}

static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
				unsigned int num_vfs)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (adapter->xdp_prog) {
		e_warn(probe, "SRIOV is not supported with XDP\n");
		return -EINVAL;
	}

	/* Enable VMDq flag so device will be set in VM mode */
	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
			  IXGBE_FLAG_VMDQ_ENABLED;

	/* Allocate memory for per VF control structures */
	adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
				  GFP_KERNEL);
	if (!adapter->vfinfo)
		return -ENOMEM;

	adapter->num_vfs = num_vfs;

	ixgbe_alloc_vf_macvlans(adapter, num_vfs);
	adapter->ring_feature[RING_F_VMDQ].offset = num_vfs;

	/* Initialize default switching mode VEB */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
	adapter->bridge_mode = BRIDGE_MODE_VEB;

	/* limit traffic classes based on VFs enabled */
	if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) {
		adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
		adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
	} else if (num_vfs < 32) {
		adapter->dcb_cfg.num_tcs.pg_tcs = 4;
		adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
	} else {
		adapter->dcb_cfg.num_tcs.pg_tcs = 1;
		adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
	}

	/* Disable RSC when in SR-IOV mode */
	adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
			     IXGBE_FLAG2_RSC_ENABLED);

	for (i = 0; i < num_vfs; i++) {
		/* enable spoof checking for all VFs */
		adapter->vfinfo[i].spoofchk_enabled = true;

		/* We support VF RSS querying only for 82599 and x540
		 * devices at the moment. These devices share RSS
		 * indirection table and RSS hash key with PF therefore
		 * we want to disable the querying by default.
		 */
		adapter->vfinfo[i].rss_query_enabled = 0;

		/* Untrust all VFs */
		adapter->vfinfo[i].trusted = false;

		/* set the default xcast mode */
		adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
	}

	e_info(probe, "SR-IOV enabled with %d VFs\n", num_vfs);
	return 0;
}

/**
 * ixgbe_get_vfs - Find and take references to all vf devices
 * @adapter: Pointer to adapter struct
 */
static void ixgbe_get_vfs(struct ixgbe_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u16 vendor = pdev->vendor;
	struct pci_dev *vfdev;
	int vf = 0;
	u16 vf_id;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);

	vfdev = pci_get_device(vendor, vf_id, NULL);
	for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) {
		if (!vfdev->is_virtfn)
			continue;
		if (vfdev->physfn != pdev)
			continue;
		if (vf >= adapter->num_vfs)
			continue;
		pci_dev_get(vfdev);
		adapter->vfinfo[vf].vfdev = vfdev;
		++vf;
	}
}

/* Note this function is called when the user wants to enable SR-IOV
 * VFs using the now deprecated module parameter
 */
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
{
	int pre_existing_vfs = 0;
	unsigned int num_vfs;

	pre_existing_vfs = pci_num_vf(adapter->pdev);
	if (!pre_existing_vfs && !max_vfs)
		return;

	/* If there are pre-existing VFs then we have to force
	 * use of that many - override any module parameter value.
	 * This may result from the user unloading the PF driver
	 * while VFs were assigned to guest VMs or because the VFs
	 * have been created via the new PCI SR-IOV sysfs interface.
	 */
	if (pre_existing_vfs) {
		num_vfs = pre_existing_vfs;
		dev_warn(&adapter->pdev->dev,
			 "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
	} else {
		int err;
		/*
		 * The 82599 supports up to 64 VFs per physical function
		 * but this implementation limits allocation to 63 so that
		 * basic networking resources are still available to the
		 * physical function. If the user requests greater than
		 * 63 VFs then it is an error - reset to default of zero.
		 */
		num_vfs = min_t(unsigned int, max_vfs, IXGBE_MAX_VFS_DRV_LIMIT);

		err = pci_enable_sriov(adapter->pdev, num_vfs);
		if (err) {
			e_err(probe, "Failed to enable PCI sriov: %d\n", err);
			return;
		}
	}

	if (!__ixgbe_enable_sriov(adapter, num_vfs)) {
		ixgbe_get_vfs(adapter);
		return;
	}

	/* If we have gotten to this point then there is no memory available
	 * to manage the VF devices - print message and bail.
	 */
	e_err(probe, "Unable to allocate memory for VF Data Storage - "
	      "SRIOV disabled\n");
	ixgbe_disable_sriov(adapter);
}

#endif /* #ifdef CONFIG_PCI_IOV */
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
	unsigned int num_vfs = adapter->num_vfs, vf;
	int rss;

	/* set num VFs to 0 to prevent access to vfinfo */
	adapter->num_vfs = 0;

	/* put the reference to all of the vf devices */
	for (vf = 0; vf < num_vfs; ++vf) {
		struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;

		if (!vfdev)
			continue;
		adapter->vfinfo[vf].vfdev = NULL;
		pci_dev_put(vfdev);
	}

	/* free VF control structures */
	kfree(adapter->vfinfo);
	adapter->vfinfo = NULL;

	/* free macvlan list */
	kfree(adapter->mv_list);
	adapter->mv_list = NULL;

	/* if SR-IOV is already disabled then there is nothing to do */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return 0;

#ifdef CONFIG_PCI_IOV
	/*
	 * If our VFs are assigned we cannot shut down SR-IOV
	 * without causing issues, so just leave the hardware
	 * available but disabled
	 */
	if (pci_vfs_assigned(adapter->pdev)) {
		e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
		return -EPERM;
	}
	/* disable iov and allow time for transactions to clear */
	pci_disable_sriov(adapter->pdev);
#endif

	/* Disable VMDq flag so device will be set in VM mode */
	if (bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools) == 1) {
		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
		adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
		rss = min_t(int, ixgbe_max_rss_indices(adapter),
			    num_online_cpus());
	} else {
		rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
	}

	adapter->ring_feature[RING_F_VMDQ].offset = 0;
	adapter->ring_feature[RING_F_RSS].limit = rss;

	/* take a breather then clean up driver data */
	msleep(100);
	return 0;
}

static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
	int pre_existing_vfs = pci_num_vf(dev);
	int err = 0, num_rx_pools, i, limit;
	u8 num_tc;

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		err = ixgbe_disable_sriov(adapter);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return num_vfs;

	if (err)
		return err;

	/* While the SR-IOV capability structure reports total VFs to be 64,
	 * we limit the actual number allocated as below based on two factors.
	 *    Num_TCs	MAX_VFs
	 *     1	  63
	 *     <=4	  31
	 *     >4	  15
	 * First, we reserve some transmit/receive resources for the PF.
	 * Second, VMDQ also uses the same pools that SR-IOV does. We need to
	 * account for this, so that we don't accidentally allocate more VFs
	 * than we have available pools. The PCI bus driver already checks for
	 * other values out of range.
	 */
	num_tc = adapter->hw_tcs;
	num_rx_pools = bitmap_weight(adapter->fwd_bitmask,
				     adapter->num_rx_pools);
	limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
		(num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;

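	/* Per the table above: with a single traffic class and no offloaded
	 * macvlans (num_rx_pools == 1), at most 63 - 1 = 62 VFs can be
	 * created.
	 */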
	if (num_vfs > (limit - num_rx_pools)) {
		e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n",
			  num_tc, num_rx_pools - 1, limit - num_rx_pools);
		return -EPERM;
	}

	err = __ixgbe_enable_sriov(adapter, num_vfs);
	if (err)
		return err;

	for (i = 0; i < num_vfs; i++)
		ixgbe_vf_configuration(dev, (i | 0x10000000));

	/* reset before enabling SRIOV to avoid mailbox issues */
	ixgbe_sriov_reinit(adapter);

	err = pci_enable_sriov(dev, num_vfs);
	if (err) {
		e_dev_warn("Failed to enable PCI sriov: %d\n", err);
		return err;
	}
	ixgbe_get_vfs(adapter);

	return num_vfs;
#else
	return 0;
#endif
}

static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
	int err;
#ifdef CONFIG_PCI_IOV
	u32 current_flags = adapter->flags;
	int prev_num_vf = pci_num_vf(dev);
#endif

	err = ixgbe_disable_sriov(adapter);

	/* Only reinit if no error and state changed */
#ifdef CONFIG_PCI_IOV
	if (!err && (current_flags != adapter->flags ||
		     prev_num_vf != pci_num_vf(dev)))
		ixgbe_sriov_reinit(adapter);
#endif

	return err;
}

int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
	if (num_vfs == 0)
		return ixgbe_pci_sriov_disable(dev);
	else
		return ixgbe_pci_sriov_enable(dev, num_vfs);
}

static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
				   u32 *msgbuf, u32 vf)
{
	int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
		      >> IXGBE_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
	struct ixgbe_hw *hw = &adapter->hw;
	int i;
	u32 vector_bit;
	u32 vector_reg;
	u32 mta_reg;
	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	/* only so many hash values supported */
	entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);

	/*
	 * salt away the number of multi cast addresses assigned
	 * to this VF for later use to restore when the PF multi cast
	 * list changes
	 */
	vfinfo->num_vf_mc_hashes = entries;

	/*
	 * VFs are limited to using the MTA hash table for their multicast
	 * addresses
	 */
	for (i = 0; i < entries; i++) {
		vfinfo->vf_mc_hashes[i] = hash_list[i];
	}

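	/* The low 12 bits of each hash value select a single bit in the
	 * shared MTA: bits 11:5 choose one of 128 32-bit registers and
	 * bits 4:0 choose the bit within that register.
	 */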
	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
		vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
		vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
		mta_reg |= BIT(vector_bit);
		IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
	}
	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}

#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct vf_data_storage *vfinfo;
	int i, j;
	u32 vector_bit;
	u32 vector_reg;
	u32 mta_reg;

	for (i = 0; i < adapter->num_vfs; i++) {
		u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
		vfinfo = &adapter->vfinfo[i];
		for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
			hw->addr_ctrl.mta_in_use++;
			vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
			vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
			mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
			mta_reg |= BIT(vector_bit);
			IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
		}

		if (vfinfo->num_vf_mc_hashes)
			vmolr |= IXGBE_VMOLR_ROMPE;
		else
			vmolr &= ~IXGBE_VMOLR_ROMPE;
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
	}

	/* Restore any VF macvlans */
	ixgbe_full_sync_mac_table(adapter);
}
#endif

static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
			     u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* If VLAN overlaps with one the PF is currently monitoring make
	 * sure that we are able to allocate a VLVF entry. This may be
	 * redundant but it guarantees PF will maintain visibility to
	 * the VLAN.
	 */
	if (add && test_bit(vid, adapter->active_vlans)) {
		err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false);
		if (err)
			return err;
	}

	err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false);

	if (add && !err)
		return err;

	/* If we failed to add the VF VLAN or we are removing the VF VLAN
	 * we may need to drop the PF pool bit in order to allow us to free
	 * up the VLVF resources.
	 */
	if (test_bit(vid, adapter->active_vlans) ||
	    (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
		ixgbe_update_pf_promisc_vlvf(adapter, vid);

	return err;
}

static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = msgbuf[1];
	u32 max_frs;

	/*
	 * For 82599EB we have to keep all PFs and VFs operating with
	 * the same max_frame value in order to avoid sending an oversize
	 * frame to a VF. In order to guarantee this is handled correctly
	 * for all cases we have several special exceptions to take into
	 * account before we can enable the VF for receive
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		struct net_device *dev = adapter->netdev;
		int pf_max_frame = dev->mtu + ETH_HLEN;
		u32 reg_offset, vf_shift, vfre;
		s32 err = 0;

#ifdef CONFIG_FCOE
		if (dev->features & NETIF_F_FCOE_MTU)
			pf_max_frame = max_t(int, pf_max_frame,
					     IXGBE_FCOE_JUMBO_FRAME_SIZE);

#endif /* CONFIG_FCOE */
		switch (adapter->vfinfo[vf].vf_api) {
		case ixgbe_mbox_api_11:
		case ixgbe_mbox_api_12:
		case ixgbe_mbox_api_13:
			/* Version 1.1 supports jumbo frames on VFs if PF has
			 * jumbo frames enabled which means legacy VFs are
			 * disabled
			 */
			if (pf_max_frame > ETH_FRAME_LEN)
				break;
			/* fall through */
		default:
			/* If the PF or VF are running w/ jumbo frames enabled
			 * we need to shut down the VF Rx path as we cannot
			 * support jumbo frames on legacy VFs
			 */
			if ((pf_max_frame > ETH_FRAME_LEN) ||
			    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
				err = -EINVAL;
			break;
		}

		/* determine VF receive enable location */
		vf_shift = vf % 32;
		reg_offset = vf / 32;

		/* enable or disable receive depending on error */
		vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
		if (err)
			vfre &= ~BIT(vf_shift);
		else
			vfre |= BIT(vf_shift);
		IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);

		if (err) {
			e_err(drv, "VF max_frame %d out of range\n", max_frame);
			return err;
		}
	}

	/* MTU < 68 is an error and causes problems on some kernels */
	if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
		e_err(drv, "VF max_frame %d out of range\n", max_frame);
		return -EINVAL;
	}

	/* pull current max frame size from hardware */
	max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
	max_frs &= IXGBE_MHADD_MFS_MASK;
	max_frs >>= IXGBE_MHADD_MFS_SHIFT;

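	/* MAXFRS is shared by the whole device; this path only ever raises it
	 * to cover the largest requested VF frame and never shrinks it.
	 */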
	if (max_frs < max_frame) {
		max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
	}

	e_info(hw, "VF requests change max MTU to %d\n", max_frame);

	return 0;
}

static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	vmolr |= IXGBE_VMOLR_BAM;
	if (aupe)
		vmolr |= IXGBE_VMOLR_AUPE;
	else
		vmolr &= ~IXGBE_VMOLR_AUPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}

static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
}

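/* Each VLVF entry is backed by a pair of VLVFB pool-enable words: pools
 * 0-31 live in the even word and pools 32-63 in the odd word, hence the
 * (i * 2 + vf / 32) indexing in ixgbe_clear_vf_vlans() below.
 */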
static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlvfb_mask, pool_mask, i;

	/* create mask for VF and other pools */
	pool_mask = ~BIT(VMDQ_P(0) % 32);
	vlvfb_mask = BIT(vf % 32);

	/* post increment loop, covers VLVF_ENTRIES - 1 to 0 */
	for (i = IXGBE_VLVF_ENTRIES; i--;) {
		u32 bits[2], vlvfb, vid, vfta, vlvf;
		u32 word = i * 2 + vf / 32;
		u32 mask;

		vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word));

		/* if our bit isn't set we can skip it */
		if (!(vlvfb & vlvfb_mask))
			continue;

		/* clear our bit from vlvfb */
		vlvfb ^= vlvfb_mask;

		/* create 64b mask to check to see if we should clear VLVF */
		bits[word % 2] = vlvfb;
		bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1));

		/* if other pools are present, just remove ourselves */
		if (bits[(VMDQ_P(0) / 32) ^ 1] ||
		    (bits[VMDQ_P(0) / 32] & pool_mask))
			goto update_vlvfb;

		/* if PF is present, leave VFTA */
		if (bits[0] || bits[1])
			goto update_vlvf;

		/* if we cannot determine VLAN just remove ourselves */
		vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
		if (!vlvf)
			goto update_vlvfb;

		vid = vlvf & VLAN_VID_MASK;
		mask = BIT(vid % 32);

		/* clear bit from VFTA */
		vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32));
		if (vfta & mask)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ mask);
update_vlvf:
		/* clear POOL selection enable */
		IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0);

		if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
			vlvfb = 0;
update_vlvfb:
		/* clear pool bits */
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb);
	}
}

static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
				int vf, int index, unsigned char *mac_addr)
{
	struct vf_macvlans *entry;
	struct list_head *pos;
	int retval = 0;

	if (index <= 1) {
		list_for_each(pos, &adapter->vf_mvs.l) {
			entry = list_entry(pos, struct vf_macvlans, l);
			if (entry->vf == vf) {
				entry->vf = -1;
				entry->free = true;
				entry->is_macvlan = false;
				ixgbe_del_mac_filter(adapter,
						     entry->vf_macvlan, vf);
			}
		}
	}

	/*
	 * If index was zero then we were asked to clear the uc list
	 * for the VF.  We're done.
	 */
	if (!index)
		return 0;

	entry = NULL;

	list_for_each(pos, &adapter->vf_mvs.l) {
		entry = list_entry(pos, struct vf_macvlans, l);
		if (entry->free)
			break;
	}

	/*
	 * If we traversed the entire list and didn't find a free entry
	 * then we're out of space on the RAR table.  Also entry may
	 * be NULL because the original memory allocation for the list
	 * failed, which is not fatal but does mean we can't support
	 * VF requests for MACVLAN because we couldn't allocate
	 * memory for the list management required.
	 */
	if (!entry || !entry->free)
		return -ENOSPC;

	retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
	if (retval < 0)
		return retval;

	entry->free = false;
	entry->is_macvlan = true;
	entry->vf = vf;
	memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);

	return 0;
}

static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	u8 num_tcs = adapter->hw_tcs;
	u32 reg_val;
	u32 queue;
	u32 word;

	/* remove VLAN filters belonging to this VF */
	ixgbe_clear_vf_vlans(adapter, vf);

	/* add back PF assigned VLAN or VLAN 0 */
	ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);

	/* reset offloads to defaults */
	ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);

	/* set outgoing tags for VFs */
	if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
		ixgbe_clear_vmvir(adapter, vf);
	} else {
		if (vfinfo->pf_qos || !num_tcs)
			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
					vfinfo->pf_qos, vf);
		else
			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
					adapter->default_up, vf);

		if (vfinfo->spoofchk_enabled)
			hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
	}

	/* reset multicast table array for vf */
	adapter->vfinfo[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	ixgbe_set_rx_mode(adapter->netdev);

	ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
	ixgbe_set_vf_macvlan(adapter, vf, 0, NULL);

	/* reset VF api back to unknown */
	adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;

	/* Restart each queue for given VF */
	for (queue = 0; queue < q_per_pool; queue++) {
		unsigned int reg_idx = (vf * q_per_pool) + queue;

		reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx));

		/* Re-enabling only configured queues */
		if (reg_val) {
			reg_val |= IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
		}
	}

	/* Clear VF's mailbox memory */
	for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);

	IXGBE_WRITE_FLUSH(hw);
}

static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
			    int vf, unsigned char *mac_addr)
{
	s32 retval;

	ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
	retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
	if (retval >= 0)
		memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr,
		       ETH_ALEN);
	else
		memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN);

	return retval;
}

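/* The event_mask used by ixgbe_pci_sriov_enable() packs the VF number into
 * its low six bits and sets bit 28 (0x10000000) to indicate the VF is being
 * enabled.
 */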
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	unsigned int vfn = (event_mask & 0x3f);

	bool enable = ((event_mask & 0x10000000U) != 0);

	if (enable)
		eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);

	return 0;
}

static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
				   u32 qde)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	int i;

	for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
		u32 reg;

		/* flush previous write */
		IXGBE_WRITE_FLUSH(hw);

		/* indicate to hardware that we want to set drop enable */
		reg = IXGBE_QDE_WRITE | qde;
		reg |= i << IXGBE_QDE_IDX_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
	}
}

static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
	u32 reg, reg_offset, vf_shift;
	u32 msgbuf[4] = {0, 0, 0, 0};
	u8 *addr = (u8 *)(&msgbuf[1]);
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	int i;

	e_info(probe, "VF Reset msg received from vf %d\n", vf);

	/* reset the filters for the device */
	ixgbe_vf_reset_event(adapter, vf);

	/* set vf mac address */
	if (!is_zero_ether_addr(vf_mac))
		ixgbe_set_vf_mac(adapter, vf, vf_mac);

	vf_shift = vf % 32;
	reg_offset = vf / 32;

	/* enable transmit for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
	reg |= BIT(vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);

	/* force drop enable for all VF Rx queues */
	reg = IXGBE_QDE_ENABLE;
	if (adapter->vfinfo[vf].pf_vlan)
		reg |= IXGBE_QDE_HIDE_VLAN;

	ixgbe_write_qde(adapter, vf, reg);

	/* enable receive for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
	reg |= BIT(vf_shift);
	/*
	 * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
	 * For more info take a look at ixgbe_set_vf_lpe
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		struct net_device *dev = adapter->netdev;
		int pf_max_frame = dev->mtu + ETH_HLEN;

#ifdef CONFIG_FCOE
		if (dev->features & NETIF_F_FCOE_MTU)
			pf_max_frame = max_t(int, pf_max_frame,
					     IXGBE_FCOE_JUMBO_FRAME_SIZE);

#endif /* CONFIG_FCOE */
		if (pf_max_frame > ETH_FRAME_LEN)
			reg &= ~BIT(vf_shift);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);

	/* enable VF mailbox for further messages */
	adapter->vfinfo[vf].clear_to_send = true;

	/* Enable counting of spoofed packets in the SSVPC register */
	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
	reg |= BIT(vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);

	/*
	 * Reset the VFs TDWBAL and TDWBAH registers
	 * which are not cleared by an FLR
	 */
	for (i = 0; i < q_per_pool; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0);
	}

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = IXGBE_VF_RESET;
	if (!is_zero_ether_addr(vf_mac) && adapter->vfinfo[vf].pf_set_mac) {
		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
		memcpy(addr, vf_mac, ETH_ALEN);
	} else {
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
	}

	/*
	 * Piggyback the multicast filter type so VF can compute the
	 * correct vectors
	 */
	msgbuf[3] = hw->mac.mc_filter_type;
	ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

	return 0;
}

static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	u8 *new_mac = ((u8 *)(&msgbuf[1]));

	if (!is_valid_ether_addr(new_mac)) {
		e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
		return -1;
	}

	if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
	    !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
		e_warn(drv,
		       "VF %d attempted to override administratively set MAC address\n"
		       "Reload the VF driver to resume operations\n",
		       vf);
		return -1;
	}

	return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
}

static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
	u8 tcs = adapter->hw_tcs;

	if (adapter->vfinfo[vf].pf_vlan || tcs) {
		e_warn(drv,
		       "VF %d attempted to override administratively set VLAN configuration\n"
		       "Reload the VF driver to resume operations\n",
		       vf);
		return -1;
	}

	/* VLAN 0 is a special case, don't allow it to be removed */
	if (!vid && !add)
		return 0;

	return ixgbe_set_vf_vlan(adapter, add, vid, vf);
}

static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
				    u32 *msgbuf, u32 vf)
{
	u8 *new_mac = ((u8 *)(&msgbuf[1]));
	int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
		    IXGBE_VT_MSGINFO_SHIFT;
	int err;

	if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
	    index > 0) {
		e_warn(drv,
		       "VF %d requested MACVLAN filter but is administratively denied\n",
		       vf);
		return -1;
	}

	/* A non-zero index indicates the VF is setting a filter */
	if (index) {
		if (!is_valid_ether_addr(new_mac)) {
			e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
			return -1;
		}

		/*
		 * If the VF is allowed to set MAC filters then turn off
		 * anti-spoofing to avoid false positives.
		 */
		if (adapter->vfinfo[vf].spoofchk_enabled) {
			struct ixgbe_hw *hw = &adapter->hw;

			hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
			hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
		}
	}

	err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
	if (err == -ENOSPC)
		e_warn(drv,
		       "VF %d has requested a MACVLAN filter but there is no space for it\n",
		       vf);

	return err < 0;
}

static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
				  u32 *msgbuf, u32 vf)
{
	int api = msgbuf[1];

	switch (api) {
	case ixgbe_mbox_api_10:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
		adapter->vfinfo[vf].vf_api = api;
		return 0;
	default:
		break;
	}

	e_info(drv, "VF %d requested invalid api version %u\n", vf, api);

	return -1;
}

static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
			       u32 *msgbuf, u32 vf)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	unsigned int default_tc = 0;
	u8 num_tcs = adapter->hw_tcs;

	/* verify the PF is supporting the correct APIs */
	switch (adapter->vfinfo[vf].vf_api) {
	case ixgbe_mbox_api_20:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
		break;
	default:
		return -1;
	}

	/* only allow 1 Tx queue for bandwidth limiting */
	msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
	msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);

	/* if TCs > 1 determine which TC belongs to default user priority */
	if (num_tcs > 1)
		default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);

	/* notify VF of need for VLAN tag stripping, and correct queue */
	if (num_tcs)
		msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
	else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
		msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
	else
		msgbuf[IXGBE_VF_TRANS_VLAN] = 0;

	/* notify VF of default queue */
	msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;

	return 0;
}

static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
	u32 i, j;
	u32 *out_buf = &msgbuf[1];
	const u8 *reta = adapter->rss_indir_tbl;
	u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter);

	/* Check if operation is permitted */
	if (!adapter->vfinfo[vf].rss_query_enabled)
		return -EPERM;

	/* verify the PF is supporting the correct API */
	switch (adapter->vfinfo[vf].vf_api) {
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* This mailbox command is supported (required) only for 82599 and x540
	 * VFs which support up to 4 RSS queues. Therefore we will compress the
	 * RETA by saving only 2 bits from each entry. This way we will be able
	 * to transfer the whole RETA in a single mailbox operation.
	 */
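	/* With a 128-entry table, for example, 16 two-bit entries fit in each
	 * 32-bit mailbox word, so the loop below emits 8 words.
	 */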
	for (i = 0; i < reta_size / 16; i++) {
		out_buf[i] = 0;
		for (j = 0; j < 16; j++)
			out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j);
	}

	return 0;
}

static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
				u32 *msgbuf, u32 vf)
{
	u32 *rss_key = &msgbuf[1];

	/* Check if the operation is permitted */
	if (!adapter->vfinfo[vf].rss_query_enabled)
		return -EPERM;

	/* verify the PF is supporting the correct API */
	switch (adapter->vfinfo[vf].vf_api) {
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		break;
	default:
		return -EOPNOTSUPP;
	}

	memcpy(rss_key, adapter->rss_key, IXGBE_RSS_KEY_SIZE);

	return 0;
}

static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
				      u32 *msgbuf, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int xcast_mode = msgbuf[1];
	u32 vmolr, fctrl, disable, enable;

	/* verify the PF is supporting the correct APIs */
	switch (adapter->vfinfo[vf].vf_api) {
	case ixgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		/* Fall through */
	case ixgbe_mbox_api_13:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI &&
	    !adapter->vfinfo[vf].trusted) {
		xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
	}

	if (adapter->vfinfo[vf].xcast_mode == xcast_mode)
		goto out;

	switch (xcast_mode) {
	case IXGBEVF_XCAST_MODE_NONE:
		disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
			  IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = 0;
		break;
	case IXGBEVF_XCAST_MODE_MULTI:
		disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
		break;
	case IXGBEVF_XCAST_MODE_ALLMULTI:
		disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
		break;
	case IXGBEVF_XCAST_MODE_PROMISC:
		if (hw->mac.type <= ixgbe_mac_82599EB)
			return -EOPNOTSUPP;

		fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
		if (!(fctrl & IXGBE_FCTRL_UPE)) {
			/* VF promisc requires PF in promisc */
			e_warn(drv,
			       "Enabling VF promisc requires PF in promisc\n");
			return -EPERM;
		}

		disable = 0;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
			 IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		break;
	default:
		return -EOPNOTSUPP;
	}

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
	vmolr &= ~disable;
	vmolr |= enable;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	adapter->vfinfo[vf].xcast_mode = xcast_mode;

out:
	msgbuf[1] = xcast_mode;

	return 0;
}

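/* Mailbox messages are arrays of 32-bit words.  Word 0 carries the request
 * type in its low 16 bits plus the IXGBE_VT_MSGTYPE_ACK/NACK/CTS status
 * flags in the upper bits, which is how the handler below parses requests
 * and builds its replies.
 */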
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
	u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	struct ixgbe_hw *hw = &adapter->hw;
	s32 retval;

	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);

	if (retval) {
		pr_err("Error receiving message from VF\n");
		return retval;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
		return 0;

	/* flush the ack before we write any messages back */
	IXGBE_WRITE_FLUSH(hw);

	if (msgbuf[0] == IXGBE_VF_RESET)
		return ixgbe_vf_reset_msg(adapter, vf);

	/*
	 * until the vf completes a virtual function reset it should not be
	 * allowed to start any configuration.
	 */
	if (!adapter->vfinfo[vf].clear_to_send) {
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
		ixgbe_write_mbx(hw, msgbuf, 1, vf);
		return 0;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case IXGBE_VF_SET_MAC_ADDR:
		retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_SET_MULTICAST:
		retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_SET_VLAN:
		retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_SET_LPE:
		retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_SET_MACVLAN:
		retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_GET_QUEUES:
		retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_GET_RETA:
		retval = ixgbe_get_vf_reta(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_GET_RSS_KEY:
		retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_UPDATE_XCAST_MODE:
		retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
		break;
	default:
		e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
		retval = IXGBE_ERR_MBX;
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;

	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;

	ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);

	return retval;
}

static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 msg = IXGBE_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!adapter->vfinfo[vf].clear_to_send)
		ixgbe_write_mbx(hw, &msg, 1, vf);
}

void ixgbe_msg_task(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->num_vfs; vf++) {
		/* process any reset requests */
		if (!ixgbe_check_for_rst(hw, vf))
			ixgbe_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!ixgbe_check_for_msg(hw, vf))
			ixgbe_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!ixgbe_check_for_ack(hw, vf))
			ixgbe_rcv_ack_from_vf(adapter, vf);
	}
}

void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* disable transmit and receive for all vfs */
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
}

static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ping;

	ping = IXGBE_PF_CONTROL_MSG;
	if (adapter->vfinfo[vf].clear_to_send)
		ping |= IXGBE_VT_MSGTYPE_CTS;
	ixgbe_write_mbx(hw, &ping, 1, vf);
}

void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0 ; i < adapter->num_vfs; i++) {
		ping = IXGBE_PF_CONTROL_MSG;
		if (adapter->vfinfo[i].clear_to_send)
			ping |= IXGBE_VT_MSGTYPE_CTS;
		ixgbe_write_mbx(hw, &ping, 1, i);
	}
}

int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	s32 retval;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (is_valid_ether_addr(mac)) {
		dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
			 mac, vf);
		dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective.");

		retval = ixgbe_set_vf_mac(adapter, vf, mac);
		if (retval >= 0) {
			adapter->vfinfo[vf].pf_set_mac = true;

			if (test_bit(__IXGBE_DOWN, &adapter->state)) {
				dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n");
				dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n");
			}
		} else {
			dev_warn(&adapter->pdev->dev, "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n");
		}
	} else if (is_zero_ether_addr(mac)) {
		unsigned char *vf_mac_addr =
					   adapter->vfinfo[vf].vf_mac_addresses;

		/* nothing to do */
		if (is_zero_ether_addr(vf_mac_addr))
			return 0;

		dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf);

		retval = ixgbe_del_mac_filter(adapter, vf_mac_addr, vf);
		if (retval >= 0) {
			adapter->vfinfo[vf].pf_set_mac = false;
			memcpy(vf_mac_addr, mac, ETH_ALEN);
		} else {
			dev_warn(&adapter->pdev->dev, "Could NOT remove the VF MAC address.\n");
		}
	} else {
		retval = -EINVAL;
	}

	return retval;
}

static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
				  u16 vlan, u8 qos)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
	if (err)
		goto out;

	/* Revoke tagless access via VLAN 0 */
	ixgbe_set_vf_vlan(adapter, false, 0, vf);

	ixgbe_set_vmvir(adapter, vlan, qos, vf);
	ixgbe_set_vmolr(hw, vf, false);

	/* enable hide vlan on X550 */
	if (hw->mac.type >= ixgbe_mac_X550)
		ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE |
				IXGBE_QDE_HIDE_VLAN);

	adapter->vfinfo[vf].pf_vlan = vlan;
	adapter->vfinfo[vf].pf_qos = qos;
	dev_info(&adapter->pdev->dev,
		 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
	if (test_bit(__IXGBE_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev,
			 "The VF VLAN has been set, but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev,
			 "Bring the PF device up before attempting to use the VF device.\n");
	}

out:
	return err;
}

static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	err = ixgbe_set_vf_vlan(adapter, false,
				adapter->vfinfo[vf].pf_vlan, vf);
	/* Restore tagless access via VLAN 0 */
	ixgbe_set_vf_vlan(adapter, true, 0, vf);
	ixgbe_clear_vmvir(adapter, vf);
	ixgbe_set_vmolr(hw, vf, true);

	/* disable hide VLAN on X550 */
	if (hw->mac.type >= ixgbe_mac_X550)
		ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);

	adapter->vfinfo[vf].pf_vlan = 0;
	adapter->vfinfo[vf].pf_qos = 0;

	return err;
}

int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
			  u8 qos, __be16 vlan_proto)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;
	if (vlan || qos) {
		/* Check if there is already a port VLAN set, if so
		 * we have to delete the old one first before we
		 * can set the new one.  The usage model had
		 * previously assumed the user would delete the
		 * old port VLAN before setting a new one but this
		 * is not necessarily the case.
		 */
		if (adapter->vfinfo[vf].pf_vlan)
			err = ixgbe_disable_port_vlan(adapter, vf);
		if (err)
			goto out;
		err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos);
	} else {
		err = ixgbe_disable_port_vlan(adapter, vf);
	}

out:
	return err;
}

int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
{
	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_100_FULL:
		return 100;
	case IXGBE_LINK_SPEED_1GB_FULL:
		return 1000;
	case IXGBE_LINK_SPEED_10GB_FULL:
		return 10000;
	default:
		return 0;
	}
}

static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf)
{
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_hw *hw = &adapter->hw;
	u32 bcnrc_val = 0;
	u16 queue, queues_per_pool;
	u16 tx_rate = adapter->vfinfo[vf].tx_rate;

	if (tx_rate) {
		/* start with base link speed value */
		bcnrc_val = adapter->vf_rate_link_speed;

		/* Calculate the rate factor values to set */
		bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
		bcnrc_val /= tx_rate;

		/* clear everything but the rate factor */
		bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
			     IXGBE_RTTBCNRC_RF_DEC_MASK;

		/* enable the rate scheduler */
		bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
	}

	/*
	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
	 * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
	 * and 0x004 otherwise.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
		break;
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
		break;
	default:
		break;
	}

	/* determine how many queues per pool based on VMDq mask */
	queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);

	/* write value for all Tx queues belonging to VF */
	for (queue = 0; queue < queues_per_pool; queue++) {
		unsigned int reg_idx = (vf * queues_per_pool) + queue;

		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx);
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
	}
}

void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
{
	int i;

	/* VF Tx rate limit was not set */
	if (!adapter->vf_rate_link_speed)
		return;

	if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) {
		adapter->vf_rate_link_speed = 0;
		dev_info(&adapter->pdev->dev,
			 "Link speed has been changed. VF Transmit rate is disabled\n");
	}

	for (i = 0; i < adapter->num_vfs; i++) {
		if (!adapter->vf_rate_link_speed)
			adapter->vfinfo[i].tx_rate = 0;

		ixgbe_set_vf_rate_limit(adapter, i);
	}
}

int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
			int max_tx_rate)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int link_speed;

	/* verify VF is active */
	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* verify link is up */
	if (!adapter->link_up)
		return -EINVAL;

	/* verify we are linked at 10Gbps */
	link_speed = ixgbe_link_mbps(adapter);
	if (link_speed != 10000)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* rate limit cannot be less than 10Mbs or greater than link speed */
	if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed)))
		return -EINVAL;

	/* store values */
	adapter->vf_rate_link_speed = link_speed;
	adapter->vfinfo[vf].tx_rate = max_tx_rate;

	/* update hardware configuration */
	ixgbe_set_vf_rate_limit(adapter, vf);

	return 0;
}

int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	adapter->vfinfo[vf].spoofchk_enabled = setting;

	/* configure MAC spoofing */
	hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);

	/* configure VLAN spoofing */
	hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);

	/* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be
	 * calling set_ethertype_anti_spoofing for each VF in loop below
	 */
	if (hw->mac.ops.set_ethertype_anti_spoofing) {
		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
				(IXGBE_ETQF_FILTER_EN |
				 IXGBE_ETQF_TX_ANTISPOOF |
				 IXGBE_ETH_P_LLDP));

		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
				(IXGBE_ETQF_FILTER_EN |
				 IXGBE_ETQF_TX_ANTISPOOF |
				 ETH_P_PAUSE));

		hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf);
	}

	return 0;
}

int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
				  bool setting)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* This operation is currently supported only for 82599 and x540
	 * devices.
	 */
	if (adapter->hw.mac.type < ixgbe_mac_82599EB ||
	    adapter->hw.mac.type >= ixgbe_mac_X550)
		return -EOPNOTSUPP;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	adapter->vfinfo[vf].rss_query_enabled = setting;

	return 0;
}

int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* nothing to do */
	if (adapter->vfinfo[vf].trusted == setting)
		return 0;

	adapter->vfinfo[vf].trusted = setting;

	/* reset VF to reconfigure features */
	adapter->vfinfo[vf].clear_to_send = false;
	ixgbe_ping_vf(adapter, vf);

	e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not ");

	return 0;
}

int ixgbe_ndo_get_vf_config(struct net_device *netdev,
			    int vf, struct ifla_vf_info *ivi)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->num_vfs)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
	ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = adapter->vfinfo[vf].pf_vlan;
	ivi->qos = adapter->vfinfo[vf].pf_qos;
	ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
	ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
	ivi->trusted = adapter->vfinfo[vf].trusted;
	return 0;
}