/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2014 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
#ifdef NETIF_F_HW_VLAN_CTAG_TX
#include <linux/if_vlan.h>
#endif

#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_PCI_IOV
static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int num_vf_macvlans, i;
	struct vf_macvlans *mv_list;

	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
	e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);

	/* Enable the VMDq flag so the device will be put into VM mode */
	adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
	if (!adapter->ring_feature[RING_F_VMDQ].limit)
		adapter->ring_feature[RING_F_VMDQ].limit = 1;
	adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs;

	num_vf_macvlans = hw->mac.num_rar_entries -
			  (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);

	adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
					     sizeof(struct vf_macvlans),
					     GFP_KERNEL);
	if (mv_list) {
		/* Initialize list of VF macvlans */
		INIT_LIST_HEAD(&adapter->vf_mvs.l);
		for (i = 0; i < num_vf_macvlans; i++) {
			mv_list->vf = -1;
			mv_list->free = true;
			list_add(&mv_list->l, &adapter->vf_mvs.l);
			mv_list++;
		}
	}

	/* Initialize default switching mode VEB */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
	adapter->bridge_mode = BRIDGE_MODE_VEB;

	/* If the call to enable VFs succeeded then allocate memory
	 * for per-VF control structures.
	 */
	adapter->vfinfo =
		kcalloc(adapter->num_vfs,
			sizeof(struct vf_data_storage), GFP_KERNEL);
	if (adapter->vfinfo) {
		/* limit traffic classes based on VFs enabled */
		if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
		    (adapter->num_vfs < 16)) {
			adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
			adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
		} else if (adapter->num_vfs < 32) {
			adapter->dcb_cfg.num_tcs.pg_tcs = 4;
			adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
		} else {
			adapter->dcb_cfg.num_tcs.pg_tcs = 1;
			adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
		}

		/* Disable RSC when in SR-IOV mode */
		adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
				     IXGBE_FLAG2_RSC_ENABLED);

		for (i = 0; i < adapter->num_vfs; i++) {
			/* enable spoof checking for all VFs */
			adapter->vfinfo[i].spoofchk_enabled = true;

			/* We support VF RSS querying only for 82599 and x540
			 * devices at the moment. These devices share the RSS
			 * indirection table and RSS hash key with the PF,
			 * therefore we want to disable the querying by
			 * default.
			 */
			adapter->vfinfo[i].rss_query_enabled = 0;

			/* Untrust all VFs */
			adapter->vfinfo[i].trusted = false;

			/* set the default xcast mode */
			adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
		}

		return 0;
	}

	return -ENOMEM;
}

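/*
 * A worked example of the RAR budget computed in __ixgbe_enable_sriov()
 * above, assuming the 82599 values used elsewhere in this driver
 * (hw->mac.num_rar_entries = 128, IXGBE_MAX_PF_MACVLANS = 15); the numbers
 * are illustrative only:
 *
 *	num_vf_macvlans = 128 - (15 + 1 + 32) = 80
 *
 * i.e. with 32 VFs enabled, the PF keeps 15 macvlan entries plus its own
 * MAC address, one RAR entry is reserved per VF MAC, and 80 entries remain
 * for VF-requested macvlan filters.
 */
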
/* Note this function is called when the user wants to enable SR-IOV
 * VFs using the now deprecated module parameter
 */
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
{
	int pre_existing_vfs = 0;

	pre_existing_vfs = pci_num_vf(adapter->pdev);
	if (!pre_existing_vfs && !adapter->num_vfs)
		return;

	/* If there are pre-existing VFs then we have to force
	 * use of that many - override any module parameter value.
	 * This may result from the user unloading the PF driver
	 * while VFs were assigned to guest VMs or because the VFs
	 * have been created via the new PCI SR-IOV sysfs interface.
	 */
	if (pre_existing_vfs) {
		adapter->num_vfs = pre_existing_vfs;
		dev_warn(&adapter->pdev->dev,
			 "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
	} else {
		int err;
		/*
		 * The 82599 supports up to 64 VFs per physical function
		 * but this implementation limits allocation to 63 so that
		 * basic networking resources are still available to the
		 * physical function. If the user requests greater than
		 * 63 VFs then it is an error - reset to default of zero.
		 */
		adapter->num_vfs = min_t(unsigned int, adapter->num_vfs,
					 IXGBE_MAX_VFS_DRV_LIMIT);

		err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (err) {
			e_err(probe, "Failed to enable PCI sriov: %d\n", err);
			adapter->num_vfs = 0;
			return;
		}
	}

	if (!__ixgbe_enable_sriov(adapter))
		return;

	/* If we have gotten to this point then there is no memory available
	 * to manage the VF devices - print message and bail.
	 */
	e_err(probe, "Unable to allocate memory for VF Data Storage - SRIOV disabled\n");
	ixgbe_disable_sriov(adapter);
}

#endif /* #ifdef CONFIG_PCI_IOV */
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gpie;
	u32 vmdctl;
	int rss;

	/* set num VFs to 0 to prevent access to vfinfo */
	adapter->num_vfs = 0;

	/* free VF control structures */
	kfree(adapter->vfinfo);
	adapter->vfinfo = NULL;

	/* free macvlan list */
	kfree(adapter->mv_list);
	adapter->mv_list = NULL;

	/* if SR-IOV is already disabled then there is nothing to do */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return 0;

#ifdef CONFIG_PCI_IOV
	/*
	 * If our VFs are assigned we cannot shut down SR-IOV
	 * without causing issues, so just leave the hardware
	 * available but disabled
	 */
	if (pci_vfs_assigned(adapter->pdev)) {
		e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
		return -EPERM;
	}
	/* disable iov and allow time for transactions to clear */
	pci_disable_sriov(adapter->pdev);
#endif

	/* turn off device IOV mode */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* set default pool back to 0 */
	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
	IXGBE_WRITE_FLUSH(hw);

	/* Clear the VMDq flag so the device is no longer set to VM mode */
	if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
		adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
		rss = min_t(int, ixgbe_max_rss_indices(adapter),
			    num_online_cpus());
	} else {
		rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
	}

	adapter->ring_feature[RING_F_VMDQ].offset = 0;
	adapter->ring_feature[RING_F_RSS].limit = rss;

	/* take a breather then clean up driver data */
	msleep(100);
	return 0;
}

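/*
 * For illustration: with the sysfs interface, VFs are created and destroyed
 * by writing a VF count to the standard PCI attribute, which reaches
 * ixgbe_pci_sriov_configure() below (the device address is hypothetical):
 *
 *	# echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *	# echo 0 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 */
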
static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
	int err = 0;
	int i;
	int pre_existing_vfs = pci_num_vf(dev);

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		err = ixgbe_disable_sriov(adapter);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return num_vfs;

	if (err)
		return err;

	/* While the SR-IOV capability structure reports total VFs to be 64,
	 * we have to limit the actual number allocated based on two factors.
	 * First, we reserve some transmit/receive resources for the PF.
	 * Second, VMDQ also uses the same pools that SR-IOV does. We need to
	 * account for this, so that we don't accidentally allocate more VFs
	 * than we have available pools. The PCI bus driver already checks for
	 * other values out of range.
	 */
	if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VF_FUNCTIONS)
		return -EPERM;

	adapter->num_vfs = num_vfs;

	err = __ixgbe_enable_sriov(adapter);
	if (err)
		return err;

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_vf_configuration(dev, (i | 0x10000000));

	err = pci_enable_sriov(dev, num_vfs);
	if (err) {
		e_dev_warn("Failed to enable PCI sriov: %d\n", err);
		return err;
	}
	ixgbe_sriov_reinit(adapter);

	return num_vfs;
#else
	return 0;
#endif
}

static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
	int err;
#ifdef CONFIG_PCI_IOV
	u32 current_flags = adapter->flags;
#endif

	err = ixgbe_disable_sriov(adapter);

	/* Only reinit if no error and state changed */
#ifdef CONFIG_PCI_IOV
	if (!err && current_flags != adapter->flags)
		ixgbe_sriov_reinit(adapter);
#endif

	return err;
}

int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
	if (num_vfs == 0)
		return ixgbe_pci_sriov_disable(dev);
	else
		return ixgbe_pci_sriov_enable(dev, num_vfs);
}

static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
				   u32 *msgbuf, u32 vf)
{
	int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
		      >> IXGBE_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
	struct ixgbe_hw *hw = &adapter->hw;
	int i;
	u32 vector_bit;
	u32 vector_reg;
	u32 mta_reg;
	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	/* only so many hash values supported */
	entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);

	/*
	 * salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vfinfo->num_vf_mc_hashes = entries;

	/*
	 * VFs are limited to using the MTA hash table for their multicast
	 * addresses
	 */
	for (i = 0; i < entries; i++)
		vfinfo->vf_mc_hashes[i] = hash_list[i];

	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
		vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
		vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
		mta_reg |= (1 << vector_bit);
		IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
	}
	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}

#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct vf_data_storage *vfinfo;
	int i, j;
	u32 vector_bit;
	u32 vector_reg;
	u32 mta_reg;

	for (i = 0; i < adapter->num_vfs; i++) {
		u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
		vfinfo = &adapter->vfinfo[i];
		for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
			hw->addr_ctrl.mta_in_use++;
			vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
			vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
			mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
			mta_reg |= (1 << vector_bit);
			IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
		}

		if (vfinfo->num_vf_mc_hashes)
			vmolr |= IXGBE_VMOLR_ROMPE;
		else
			vmolr &= ~IXGBE_VMOLR_ROMPE;
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
	}

	/* Restore any VF macvlans */
	ixgbe_full_sync_mac_table(adapter);
}
#endif

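/*
 * A short worked example of the MTA indexing used by the two multicast
 * routines above (hash value illustrative). The low 12 bits of each 16-bit
 * hash select one bit in the 128 x 32-bit multicast table array:
 *
 *	hash       = 0x0fed
 *	vector_reg = (0x0fed >> 5) & 0x7f = 0x7f	(MTA register 127)
 *	vector_bit = 0x0fed & 0x1f = 0x0d		(bit 13)
 *
 * so the PF sets bit 13 of MTA[127] on the VF's behalf.
 */
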
static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
			     u32 vf)
{
	/* VLAN 0 is a special case, don't allow it to be removed */
	if (!vid && !add)
		return 0;

	return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
}

static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = msgbuf[1];
	u32 max_frs;

	/*
	 * For 82599EB we have to keep all PFs and VFs operating with
	 * the same max_frame value in order to avoid sending an oversize
	 * frame to a VF. To guarantee this is handled correctly in all
	 * cases, there are several special exceptions to take into
	 * account before we can enable the VF for receive.
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		struct net_device *dev = adapter->netdev;
		int pf_max_frame = dev->mtu + ETH_HLEN;
		u32 reg_offset, vf_shift, vfre;
		s32 err = 0;

#ifdef CONFIG_FCOE
		if (dev->features & NETIF_F_FCOE_MTU)
			pf_max_frame = max_t(int, pf_max_frame,
					     IXGBE_FCOE_JUMBO_FRAME_SIZE);

#endif /* CONFIG_FCOE */
		switch (adapter->vfinfo[vf].vf_api) {
		case ixgbe_mbox_api_11:
		case ixgbe_mbox_api_12:
			/*
			 * Version 1.1 supports jumbo frames on VFs if PF has
			 * jumbo frames enabled which means legacy VFs are
			 * disabled
			 */
			if (pf_max_frame > ETH_FRAME_LEN)
				break;
			/* fall through */
		default:
			/*
			 * If the PF or VF are running w/ jumbo frames enabled
			 * we need to shut down the VF Rx path as we cannot
			 * support jumbo frames on legacy VFs
			 */
			if ((pf_max_frame > ETH_FRAME_LEN) ||
			    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
				err = -EINVAL;
			break;
		}

		/* determine VF receive enable location */
		vf_shift = vf % 32;
		reg_offset = vf / 32;

		/* enable or disable receive depending on error */
		vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
		if (err)
			vfre &= ~(1 << vf_shift);
		else
			vfre |= 1 << vf_shift;
		IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);

		if (err) {
			e_err(drv, "VF max_frame %d out of range\n", max_frame);
			return err;
		}
	}

	/* reject frame sizes larger than the largest supported jumbo frame */
	if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
		e_err(drv, "VF max_frame %d out of range\n", max_frame);
		return -EINVAL;
	}

	/* pull current max frame size from hardware */
	max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
	max_frs &= IXGBE_MHADD_MFS_MASK;
	max_frs >>= IXGBE_MHADD_MFS_SHIFT;

	if (max_frs < max_frame) {
		max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
	}

	e_info(hw, "VF requests to change max MTU to %d\n", max_frame);

	return 0;
}

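/*
 * For reference, the 82599 policy implemented by ixgbe_set_vf_lpe() above
 * can be summarized as follows (a restatement of the code, not additional
 * behaviour; ETH_FRAME_LEN + ETH_FCS_LEN = 1518):
 *
 *	PF jumbo?	VF mailbox API	VF max_frame	VF Rx path
 *	yes		1.1 / 1.2	any		enabled
 *	yes		1.0 / unknown	any		disabled, -EINVAL
 *	no		any		<= 1518		enabled
 *	no		any		>  1518		disabled, -EINVAL
 *
 * In every case the request is still bounded by the
 * IXGBE_MAX_JUMBO_FRAME_SIZE check that follows the 82599-specific block.
 */
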
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	vmolr |= IXGBE_VMOLR_BAM;
	if (aupe)
		vmolr |= IXGBE_VMOLR_AUPE;
	else
		vmolr &= ~IXGBE_VMOLR_AUPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}

static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
}

static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
	u8 num_tcs = netdev_get_num_tc(adapter->netdev);

	/* add PF assigned VLAN or VLAN 0 */
	ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);

	/* reset offloads to defaults */
	ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);

	/* set outgoing tags for VFs */
	if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
		ixgbe_clear_vmvir(adapter, vf);
	} else {
		if (vfinfo->pf_qos || !num_tcs)
			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
					vfinfo->pf_qos, vf);
		else
			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
					adapter->default_up, vf);

		if (vfinfo->spoofchk_enabled)
			hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
	}

	/* reset multicast table array for VF */
	adapter->vfinfo[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the MTA with the new values */
	ixgbe_set_rx_mode(adapter->netdev);

	ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);

	/* reset VF api back to unknown */
	adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
}

static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
			    int vf, unsigned char *mac_addr)
{
	ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
	memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
	ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);

	return 0;
}

static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
				int vf, int index, unsigned char *mac_addr)
{
	struct list_head *pos;
	struct vf_macvlans *entry;

	if (index <= 1) {
		list_for_each(pos, &adapter->vf_mvs.l) {
			entry = list_entry(pos, struct vf_macvlans, l);
			if (entry->vf == vf) {
				entry->vf = -1;
				entry->free = true;
				entry->is_macvlan = false;
				ixgbe_del_mac_filter(adapter,
						     entry->vf_macvlan, vf);
			}
		}
	}

	/*
	 * If index was zero then we were asked to clear the uc list
	 * for the VF.  We're done.
	 */
	if (!index)
		return 0;

	entry = NULL;

	list_for_each(pos, &adapter->vf_mvs.l) {
		entry = list_entry(pos, struct vf_macvlans, l);
		if (entry->free)
			break;
	}

	/*
	 * If we traversed the entire list and didn't find a free entry
	 * then we're out of space on the RAR table.  It is also possible
	 * that entry is NULL because the original memory allocation for
	 * the list failed; that is not fatal, but it does mean we can't
	 * support VF requests for MACVLAN because we couldn't allocate
	 * the memory required for list management.
	 */
	if (!entry || !entry->free)
		return -ENOSPC;

	entry->free = false;
	entry->is_macvlan = true;
	entry->vf = vf;
	memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);

	ixgbe_add_mac_filter(adapter, mac_addr, vf);

	return 0;
}

int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	unsigned int vfn = (event_mask & 0x3f);
	bool enable = ((event_mask & 0x10000000U) != 0);

	if (enable)
		eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);

	return 0;
}

static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
				   u32 qde)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	int i;

	for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
		u32 reg;

		/* flush previous write */
		IXGBE_WRITE_FLUSH(hw);

		/* indicate to hardware that we want to set drop enable */
		reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE;
		reg |= i << IXGBE_QDE_IDX_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
	}
}

static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
	u32 reg, reg_offset, vf_shift;
	u32 msgbuf[4] = {0, 0, 0, 0};
	u8 *addr = (u8 *)(&msgbuf[1]);
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	int i;

	e_info(probe, "VF Reset msg received from vf %d\n", vf);

	/* reset the filters for the device */
	ixgbe_vf_reset_event(adapter, vf);

	/* set vf mac address */
	if (!is_zero_ether_addr(vf_mac))
		ixgbe_set_vf_mac(adapter, vf, vf_mac);

	vf_shift = vf % 32;
	reg_offset = vf / 32;

	/* enable transmit for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
	reg |= 1 << vf_shift;
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);

	/* force drop enable for all VF Rx queues */
	ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);

	/* enable receive for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
	reg |= 1 << vf_shift;
	/*
	 * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
	 * For more info take a look at ixgbe_set_vf_lpe
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		struct net_device *dev = adapter->netdev;
		int pf_max_frame = dev->mtu + ETH_HLEN;

#ifdef CONFIG_FCOE
		if (dev->features & NETIF_F_FCOE_MTU)
			pf_max_frame = max_t(int, pf_max_frame,
					     IXGBE_FCOE_JUMBO_FRAME_SIZE);

#endif /* CONFIG_FCOE */
		if (pf_max_frame > ETH_FRAME_LEN)
			reg &= ~(1 << vf_shift);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);

	/* enable VF mailbox for further messages */
	adapter->vfinfo[vf].clear_to_send = true;

	/* Enable counting of spoofed packets in the SSVPC register */
	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
	reg |= (1 << vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);

	/*
	 * Reset the VF's TDWBAL and TDWBAH registers
	 * which are not cleared by an FLR
	 */
	for (i = 0; i < q_per_pool; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0);
	}

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = IXGBE_VF_RESET;
	if (!is_zero_ether_addr(vf_mac)) {
		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
		memcpy(addr, vf_mac, ETH_ALEN);
	} else {
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
		dev_warn(&adapter->pdev->dev,
			 "VF %d has no MAC address assigned, you may have to assign one manually\n",
			 vf);
	}

	/*
	 * Piggyback the multicast filter type so VF can compute the
	 * correct vectors
	 */
	msgbuf[3] = hw->mac.mc_filter_type;
	ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

	return 0;
}

static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	u8 *new_mac = ((u8 *)(&msgbuf[1]));

	if (!is_valid_ether_addr(new_mac)) {
		e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
		return -1;
	}

	if (adapter->vfinfo[vf].pf_set_mac &&
	    !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
		e_warn(drv,
		       "VF %d attempted to override administratively set MAC address\n"
		       "Reload the VF driver to resume operations\n",
		       vf);
		return -1;
	}

	return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
}

static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
{
	u32 vlvf;
	s32 regindex;

	/* shortcut the special case */
	if (vlan == 0)
		return 0;

	/* Search for the vlan id in the VLVF entries */
	for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
		vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
		if ((vlvf & VLAN_VID_MASK) == vlan)
			break;
	}

	/* Return a negative value if not found */
	if (regindex >= IXGBE_VLVF_ENTRIES)
		regindex = -1;

	return regindex;
}

static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
	int err;
	s32 reg_ndx;
	u32 vlvf;
	u32 bits;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	if (adapter->vfinfo[vf].pf_vlan || tcs) {
		e_warn(drv,
		       "VF %d attempted to override administratively set VLAN configuration\n"
		       "Reload the VF driver to resume operations\n",
		       vf);
		return -1;
	}

	if (add)
		adapter->vfinfo[vf].vlan_count++;
	else if (adapter->vfinfo[vf].vlan_count)
		adapter->vfinfo[vf].vlan_count--;

	/* in case of promiscuous mode any VLAN filter set for a VF must
	 * also have the PF pool added to it.
	 */
	if (add && adapter->netdev->flags & IFF_PROMISC)
		err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));

	err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
	if (!err && adapter->vfinfo[vf].spoofchk_enabled)
		hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);

	/* Go through all the checks to see if the VLAN filter should
	 * be wiped completely.
	 */
	if (!add && adapter->netdev->flags & IFF_PROMISC) {
		reg_ndx = ixgbe_find_vlvf_entry(hw, vid);
		if (reg_ndx < 0)
			return err;
		vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx));
		/* See if any other pools are set for this VLAN filter
		 * entry other than the PF.
		 */
		if (VMDQ_P(0) < 32) {
			bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
			bits &= ~(1 << VMDQ_P(0));
			bits |= IXGBE_READ_REG(hw,
					       IXGBE_VLVFB(reg_ndx * 2 + 1));
		} else {
			bits = IXGBE_READ_REG(hw,
					      IXGBE_VLVFB(reg_ndx * 2 + 1));
			bits &= ~(1 << (VMDQ_P(0) - 32));
			bits |= IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
		}

		/* If the filter was removed then ensure PF pool bit
		 * is cleared if the PF only added itself to the pool
		 * because the PF is in promiscuous mode.
		 */
		if ((vlvf & VLAN_VID_MASK) == vid &&
		    !test_bit(vid, adapter->active_vlans) && !bits)
			ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
	}

	return err;
}

static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
				    u32 *msgbuf, u32 vf)
{
	u8 *new_mac = ((u8 *)(&msgbuf[1]));
	int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
		    IXGBE_VT_MSGINFO_SHIFT;
	int err;

	if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
		e_warn(drv,
		       "VF %d requested MACVLAN filter but is administratively denied\n",
		       vf);
		return -1;
	}

	/* A non-zero index indicates the VF is setting a filter */
	if (index) {
		if (!is_valid_ether_addr(new_mac)) {
			e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
			return -1;
		}

		/*
		 * If the VF is allowed to set MAC filters then turn off
		 * anti-spoofing to avoid false positives.
		 */
		if (adapter->vfinfo[vf].spoofchk_enabled)
			ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
	}

	err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
	if (err == -ENOSPC)
		e_warn(drv,
		       "VF %d has requested a MACVLAN filter but there is no space for it\n",
		       vf);

	return err < 0;
}

static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
				  u32 *msgbuf, u32 vf)
{
	int api = msgbuf[1];

	switch (api) {
	case ixgbe_mbox_api_10:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
		adapter->vfinfo[vf].vf_api = api;
		return 0;
	default:
		break;
	}

	e_info(drv, "VF %d requested invalid api version %u\n", vf, api);

	return -1;
}

static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
			       u32 *msgbuf, u32 vf)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	unsigned int default_tc = 0;
	u8 num_tcs = netdev_get_num_tc(dev);

	/* verify the PF is supporting the correct APIs */
	switch (adapter->vfinfo[vf].vf_api) {
	case ixgbe_mbox_api_20:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
		break;
	default:
		return -1;
	}

	/* only allow 1 Tx queue for bandwidth limiting */
	msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
	msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);

	/* if TCs > 1 determine which TC belongs to default user priority */
	if (num_tcs > 1)
		default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);

	/* notify VF of need for VLAN tag stripping, and correct queue */
	if (num_tcs)
		msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
	else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
		msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
	else
		msgbuf[IXGBE_VF_TRANS_VLAN] = 0;

	/* notify VF of default queue */
	msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;

	return 0;
}

static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
	u32 i, j;
	u32 *out_buf = &msgbuf[1];
	const u8 *reta = adapter->rss_indir_tbl;
	u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter);

	/* Check if operation is permitted */
	if (!adapter->vfinfo[vf].rss_query_enabled)
		return -EPERM;

	/* verify the PF is supporting the correct API */
	if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12)
		return -EOPNOTSUPP;

	/* This mailbox command is supported (required) only for 82599 and x540
	 * VFs which support up to 4 RSS queues. Therefore we will compress the
	 * RETA by saving only 2 bits from each entry. This way we will be able
	 * to transfer the whole RETA in a single mailbox operation.
	 */
	for (i = 0; i < reta_size / 16; i++) {
		out_buf[i] = 0;
		for (j = 0; j < 16; j++)
			out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j);
	}

	return 0;
}

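/*
 * Illustration of the packing above: with the 128-entry table used by
 * 82599/x540 and 2 bits per entry, the whole RETA fits in 128 / 16 = 8
 * mailbox dwords. Entry k lands in out_buf[k / 16] at bit offset
 * 2 * (k % 16); e.g. reta[17] = 2 is stored in bits 3:2 of out_buf[1].
 * A VF can undo the packing (a sketch, not part of the PF driver) with:
 *
 *	rss_queue = (out_buf[k / 16] >> (2 * (k % 16))) & 0x3;
 */
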
static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
				u32 *msgbuf, u32 vf)
{
	u32 *rss_key = &msgbuf[1];

	/* Check if the operation is permitted */
	if (!adapter->vfinfo[vf].rss_query_enabled)
		return -EPERM;

	/* verify the PF is supporting the correct API */
	if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12)
		return -EOPNOTSUPP;

	memcpy(rss_key, adapter->rss_key, sizeof(adapter->rss_key));

	return 0;
}

static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
				      u32 *msgbuf, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int xcast_mode = msgbuf[1];
	u32 vmolr, disable, enable;

	/* verify the PF is supporting the correct APIs */
	switch (adapter->vfinfo[vf].vf_api) {
	case ixgbe_mbox_api_12:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI &&
	    !adapter->vfinfo[vf].trusted)
		xcast_mode = IXGBEVF_XCAST_MODE_MULTI;

	if (adapter->vfinfo[vf].xcast_mode == xcast_mode)
		goto out;

	switch (xcast_mode) {
	case IXGBEVF_XCAST_MODE_NONE:
		disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
		enable = 0;
		break;
	case IXGBEVF_XCAST_MODE_MULTI:
		disable = IXGBE_VMOLR_MPE;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
		break;
	case IXGBEVF_XCAST_MODE_ALLMULTI:
		disable = 0;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
		break;
	default:
		return -EOPNOTSUPP;
	}

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
	vmolr &= ~disable;
	vmolr |= enable;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	adapter->vfinfo[vf].xcast_mode = xcast_mode;

out:
	msgbuf[1] = xcast_mode;

	return 0;
}

static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
	u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	struct ixgbe_hw *hw = &adapter->hw;
	s32 retval;

	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);

	if (retval) {
		pr_err("Error receiving message from VF\n");
		return retval;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
		return 0;

	/* flush the ack before we write any messages back */
	IXGBE_WRITE_FLUSH(hw);

	if (msgbuf[0] == IXGBE_VF_RESET)
		return ixgbe_vf_reset_msg(adapter, vf);

	/*
	 * until the vf completes a virtual function reset it should not be
	 * allowed to start any configuration.
	 */
	if (!adapter->vfinfo[vf].clear_to_send) {
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
		ixgbe_write_mbx(hw, msgbuf, 1, vf);
		return 0;
	}

	switch (msgbuf[0] & 0xFFFF) {
	case IXGBE_VF_SET_MAC_ADDR:
		retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_SET_MULTICAST:
		retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_SET_VLAN:
		retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_SET_LPE:
		retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_SET_MACVLAN:
		retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_GET_QUEUES:
		retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_GET_RETA:
		retval = ixgbe_get_vf_reta(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_GET_RSS_KEY:
		retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_UPDATE_XCAST_MODE:
		retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
		break;
	default:
		e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
		retval = IXGBE_ERR_MBX;
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;

	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;

	ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);

	return retval;
}

static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 msg = IXGBE_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!adapter->vfinfo[vf].clear_to_send)
		ixgbe_write_mbx(hw, &msg, 1, vf);
}

void ixgbe_msg_task(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->num_vfs; vf++) {
		/* process any reset requests */
		if (!ixgbe_check_for_rst(hw, vf))
			ixgbe_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!ixgbe_check_for_msg(hw, vf))
			ixgbe_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!ixgbe_check_for_ack(hw, vf))
			ixgbe_rcv_ack_from_vf(adapter, vf);
	}
}

void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* disable transmit and receive for all vfs */
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
}

static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ping;

	ping = IXGBE_PF_CONTROL_MSG;
	if (adapter->vfinfo[vf].clear_to_send)
		ping |= IXGBE_VT_MSGTYPE_CTS;
	ixgbe_write_mbx(hw, &ping, 1, vf);
}

void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0; i < adapter->num_vfs; i++) {
		ping = IXGBE_PF_CONTROL_MSG;
		if (adapter->vfinfo[i].clear_to_send)
			ping |= IXGBE_VT_MSGTYPE_CTS;
		ixgbe_write_mbx(hw, &ping, 1, i);
	}
}

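/*
 * For reference, the mailbox convention assumed throughout this file:
 * word 0 of a message carries the opcode in its low 16 bits, the
 * IXGBE_VT_MSGTYPE_ACK/NACK/CTS flags are OR'd into its high bits, and any
 * payload follows in the remaining words. A hypothetical VF request to set
 * a MAC address would be laid out as:
 *
 *	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
 *	memcpy(&msgbuf[1], mac_addr, ETH_ALEN);
 *
 * and the PF answers by echoing word 0 with ACK or NACK set, as done at
 * the end of ixgbe_rcv_msg_from_vf() above.
 */
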
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
		return -EINVAL;

	adapter->vfinfo[vf].pf_set_mac = true;
	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
	dev_info(&adapter->pdev->dev,
		 "Reload the VF driver to make this change effective.\n");
	if (test_bit(__IXGBE_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev,
			 "The VF MAC address has been set, but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev,
			 "Bring the PF device up before attempting to use the VF device.\n");
	}
	return ixgbe_set_vf_mac(adapter, vf, mac);
}

static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
				  u16 vlan, u8 qos)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
	if (err)
		goto out;

	ixgbe_set_vmvir(adapter, vlan, qos, vf);
	ixgbe_set_vmolr(hw, vf, false);
	if (adapter->vfinfo[vf].spoofchk_enabled)
		hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
	adapter->vfinfo[vf].vlan_count++;

	/* enable hide vlan on X550 */
	if (hw->mac.type >= ixgbe_mac_X550)
		ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE |
				IXGBE_QDE_HIDE_VLAN);

	adapter->vfinfo[vf].pf_vlan = vlan;
	adapter->vfinfo[vf].pf_qos = qos;
	dev_info(&adapter->pdev->dev,
		 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
	if (test_bit(__IXGBE_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev,
			 "The VF VLAN has been set, but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev,
			 "Bring the PF device up before attempting to use the VF device.\n");
	}

out:
	return err;
}

static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	err = ixgbe_set_vf_vlan(adapter, false,
				adapter->vfinfo[vf].pf_vlan, vf);
	ixgbe_clear_vmvir(adapter, vf);
	ixgbe_set_vmolr(hw, vf, true);
	hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
	if (adapter->vfinfo[vf].vlan_count)
		adapter->vfinfo[vf].vlan_count--;

	/* disable hide VLAN on X550 */
	if (hw->mac.type >= ixgbe_mac_X550)
		ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);

	adapter->vfinfo[vf].pf_vlan = 0;
	adapter->vfinfo[vf].pf_qos = 0;

	return err;
}

int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan || qos) {
		/* Check if there is already a port VLAN set, if so
		 * we have to delete the old one first before we
		 * can set the new one.  The usage model had
		 * previously assumed the user would delete the
		 * old port VLAN before setting a new one, but this
		 * is not necessarily the case.
		 */
		if (adapter->vfinfo[vf].pf_vlan)
			err = ixgbe_disable_port_vlan(adapter, vf);
		if (err)
			goto out;
		err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos);
	} else {
		err = ixgbe_disable_port_vlan(adapter, vf);
	}

out:
	return err;
}

static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
{
	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_100_FULL:
		return 100;
	case IXGBE_LINK_SPEED_1GB_FULL:
		return 1000;
	case IXGBE_LINK_SPEED_10GB_FULL:
		return 10000;
	default:
		return 0;
	}
}

static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf)
{
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_hw *hw = &adapter->hw;
	u32 bcnrc_val = 0;
	u16 queue, queues_per_pool;
	u16 tx_rate = adapter->vfinfo[vf].tx_rate;

	if (tx_rate) {
		/* start with base link speed value */
		bcnrc_val = adapter->vf_rate_link_speed;

		/* Calculate the rate factor values to set */
		bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
		bcnrc_val /= tx_rate;

		/* clear everything but the rate factor */
		bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
			     IXGBE_RTTBCNRC_RF_DEC_MASK;

		/* enable the rate scheduler */
		bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
	}

	/*
	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
	 * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
	 * and 0x004 otherwise.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
		break;
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
		break;
	default:
		break;
	}

	/* determine how many queues per pool based on VMDq mask */
	queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);

	/* write value for all Tx queues belonging to VF */
	for (queue = 0; queue < queues_per_pool; queue++) {
		unsigned int reg_idx = (vf * queues_per_pool) + queue;

		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx);
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
	}
}

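/*
 * Two illustrative calculations for ixgbe_set_vf_rate_limit() above (the
 * numbers are examples only, assuming the 82599 8-queues-per-pool VMDq
 * mask of 0x78):
 *
 *	queues_per_pool = __ALIGN_MASK(1, ~0x78) = 8
 *
 * so vf = 3 covers absolute Tx queue indices 24..31. The RTTBCNRC value is
 * a fixed-point "rate factor" of link_speed / tx_rate; limiting a VF to
 * 1000 Mb/s on a 10000 Mb/s link gives
 *
 *	bcnrc_val = (10000 << IXGBE_RTTBCNRC_RF_INT_SHIFT) / 1000
 *
 * i.e. a factor of 10.0, with the integer part in the RF_INT field, any
 * remainder in RF_DEC, and IXGBE_RTTBCNRC_RS_ENA enabling the per-queue
 * rate scheduler.
 */
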
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
{
	int i;

	/* VF Tx rate limit was not set */
	if (!adapter->vf_rate_link_speed)
		return;

	if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) {
		adapter->vf_rate_link_speed = 0;
		dev_info(&adapter->pdev->dev,
			 "Link speed has been changed. VF Transmit rate is disabled\n");
	}

	for (i = 0; i < adapter->num_vfs; i++) {
		if (!adapter->vf_rate_link_speed)
			adapter->vfinfo[i].tx_rate = 0;

		ixgbe_set_vf_rate_limit(adapter, i);
	}
}

int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
			int max_tx_rate)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int link_speed;

	/* verify VF is active */
	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* verify link is up */
	if (!adapter->link_up)
		return -EINVAL;

	/* verify we are linked at 10Gbps */
	link_speed = ixgbe_link_mbps(adapter);
	if (link_speed != 10000)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* rate limit cannot be less than 10 Mbps or greater than link speed */
	if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed)))
		return -EINVAL;

	/* store values */
	adapter->vf_rate_link_speed = link_speed;
	adapter->vfinfo[vf].tx_rate = max_tx_rate;

	/* update hardware configuration */
	ixgbe_set_vf_rate_limit(adapter, vf);

	return 0;
}

int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int vf_target_reg = vf >> 3;
	int vf_target_shift = vf % 8;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 regval;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	adapter->vfinfo[vf].spoofchk_enabled = setting;

	regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
	regval &= ~(1 << vf_target_shift);
	regval |= (setting << vf_target_shift);
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);

	if (adapter->vfinfo[vf].vlan_count) {
		vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT;
		regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
		regval &= ~(1 << vf_target_shift);
		regval |= (setting << vf_target_shift);
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
	}

	return 0;
}

int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
				  bool setting)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* This operation is currently supported only for 82599 and x540
	 * devices.
	 */
	if (adapter->hw.mac.type < ixgbe_mac_82599EB ||
	    adapter->hw.mac.type >= ixgbe_mac_X550)
		return -EOPNOTSUPP;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	adapter->vfinfo[vf].rss_query_enabled = setting;

	return 0;
}

int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* nothing to do */
	if (adapter->vfinfo[vf].trusted == setting)
		return 0;

	adapter->vfinfo[vf].trusted = setting;

	/* reset VF to reconfigure features */
	adapter->vfinfo[vf].clear_to_send = false;
	ixgbe_ping_vf(adapter, vf);

	e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not ");
"" : "not "); 1510 1511 return 0; 1512 } 1513 1514 int ixgbe_ndo_get_vf_config(struct net_device *netdev, 1515 int vf, struct ifla_vf_info *ivi) 1516 { 1517 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1518 if (vf >= adapter->num_vfs) 1519 return -EINVAL; 1520 ivi->vf = vf; 1521 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); 1522 ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate; 1523 ivi->min_tx_rate = 0; 1524 ivi->vlan = adapter->vfinfo[vf].pf_vlan; 1525 ivi->qos = adapter->vfinfo[vf].pf_qos; 1526 ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; 1527 ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled; 1528 ivi->trusted = adapter->vfinfo[vf].trusted; 1529 return 0; 1530 } 1531