/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2015 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, see <http://www.gnu.org/licenses/>.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "vf.h"
#include "ixgbevf.h"

/**
 * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware by clearing the adapter_stopped flag.  For a VF the
 * on-chip counters, receive address registers, multicast table, VLAN filter
 * table and link/flow-control settings are all owned by the PF, so nothing
 * else needs to be done here; the transmit and receive units are left
 * disabled and uninitialized.
 **/
static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
{
	/* Clear adapter stopped flag */
	hw->adapter_stopped = false;

	return 0;
}

/**
 * ixgbevf_init_hw_vf - virtual function hardware initialization
 * @hw: pointer to hardware structure
 *
 * Initialize the hardware by starting it and caching the device MAC address
 **/
static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
{
	s32 status = hw->mac.ops.start_hw(hw);

	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

	return status;
}
/**
 * ixgbevf_reset_hw_vf - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks and
 * clears all interrupts.
 **/
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/* reset the api version */
	hw->api_version = ixgbe_mbox_api_10;

	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
	IXGBE_WRITE_FLUSH(hw);

	/* we cannot reset while the RSTI / RSTD bits are asserted */
	while (!mbx->ops.check_for_rst(hw) && timeout) {
		timeout--;
		udelay(5);
	}

	if (!timeout)
		return IXGBE_ERR_RESET_FAILED;

	/* mailbox timeout can now become active */
	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;

	msgbuf[0] = IXGBE_VF_RESET;
	mbx->ops.write_posted(hw, msgbuf, 1);

	mdelay(10);

	/* set our "perm_addr" based on info provided by the PF; also set up
	 * the mc_filter_type, which is piggybacked on the MAC address in
	 * word 3
	 */
	ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
	if (ret_val)
		return ret_val;

	/* New versions of the PF may NACK the reset return message
	 * to indicate that no MAC address has yet been assigned for
	 * the VF.
	 */
	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
		return IXGBE_ERR_INVALID_MAC_ADDR;

	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
		ether_addr_copy(hw->mac.perm_addr, addr);

	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];

	return 0;
}
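/* Reply layout handled above: word 0 is IXGBE_VF_RESET ORed with either
 * IXGBE_VT_MSGTYPE_ACK or IXGBE_VT_MSGTYPE_NACK; on an ACK, words 1-2 carry
 * the 6-byte permanent MAC address assigned by the PF, and word
 * IXGBE_VF_MC_TYPE_WORD carries the multicast filter type (the MO setting
 * used by ixgbevf_mta_vector() below).
 */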
/**
 * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 * disables transmit and receive units. The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
 **/
static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
{
	u32 number_of_queues;
	u32 reg_val;
	u16 i;

	/* Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit by stopping each queue */
	number_of_queues = hw->mac.max_rx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		if (reg_val & IXGBE_RXDCTL_ENABLE) {
			reg_val &= ~IXGBE_RXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
		}
	}

	IXGBE_WRITE_FLUSH(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	number_of_queues = hw->mac.max_tx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		if (reg_val & IXGBE_TXDCTL_ENABLE) {
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
		}
	}

	return 0;
}

/**
 * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
 * @hw: pointer to hardware structure
 * @mc_addr: the multicast address
 *
 * Extracts the 12 bits from a multicast address that determine which
 * bit-vector to set in the multicast table. The hardware uses 12 bits of
 * incoming Rx multicast addresses to determine the bit-vector to check in
 * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
 * by the MO field of the MCSTCTRL. The MO field is set during initialization
 * to mc_filter_type.
 **/
static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:	/* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:	/* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:	/* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:	/* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:	/* Invalid mc_filter_type */
		break;
	}

	/* vector can only be 12 bits or the boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
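/* Worked example (illustrative): for the all-systems multicast address
 * 01:00:5e:00:00:01 with mc_filter_type 0 (bits [47:36]), mc_addr[4] is 0x00
 * and mc_addr[5] is 0x01, so the function returns
 * (0x00 >> 4) | (0x01 << 4) = 0x010, i.e. hash value 16.  The VF does not
 * program the MTA itself; these 12-bit values are handed to the PF in an
 * IXGBE_VF_SET_MULTICAST mailbox message (see
 * ixgbevf_update_mc_addr_list_vf() below).
 */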
/**
 * ixgbevf_get_mac_addr_vf - Read device MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to storage for retrieved MAC address
 **/
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
	ether_addr_copy(mac_addr, hw->mac.perm_addr);

	return 0;
}

static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 msgbuf[3];
	u8 *msg_addr = (u8 *)(&msgbuf[1]);
	s32 ret_val;

	memset(msgbuf, 0, sizeof(msgbuf));
	/* If index is one then this is the start of a new list and needs
	 * indication to the PF so it can do its own list management.
	 * If it is zero then that tells the PF to just clear all of
	 * this VF's macvlans and there is no new list.
	 */
	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
	if (addr)
		ether_addr_copy(msg_addr, addr);
	ret_val = mbx->ops.write_posted(hw, msgbuf, 3);

	if (!ret_val)
		ret_val = mbx->ops.read_posted(hw, msgbuf, 3);

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	if (!ret_val)
		if (msgbuf[0] ==
		    (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
			ret_val = -ENOMEM;

	return ret_val;
}

/**
 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents
 * @hw: pointer to the HW structure
 * @reta: buffer to fill with RETA contents
 * @num_rx_queues: number of Rx queues configured for this port
 *
 * The "reta" buffer must be large enough to hold the 128 entries of the
 * redirection table, one u32 per entry.
 *
 * Returns 0 on success, or -EOPNOTSUPP if the mailbox API does not support
 * this operation.
 */
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
{
	int err, i, j;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u32 *hw_reta = &msgbuf[1];
	u32 mask = 0;

	/* We have to use a mailbox for 82599 and x540 devices only.
	 * For these devices RETA has 128 entries.
	 * Also these VFs support up to 4 RSS queues. Therefore the PF will
	 * compress 16 RETA entries in each DWORD giving 2 bits to each entry.
	 */
	int dwords = IXGBEVF_82599_RETA_SIZE / 16;

	/* We support RSS querying for 82599 and x540 devices only.
	 * Thus return an error if the API doesn't support RETA querying or
	 * querying is not supported for this device type.
	 */
	if (hw->api_version != ixgbe_mbox_api_12 ||
	    hw->mac.type >= ixgbe_mac_X550_vf)
		return -EOPNOTSUPP;

	msgbuf[0] = IXGBE_VF_GET_RETA;

	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

	if (err)
		return err;

	err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	/* ixgbevf doesn't support more than 2 queues at the moment */
	if (num_rx_queues > 1)
		mask = 0x1;

	for (i = 0; i < dwords; i++)
		for (j = 0; j < 16; j++)
			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;

	return 0;
}
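/* Unpacking example (illustrative): with 128 RETA entries packed 2 bits each,
 * the PF's reply occupies IXGBEVF_82599_RETA_SIZE / 16 = 8 payload dwords.
 * Entry (i * 16 + j) of the table lives in bits [2j+1:2j] of hw_reta[i], so
 * e.g. reta[5] comes from bits [11:10] of hw_reta[0].  Because this VF driver
 * uses at most two RSS queues, only the low bit of each entry is kept
 * (mask = 0x1); with a single Rx queue the mask stays 0 and every entry reads
 * back as queue 0.
 */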
/**
 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 * @hw: pointer to the HW structure
 * @rss_key: buffer to fill with RSS Hash Key contents
 *
 * The "rss_key" buffer should be big enough to contain 10 registers.
 *
 * Returns 0 on success, or -EOPNOTSUPP if the mailbox API does not support
 * this operation.
 */
int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
{
	int err;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];

	/* We currently support the RSS Random Key retrieval for 82599 and x540
	 * devices only.
	 *
	 * Thus return an error if the API doesn't support RSS Random Key
	 * retrieval or if the operation is not supported for this device type.
	 */
	if (hw->api_version != ixgbe_mbox_api_12 ||
	    hw->mac.type >= ixgbe_mac_X550_vf)
		return -EOPNOTSUPP;

	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

	if (err)
		return err;

	err = hw->mbx.ops.read_posted(hw, msgbuf, 11);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);

	return 0;
}

/**
 * ixgbevf_set_rar_vf - set device MAC address
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: Unused in this implementation
 **/
static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
			      u32 vmdq)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 msgbuf[3];
	u8 *msg_addr = (u8 *)(&msgbuf[1]);
	s32 ret_val;

	memset(msgbuf, 0, sizeof(msgbuf));
	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
	ether_addr_copy(msg_addr, addr);
	ret_val = mbx->ops.write_posted(hw, msgbuf, 3);

	if (!ret_val)
		ret_val = mbx->ops.read_posted(hw, msgbuf, 3);

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* if nacked the address was rejected, use "perm_addr" */
	if (!ret_val &&
	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
		ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);

	return ret_val;
}

static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
				       u32 *msg, u16 size)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 retmsg[IXGBE_VFMAILBOX_SIZE];
	s32 retval = mbx->ops.write_posted(hw, msg, size);

	if (!retval)
		mbx->ops.read_posted(hw, retmsg, size);
}

/**
 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @netdev: pointer to net device structure
 *
 * Updates the Multicast Table Array.
 **/
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					  struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u16 *vector_list = (u16 *)&msgbuf[1];
	u32 cnt, i;

	/* Each entry in the list uses one 16-bit word.  We have 30
	 * 16-bit words available in our HW msg buffer (minus 1 for the
	 * msg type).  That's 30 hash values if we pack 'em right.  If
	 * there are more than 30 MC addresses to add then punt the
	 * extras for now and then add code to handle more than 30 later.
	 * It would be unusual for a server to request that many multicast
	 * addresses except for in large enterprise network environments.
	 */

	cnt = netdev_mc_count(netdev);
	if (cnt > 30)
		cnt = 30;
	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == cnt)
			break;
		if (is_link_local_ether_addr(ha->addr))
			continue;

		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
	}

	ixgbevf_write_msg_read_ack(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);

	return 0;
}
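/* Message layout (as built above): word 0 carries IXGBE_VF_SET_MULTICAST with
 * the address count in the MSGINFO field; words 1..15 carry up to 30 12-bit
 * MTA hash values, stored as 16-bit values packed two per 32-bit mailbox
 * word.  The PF expands these into its multicast table on behalf of the VF.
 */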
/**
 * ixgbevf_update_xcast_mode - Update Multicast mode
 * @hw: pointer to the HW structure
 * @netdev: pointer to net device structure
 * @xcast_mode: new multicast mode
 *
 * Updates the Multicast Mode of VF.
 **/
static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
				     struct net_device *netdev, int xcast_mode)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 msgbuf[2];
	s32 err;

	switch (hw->api_version) {
	case ixgbe_mbox_api_12:
		break;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
	msgbuf[1] = xcast_mode;

	err = mbx->ops.write_posted(hw, msgbuf, 2);
	if (err)
		return err;

	err = mbx->ops.read_posted(hw, msgbuf, 2);
	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	return 0;
}
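/* Note: the requested mode is passed through verbatim in word 1 (callers
 * typically use one of the IXGBEVF_XCAST_MODE_* values, for example
 * IXGBEVF_XCAST_MODE_ALLMULTI when the netdev requests all-multicast).
 * The message exists only in mailbox API 1.2; on older APIs the function
 * returns -EOPNOTSUPP without touching the mailbox, and a PF that refuses
 * the requested mode NACKs it, which is reported as -EPERM.
 */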
/**
 * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table entry
 * @hw: pointer to the HW structure
 * @vlan: 12-bit VLAN ID
 * @vind: unused by VF drivers
 * @vlan_on: if true then set bit, else clear bit
 **/
static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			       bool vlan_on)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 msgbuf[2];
	s32 err;

	msgbuf[0] = IXGBE_VF_SET_VLAN;
	msgbuf[1] = vlan;
	/* Setting the 8-bit MSGINFO field to TRUE indicates "add" */
	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;

	err = mbx->ops.write_posted(hw, msgbuf, 2);
	if (err)
		goto mbx_err;

	err = mbx->ops.read_posted(hw, msgbuf, 2);
	if (err)
		goto mbx_err;

	/* remove extra bits from the message */
	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);

	if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
		err = IXGBE_ERR_INVALID_ARGUMENT;

mbx_err:
	return err;
}

/**
 * ixgbevf_setup_mac_link_vf - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @speed: Unused in this implementation
 * @autoneg: Unused in this implementation
 * @autoneg_wait_to_complete: Unused in this implementation
 *
 * Do nothing and return success.  VF drivers are not allowed to change
 * global settings.  Maintained for driver compatibility.
 **/
static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed speed, bool autoneg,
				     bool autoneg_wait_to_complete)
{
	return 0;
}

/**
 * ixgbevf_check_mac_link_vf - Get link/speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *link_up,
				     bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 links_reg;
	u32 in_msg = 0;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if the read failed it could just be a mailbox collision, best wait
	 * until we are called again and don't report an error
	 */
	if (mbx->ops.read(hw, &in_msg, 1))
		goto out;

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not CTS; if it is a NACK we must have lost CTS status */
		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
			ret_val = -1;
		goto out;
	}

	/* the pf is talking, if we timed out in the past we reinit */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return ret_val;
}

/**
 * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
 * @hw: pointer to the HW structure
 * @max_size: value to assign to max frame size
 **/
void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
{
	u32 msgbuf[2];

	msgbuf[0] = IXGBE_VF_SET_LPE;
	msgbuf[1] = max_size;
	ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
}

/**
 * ixgbevf_negotiate_api_version - Negotiate supported API version
 * @hw: pointer to the HW structure
 * @api: integer containing requested API version
 **/
int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
{
	int err;
	u32 msg[3];

	/* Negotiate the mailbox API version */
	msg[0] = IXGBE_VF_API_NEGOTIATE;
	msg[1] = api;
	msg[2] = 0;
	err = hw->mbx.ops.write_posted(hw, msg, 3);

	if (!err)
		err = hw->mbx.ops.read_posted(hw, msg, 3);

	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* Store value and return 0 on success */
		if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
			hw->api_version = api;
			return 0;
		}

		err = IXGBE_ERR_INVALID_ARGUMENT;
	}

	return err;
}
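/* A minimal negotiation sketch (illustrative only; the actual probe path
 * lives in ixgbevf_main.c and also takes the mailbox lock): a caller walks
 * down from the newest API it knows until the PF ACKs one, e.g.
 *
 *	static const int api[] = { ixgbe_mbox_api_12, ixgbe_mbox_api_11,
 *				   ixgbe_mbox_api_10 };
 *	int i;
 *
 *	for (i = 0; i < ARRAY_SIZE(api); i++)
 *		if (!ixgbevf_negotiate_api_version(hw, api[i]))
 *			break;
 *
 * If every attempt fails, the VF keeps the default ixgbe_mbox_api_10 set by
 * ixgbevf_reset_hw_vf().
 */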
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
		       unsigned int *default_tc)
{
	int err;
	u32 msg[5];

	/* do nothing if API doesn't support ixgbevf_get_queues */
	switch (hw->api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
		break;
	default:
		return 0;
	}

	/* Fetch queue configuration from the PF */
	msg[0] = IXGBE_VF_GET_QUEUE;
	msg[1] = msg[2] = msg[3] = msg[4] = 0;
	err = hw->mbx.ops.write_posted(hw, msg, 5);

	if (!err)
		err = hw->mbx.ops.read_posted(hw, msg, 5);

	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* if we didn't get an ACK there must have been
		 * some sort of mailbox error so we should treat it
		 * as such
		 */
		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
			return IXGBE_ERR_MBX;

		/* record and validate values from message */
		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
		if (hw->mac.max_tx_queues == 0 ||
		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;

		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
		if (hw->mac.max_rx_queues == 0 ||
		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;

		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
		/* in case of unknown state assume we cannot tag frames */
		if (*num_tcs > hw->mac.max_rx_queues)
			*num_tcs = 1;

		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
		/* default to queue 0 on out-of-bounds queue number */
		if (*default_tc >= hw->mac.max_tx_queues)
			*default_tc = 0;
	}

	return err;
}

static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
	.init_hw = ixgbevf_init_hw_vf,
	.reset_hw = ixgbevf_reset_hw_vf,
	.start_hw = ixgbevf_start_hw_vf,
	.get_mac_addr = ixgbevf_get_mac_addr_vf,
	.stop_adapter = ixgbevf_stop_hw_vf,
	.setup_link = ixgbevf_setup_mac_link_vf,
	.check_link = ixgbevf_check_mac_link_vf,
	.set_rar = ixgbevf_set_rar_vf,
	.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
	.update_xcast_mode = ixgbevf_update_xcast_mode,
	.set_uc_addr = ixgbevf_set_uc_addr_vf,
	.set_vfta = ixgbevf_set_vfta_vf,
};

const struct ixgbevf_info ixgbevf_82599_vf_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_mac_ops,
};
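/* These per-MAC ixgbevf_info structures are declared in ixgbevf.h; the core
 * module is expected to select one at probe time based on the PCI device ID.
 * All of them share the same mailbox-driven ixgbevf_mac_ops defined above.
 */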