/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum i40e_virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct i40e_virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
	if (vf->link_forced) {
		pfe.event_data.link_event.link_status = vf->link_up;
		pfe.event_data.link_event.link_speed =
			(vf->link_up ? I40E_LINK_SPEED_40GB : 0);
	} else {
		pfe.event_data.link_event.link_status =
			ls->link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed = ls->link_speed;
	}
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct i40e_virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;

	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct i40e_virtchnl_pf_event), NULL);
}

/***********************misc routines*****************************/

/**
 * i40e_vc_disable_vf
 * @pf: pointer to the PF info
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset
 **/
static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
{
	i40e_vc_notify_vf_reset(vf);
	i40e_reset_vf(vf, false);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

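/* A recurring idiom in this file, spelled out once: virtchnl messages
 * and the vf_id values above are PF-relative, while the mailbox and
 * the global registers want the absolute id, i.e.
 * abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id.  With a
 * hypothetical vf_base_id of 64, VF 3 of this PF is absolute VF 67.
 */
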
/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct i40e_virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
			((pf->hw.func_caps.num_msix_vectors_vf - 1) *
			 vf->vf_id) + (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

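	/* Layout sketch, assuming I40E_VIRTCHNL_SUPPORTED_QTYPES is 2
	 * (as in this driver family): linklistmap interleaves the two
	 * queue types per VSI queue, so bit (2 * q) stands for RX queue
	 * q and bit (2 * q + 1) for TX queue q.  For example,
	 * rxq_map = 0x3 and txq_map = 0x1 yield linklistmap = 0b0111:
	 * RX q0, TX q0, RX q1.  The walk below chains these queues into
	 * the vector's linked list in exactly that order.
	 */
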
	next_q = find_first_bit(&linklistmap,
				(I40E_MAX_VSI_QP *
				 I40E_VIRTCHNL_SUPPORTED_QTYPES));
	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap,
				       (I40E_MAX_VSI_QP *
					I40E_VIRTCHNL_SUPPORTED_QTYPES),
				       next_q + 1);
		if (next_q <
		    (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is the same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the vf is running in polling mode and using interrupt zero,
	 * need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

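/* A sketch of the HMC programming pattern used above and below (the
 * ring-base unit is inferred from the division by 128; treat it as an
 * assumption): queue contexts live in host memory managed by the HMC,
 * so configuration is clear-context, fill the i40e_hmc_obj_* struct,
 * set-context, then bind the queue to the VF via QTX_CTL.  The base
 * field is the ring's DMA address in 128-byte units, e.g. a ring at
 * DMA address 0x1000 gives tx_ctx.base = 0x1000 / 128 = 32.
 */
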
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set splitalways mode 10b */
		rx_ctx.dtype = 0x2;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

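/* Rx sizing limits enforced above, summarized (the unit interpretation
 * of the HBUFF/DBUFF shifts is an assumption based on the bounds):
 * header buffer at most 2KB - 64 = 1984 bytes, data buffer at most
 * 16KB - 128 = 16256 bytes, and max_pkt_size in [64, 16K).  The
 * context fields store these sizes right-shifted, i.e. in power-of-two
 * byte units, so a 2048-byte data buffer with a shift of 7 would be
 * programmed as 2048 >> 7 = 16.
 */
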
/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @type: type of VSI to allocate
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}
	if (type == I40E_VSI_SRIOV) {
		u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_list_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
					    vf->port_vlan_id ?
					    vf->port_vlan_id : -1,
					    true, false);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		f = i40e_add_filter(vsi, brdcast,
				    vf->port_vlan_id ? vf->port_vlan_id : -1,
				    true, false);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	/* program mac filter */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  vf->tx_rate / 50, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_queue_pairs = 0;
	int j;

	/* Tell the hardware we're using noncontiguous mapping.  HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	/* map PF queues to VF queues */
	for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);

		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
		total_queue_pairs++;
	}

	/* map PF queues to VSI */
	for (j = 0; j < 7; j++) {
		if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
			reg = 0x07FF07FF;	/* unused */
		} else {
			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
							  j * 2);
			reg = qid;
			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
						      (j * 2) + 1);
			reg |= qid << 16;
		}
		wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
	}

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

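/* VSILAN_QTABLE packing used in i40e_enable_vf_mappings, spelled out
 * (a reading of the code above, with the 0x7FF interpretation as an
 * assumption): each 32-bit register carries two PF queue ids, queue
 * 2j in bits 15:0 and queue 2j + 1 in bits 31:16, so seven registers
 * cover up to 14 queue pairs.  With alloc_queue_pairs = 4 and PF
 * queues 96..99, j = 0 gets (97 << 16) | 96, j = 1 gets
 * (99 << 16) | 98, and j = 2..6 are written as 0x07FF07FF, with 0x7FF
 * marking an unused slot.
 */
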
/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, msix_vf;

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is the same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						       (vf->vf_id))
						      + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is the same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping
	 * track of the resources
	 */
	vf->num_queue_pairs = 0;
	vf->vf_states = 0;
	clear_bit(I40E_VF_STAT_INIT, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
	set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STAT_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset.  Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

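/* How the quiesce works, for reference (the config-space reading is an
 * inference from the constants): CIAA/CIAD form an indirect window
 * into a VF's PCI config space.  0xAA is the byte offset of the PCIe
 * Device Status register in this device's config layout, and 0x20 is
 * its Transactions Pending bit (bit 5), so the loop above polls up to
 * 100 microseconds for the VF's outstanding non-posted requests to
 * drain before the reset proceeds.
 */
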
/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * reset the VF
 **/
void i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	int i;
	u32 reg;

	if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
		return;

	/* warn the VF */
	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.  Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
	/* clear the reset bit in the VPGEN_VFRTRIG reg */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* On initial reset, we won't have any queues */
	if (vf->lan_vsi_idx == 0)
		goto complete_reset;

	i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false);
complete_reset:
	/* reallocate VF resources to reset the VSI state */
	i40e_free_vf_res(vf);
	if (!i40e_alloc_vf_res(vf)) {
		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
	}
	/* tell the VF the reset is done */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, &pf->state);
}

/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;
	while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
		usleep_range(1000, 2000);

	for (i = 0; i < pf->num_alloc_vfs; i++)
		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
			i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
					       false);

	/* Disable IOV before freeing resources.  This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	msleep(20); /* let any messages in transit get finished up */

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

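	/* GLGEN_VFLRSTAT is a bitmap over absolute VF ids, 32 per
	 * register; the index math below (and in
	 * i40e_vc_process_vflr_event) is reg_idx = abs_id / 32,
	 * bit_idx = abs_id % 32.  For example, with vf_base_id = 32 and
	 * vf_id = 8, abs_id = 40 lands in register 1, bit 8.  The base
	 * id varies by part, so the numbers are illustrative only.
	 */
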
	/* This check is for when the driver is unloaded while VFs are
	 * assigned.  Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFs.  Without this, VFs will fail
		 * to work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, &pf->state);
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;
		/* VF resources get allocated during reset */
		i40e_reset_vf(&vfs[i], false);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

#endif

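/* The usual entry point into the allocation path is the standard
 * SR-IOV sysfs knob rather than a driver-private interface; for
 * example (illustrative device address):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:02:00.0/sriov_numvfs
 *
 * lands in i40e_pci_sriov_configure() below, which switches the device
 * into VEB mode if needed and then calls i40e_pci_sriov_enable().
 */
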
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, &pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs.  Called when the user updates the
 * number of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf,
					   BIT_ULL(__I40E_PF_RESET_REQUESTED));
		}
		return i40e_pci_sriov_enable(pdev, num_vfs);
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		return -EINVAL;
	}
	return 0;
}

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_err(&pf->pdev->dev, "VF %d failed opcode %d, error: %d\n",
			vf->vf_id, v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received. */
		vf->num_invalid_msgs = 0;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_err(&pf->pdev->dev,
			"Unable to send the message to VF %d aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum i40e_virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

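/* Version negotiation in a nutshell (a summary of the handlers below,
 * with the macro definitions assumed): the VF reports its virtchnl
 * major/minor in the VERSION message, and VF_IS_V10()/VF_IS_V11() test
 * the stored vf->vf_ver.  A 1.1 VF goes on to pass capability flags in
 * GET_VF_RESOURCES, while a 1.0 VF predates those flags, so the PF
 * answers it with the NO_VF_CAPS minor and later assumes the legacy
 * L2/RSS_REG/VLAN capability set on its behalf.
 */
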
/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct i40e_virtchnl_version_info info = {
		I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(vf))
		info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct
					     i40e_virtchnl_version_info));
}

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct i40e_virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int i = 0, len = 0;
	int num_vsis = 1;
	int ret;

	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct i40e_virtchnl_vf_resource) +
	       sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(vf))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
				  I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
		if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
			vfres->vf_offload_flags |=
				I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
	} else {
		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING)
		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	if (vf->lan_vsi_idx) {
		vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
		vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs;
		/* VFs only use TC 0 */
		vfres->vsi_res[i].qset_handle =
					le16_to_cpu(vsi->info.qs_handle[0]);
		ether_addr_copy(vfres->vsi_res[i].default_mac_addr,
				vf->default_lan_addr.addr);
		i++;
	}
	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself,
 * unlike other virtchnl messages, PF driver
 * doesn't send the response back to the VF
 **/
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
		i40e_reset_vf(vf, false);
}

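/* Shape of the GET_VF_RESOURCES reply built above (a reading of the
 * length computation, not an additional spec): the buffer is a
 * i40e_virtchnl_vf_resource header followed by num_vsis trailing
 * i40e_virtchnl_vsi_resource entries.  This driver always reports
 * num_vsis = 1, so len = sizeof(vf_resource) + 1 * sizeof(vsi_resource)
 * and vsi_res[0] describes the single SR-IOV LAN VSI.
 */
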
/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_promisc_info *info =
	    (struct i40e_virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi *vsi;
	bool allmulti = false;
	i40e_status aq_ret;

	vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    (vsi->type != I40E_VSI_FCOE)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
						       allmulti, NULL);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}

/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the rx/tx
 * queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vsi_queue_config_info *qci =
	    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
	struct i40e_virtchnl_queue_pair_info *qpi;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id, vsi_queue_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi_id = qci->vsi_id;
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		vsi_queue_id = qpi->txq.queue_id;
		if ((qpi->txq.vsi_id != vsi_id) ||
		    (qpi->rxq.vsi_id != vsi_id) ||
		    (qpi->rxq.queue_id != vsi_queue_id) ||
		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}
	/* set vsi num_queue_pairs in use to num configured by VF */
	pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}

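/* What a well-formed CONFIG_VSI_QUEUES request looks like, per the
 * validation above (illustrative values): qci->vsi_id names the VF's
 * LAN VSI, and each qpair[i] carries a txq and rxq description that
 * must agree on that vsi_id and on a common VSI-relative queue_id
 * within the allocated range.  So for two queue pairs,
 * qpair[0].txq.queue_id == qpair[0].rxq.queue_id == 0 and
 * qpair[1].txq.queue_id == qpair[1].rxq.queue_id == 1.
 */
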
/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the irq to
 * queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_irq_map_info *irqmap_info =
	    (struct i40e_virtchnl_irq_map_info *)msg;
	struct i40e_virtchnl_vector_map *map;
	u16 vsi_id, vsi_queue_id, vector_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		tempmap = map->rxq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		tempmap = map->txq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}

/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true))
		aq_ret = I40E_ERR_TIMEOUT;
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to disable all or specific
 * queue(s)
 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false))
		aq_ret = I40E_ERR_TIMEOUT;

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}

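/* Note on the queue_select used by the two handlers above: rx_queues
 * and tx_queues are per-queue bitmaps (queue n <-> bit n), and a
 * request with both maps zero is rejected as I40E_ERR_PARAM.  The
 * handlers then start or stop all of the VSI's rings together via
 * i40e_vsi_control_rings() rather than acting on individual bits.
 */
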
/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to get vsi stats
 **/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_eth_stats stats;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;

	memset(&stats, 0, sizeof(struct i40e_eth_stats));

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	i40e_update_eth_stats(vsi);
	stats = vsi->eth_stats;

error_param:
	/* send the response back to the VF */
	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
				      (u8 *)&stats, sizeof(stats));
}

/**
 * i40e_check_vf_permission
 * @vf: pointer to the VF info
 * @macaddr: pointer to the MAC Address being checked
 *
 * Check if the VF has permission to add or delete unicast MAC address
 * filters and return error code -EPERM if not.  Then check if the
 * address filter requested is broadcast or zero and if so return
 * an invalid MAC address error code.
 **/
static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
{
	struct i40e_pf *pf = vf->pf;
	int ret = 0;

	if (is_broadcast_ether_addr(macaddr) ||
	    is_zero_ether_addr(macaddr)) {
		dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
		ret = I40E_ERR_INVALID_MAC_ADDR;
	} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
		   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
		/* If the host VMM administrator has set the VF MAC address
		 * administratively via the ndo_set_vf_mac command then deny
		 * permission to the VF to add or delete unicast MAC addresses.
		 * The VF may request to set the MAC address filter already
		 * assigned to it so do not return an error in that case.
		 */
		dev_err(&pf->pdev->dev,
			"VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
		ret = -EPERM;
	}
	return ret;
}

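/* The pf_set_mac policy checked above is driven from the host side; a
 * typical trigger (illustrative interface and VF number) is:
 *
 *	ip link set ethX vf 0 mac 52:54:00:12:34:56
 *
 * which reaches i40e_ndo_set_vf_mac() further down.  After that, the
 * VF may only re-add its administratively assigned address; requests
 * for any other unicast address fail with -EPERM.
 */
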
/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * add guest mac address filter
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
	    (struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		ret = i40e_check_vf_permission(vf, al->list[i].addr);
		if (ret)
			goto error_param;
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	/* Lock once, because every function inside the for loop accesses
	 * the VSI's MAC filter list, which needs to be protected by the
	 * same lock.
	 */
	spin_lock_bh(&vsi->mac_filter_list_lock);

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		f = i40e_find_mac(vsi, al->list[i].addr, true, false);
		if (!f) {
			if (i40e_is_vsi_in_vlan(vsi))
				f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
							 true, false);
			else
				f = i40e_add_filter(vsi, al->list[i].addr, -1,
						    true, false);
		}

		if (!f) {
			dev_err(&pf->pdev->dev,
				"Unable to add MAC filter %pM for VF %d\n",
				al->list[i].addr, vf->vf_id);
			ret = I40E_ERR_PARAM;
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			goto error_param;
		}
	}
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* program the updated filter list */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
			vf->vf_id, ret);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
				       ret);
}

/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove guest mac address filter
 **/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
	    (struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		if (is_broadcast_ether_addr(al->list[i].addr) ||
		    is_zero_ether_addr(al->list[i].addr)) {
			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
				al->list[i].addr, vf->vf_id);
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	spin_lock_bh(&vsi->mac_filter_list_lock);
	/* delete addresses from the list */
	for (i = 0; i < al->num_elements; i++)
		if (i40e_del_mac_all_vlan(vsi, al->list[i].addr, true, false)) {
			ret = I40E_ERR_INVALID_MAC_ADDR;
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			goto error_param;
		}

	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* program the updated filter list */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
			vf->vf_id, ret);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
				       ret);
}

/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * program guest vlan id
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
	    (struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);

		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VLAN filter %d for VF %d, error %d\n",
				vfl->vlan_id[i], vf->vf_id, ret);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
}

/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove programmed guest vlan id
 **/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
	    (struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);

		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to delete VLAN filter %d for VF %d, error %d\n",
				vfl->vlan_id[i], vf->vf_id, ret);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
}

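/* The message validator below does a two-stage length check for the
 * variable-size opcodes: first the fixed header must fit, then the
 * element count found in that header extends the expected length.
 * Worked example for ADD_ETHER_ADDRESS with num_elements = 3:
 * valid_len = sizeof(struct i40e_virtchnl_ether_addr_list)
 *	     + 3 * sizeof(struct i40e_virtchnl_ether_addr),
 * and a zero element count is flagged as a malformed message.
 */
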
/**
 * i40e_vc_validate_vf_msg
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * validate msg
 **/
static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
				   u32 v_retval, u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	int valid_len;

	/* Check if VF is disabled. */
	if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
		return I40E_ERR_PARAM;

	/* Validate message length. */
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct i40e_virtchnl_version_info);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		valid_len = 0;
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		if (VF_IS_V11(vf))
			valid_len = sizeof(u32);
		else
			valid_len = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_txq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vsi_queue_config_info *vqc =
			    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
			valid_len += (vqc->num_queue_pairs *
				      sizeof(struct
					     i40e_virtchnl_queue_pair_info));
			if (vqc->num_queue_pairs == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_irq_map_info *vimi =
			    (struct i40e_virtchnl_irq_map_info *)msg;
			valid_len += (vimi->num_vectors *
				      sizeof(struct i40e_virtchnl_vector_map));
			if (vimi->num_vectors == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_ether_addr_list *veal =
			    (struct i40e_virtchnl_ether_addr_list *)msg;
			valid_len += veal->num_elements *
			    sizeof(struct i40e_virtchnl_ether_addr);
			if (veal->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vlan_filter_list *vfl =
			    (struct i40e_virtchnl_vlan_filter_list *)msg;
			valid_len += vfl->num_elements * sizeof(u16);
			if (vfl->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	/* These are always errors coming from the VF. */
	case I40E_VIRTCHNL_OP_EVENT:
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		return -EPERM;
	}
	/* few more checks */
	if ((valid_len != msglen) || (err_msg_format)) {
		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
		return -EINVAL;
	} else {
		return 0;
	}
}

/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the PF structure
 * @vf_id: source VF id
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process request from VF
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
			   u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
	struct i40e_vf *vf;
	int ret;

	pf->vf_aq_requests++;
	if (local_vf_id >= pf->num_alloc_vfs)
		return -EINVAL;
	vf = &(pf->vf[local_vf_id]);
	/* perform basic checks on the msg */
	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);

	if (ret) {
		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
			local_vf_id, v_opcode, msglen);
		return ret;
	}

	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf, msg);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf, msg);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		i40e_vc_reset_vf_msg(vf);
		ret = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
		i40e_vc_notify_vf_link_state(vf);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
			v_opcode, local_vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      I40E_ERR_NOT_IMPLEMENTED);
		break;
	}

	return ret;
}

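/* Dispatch convention worth noting (a summary of the switch above):
 * every handler is responsible for sending its own virtchnl response,
 * so this function only validates and routes.  The two exceptions are
 * RESET_VF, which deliberately sends no reply, and ENABLE_QUEUES,
 * which is followed by an unsolicited link-state event so a freshly
 * started VF learns the current link without polling.
 */
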
/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the PF structure
 *
 * called from the VFLR IRQ handler to
 * free up VF resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	u32 reg, reg_idx, bit_idx, vf_id;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		return 0;

	/* re-enable vflr interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx)) {
			/* clear the bit in GLGEN_VFLRSTAT */
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));

			if (!test_bit(__I40E_DOWN, &pf->state))
				i40e_reset_vf(vf, true);
		}
	}

	return 0;
}

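/* GLGEN_VFLRSTAT is a bank of 32-bit registers carrying one status bit per
 * absolute VF id, hence the /32 and %32 split in i40e_vc_process_vflr_event()
 * above. Worked example (hypothetical ids): absolute VF id 45 (vf_base_id 32
 * plus local id 13) lands in register index 45 / 32 = 1 at bit 45 % 32 = 13,
 * so the loop tests and then clears BIT(13) of I40E_GLGEN_VFLRSTAT(1) before
 * resetting that VF.
 */
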
2107 */ 2108 list_for_each_entry(f, &vsi->mac_filter_list, list) 2109 i40e_del_filter(vsi, f->macaddr, f->vlan, true, false); 2110 2111 spin_unlock_bh(&vsi->mac_filter_list_lock); 2112 2113 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); 2114 /* program mac filter */ 2115 if (i40e_sync_vsi_filters(vsi)) { 2116 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 2117 ret = -EIO; 2118 goto error_param; 2119 } 2120 ether_addr_copy(vf->default_lan_addr.addr, mac); 2121 vf->pf_set_mac = true; 2122 /* Force the VF driver stop so it has to reload with new MAC address */ 2123 i40e_vc_disable_vf(pf, vf); 2124 dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); 2125 2126 error_param: 2127 return ret; 2128 } 2129 2130 /** 2131 * i40e_ndo_set_vf_port_vlan 2132 * @netdev: network interface device structure 2133 * @vf_id: VF identifier 2134 * @vlan_id: mac address 2135 * @qos: priority setting 2136 * 2137 * program VF vlan id and/or qos 2138 **/ 2139 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, 2140 int vf_id, u16 vlan_id, u8 qos) 2141 { 2142 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT); 2143 struct i40e_netdev_priv *np = netdev_priv(netdev); 2144 struct i40e_pf *pf = np->vsi->back; 2145 bool is_vsi_in_vlan = false; 2146 struct i40e_vsi *vsi; 2147 struct i40e_vf *vf; 2148 int ret = 0; 2149 2150 /* validate the request */ 2151 if (vf_id >= pf->num_alloc_vfs) { 2152 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 2153 ret = -EINVAL; 2154 goto error_pvid; 2155 } 2156 2157 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) { 2158 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); 2159 ret = -EINVAL; 2160 goto error_pvid; 2161 } 2162 2163 vf = &(pf->vf[vf_id]); 2164 vsi = pf->vsi[vf->lan_vsi_idx]; 2165 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 2166 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", 2167 vf_id); 2168 ret = -EAGAIN; 2169 goto error_pvid; 2170 } 2171 2172 if (le16_to_cpu(vsi->info.pvid) == vlanprio) 2173 /* duplicate request, so just return success */ 2174 goto error_pvid; 2175 2176 spin_lock_bh(&vsi->mac_filter_list_lock); 2177 is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi); 2178 spin_unlock_bh(&vsi->mac_filter_list_lock); 2179 2180 if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) { 2181 dev_err(&pf->pdev->dev, 2182 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", 2183 vf_id); 2184 /* Administrator Error - knock the VF offline until he does 2185 * the right thing by reconfiguring his network correctly 2186 * and then reloading the VF driver. 2187 */ 2188 i40e_vc_disable_vf(pf, vf); 2189 } 2190 2191 /* Check for condition where there was already a port VLAN ID 2192 * filter set and now it is being deleted by setting it to zero. 2193 * Additionally check for the condition where there was a port 2194 * VLAN but now there is a new and different port VLAN being set. 2195 * Before deleting all the old VLAN filters we must add new ones 2196 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our 2197 * MAC addresses deleted. 
2198 */ 2199 if ((!(vlan_id || qos) || 2200 vlanprio != le16_to_cpu(vsi->info.pvid)) && 2201 vsi->info.pvid) 2202 ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY); 2203 2204 if (vsi->info.pvid) { 2205 /* kill old VLAN */ 2206 ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) & 2207 VLAN_VID_MASK)); 2208 if (ret) { 2209 dev_info(&vsi->back->pdev->dev, 2210 "remove VLAN failed, ret=%d, aq_err=%d\n", 2211 ret, pf->hw.aq.asq_last_status); 2212 } 2213 } 2214 if (vlan_id || qos) 2215 ret = i40e_vsi_add_pvid(vsi, vlanprio); 2216 else 2217 i40e_vsi_remove_pvid(vsi); 2218 2219 if (vlan_id) { 2220 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", 2221 vlan_id, qos, vf_id); 2222 2223 /* add new VLAN filter */ 2224 ret = i40e_vsi_add_vlan(vsi, vlan_id); 2225 if (ret) { 2226 dev_info(&vsi->back->pdev->dev, 2227 "add VF VLAN failed, ret=%d aq_err=%d\n", ret, 2228 vsi->back->hw.aq.asq_last_status); 2229 goto error_pvid; 2230 } 2231 /* Kill non-vlan MAC filters - ignore error return since 2232 * there might not be any non-vlan MAC filters. 2233 */ 2234 i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY); 2235 } 2236 2237 if (ret) { 2238 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n"); 2239 goto error_pvid; 2240 } 2241 /* The Port VLAN needs to be saved across resets the same as the 2242 * default LAN MAC address. 2243 */ 2244 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); 2245 ret = 0; 2246 2247 error_pvid: 2248 return ret; 2249 } 2250 2251 #define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */ 2252 #define I40E_MAX_BW_INACTIVE_ACCUM 4 /* device can accumulate 4 credits max */ 2253 /** 2254 * i40e_ndo_set_vf_bw 2255 * @netdev: network interface device structure 2256 * @vf_id: VF identifier 2257 * @tx_rate: Tx rate 2258 * 2259 * configure VF Tx rate 2260 **/ 2261 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, 2262 int max_tx_rate) 2263 { 2264 struct i40e_netdev_priv *np = netdev_priv(netdev); 2265 struct i40e_pf *pf = np->vsi->back; 2266 struct i40e_vsi *vsi; 2267 struct i40e_vf *vf; 2268 int speed = 0; 2269 int ret = 0; 2270 2271 /* validate the request */ 2272 if (vf_id >= pf->num_alloc_vfs) { 2273 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id); 2274 ret = -EINVAL; 2275 goto error; 2276 } 2277 2278 if (min_tx_rate) { 2279 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", 2280 min_tx_rate, vf_id); 2281 return -EINVAL; 2282 } 2283 2284 vf = &(pf->vf[vf_id]); 2285 vsi = pf->vsi[vf->lan_vsi_idx]; 2286 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 2287 dev_err(&pf->pdev->dev, "VF %d still in reset. 
#define I40E_BW_CREDIT_DIVISOR 50	/* 50Mbps per BW credit */
#define I40E_MAX_BW_INACTIVE_ACCUM 4	/* device can accumulate 4 credits max */
/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: minimum Tx rate in Mbps (unsupported, must be 0)
 * @max_tx_rate: maximum Tx rate in Mbps
 *
 * configure VF Tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
		       int max_tx_rate)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int speed = 0;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
		ret = -EINVAL;
		goto error;
	}

	if (min_tx_rate) {
		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
			min_tx_rate, vf_id);
		return -EINVAL;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error;
	}

	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = 40000;
		break;
	case I40E_LINK_SPEED_10GB:
		speed = 10000;
		break;
	case I40E_LINK_SPEED_1GB:
		speed = 1000;
		break;
	default:
		break;
	}

	if (max_tx_rate > speed) {
		dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.\n",
			max_tx_rate, vf->vf_id);
		ret = -EINVAL;
		goto error;
	}

	if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
		dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
		max_tx_rate = 50;
	}

	/* Tx rate credits are in values of 50Mbps, 0 is disabled */
	ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
					  max_tx_rate / I40E_BW_CREDIT_DIVISOR,
					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
			ret);
		ret = -EIO;
		goto error;
	}
	vf->tx_rate = max_tx_rate;
error:
	return ret;
}

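/* i40e_ndo_set_vf_bw() is typically reached through the netdev
 * .ndo_set_vf_rate hook, with iproute2 as the usual userspace path, e.g.
 * (illustrative command, device name hypothetical):
 *
 *	ip link set dev enp2s0f0 vf 0 max_tx_rate 200
 *
 * which would arrive here with min_tx_rate = 0 and max_tx_rate = 200, and
 * be programmed as 200 / 50 = 4 bandwidth credits.
 */
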
/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_param;
	}

	ivi->vf = vf_id;

	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);

	ivi->max_tx_rate = vf->tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->spoofchk = vf->spoofchk;
	ret = 0;

error_param:
	return ret;
}

/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_virtchnl_pf_event pfe;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int abs_vf_id;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_out;
	}

	vf = &pf->vf[vf_id];
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		pfe.event_data.link_event.link_status =
			pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			pf->hw.phy.link_info.link_speed;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		pfe.event_data.link_event.link_status = true;
		pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		pfe.event_data.link_event.link_status = false;
		pfe.event_data.link_event.link_speed = 0;
		break;
	default:
		ret = -EINVAL;
		goto error_out;
	}
	/* Notify the VF of its new link state */
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);

error_out:
	return ret;
}

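/* The three IFLA_VF_LINK_STATE_* values map onto (link_forced, link_up) as:
 * auto = (false, don't care), enable = (true, true), disable = (true, false),
 * which is also how i40e_ndo_get_vf_config() reports the state back. From
 * userspace this is usually driven via iproute2, e.g. (illustrative command,
 * device name hypothetical):
 *
 *	ip link set dev enp2s0f0 vf 0 state disable
 *
 * which forces the VF's virtual link down even while physical link is up.
 */
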
/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	vf = &(pf->vf[vf_id]);
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto out;
	}

	if (enable == vf->spoofchk)
		goto out;

	vf->spoofchk = enable;
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
			ret);
		ret = -EIO;
	}
out:
	return ret;
}
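
/* Spoof checking is intended to make the hardware drop VF transmits whose
 * source MAC (and, with ENABLE_VLAN_CHK, VLAN tag) does not match what the
 * PF assigned. A usage sketch via iproute2 (illustrative command, device
 * name hypothetical):
 *
 *	ip link set dev enp2s0f0 vf 0 spoofchk on
 *
 * Note that the VSI update above only sets the sec_flags bits when enabling;
 * the preceding memset() leaves them clear for the disable case.
 */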