/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum i40e_virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct i40e_virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
	if (vf->link_forced) {
		pfe.event_data.link_event.link_status = vf->link_up;
		pfe.event_data.link_event.link_speed =
			(vf->link_up ? I40E_LINK_SPEED_40GB : 0);
	} else {
		pfe.event_data.link_event.link_status =
			ls->link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed = ls->link_speed;
	}
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct i40e_virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;

	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct i40e_virtchnl_pf_event), NULL);
}

/***********************misc routines*****************************/

/**
 * i40e_vc_disable_vf
 * @pf: pointer to the PF info
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset
 **/
static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
{
	i40e_vc_notify_vf_reset(vf);
	i40e_reset_vf(vf, false);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

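/* Worked example (illustrative values, not from the datasheet): with
 * func_caps.vf_base_id = 64 and num_msix_vectors_vf = 5, VF 3 talks to
 * the firmware as absolute id 64 + 3 = 67, and the validator above
 * accepts VF-relative vector ids 0-4, where vector 0 is the misc/mailbox
 * vector programmed via VPINT_LNKLST0 rather than VPINT_LNKLSTN (see
 * i40e_config_irq_link_list below).
 */
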
/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct i40e_virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
			  ((pf->hw.func_caps.num_msix_vectors_vf - 1) *
			   vf->vf_id) + (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	next_q = find_first_bit(&linklistmap,
				(I40E_MAX_VSI_QP *
				 I40E_VIRTCHNL_SUPPORTED_QTYPES));
	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap,
				       (I40E_MAX_VSI_QP *
					I40E_VIRTCHNL_SUPPORTED_QTYPES),
				       next_q + 1);
		if (next_q <
		    (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

irq_list_done:
	i40e_flush(hw);
}

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

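/* Unit note, sketched from the division above (assumption: HMC queue
 * contexts store ring bases in 128-byte units): a Tx ring at DMA address
 * 0x1f000 is programmed as base = 0x1f000 / 128 = 0x3e0, which implies
 * the VF must supply a 128-byte-aligned ring address.
 */
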
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set splitalways mode 10b */
		rx_ctx.dtype = 0x2;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @type: type of VSI to allocate
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}
	if (type == I40E_VSI_SRIOV) {
		u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
		f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
				    vf->port_vlan_id ? vf->port_vlan_id : -1,
				    true, false);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF MAC addr\n");
		f = i40e_add_filter(vsi, brdcast,
				    vf->port_vlan_id ? vf->port_vlan_id : -1,
				    true, false);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
	}

	/* program mac filter */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  vf->tx_rate / 50, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_queue_pairs = 0;
	int j;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	/* map PF queues to VF queues */
	for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
		total_queue_pairs++;
	}

	/* map PF queues to VSI */
	for (j = 0; j < 7; j++) {
		if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
			reg = 0x07FF07FF;	/* unused */
		} else {
			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
							  j * 2);
			reg = qid;
			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
						      (j * 2) + 1);
			reg |= qid << 16;
		}
		wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
	}

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

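/* Worked example for the VSILAN_QTABLE layout used in
 * i40e_enable_vf_mappings() above (illustrative queue numbers): a VF VSI
 * with 4 queue pairs backed by PF queues 16-19 programs
 * QTABLE[0] = (17 << 16) | 16, QTABLE[1] = (19 << 16) | 18, and
 * QTABLE[2..6] = 0x07FF07FF, since each register packs two queue ids.
 */
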
/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, msix_vf;

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						       (vf->vf_id))
						      + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping
	 * track of the resources
	 */
	vf->num_queue_pairs = 0;
	vf->vf_states = 0;
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
	set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STAT_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * reset the VF
 **/
void i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	int i;
	u32 reg;

	if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
		return;

	/* warn the VF */
	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.  Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
	/* clear the reset bit in the VPGEN_VFRTRIG reg */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* On initial reset, we won't have any queues */
	if (vf->lan_vsi_idx == 0)
		goto complete_reset;

	i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false);
complete_reset:
	/* reallocate VF resources to reset the VSI state */
	i40e_free_vf_res(vf);
	i40e_alloc_vf_res(vf);
	i40e_enable_vf_mappings(vf);
	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
	clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);

	/* tell the VF the reset is done */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, &pf->state);
}

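/* Timing sketch for the reset handshake above (derived from the loop
 * bounds, not a datasheet figure): VPGEN_VFRSTAT.VFRD is polled up to 10
 * times with a 10-20 ms sleep per attempt, so a stuck VF is declared
 * after roughly 100-200 ms.
 */
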
/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;
	while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
		usleep_range(1000, 2000);

	for (i = 0; i < pf->num_alloc_vfs; i++)
		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
			i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
					       false);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	msleep(20); /* let any messages in transit get finished up */

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, &pf->state);
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			dev_err(&pf->pdev->dev,
				"Failed to enable SR-IOV, error %d.\n", ret);
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;
		/* VF resources get allocated during reset */
		i40e_reset_vf(&vfs[i], false);

		/* enable VF vplan_qtable mappings */
		i40e_enable_vf_mappings(&vfs[i]);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, &pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		err = -EPERM;
		goto err_out;
	}

	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

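/* Usage sketch (illustrative): these entry points back the standard
 * sysfs knob, so "echo 4 > /sys/bus/pci/devices/<bdf>/sriov_numvfs"
 * lands in i40e_pci_sriov_configure(pdev, 4) below, and "echo 0" tears
 * the VFs down again, provided none are assigned to a VM.
 */
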
/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf,
					   BIT_ULL(__I40E_PF_RESET_REQUESTED));
		}
		return i40e_pci_sriov_enable(pdev, num_vfs);
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		return -EINVAL;
	}
	return 0;
}

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
			v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_err(&pf->pdev->dev,
			"Unable to send the message to VF %d aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum i40e_virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

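/* Accounting note (behavior as coded above): every NAKed reply bumps
 * num_invalid_msgs, and once it exceeds
 * I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED the VF is latched
 * I40E_VF_STAT_DISABLED; only PF-side intervention (e.g. a VF reset)
 * brings it back.
 */
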
/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct i40e_virtchnl_version_info info = {
		I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(vf))
		info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct
					     i40e_virtchnl_version_info));
}

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct i40e_virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int i = 0, len = 0;
	int num_vsis = 1;
	int ret;

	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct i40e_virtchnl_vf_resource) +
	       sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(vf))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
				  I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
					   I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	if (vf->lan_vsi_idx) {
		vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
		vfres->vsi_res[i].num_queue_pairs =
			pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
		memcpy(vfres->vsi_res[i].default_mac_addr,
		       vf->default_lan_addr.addr, ETH_ALEN);
		i++;
	}
	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself,
 * unlike other virtchnl messages, PF driver
 * doesn't send the response back to the VF
 **/
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
		i40e_reset_vf(vf, false);
}

/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_promisc_info *info =
		(struct i40e_virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi *vsi;
	bool allmulti = false;
	i40e_status aq_ret;

	vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    (vsi->type != I40E_VSI_FCOE)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
						       allmulti, NULL);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}

/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the rx/tx
 * queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vsi_queue_config_info *qci =
		(struct i40e_virtchnl_vsi_queue_config_info *)msg;
	struct i40e_virtchnl_queue_pair_info *qpi;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id, vsi_queue_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi_id = qci->vsi_id;
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		vsi_queue_id = qpi->txq.queue_id;
		if ((qpi->txq.vsi_id != vsi_id) ||
		    (qpi->rxq.vsi_id != vsi_id) ||
		    (qpi->rxq.queue_id != vsi_queue_id) ||
		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}
	/* set vsi num_queue_pairs in use to num configured by VF */
	pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}

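/* Message shape, sketched (illustrative): a VF with 4 queue pairs sends
 * CONFIG_VSI_QUEUES with num_queue_pairs = 4 and qpair[i].txq /
 * qpair[i].rxq both naming VSI-relative queue i; the handler above
 * rejects any pair whose txq.queue_id and rxq.queue_id disagree.
 */
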
/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the irq to
 * queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_irq_map_info *irqmap_info =
		(struct i40e_virtchnl_irq_map_info *)msg;
	struct i40e_virtchnl_vector_map *map;
	u16 vsi_id, vsi_queue_id, vector_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		tempmap = map->rxq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		tempmap = map->txq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}

/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
		(struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true))
		aq_ret = I40E_ERR_TIMEOUT;
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to disable all or specific
 * queue(s)
 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
		(struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false))
		aq_ret = I40E_ERR_TIMEOUT;

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}

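/* Bitmap example (illustrative): i40e_virtchnl_queue_select carries
 * rx_queues/tx_queues as bitmaps, so rx_queues = 0x3 requests rx queues
 * 0 and 1; note the handlers above currently flip all of the VSI's
 * rings as a unit rather than honoring individual bits.
 */
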
/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to get vsi stats
 **/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
		(struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_eth_stats stats;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;

	memset(&stats, 0, sizeof(struct i40e_eth_stats));

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	i40e_update_eth_stats(vsi);
	stats = vsi->eth_stats;

error_param:
	/* send the response back to the VF */
	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
				      (u8 *)&stats, sizeof(stats));
}

/**
 * i40e_check_vf_permission
 * @vf: pointer to the VF info
 * @macaddr: pointer to the MAC Address being checked
 *
 * Check if the VF has permission to add or delete unicast MAC address
 * filters and return error code -EPERM if not.  Then check if the
 * address filter requested is broadcast or zero and if so return
 * an invalid MAC address error code.
 **/
static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
{
	struct i40e_pf *pf = vf->pf;
	int ret = 0;

	if (is_broadcast_ether_addr(macaddr) ||
	    is_zero_ether_addr(macaddr)) {
		dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
		ret = I40E_ERR_INVALID_MAC_ADDR;
	} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
		   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
		/* If the host VMM administrator has set the VF MAC address
		 * administratively via the ndo_set_vf_mac command then deny
		 * permission to the VF to add or delete unicast MAC addresses.
		 * The VF may request to set the MAC address filter already
		 * assigned to it so do not return an error in that case.
		 */
		dev_err(&pf->pdev->dev,
			"VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
		ret = -EPERM;
	}
	return ret;
}

/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * add guest mac address filter
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
		(struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		ret = i40e_check_vf_permission(vf, al->list[i].addr);
		if (ret)
			goto error_param;
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		f = i40e_find_mac(vsi, al->list[i].addr, true, false);
		if (!f) {
			if (i40e_is_vsi_in_vlan(vsi))
				f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
							 true, false);
			else
				f = i40e_add_filter(vsi, al->list[i].addr, -1,
						    true, false);
		}

		if (!f) {
			dev_err(&pf->pdev->dev,
				"Unable to add VF MAC filter\n");
			ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
				       ret);
}

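/* Permission example (illustrative): after the host runs something like
 * "ip link set <pf> vf 0 mac 02:01:02:03:04:05", pf_set_mac is true and
 * a VF request to add any other unicast address fails with -EPERM in
 * i40e_check_vf_permission(); re-adding the administratively assigned
 * address itself is still allowed.
 */
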
/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove guest mac address filter
 **/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
		(struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		if (is_broadcast_ether_addr(al->list[i].addr) ||
		    is_zero_ether_addr(al->list[i].addr)) {
			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
				al->list[i].addr);
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	/* delete addresses from the list */
	for (i = 0; i < al->num_elements; i++)
		i40e_del_filter(vsi, al->list[i].addr,
				I40E_VLAN_ANY, true, false);

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
				       ret);
}

/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * program guest vlan id
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
		(struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
}

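/* Range note (assumption: I40E_MAX_VLANID is the standard 802.1Q
 * maximum of 4095): guest vlan ids above that bound are rejected, and
 * both add and remove are refused while a port VLAN (pvid) is set,
 * since the PF then owns VLAN tagging for this VF.
 */
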
/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove programmed guest vlan id
 **/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
		(struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to delete VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
}

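/* Length check, worked (illustrative): an ADD_VLAN message carrying 3
 * vlan ids must arrive with msglen equal to
 * sizeof(struct i40e_virtchnl_vlan_filter_list) + 3 * sizeof(u16);
 * i40e_vc_validate_vf_msg() below recomputes valid_len from the embedded
 * element count and rejects anything that differs.
 */
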
/**
 * i40e_vc_validate_vf_msg
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * validate msg
 **/
static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
				   u32 v_retval, u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	int valid_len;

	/* Check if VF is disabled. */
	if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
		return I40E_ERR_PARAM;

	/* Validate message length. */
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct i40e_virtchnl_version_info);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		valid_len = 0;
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		if (VF_IS_V11(vf))
			valid_len = sizeof(u32);
		else
			valid_len = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_txq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vsi_queue_config_info *vqc =
			    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
			valid_len += (vqc->num_queue_pairs *
				      sizeof(struct
					     i40e_virtchnl_queue_pair_info));
			if (vqc->num_queue_pairs == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_irq_map_info *vimi =
			    (struct i40e_virtchnl_irq_map_info *)msg;
			valid_len += (vimi->num_vectors *
				      sizeof(struct i40e_virtchnl_vector_map));
			if (vimi->num_vectors == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_ether_addr_list *veal =
			    (struct i40e_virtchnl_ether_addr_list *)msg;
			valid_len += veal->num_elements *
			    sizeof(struct i40e_virtchnl_ether_addr);
			if (veal->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vlan_filter_list *vfl =
			    (struct i40e_virtchnl_vlan_filter_list *)msg;
			valid_len += vfl->num_elements * sizeof(u16);
			if (vfl->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	/* These are always errors coming from the VF. */
	case I40E_VIRTCHNL_OP_EVENT:
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		return -EPERM;
	}
	/* few more checks */
	if ((valid_len != msglen) || (err_msg_format)) {
		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
		return -EINVAL;
	} else {
		return 0;
	}
}

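/* Dispatch sketch (illustrative, assuming vf_base_id = 64): a
 * well-formed ADD_ETHER_ADDRESS from absolute VF 67 arrives via the PF
 * admin receive queue, i40e_vc_process_vf_msg() below resolves
 * local_vf_id = 67 - 64 = 3, validates the length, then routes the
 * buffer to i40e_vc_add_mac_addr_msg().
 */
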

/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the PF structure
 * @vf_id: source VF id
 * @v_opcode: operation code
 * @v_retval: unused return value code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process a request from a VF
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
			   u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
	struct i40e_vf *vf;
	int ret;

	pf->vf_aq_requests++;
	if (local_vf_id >= pf->num_alloc_vfs)
		return -EINVAL;
	vf = &(pf->vf[local_vf_id]);
	/* perform basic checks on the msg */
	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);

	if (ret) {
		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
			local_vf_id, v_opcode, msglen);
		return ret;
	}

	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf, msg);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf, msg);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		i40e_vc_reset_vf_msg(vf);
		ret = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
		i40e_vc_notify_vf_link_state(vf);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
			v_opcode, local_vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      I40E_ERR_NOT_IMPLEMENTED);
		break;
	}

	return ret;
}

/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the PF structure
 *
 * called from the VFLR interrupt handler to
 * free up VF resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	u32 reg, reg_idx, bit_idx, vf_id;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		return 0;

	/* re-enable vflr interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx)) {
			/* clear the bit in GLGEN_VFLRSTAT */
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));

			if (!test_bit(__I40E_DOWN, &pf->state))
				i40e_reset_vf(vf, true);
		}
	}

	return 0;
}
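
/* Worked example for the GLGEN_VFLRSTAT lookup above, with
 * hypothetical numbers: if vf_base_id is 64 and vf_id is 10, absolute
 * VF 74 lands in register GLGEN_VFLRSTAT(2) at bit 10, since
 * 74 / 32 == 2 and 74 % 32 == 10.
 */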

/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: mac address
 *
 * program VF mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev,
			"Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	if (!is_valid_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid VF ethernet address\n");
		ret = -EINVAL;
		goto error_param;
	}

	/* delete the temporary mac address */
	i40e_del_filter(vsi, vf->default_lan_addr.addr,
			vf->port_vlan_id ? vf->port_vlan_id : -1,
			true, false);

	/* Delete all the filters for this VSI - we're going to kill it
	 * anyway.
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);

	dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	ether_addr_copy(vf->default_lan_addr.addr, mac);
	vf->pf_set_mac = true;
	/* Force the VF driver stop so it has to reload with new MAC address */
	i40e_vc_disable_vf(pf, vf);
	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");

error_param:
	return ret;
}
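
/* Usage note: this is the PF-side handler behind the .ndo_set_vf_mac
 * callback, typically reached from userspace via something like:
 *   ip link set <pf-netdev> vf <vf-id> mac <address>
 */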

/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN identifier
 * @qos: priority setting
 *
 * program VF vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
			      int vf_id, u16 vlan_id, u8 qos)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if (le16_to_cpu(vsi->info.pvid) ==
	    (vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)))
		/* duplicate request, so just return success */
		goto error_pvid;

	if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
		dev_err(&pf->pdev->dev,
			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
			vf_id);
		/* Administrator error - knock the VF offline until the
		 * network is reconfigured correctly and the VF driver
		 * is reloaded.
		 */
		i40e_vc_disable_vf(pf, vf);
	}

	/* Check for condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Additionally check for the condition where there was a port
	 * VLAN but now there is a new and different port VLAN being set.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if ((!(vlan_id || qos) ||
	     (vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)) !=
	      le16_to_cpu(vsi->info.pvid)) &&
	    vsi->info.pvid)
		ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);

	if (vsi->info.pvid) {
		/* kill old VLAN */
		ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
					       VLAN_VID_MASK));
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "remove VLAN failed, ret=%d, aq_err=%d\n",
				 ret, pf->hw.aq.asq_last_status);
		}
	}
	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi,
				vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
	else
		i40e_vsi_remove_pvid(vsi);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter */
		ret = i40e_vsi_add_vlan(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			goto error_pvid;
		}
		/* Kill non-vlan MAC filters - ignore error return since
		 * there might not be any non-vlan MAC filters.
		 */
		i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
	}

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}
	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
	ret = 0;

error_pvid:
	return ret;
}
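
/* Usage note: the port VLAN handler above backs .ndo_set_vf_vlan,
 * e.g.:
 *   ip link set <pf-netdev> vf <vf-id> vlan <id> [qos <prio>]
 * Passing vlan 0 (with qos 0) removes the port VLAN again, per the
 * i40e_vsi_remove_pvid() branch above.
 */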

#define I40E_BW_CREDIT_DIVISOR 50	/* 50Mbps per BW credit */
#define I40E_MAX_BW_INACTIVE_ACCUM 4	/* device can accumulate 4 credits max */
/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: minimum Tx rate in Mbps (unsupported; must be zero)
 * @max_tx_rate: maximum Tx rate in Mbps
 *
 * configure VF Tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
		       int max_tx_rate)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int speed = 0;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
		ret = -EINVAL;
		goto error;
	}

	if (min_tx_rate) {
		dev_err(&pf->pdev->dev, "Invalid min Tx rate (%d) specified for VF %d: min Tx rate limiting is not supported.\n",
			min_tx_rate, vf_id);
		return -EINVAL;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
		ret = -EINVAL;
		goto error;
	}

	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = 40000;
		break;
	case I40E_LINK_SPEED_10GB:
		speed = 10000;
		break;
	case I40E_LINK_SPEED_1GB:
		speed = 1000;
		break;
	default:
		break;
	}

	if (max_tx_rate > speed) {
		dev_err(&pf->pdev->dev, "Invalid max Tx rate %d specified for VF %d.\n",
			max_tx_rate, vf->vf_id);
		ret = -EINVAL;
		goto error;
	}

	if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
		dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
		max_tx_rate = 50;
	}

	/* Tx rate credits are in units of 50Mbps; 0 disables the limit */
	ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
					  max_tx_rate / I40E_BW_CREDIT_DIVISOR,
					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to set max Tx rate, error code %d.\n",
			ret);
		ret = -EIO;
		goto error;
	}
	vf->tx_rate = max_tx_rate;
error:
	return ret;
}

/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	ivi->vf = vf_id;

	memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);

	ivi->max_tx_rate = vf->tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->spoofchk = vf->spoofchk;
	ret = 0;

error_param:
	return ret;
}
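
/* The fields filled in above are what userspace sees per VF (e.g. in
 * the vf lines of `ip link show <pf-netdev>`): MAC address, port
 * VLAN/QoS decoded from the VSI pvid, Tx rate limit, link state
 * policy and spoof-check setting.
 */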

/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_virtchnl_pf_event pfe;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int abs_vf_id;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_out;
	}

	vf = &pf->vf[vf_id];
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		pfe.event_data.link_event.link_status =
			pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			pf->hw.phy.link_info.link_speed;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		pfe.event_data.link_event.link_status = true;
		pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		pfe.event_data.link_event.link_status = false;
		pfe.event_data.link_event.link_speed = 0;
		break;
	default:
		ret = -EINVAL;
		goto error_out;
	}
	/* Notify the VF of its new link state */
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);

error_out:
	return ret;
}

/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	vf = &(pf->vf[vf_id]);

	if (enable == vf->spoofchk)
		goto out;

	vf->spoofchk = enable;
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
			ret);
		ret = -EIO;
	}
out:
	return ret;
}
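
/* Note on the spoof-check update above: ctxt.info is zeroed first and
 * only the security section is marked valid, so when @enable is false
 * the section is written back with both the MAC and VLAN anti-spoof
 * flags cleared, while all other VSI properties are left untouched.
 */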