1 /******************************************************************************* 2 * 3 * Intel Ethernet Controller XL710 Family Linux Driver 4 * Copyright(c) 2013 - 2014 Intel Corporation. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms and conditions of the GNU General Public License, 8 * version 2, as published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 * more details. 14 * 15 * You should have received a copy of the GNU General Public License along 16 * with this program. If not, see <http://www.gnu.org/licenses/>. 17 * 18 * The full GNU General Public License is included in this distribution in 19 * the file called "COPYING". 20 * 21 * Contact Information: 22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 * 25 ******************************************************************************/ 26 27 #include "i40e.h" 28 29 /***********************misc routines*****************************/ 30 31 /** 32 * i40e_vc_disable_vf 33 * @pf: pointer to the pf info 34 * @vf: pointer to the vf info 35 * 36 * Disable the VF through a SW reset 37 **/ 38 static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf) 39 { 40 struct i40e_hw *hw = &pf->hw; 41 u32 reg; 42 43 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); 44 reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; 45 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 46 i40e_flush(hw); 47 } 48 49 /** 50 * i40e_vc_isvalid_vsi_id 51 * @vf: pointer to the vf info 52 * @vsi_id: vf relative vsi id 53 * 54 * check for the valid vsi id 55 **/ 56 static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id) 57 { 58 struct i40e_pf *pf = vf->pf; 59 60 return pf->vsi[vsi_id]->vf_id == vf->vf_id; 61 } 62 63 /** 64 * i40e_vc_isvalid_queue_id 65 * @vf: pointer to the vf info 66 * @vsi_id: vsi id 67 * @qid: vsi relative queue id 68 * 69 * check for the valid queue id 70 **/ 71 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id, 72 u8 qid) 73 { 74 struct i40e_pf *pf = vf->pf; 75 76 return qid < pf->vsi[vsi_id]->alloc_queue_pairs; 77 } 78 79 /** 80 * i40e_vc_isvalid_vector_id 81 * @vf: pointer to the vf info 82 * @vector_id: vf relative vector id 83 * 84 * check for the valid vector id 85 **/ 86 static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id) 87 { 88 struct i40e_pf *pf = vf->pf; 89 90 return vector_id < pf->hw.func_caps.num_msix_vectors_vf; 91 } 92 93 /***********************vf resource mgmt routines*****************/ 94 95 /** 96 * i40e_vc_get_pf_queue_id 97 * @vf: pointer to the vf info 98 * @vsi_idx: index of VSI in PF struct 99 * @vsi_queue_id: vsi relative queue id 100 * 101 * return pf relative queue id 102 **/ 103 static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx, 104 u8 vsi_queue_id) 105 { 106 struct i40e_pf *pf = vf->pf; 107 struct i40e_vsi *vsi = pf->vsi[vsi_idx]; 108 u16 pf_queue_id = I40E_QUEUE_END_OF_LIST; 109 110 if (le16_to_cpu(vsi->info.mapping_flags) & 111 I40E_AQ_VSI_QUE_MAP_NONCONTIG) 112 pf_queue_id = 113 le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]); 114 else 115 pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) + 116 vsi_queue_id; 117 118 return pf_queue_id; 119 } 120 121 /** 122 * i40e_config_irq_link_list 123 * @vf: pointer to the vf info 124 * @vsi_idx: index of VSI in PF struct 125 * @vecmap: irq map info 126 * 127 * configure irq link list from the map 128 **/ 129 static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx, 130 struct i40e_virtchnl_vector_map *vecmap) 131 { 132 unsigned long linklistmap = 0, tempmap; 133 struct i40e_pf *pf = vf->pf; 134 struct i40e_hw *hw = &pf->hw; 135 u16 vsi_queue_id, pf_queue_id; 136 enum i40e_queue_type qtype; 137 u16 next_q, vector_id; 138 u32 reg, reg_idx; 139 u16 itr_idx = 0; 140 141 vector_id = vecmap->vector_id; 142 /* setup the head */ 143 if (0 == vector_id) 144 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); 145 else 146 reg_idx = I40E_VPINT_LNKLSTN( 147 ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) + 148 (vector_id - 1)); 149 150 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) { 151 /* Special case - No queues mapped on this vector */ 152 wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK); 153 goto irq_list_done; 154 } 155 tempmap = vecmap->rxq_map; 156 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 157 linklistmap |= (1 << 158 (I40E_VIRTCHNL_SUPPORTED_QTYPES * 159 vsi_queue_id)); 160 } 161 162 tempmap = vecmap->txq_map; 163 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 164 linklistmap |= (1 << 165 (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id 166 + 1)); 167 } 168 169 next_q = find_first_bit(&linklistmap, 170 (I40E_MAX_VSI_QP * 171 I40E_VIRTCHNL_SUPPORTED_QTYPES)); 172 vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES; 173 qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES; 174 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id); 175 reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id); 176 177 wr32(hw, reg_idx, reg); 178 179 while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) { 180 switch (qtype) { 181 case I40E_QUEUE_TYPE_RX: 182 reg_idx = I40E_QINT_RQCTL(pf_queue_id); 183 itr_idx = vecmap->rxitr_idx; 184 break; 185 case I40E_QUEUE_TYPE_TX: 186 reg_idx = I40E_QINT_TQCTL(pf_queue_id); 187 itr_idx = vecmap->txitr_idx; 188 break; 189 default: 190 break; 191 } 192 193 next_q = find_next_bit(&linklistmap, 194 (I40E_MAX_VSI_QP * 195 I40E_VIRTCHNL_SUPPORTED_QTYPES), 196 next_q + 1); 197 if (next_q < 198 (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) { 199 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; 200 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; 201 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, 202 vsi_queue_id); 203 } else { 204 pf_queue_id = I40E_QUEUE_END_OF_LIST; 205 qtype = 0; 206 } 207 208 /* format for the RQCTL & TQCTL regs is same */ 209 reg = (vector_id) | 210 (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | 211 (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | 212 (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | 213 (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT); 214 wr32(hw, reg_idx, reg); 215 } 216 217 irq_list_done: 218 i40e_flush(hw); 219 } 220 221 /** 222 * i40e_config_vsi_tx_queue 223 * @vf: pointer to the vf info 224 * @vsi_idx: index of VSI in PF struct 225 * @vsi_queue_id: vsi relative queue index 226 * @info: config. info 227 * 228 * configure tx queue 229 **/ 230 static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx, 231 u16 vsi_queue_id, 232 struct i40e_virtchnl_txq_info *info) 233 { 234 struct i40e_pf *pf = vf->pf; 235 struct i40e_hw *hw = &pf->hw; 236 struct i40e_hmc_obj_txq tx_ctx; 237 u16 pf_queue_id; 238 u32 qtx_ctl; 239 int ret = 0; 240 241 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id); 242 243 /* clear the context structure first */ 244 memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq)); 245 246 /* only set the required fields */ 247 tx_ctx.base = info->dma_ring_addr / 128; 248 tx_ctx.qlen = info->ring_len; 249 tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]); 250 tx_ctx.rdylist_act = 0; 251 tx_ctx.head_wb_ena = info->headwb_enabled; 252 tx_ctx.head_wb_addr = info->dma_headwb_addr; 253 254 /* clear the context in the HMC */ 255 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); 256 if (ret) { 257 dev_err(&pf->pdev->dev, 258 "Failed to clear VF LAN Tx queue context %d, error: %d\n", 259 pf_queue_id, ret); 260 ret = -ENOENT; 261 goto error_context; 262 } 263 264 /* set the context in the HMC */ 265 ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx); 266 if (ret) { 267 dev_err(&pf->pdev->dev, 268 "Failed to set VF LAN Tx queue context %d error: %d\n", 269 pf_queue_id, ret); 270 ret = -ENOENT; 271 goto error_context; 272 } 273 274 /* associate this queue with the PCI VF function */ 275 qtx_ctl = I40E_QTX_CTL_VF_QUEUE; 276 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) 277 & I40E_QTX_CTL_PF_INDX_MASK); 278 qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id) 279 << I40E_QTX_CTL_VFVM_INDX_SHIFT) 280 & I40E_QTX_CTL_VFVM_INDX_MASK); 281 wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl); 282 i40e_flush(hw); 283 284 error_context: 285 return ret; 286 } 287 288 /** 289 * i40e_config_vsi_rx_queue 290 * @vf: pointer to the vf info 291 * @vsi_idx: index of VSI in PF struct 292 * @vsi_queue_id: vsi relative queue index 293 * @info: config. info 294 * 295 * configure rx queue 296 **/ 297 static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx, 298 u16 vsi_queue_id, 299 struct i40e_virtchnl_rxq_info *info) 300 { 301 struct i40e_pf *pf = vf->pf; 302 struct i40e_hw *hw = &pf->hw; 303 struct i40e_hmc_obj_rxq rx_ctx; 304 u16 pf_queue_id; 305 int ret = 0; 306 307 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id); 308 309 /* clear the context structure first */ 310 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); 311 312 /* only set the required fields */ 313 rx_ctx.base = info->dma_ring_addr / 128; 314 rx_ctx.qlen = info->ring_len; 315 316 if (info->splithdr_enabled) { 317 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | 318 I40E_RX_SPLIT_IP | 319 I40E_RX_SPLIT_TCP_UDP | 320 I40E_RX_SPLIT_SCTP; 321 /* header length validation */ 322 if (info->hdr_size > ((2 * 1024) - 64)) { 323 ret = -EINVAL; 324 goto error_param; 325 } 326 rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; 327 328 /* set splitalways mode 10b */ 329 rx_ctx.dtype = 0x2; 330 } 331 332 /* databuffer length validation */ 333 if (info->databuffer_size > ((16 * 1024) - 128)) { 334 ret = -EINVAL; 335 goto error_param; 336 } 337 rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT; 338 339 /* max pkt. length validation */ 340 if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) { 341 ret = -EINVAL; 342 goto error_param; 343 } 344 rx_ctx.rxmax = info->max_pkt_size; 345 346 /* enable 32bytes desc always */ 347 rx_ctx.dsize = 1; 348 349 /* default values */ 350 rx_ctx.lrxqthresh = 2; 351 rx_ctx.crcstrip = 1; 352 rx_ctx.prefena = 1; 353 rx_ctx.l2tsel = 1; 354 355 /* clear the context in the HMC */ 356 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id); 357 if (ret) { 358 dev_err(&pf->pdev->dev, 359 "Failed to clear VF LAN Rx queue context %d, error: %d\n", 360 pf_queue_id, ret); 361 ret = -ENOENT; 362 goto error_param; 363 } 364 365 /* set the context in the HMC */ 366 ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx); 367 if (ret) { 368 dev_err(&pf->pdev->dev, 369 "Failed to set VF LAN Rx queue context %d error: %d\n", 370 pf_queue_id, ret); 371 ret = -ENOENT; 372 goto error_param; 373 } 374 375 error_param: 376 return ret; 377 } 378 379 /** 380 * i40e_alloc_vsi_res 381 * @vf: pointer to the vf info 382 * @type: type of VSI to allocate 383 * 384 * alloc vf vsi context & resources 385 **/ 386 static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) 387 { 388 struct i40e_mac_filter *f = NULL; 389 struct i40e_pf *pf = vf->pf; 390 struct i40e_vsi *vsi; 391 int ret = 0; 392 393 vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id); 394 395 if (!vsi) { 396 dev_err(&pf->pdev->dev, 397 "add vsi failed for vf %d, aq_err %d\n", 398 vf->vf_id, pf->hw.aq.asq_last_status); 399 ret = -ENOENT; 400 goto error_alloc_vsi_res; 401 } 402 if (type == I40E_VSI_SRIOV) { 403 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 404 vf->lan_vsi_index = vsi->idx; 405 vf->lan_vsi_id = vsi->id; 406 dev_info(&pf->pdev->dev, 407 "VF %d assigned LAN VSI index %d, VSI id %d\n", 408 vf->vf_id, vsi->idx, vsi->id); 409 /* If the port VLAN has been configured and then the 410 * VF driver was removed then the VSI port VLAN 411 * configuration was destroyed. Check if there is 412 * a port VLAN and restore the VSI configuration if 413 * needed. 414 */ 415 if (vf->port_vlan_id) 416 i40e_vsi_add_pvid(vsi, vf->port_vlan_id); 417 f = i40e_add_filter(vsi, vf->default_lan_addr.addr, 418 vf->port_vlan_id, true, false); 419 if (!f) 420 dev_info(&pf->pdev->dev, 421 "Could not allocate VF MAC addr\n"); 422 f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id, 423 true, false); 424 if (!f) 425 dev_info(&pf->pdev->dev, 426 "Could not allocate VF broadcast filter\n"); 427 } 428 429 /* program mac filter */ 430 ret = i40e_sync_vsi_filters(vsi); 431 if (ret) 432 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 433 434 /* Set VF bandwidth if specified */ 435 if (vf->tx_rate) { 436 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, 437 vf->tx_rate / 50, 0, NULL); 438 if (ret) 439 dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n", 440 vf->vf_id, ret); 441 } 442 443 error_alloc_vsi_res: 444 return ret; 445 } 446 447 /** 448 * i40e_enable_vf_mappings 449 * @vf: pointer to the vf info 450 * 451 * enable vf mappings 452 **/ 453 static void i40e_enable_vf_mappings(struct i40e_vf *vf) 454 { 455 struct i40e_pf *pf = vf->pf; 456 struct i40e_hw *hw = &pf->hw; 457 u32 reg, total_queue_pairs = 0; 458 int j; 459 460 /* Tell the hardware we're using noncontiguous mapping. HW requires 461 * that VF queues be mapped using this method, even when they are 462 * contiguous in real life 463 */ 464 wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id), 465 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); 466 467 /* enable VF vplan_qtable mappings */ 468 reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK; 469 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg); 470 471 /* map PF queues to VF queues */ 472 for (j = 0; j < pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; j++) { 473 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j); 474 reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); 475 wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg); 476 total_queue_pairs++; 477 } 478 479 /* map PF queues to VSI */ 480 for (j = 0; j < 7; j++) { 481 if (j * 2 >= pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs) { 482 reg = 0x07FF07FF; /* unused */ 483 } else { 484 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, 485 j * 2); 486 reg = qid; 487 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, 488 (j * 2) + 1); 489 reg |= qid << 16; 490 } 491 wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg); 492 } 493 494 i40e_flush(hw); 495 } 496 497 /** 498 * i40e_disable_vf_mappings 499 * @vf: pointer to the vf info 500 * 501 * disable vf mappings 502 **/ 503 static void i40e_disable_vf_mappings(struct i40e_vf *vf) 504 { 505 struct i40e_pf *pf = vf->pf; 506 struct i40e_hw *hw = &pf->hw; 507 int i; 508 509 /* disable qp mappings */ 510 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0); 511 for (i = 0; i < I40E_MAX_VSI_QP; i++) 512 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id), 513 I40E_QUEUE_END_OF_LIST); 514 i40e_flush(hw); 515 } 516 517 /** 518 * i40e_free_vf_res 519 * @vf: pointer to the vf info 520 * 521 * free vf resources 522 **/ 523 static void i40e_free_vf_res(struct i40e_vf *vf) 524 { 525 struct i40e_pf *pf = vf->pf; 526 struct i40e_hw *hw = &pf->hw; 527 u32 reg_idx, reg; 528 int i, msix_vf; 529 530 /* free vsi & disconnect it from the parent uplink */ 531 if (vf->lan_vsi_index) { 532 i40e_vsi_release(pf->vsi[vf->lan_vsi_index]); 533 vf->lan_vsi_index = 0; 534 vf->lan_vsi_id = 0; 535 } 536 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 537 538 /* disable interrupts so the VF starts in a known state */ 539 for (i = 0; i < msix_vf; i++) { 540 /* format is same for both registers */ 541 if (0 == i) 542 reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id); 543 else 544 reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) * 545 (vf->vf_id)) 546 + (i - 1)); 547 wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); 548 i40e_flush(hw); 549 } 550 551 /* clear the irq settings */ 552 for (i = 0; i < msix_vf; i++) { 553 /* format is same for both registers */ 554 if (0 == i) 555 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); 556 else 557 reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) * 558 (vf->vf_id)) 559 + (i - 1)); 560 reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK | 561 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); 562 wr32(hw, reg_idx, reg); 563 i40e_flush(hw); 564 } 565 /* reset some of the state varibles keeping 566 * track of the resources 567 */ 568 vf->num_queue_pairs = 0; 569 vf->vf_states = 0; 570 } 571 572 /** 573 * i40e_alloc_vf_res 574 * @vf: pointer to the vf info 575 * 576 * allocate vf resources 577 **/ 578 static int i40e_alloc_vf_res(struct i40e_vf *vf) 579 { 580 struct i40e_pf *pf = vf->pf; 581 int total_queue_pairs = 0; 582 int ret; 583 584 /* allocate hw vsi context & associated resources */ 585 ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV); 586 if (ret) 587 goto error_alloc; 588 total_queue_pairs += pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; 589 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 590 591 /* store the total qps number for the runtime 592 * vf req validation 593 */ 594 vf->num_queue_pairs = total_queue_pairs; 595 596 /* vf is now completely initialized */ 597 set_bit(I40E_VF_STAT_INIT, &vf->vf_states); 598 599 error_alloc: 600 if (ret) 601 i40e_free_vf_res(vf); 602 603 return ret; 604 } 605 606 #define VF_DEVICE_STATUS 0xAA 607 #define VF_TRANS_PENDING_MASK 0x20 608 /** 609 * i40e_quiesce_vf_pci 610 * @vf: pointer to the vf structure 611 * 612 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO 613 * if the transactions never clear. 614 **/ 615 static int i40e_quiesce_vf_pci(struct i40e_vf *vf) 616 { 617 struct i40e_pf *pf = vf->pf; 618 struct i40e_hw *hw = &pf->hw; 619 int vf_abs_id, i; 620 u32 reg; 621 622 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; 623 624 wr32(hw, I40E_PF_PCI_CIAA, 625 VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); 626 for (i = 0; i < 100; i++) { 627 reg = rd32(hw, I40E_PF_PCI_CIAD); 628 if ((reg & VF_TRANS_PENDING_MASK) == 0) 629 return 0; 630 udelay(1); 631 } 632 return -EIO; 633 } 634 635 /** 636 * i40e_reset_vf 637 * @vf: pointer to the vf structure 638 * @flr: VFLR was issued or not 639 * 640 * reset the vf 641 **/ 642 void i40e_reset_vf(struct i40e_vf *vf, bool flr) 643 { 644 struct i40e_pf *pf = vf->pf; 645 struct i40e_hw *hw = &pf->hw; 646 bool rsd = false; 647 int i; 648 u32 reg; 649 650 /* warn the VF */ 651 clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); 652 653 /* In the case of a VFLR, the HW has already reset the VF and we 654 * just need to clean up, so don't hit the VFRTRIG register. 655 */ 656 if (!flr) { 657 /* reset vf using VPGEN_VFRTRIG reg */ 658 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); 659 reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; 660 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 661 i40e_flush(hw); 662 } 663 664 if (i40e_quiesce_vf_pci(vf)) 665 dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", 666 vf->vf_id); 667 668 /* poll VPGEN_VFRSTAT reg to make sure 669 * that reset is complete 670 */ 671 for (i = 0; i < 100; i++) { 672 /* vf reset requires driver to first reset the 673 * vf and then poll the status register to make sure 674 * that the requested op was completed 675 * successfully 676 */ 677 usleep_range(10, 20); 678 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); 679 if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { 680 rsd = true; 681 break; 682 } 683 } 684 685 if (!rsd) 686 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", 687 vf->vf_id); 688 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED); 689 /* clear the reset bit in the VPGEN_VFRTRIG reg */ 690 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); 691 reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; 692 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 693 694 /* On initial reset, we won't have any queues */ 695 if (vf->lan_vsi_index == 0) 696 goto complete_reset; 697 698 i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false); 699 complete_reset: 700 /* reallocate vf resources to reset the VSI state */ 701 i40e_free_vf_res(vf); 702 i40e_alloc_vf_res(vf); 703 i40e_enable_vf_mappings(vf); 704 set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); 705 706 /* tell the VF the reset is done */ 707 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); 708 i40e_flush(hw); 709 } 710 711 /** 712 * i40e_enable_pf_switch_lb 713 * @pf: pointer to the pf structure 714 * 715 * enable switch loop back or die - no point in a return value 716 **/ 717 void i40e_enable_pf_switch_lb(struct i40e_pf *pf) 718 { 719 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 720 struct i40e_vsi_context ctxt; 721 int aq_ret; 722 723 ctxt.seid = pf->main_vsi_seid; 724 ctxt.pf_num = pf->hw.pf_id; 725 ctxt.vf_num = 0; 726 aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 727 if (aq_ret) { 728 dev_info(&pf->pdev->dev, 729 "%s couldn't get pf vsi config, err %d, aq_err %d\n", 730 __func__, aq_ret, pf->hw.aq.asq_last_status); 731 return; 732 } 733 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 734 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 735 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 736 737 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 738 if (aq_ret) { 739 dev_info(&pf->pdev->dev, 740 "%s: update vsi switch failed, aq_err=%d\n", 741 __func__, vsi->back->hw.aq.asq_last_status); 742 } 743 } 744 745 /** 746 * i40e_disable_pf_switch_lb 747 * @pf: pointer to the pf structure 748 * 749 * disable switch loop back or die - no point in a return value 750 **/ 751 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) 752 { 753 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 754 struct i40e_vsi_context ctxt; 755 int aq_ret; 756 757 ctxt.seid = pf->main_vsi_seid; 758 ctxt.pf_num = pf->hw.pf_id; 759 ctxt.vf_num = 0; 760 aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 761 if (aq_ret) { 762 dev_info(&pf->pdev->dev, 763 "%s couldn't get pf vsi config, err %d, aq_err %d\n", 764 __func__, aq_ret, pf->hw.aq.asq_last_status); 765 return; 766 } 767 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 768 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 769 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 770 771 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 772 if (aq_ret) { 773 dev_info(&pf->pdev->dev, 774 "%s: update vsi switch failed, aq_err=%d\n", 775 __func__, vsi->back->hw.aq.asq_last_status); 776 } 777 } 778 779 /** 780 * i40e_free_vfs 781 * @pf: pointer to the pf structure 782 * 783 * free vf resources 784 **/ 785 void i40e_free_vfs(struct i40e_pf *pf) 786 { 787 struct i40e_hw *hw = &pf->hw; 788 u32 reg_idx, bit_idx; 789 int i, tmp, vf_id; 790 791 if (!pf->vf) 792 return; 793 794 /* Disable interrupt 0 so we don't try to handle the VFLR. */ 795 i40e_irq_dynamic_disable_icr0(pf); 796 797 mdelay(10); /* let any messages in transit get finished up */ 798 /* free up vf resources */ 799 tmp = pf->num_alloc_vfs; 800 pf->num_alloc_vfs = 0; 801 for (i = 0; i < tmp; i++) { 802 if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) 803 i40e_free_vf_res(&pf->vf[i]); 804 /* disable qp mappings */ 805 i40e_disable_vf_mappings(&pf->vf[i]); 806 } 807 808 kfree(pf->vf); 809 pf->vf = NULL; 810 811 /* This check is for when the driver is unloaded while VFs are 812 * assigned. Setting the number of VFs to 0 through sysfs is caught 813 * before this function ever gets called. 814 */ 815 if (!pci_vfs_assigned(pf->pdev)) { 816 pci_disable_sriov(pf->pdev); 817 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to 818 * work correctly when SR-IOV gets re-enabled. 819 */ 820 for (vf_id = 0; vf_id < tmp; vf_id++) { 821 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; 822 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 823 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); 824 } 825 i40e_disable_pf_switch_lb(pf); 826 } else { 827 dev_warn(&pf->pdev->dev, 828 "unable to disable SR-IOV because VFs are assigned.\n"); 829 } 830 831 /* Re-enable interrupt 0. */ 832 i40e_irq_dynamic_enable_icr0(pf); 833 } 834 835 #ifdef CONFIG_PCI_IOV 836 /** 837 * i40e_alloc_vfs 838 * @pf: pointer to the pf structure 839 * @num_alloc_vfs: number of vfs to allocate 840 * 841 * allocate vf resources 842 **/ 843 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) 844 { 845 struct i40e_vf *vfs; 846 int i, ret = 0; 847 848 /* Disable interrupt 0 so we don't try to handle the VFLR. */ 849 i40e_irq_dynamic_disable_icr0(pf); 850 851 /* Check to see if we're just allocating resources for extant VFs */ 852 if (pci_num_vf(pf->pdev) != num_alloc_vfs) { 853 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); 854 if (ret) { 855 dev_err(&pf->pdev->dev, 856 "Failed to enable SR-IOV, error %d.\n", ret); 857 pf->num_alloc_vfs = 0; 858 goto err_iov; 859 } 860 } 861 /* allocate memory */ 862 vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL); 863 if (!vfs) { 864 ret = -ENOMEM; 865 goto err_alloc; 866 } 867 pf->vf = vfs; 868 869 /* apply default profile */ 870 for (i = 0; i < num_alloc_vfs; i++) { 871 vfs[i].pf = pf; 872 vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB; 873 vfs[i].vf_id = i; 874 875 /* assign default capabilities */ 876 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); 877 vfs[i].spoofchk = true; 878 /* vf resources get allocated during reset */ 879 i40e_reset_vf(&vfs[i], false); 880 881 /* enable vf vplan_qtable mappings */ 882 i40e_enable_vf_mappings(&vfs[i]); 883 } 884 pf->num_alloc_vfs = num_alloc_vfs; 885 886 i40e_enable_pf_switch_lb(pf); 887 err_alloc: 888 if (ret) 889 i40e_free_vfs(pf); 890 err_iov: 891 /* Re-enable interrupt 0. */ 892 i40e_irq_dynamic_enable_icr0(pf); 893 return ret; 894 } 895 896 #endif 897 /** 898 * i40e_pci_sriov_enable 899 * @pdev: pointer to a pci_dev structure 900 * @num_vfs: number of vfs to allocate 901 * 902 * Enable or change the number of VFs 903 **/ 904 static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) 905 { 906 #ifdef CONFIG_PCI_IOV 907 struct i40e_pf *pf = pci_get_drvdata(pdev); 908 int pre_existing_vfs = pci_num_vf(pdev); 909 int err = 0; 910 911 dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs); 912 if (pre_existing_vfs && pre_existing_vfs != num_vfs) 913 i40e_free_vfs(pf); 914 else if (pre_existing_vfs && pre_existing_vfs == num_vfs) 915 goto out; 916 917 if (num_vfs > pf->num_req_vfs) { 918 err = -EPERM; 919 goto err_out; 920 } 921 922 err = i40e_alloc_vfs(pf, num_vfs); 923 if (err) { 924 dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err); 925 goto err_out; 926 } 927 928 out: 929 return num_vfs; 930 931 err_out: 932 return err; 933 #endif 934 return 0; 935 } 936 937 /** 938 * i40e_pci_sriov_configure 939 * @pdev: pointer to a pci_dev structure 940 * @num_vfs: number of vfs to allocate 941 * 942 * Enable or change the number of VFs. Called when the user updates the number 943 * of VFs in sysfs. 944 **/ 945 int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 946 { 947 struct i40e_pf *pf = pci_get_drvdata(pdev); 948 949 if (num_vfs) 950 return i40e_pci_sriov_enable(pdev, num_vfs); 951 952 if (!pci_vfs_assigned(pf->pdev)) { 953 i40e_free_vfs(pf); 954 } else { 955 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); 956 return -EINVAL; 957 } 958 return 0; 959 } 960 961 /***********************virtual channel routines******************/ 962 963 /** 964 * i40e_vc_send_msg_to_vf 965 * @vf: pointer to the vf info 966 * @v_opcode: virtual channel opcode 967 * @v_retval: virtual channel return value 968 * @msg: pointer to the msg buffer 969 * @msglen: msg length 970 * 971 * send msg to vf 972 **/ 973 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, 974 u32 v_retval, u8 *msg, u16 msglen) 975 { 976 struct i40e_pf *pf; 977 struct i40e_hw *hw; 978 int abs_vf_id; 979 i40e_status aq_ret; 980 981 /* validate the request */ 982 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) 983 return -EINVAL; 984 985 pf = vf->pf; 986 hw = &pf->hw; 987 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 988 989 /* single place to detect unsuccessful return values */ 990 if (v_retval) { 991 vf->num_invalid_msgs++; 992 dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n", 993 v_opcode, v_retval); 994 if (vf->num_invalid_msgs > 995 I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { 996 dev_err(&pf->pdev->dev, 997 "Number of invalid messages exceeded for VF %d\n", 998 vf->vf_id); 999 dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); 1000 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); 1001 } 1002 } else { 1003 vf->num_valid_msgs++; 1004 } 1005 1006 aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, 1007 msg, msglen, NULL); 1008 if (aq_ret) { 1009 dev_err(&pf->pdev->dev, 1010 "Unable to send the message to VF %d aq_err %d\n", 1011 vf->vf_id, pf->hw.aq.asq_last_status); 1012 return -EIO; 1013 } 1014 1015 return 0; 1016 } 1017 1018 /** 1019 * i40e_vc_send_resp_to_vf 1020 * @vf: pointer to the vf info 1021 * @opcode: operation code 1022 * @retval: return value 1023 * 1024 * send resp msg to vf 1025 **/ 1026 static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, 1027 enum i40e_virtchnl_ops opcode, 1028 i40e_status retval) 1029 { 1030 return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0); 1031 } 1032 1033 /** 1034 * i40e_vc_get_version_msg 1035 * @vf: pointer to the vf info 1036 * 1037 * called from the vf to request the API version used by the PF 1038 **/ 1039 static int i40e_vc_get_version_msg(struct i40e_vf *vf) 1040 { 1041 struct i40e_virtchnl_version_info info = { 1042 I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR 1043 }; 1044 1045 return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION, 1046 I40E_SUCCESS, (u8 *)&info, 1047 sizeof(struct 1048 i40e_virtchnl_version_info)); 1049 } 1050 1051 /** 1052 * i40e_vc_get_vf_resources_msg 1053 * @vf: pointer to the vf info 1054 * @msg: pointer to the msg buffer 1055 * @msglen: msg length 1056 * 1057 * called from the vf to request its resources 1058 **/ 1059 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) 1060 { 1061 struct i40e_virtchnl_vf_resource *vfres = NULL; 1062 struct i40e_pf *pf = vf->pf; 1063 i40e_status aq_ret = 0; 1064 struct i40e_vsi *vsi; 1065 int i = 0, len = 0; 1066 int num_vsis = 1; 1067 int ret; 1068 1069 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 1070 aq_ret = I40E_ERR_PARAM; 1071 goto err; 1072 } 1073 1074 len = (sizeof(struct i40e_virtchnl_vf_resource) + 1075 sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis); 1076 1077 vfres = kzalloc(len, GFP_KERNEL); 1078 if (!vfres) { 1079 aq_ret = I40E_ERR_NO_MEMORY; 1080 len = 0; 1081 goto err; 1082 } 1083 1084 vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2; 1085 vsi = pf->vsi[vf->lan_vsi_index]; 1086 if (!vsi->info.pvid) 1087 vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN; 1088 1089 vfres->num_vsis = num_vsis; 1090 vfres->num_queue_pairs = vf->num_queue_pairs; 1091 vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; 1092 if (vf->lan_vsi_index) { 1093 vfres->vsi_res[i].vsi_id = vf->lan_vsi_index; 1094 vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV; 1095 vfres->vsi_res[i].num_queue_pairs = 1096 pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; 1097 memcpy(vfres->vsi_res[i].default_mac_addr, 1098 vf->default_lan_addr.addr, ETH_ALEN); 1099 i++; 1100 } 1101 set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); 1102 1103 err: 1104 /* send the response back to the vf */ 1105 ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, 1106 aq_ret, (u8 *)vfres, len); 1107 1108 kfree(vfres); 1109 return ret; 1110 } 1111 1112 /** 1113 * i40e_vc_reset_vf_msg 1114 * @vf: pointer to the vf info 1115 * @msg: pointer to the msg buffer 1116 * @msglen: msg length 1117 * 1118 * called from the vf to reset itself, 1119 * unlike other virtchnl messages, pf driver 1120 * doesn't send the response back to the vf 1121 **/ 1122 static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) 1123 { 1124 if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) 1125 i40e_reset_vf(vf, false); 1126 } 1127 1128 /** 1129 * i40e_vc_config_promiscuous_mode_msg 1130 * @vf: pointer to the vf info 1131 * @msg: pointer to the msg buffer 1132 * @msglen: msg length 1133 * 1134 * called from the vf to configure the promiscuous mode of 1135 * vf vsis 1136 **/ 1137 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, 1138 u8 *msg, u16 msglen) 1139 { 1140 struct i40e_virtchnl_promisc_info *info = 1141 (struct i40e_virtchnl_promisc_info *)msg; 1142 struct i40e_pf *pf = vf->pf; 1143 struct i40e_hw *hw = &pf->hw; 1144 struct i40e_vsi *vsi; 1145 bool allmulti = false; 1146 i40e_status aq_ret; 1147 1148 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 1149 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || 1150 !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) || 1151 (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) { 1152 aq_ret = I40E_ERR_PARAM; 1153 goto error_param; 1154 } 1155 vsi = pf->vsi[info->vsi_id]; 1156 if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC) 1157 allmulti = true; 1158 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, 1159 allmulti, NULL); 1160 1161 error_param: 1162 /* send the response to the vf */ 1163 return i40e_vc_send_resp_to_vf(vf, 1164 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 1165 aq_ret); 1166 } 1167 1168 /** 1169 * i40e_vc_config_queues_msg 1170 * @vf: pointer to the vf info 1171 * @msg: pointer to the msg buffer 1172 * @msglen: msg length 1173 * 1174 * called from the vf to configure the rx/tx 1175 * queues 1176 **/ 1177 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1178 { 1179 struct i40e_virtchnl_vsi_queue_config_info *qci = 1180 (struct i40e_virtchnl_vsi_queue_config_info *)msg; 1181 struct i40e_virtchnl_queue_pair_info *qpi; 1182 struct i40e_pf *pf = vf->pf; 1183 u16 vsi_id, vsi_queue_id; 1184 i40e_status aq_ret = 0; 1185 int i; 1186 1187 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { 1188 aq_ret = I40E_ERR_PARAM; 1189 goto error_param; 1190 } 1191 1192 vsi_id = qci->vsi_id; 1193 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 1194 aq_ret = I40E_ERR_PARAM; 1195 goto error_param; 1196 } 1197 for (i = 0; i < qci->num_queue_pairs; i++) { 1198 qpi = &qci->qpair[i]; 1199 vsi_queue_id = qpi->txq.queue_id; 1200 if ((qpi->txq.vsi_id != vsi_id) || 1201 (qpi->rxq.vsi_id != vsi_id) || 1202 (qpi->rxq.queue_id != vsi_queue_id) || 1203 !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) { 1204 aq_ret = I40E_ERR_PARAM; 1205 goto error_param; 1206 } 1207 1208 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id, 1209 &qpi->rxq) || 1210 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id, 1211 &qpi->txq)) { 1212 aq_ret = I40E_ERR_PARAM; 1213 goto error_param; 1214 } 1215 } 1216 /* set vsi num_queue_pairs in use to num configured by vf */ 1217 pf->vsi[vf->lan_vsi_index]->num_queue_pairs = qci->num_queue_pairs; 1218 1219 error_param: 1220 /* send the response to the vf */ 1221 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, 1222 aq_ret); 1223 } 1224 1225 /** 1226 * i40e_vc_config_irq_map_msg 1227 * @vf: pointer to the vf info 1228 * @msg: pointer to the msg buffer 1229 * @msglen: msg length 1230 * 1231 * called from the vf to configure the irq to 1232 * queue map 1233 **/ 1234 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1235 { 1236 struct i40e_virtchnl_irq_map_info *irqmap_info = 1237 (struct i40e_virtchnl_irq_map_info *)msg; 1238 struct i40e_virtchnl_vector_map *map; 1239 u16 vsi_id, vsi_queue_id, vector_id; 1240 i40e_status aq_ret = 0; 1241 unsigned long tempmap; 1242 int i; 1243 1244 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { 1245 aq_ret = I40E_ERR_PARAM; 1246 goto error_param; 1247 } 1248 1249 for (i = 0; i < irqmap_info->num_vectors; i++) { 1250 map = &irqmap_info->vecmap[i]; 1251 1252 vector_id = map->vector_id; 1253 vsi_id = map->vsi_id; 1254 /* validate msg params */ 1255 if (!i40e_vc_isvalid_vector_id(vf, vector_id) || 1256 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 1257 aq_ret = I40E_ERR_PARAM; 1258 goto error_param; 1259 } 1260 1261 /* lookout for the invalid queue index */ 1262 tempmap = map->rxq_map; 1263 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 1264 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, 1265 vsi_queue_id)) { 1266 aq_ret = I40E_ERR_PARAM; 1267 goto error_param; 1268 } 1269 } 1270 1271 tempmap = map->txq_map; 1272 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 1273 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, 1274 vsi_queue_id)) { 1275 aq_ret = I40E_ERR_PARAM; 1276 goto error_param; 1277 } 1278 } 1279 1280 i40e_config_irq_link_list(vf, vsi_id, map); 1281 } 1282 error_param: 1283 /* send the response to the vf */ 1284 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, 1285 aq_ret); 1286 } 1287 1288 /** 1289 * i40e_vc_enable_queues_msg 1290 * @vf: pointer to the vf info 1291 * @msg: pointer to the msg buffer 1292 * @msglen: msg length 1293 * 1294 * called from the vf to enable all or specific queue(s) 1295 **/ 1296 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1297 { 1298 struct i40e_virtchnl_queue_select *vqs = 1299 (struct i40e_virtchnl_queue_select *)msg; 1300 struct i40e_pf *pf = vf->pf; 1301 u16 vsi_id = vqs->vsi_id; 1302 i40e_status aq_ret = 0; 1303 1304 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { 1305 aq_ret = I40E_ERR_PARAM; 1306 goto error_param; 1307 } 1308 1309 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 1310 aq_ret = I40E_ERR_PARAM; 1311 goto error_param; 1312 } 1313 1314 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { 1315 aq_ret = I40E_ERR_PARAM; 1316 goto error_param; 1317 } 1318 if (i40e_vsi_control_rings(pf->vsi[vsi_id], true)) 1319 aq_ret = I40E_ERR_TIMEOUT; 1320 error_param: 1321 /* send the response to the vf */ 1322 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, 1323 aq_ret); 1324 } 1325 1326 /** 1327 * i40e_vc_disable_queues_msg 1328 * @vf: pointer to the vf info 1329 * @msg: pointer to the msg buffer 1330 * @msglen: msg length 1331 * 1332 * called from the vf to disable all or specific 1333 * queue(s) 1334 **/ 1335 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1336 { 1337 struct i40e_virtchnl_queue_select *vqs = 1338 (struct i40e_virtchnl_queue_select *)msg; 1339 struct i40e_pf *pf = vf->pf; 1340 u16 vsi_id = vqs->vsi_id; 1341 i40e_status aq_ret = 0; 1342 1343 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { 1344 aq_ret = I40E_ERR_PARAM; 1345 goto error_param; 1346 } 1347 1348 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 1349 aq_ret = I40E_ERR_PARAM; 1350 goto error_param; 1351 } 1352 1353 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { 1354 aq_ret = I40E_ERR_PARAM; 1355 goto error_param; 1356 } 1357 if (i40e_vsi_control_rings(pf->vsi[vsi_id], false)) 1358 aq_ret = I40E_ERR_TIMEOUT; 1359 1360 error_param: 1361 /* send the response to the vf */ 1362 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, 1363 aq_ret); 1364 } 1365 1366 /** 1367 * i40e_vc_get_stats_msg 1368 * @vf: pointer to the vf info 1369 * @msg: pointer to the msg buffer 1370 * @msglen: msg length 1371 * 1372 * called from the vf to get vsi stats 1373 **/ 1374 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1375 { 1376 struct i40e_virtchnl_queue_select *vqs = 1377 (struct i40e_virtchnl_queue_select *)msg; 1378 struct i40e_pf *pf = vf->pf; 1379 struct i40e_eth_stats stats; 1380 i40e_status aq_ret = 0; 1381 struct i40e_vsi *vsi; 1382 1383 memset(&stats, 0, sizeof(struct i40e_eth_stats)); 1384 1385 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { 1386 aq_ret = I40E_ERR_PARAM; 1387 goto error_param; 1388 } 1389 1390 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 1391 aq_ret = I40E_ERR_PARAM; 1392 goto error_param; 1393 } 1394 1395 vsi = pf->vsi[vqs->vsi_id]; 1396 if (!vsi) { 1397 aq_ret = I40E_ERR_PARAM; 1398 goto error_param; 1399 } 1400 i40e_update_eth_stats(vsi); 1401 stats = vsi->eth_stats; 1402 1403 error_param: 1404 /* send the response back to the vf */ 1405 return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret, 1406 (u8 *)&stats, sizeof(stats)); 1407 } 1408 1409 /** 1410 * i40e_check_vf_permission 1411 * @vf: pointer to the vf info 1412 * @macaddr: pointer to the MAC Address being checked 1413 * 1414 * Check if the VF has permission to add or delete unicast MAC address 1415 * filters and return error code -EPERM if not. Then check if the 1416 * address filter requested is broadcast or zero and if so return 1417 * an invalid MAC address error code. 1418 **/ 1419 static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr) 1420 { 1421 struct i40e_pf *pf = vf->pf; 1422 int ret = 0; 1423 1424 if (is_broadcast_ether_addr(macaddr) || 1425 is_zero_ether_addr(macaddr)) { 1426 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr); 1427 ret = I40E_ERR_INVALID_MAC_ADDR; 1428 } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) && 1429 !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) { 1430 /* If the host VMM administrator has set the VF MAC address 1431 * administratively via the ndo_set_vf_mac command then deny 1432 * permission to the VF to add or delete unicast MAC addresses. 1433 * The VF may request to set the MAC address filter already 1434 * assigned to it so do not return an error in that case. 1435 */ 1436 dev_err(&pf->pdev->dev, 1437 "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n"); 1438 ret = -EPERM; 1439 } 1440 return ret; 1441 } 1442 1443 /** 1444 * i40e_vc_add_mac_addr_msg 1445 * @vf: pointer to the vf info 1446 * @msg: pointer to the msg buffer 1447 * @msglen: msg length 1448 * 1449 * add guest mac address filter 1450 **/ 1451 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1452 { 1453 struct i40e_virtchnl_ether_addr_list *al = 1454 (struct i40e_virtchnl_ether_addr_list *)msg; 1455 struct i40e_pf *pf = vf->pf; 1456 struct i40e_vsi *vsi = NULL; 1457 u16 vsi_id = al->vsi_id; 1458 i40e_status ret = 0; 1459 int i; 1460 1461 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 1462 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || 1463 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 1464 ret = I40E_ERR_PARAM; 1465 goto error_param; 1466 } 1467 1468 for (i = 0; i < al->num_elements; i++) { 1469 ret = i40e_check_vf_permission(vf, al->list[i].addr); 1470 if (ret) 1471 goto error_param; 1472 } 1473 vsi = pf->vsi[vsi_id]; 1474 1475 /* add new addresses to the list */ 1476 for (i = 0; i < al->num_elements; i++) { 1477 struct i40e_mac_filter *f; 1478 1479 f = i40e_find_mac(vsi, al->list[i].addr, true, false); 1480 if (!f) { 1481 if (i40e_is_vsi_in_vlan(vsi)) 1482 f = i40e_put_mac_in_vlan(vsi, al->list[i].addr, 1483 true, false); 1484 else 1485 f = i40e_add_filter(vsi, al->list[i].addr, -1, 1486 true, false); 1487 } 1488 1489 if (!f) { 1490 dev_err(&pf->pdev->dev, 1491 "Unable to add VF MAC filter\n"); 1492 ret = I40E_ERR_PARAM; 1493 goto error_param; 1494 } 1495 } 1496 1497 /* program the updated filter list */ 1498 if (i40e_sync_vsi_filters(vsi)) 1499 dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); 1500 1501 error_param: 1502 /* send the response to the vf */ 1503 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, 1504 ret); 1505 } 1506 1507 /** 1508 * i40e_vc_del_mac_addr_msg 1509 * @vf: pointer to the vf info 1510 * @msg: pointer to the msg buffer 1511 * @msglen: msg length 1512 * 1513 * remove guest mac address filter 1514 **/ 1515 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1516 { 1517 struct i40e_virtchnl_ether_addr_list *al = 1518 (struct i40e_virtchnl_ether_addr_list *)msg; 1519 struct i40e_pf *pf = vf->pf; 1520 struct i40e_vsi *vsi = NULL; 1521 u16 vsi_id = al->vsi_id; 1522 i40e_status ret = 0; 1523 int i; 1524 1525 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 1526 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || 1527 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 1528 ret = I40E_ERR_PARAM; 1529 goto error_param; 1530 } 1531 1532 for (i = 0; i < al->num_elements; i++) { 1533 if (is_broadcast_ether_addr(al->list[i].addr) || 1534 is_zero_ether_addr(al->list[i].addr)) { 1535 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", 1536 al->list[i].addr); 1537 ret = I40E_ERR_INVALID_MAC_ADDR; 1538 goto error_param; 1539 } 1540 } 1541 vsi = pf->vsi[vsi_id]; 1542 1543 /* delete addresses from the list */ 1544 for (i = 0; i < al->num_elements; i++) 1545 i40e_del_filter(vsi, al->list[i].addr, 1546 I40E_VLAN_ANY, true, false); 1547 1548 /* program the updated filter list */ 1549 if (i40e_sync_vsi_filters(vsi)) 1550 dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); 1551 1552 error_param: 1553 /* send the response to the vf */ 1554 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, 1555 ret); 1556 } 1557 1558 /** 1559 * i40e_vc_add_vlan_msg 1560 * @vf: pointer to the vf info 1561 * @msg: pointer to the msg buffer 1562 * @msglen: msg length 1563 * 1564 * program guest vlan id 1565 **/ 1566 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1567 { 1568 struct i40e_virtchnl_vlan_filter_list *vfl = 1569 (struct i40e_virtchnl_vlan_filter_list *)msg; 1570 struct i40e_pf *pf = vf->pf; 1571 struct i40e_vsi *vsi = NULL; 1572 u16 vsi_id = vfl->vsi_id; 1573 i40e_status aq_ret = 0; 1574 int i; 1575 1576 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 1577 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || 1578 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 1579 aq_ret = I40E_ERR_PARAM; 1580 goto error_param; 1581 } 1582 1583 for (i = 0; i < vfl->num_elements; i++) { 1584 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 1585 aq_ret = I40E_ERR_PARAM; 1586 dev_err(&pf->pdev->dev, 1587 "invalid VF VLAN id %d\n", vfl->vlan_id[i]); 1588 goto error_param; 1589 } 1590 } 1591 vsi = pf->vsi[vsi_id]; 1592 if (vsi->info.pvid) { 1593 aq_ret = I40E_ERR_PARAM; 1594 goto error_param; 1595 } 1596 1597 i40e_vlan_stripping_enable(vsi); 1598 for (i = 0; i < vfl->num_elements; i++) { 1599 /* add new VLAN filter */ 1600 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); 1601 if (ret) 1602 dev_err(&pf->pdev->dev, 1603 "Unable to add VF vlan filter %d, error %d\n", 1604 vfl->vlan_id[i], ret); 1605 } 1606 1607 error_param: 1608 /* send the response to the vf */ 1609 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret); 1610 } 1611 1612 /** 1613 * i40e_vc_remove_vlan_msg 1614 * @vf: pointer to the vf info 1615 * @msg: pointer to the msg buffer 1616 * @msglen: msg length 1617 * 1618 * remove programmed guest vlan id 1619 **/ 1620 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1621 { 1622 struct i40e_virtchnl_vlan_filter_list *vfl = 1623 (struct i40e_virtchnl_vlan_filter_list *)msg; 1624 struct i40e_pf *pf = vf->pf; 1625 struct i40e_vsi *vsi = NULL; 1626 u16 vsi_id = vfl->vsi_id; 1627 i40e_status aq_ret = 0; 1628 int i; 1629 1630 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 1631 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || 1632 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 1633 aq_ret = I40E_ERR_PARAM; 1634 goto error_param; 1635 } 1636 1637 for (i = 0; i < vfl->num_elements; i++) { 1638 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 1639 aq_ret = I40E_ERR_PARAM; 1640 goto error_param; 1641 } 1642 } 1643 1644 vsi = pf->vsi[vsi_id]; 1645 if (vsi->info.pvid) { 1646 aq_ret = I40E_ERR_PARAM; 1647 goto error_param; 1648 } 1649 1650 for (i = 0; i < vfl->num_elements; i++) { 1651 int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); 1652 if (ret) 1653 dev_err(&pf->pdev->dev, 1654 "Unable to delete VF vlan filter %d, error %d\n", 1655 vfl->vlan_id[i], ret); 1656 } 1657 1658 error_param: 1659 /* send the response to the vf */ 1660 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret); 1661 } 1662 1663 /** 1664 * i40e_vc_validate_vf_msg 1665 * @vf: pointer to the vf info 1666 * @msg: pointer to the msg buffer 1667 * @msglen: msg length 1668 * @msghndl: msg handle 1669 * 1670 * validate msg 1671 **/ 1672 static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, 1673 u32 v_retval, u8 *msg, u16 msglen) 1674 { 1675 bool err_msg_format = false; 1676 int valid_len; 1677 1678 /* Check if VF is disabled. */ 1679 if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states)) 1680 return I40E_ERR_PARAM; 1681 1682 /* Validate message length. */ 1683 switch (v_opcode) { 1684 case I40E_VIRTCHNL_OP_VERSION: 1685 valid_len = sizeof(struct i40e_virtchnl_version_info); 1686 break; 1687 case I40E_VIRTCHNL_OP_RESET_VF: 1688 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: 1689 valid_len = 0; 1690 break; 1691 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: 1692 valid_len = sizeof(struct i40e_virtchnl_txq_info); 1693 break; 1694 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE: 1695 valid_len = sizeof(struct i40e_virtchnl_rxq_info); 1696 break; 1697 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: 1698 valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info); 1699 if (msglen >= valid_len) { 1700 struct i40e_virtchnl_vsi_queue_config_info *vqc = 1701 (struct i40e_virtchnl_vsi_queue_config_info *)msg; 1702 valid_len += (vqc->num_queue_pairs * 1703 sizeof(struct 1704 i40e_virtchnl_queue_pair_info)); 1705 if (vqc->num_queue_pairs == 0) 1706 err_msg_format = true; 1707 } 1708 break; 1709 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: 1710 valid_len = sizeof(struct i40e_virtchnl_irq_map_info); 1711 if (msglen >= valid_len) { 1712 struct i40e_virtchnl_irq_map_info *vimi = 1713 (struct i40e_virtchnl_irq_map_info *)msg; 1714 valid_len += (vimi->num_vectors * 1715 sizeof(struct i40e_virtchnl_vector_map)); 1716 if (vimi->num_vectors == 0) 1717 err_msg_format = true; 1718 } 1719 break; 1720 case I40E_VIRTCHNL_OP_ENABLE_QUEUES: 1721 case I40E_VIRTCHNL_OP_DISABLE_QUEUES: 1722 valid_len = sizeof(struct i40e_virtchnl_queue_select); 1723 break; 1724 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: 1725 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: 1726 valid_len = sizeof(struct i40e_virtchnl_ether_addr_list); 1727 if (msglen >= valid_len) { 1728 struct i40e_virtchnl_ether_addr_list *veal = 1729 (struct i40e_virtchnl_ether_addr_list *)msg; 1730 valid_len += veal->num_elements * 1731 sizeof(struct i40e_virtchnl_ether_addr); 1732 if (veal->num_elements == 0) 1733 err_msg_format = true; 1734 } 1735 break; 1736 case I40E_VIRTCHNL_OP_ADD_VLAN: 1737 case I40E_VIRTCHNL_OP_DEL_VLAN: 1738 valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list); 1739 if (msglen >= valid_len) { 1740 struct i40e_virtchnl_vlan_filter_list *vfl = 1741 (struct i40e_virtchnl_vlan_filter_list *)msg; 1742 valid_len += vfl->num_elements * sizeof(u16); 1743 if (vfl->num_elements == 0) 1744 err_msg_format = true; 1745 } 1746 break; 1747 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 1748 valid_len = sizeof(struct i40e_virtchnl_promisc_info); 1749 break; 1750 case I40E_VIRTCHNL_OP_GET_STATS: 1751 valid_len = sizeof(struct i40e_virtchnl_queue_select); 1752 break; 1753 /* These are always errors coming from the VF. */ 1754 case I40E_VIRTCHNL_OP_EVENT: 1755 case I40E_VIRTCHNL_OP_UNKNOWN: 1756 default: 1757 return -EPERM; 1758 break; 1759 } 1760 /* few more checks */ 1761 if ((valid_len != msglen) || (err_msg_format)) { 1762 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); 1763 return -EINVAL; 1764 } else { 1765 return 0; 1766 } 1767 } 1768 1769 /** 1770 * i40e_vc_process_vf_msg 1771 * @pf: pointer to the pf structure 1772 * @vf_id: source vf id 1773 * @msg: pointer to the msg buffer 1774 * @msglen: msg length 1775 * @msghndl: msg handle 1776 * 1777 * called from the common aeq/arq handler to 1778 * process request from vf 1779 **/ 1780 int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, 1781 u32 v_retval, u8 *msg, u16 msglen) 1782 { 1783 struct i40e_hw *hw = &pf->hw; 1784 unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id; 1785 struct i40e_vf *vf; 1786 int ret; 1787 1788 pf->vf_aq_requests++; 1789 if (local_vf_id >= pf->num_alloc_vfs) 1790 return -EINVAL; 1791 vf = &(pf->vf[local_vf_id]); 1792 /* perform basic checks on the msg */ 1793 ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen); 1794 1795 if (ret) { 1796 dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n", 1797 local_vf_id, v_opcode, msglen); 1798 return ret; 1799 } 1800 1801 switch (v_opcode) { 1802 case I40E_VIRTCHNL_OP_VERSION: 1803 ret = i40e_vc_get_version_msg(vf); 1804 break; 1805 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: 1806 ret = i40e_vc_get_vf_resources_msg(vf); 1807 break; 1808 case I40E_VIRTCHNL_OP_RESET_VF: 1809 i40e_vc_reset_vf_msg(vf); 1810 ret = 0; 1811 break; 1812 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 1813 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen); 1814 break; 1815 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: 1816 ret = i40e_vc_config_queues_msg(vf, msg, msglen); 1817 break; 1818 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: 1819 ret = i40e_vc_config_irq_map_msg(vf, msg, msglen); 1820 break; 1821 case I40E_VIRTCHNL_OP_ENABLE_QUEUES: 1822 ret = i40e_vc_enable_queues_msg(vf, msg, msglen); 1823 break; 1824 case I40E_VIRTCHNL_OP_DISABLE_QUEUES: 1825 ret = i40e_vc_disable_queues_msg(vf, msg, msglen); 1826 break; 1827 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: 1828 ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen); 1829 break; 1830 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: 1831 ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen); 1832 break; 1833 case I40E_VIRTCHNL_OP_ADD_VLAN: 1834 ret = i40e_vc_add_vlan_msg(vf, msg, msglen); 1835 break; 1836 case I40E_VIRTCHNL_OP_DEL_VLAN: 1837 ret = i40e_vc_remove_vlan_msg(vf, msg, msglen); 1838 break; 1839 case I40E_VIRTCHNL_OP_GET_STATS: 1840 ret = i40e_vc_get_stats_msg(vf, msg, msglen); 1841 break; 1842 case I40E_VIRTCHNL_OP_UNKNOWN: 1843 default: 1844 dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n", 1845 v_opcode, local_vf_id); 1846 ret = i40e_vc_send_resp_to_vf(vf, v_opcode, 1847 I40E_ERR_NOT_IMPLEMENTED); 1848 break; 1849 } 1850 1851 return ret; 1852 } 1853 1854 /** 1855 * i40e_vc_process_vflr_event 1856 * @pf: pointer to the pf structure 1857 * 1858 * called from the vlfr irq handler to 1859 * free up vf resources and state variables 1860 **/ 1861 int i40e_vc_process_vflr_event(struct i40e_pf *pf) 1862 { 1863 u32 reg, reg_idx, bit_idx, vf_id; 1864 struct i40e_hw *hw = &pf->hw; 1865 struct i40e_vf *vf; 1866 1867 if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) 1868 return 0; 1869 1870 /* re-enable vflr interrupt cause */ 1871 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 1872 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; 1873 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 1874 i40e_flush(hw); 1875 1876 clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); 1877 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { 1878 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; 1879 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 1880 /* read GLGEN_VFLRSTAT register to find out the flr vfs */ 1881 vf = &pf->vf[vf_id]; 1882 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); 1883 if (reg & (1 << bit_idx)) { 1884 /* clear the bit in GLGEN_VFLRSTAT */ 1885 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); 1886 1887 if (!test_bit(__I40E_DOWN, &pf->state)) 1888 i40e_reset_vf(vf, true); 1889 } 1890 } 1891 1892 return 0; 1893 } 1894 1895 /** 1896 * i40e_vc_vf_broadcast 1897 * @pf: pointer to the pf structure 1898 * @opcode: operation code 1899 * @retval: return value 1900 * @msg: pointer to the msg buffer 1901 * @msglen: msg length 1902 * 1903 * send a message to all VFs on a given PF 1904 **/ 1905 static void i40e_vc_vf_broadcast(struct i40e_pf *pf, 1906 enum i40e_virtchnl_ops v_opcode, 1907 i40e_status v_retval, u8 *msg, 1908 u16 msglen) 1909 { 1910 struct i40e_hw *hw = &pf->hw; 1911 struct i40e_vf *vf = pf->vf; 1912 int i; 1913 1914 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { 1915 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 1916 /* Not all vfs are enabled so skip the ones that are not */ 1917 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && 1918 !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) 1919 continue; 1920 1921 /* Ignore return value on purpose - a given VF may fail, but 1922 * we need to keep going and send to all of them 1923 */ 1924 i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, 1925 msg, msglen, NULL); 1926 } 1927 } 1928 1929 /** 1930 * i40e_vc_notify_link_state 1931 * @pf: pointer to the pf structure 1932 * 1933 * send a link status message to all VFs on a given PF 1934 **/ 1935 void i40e_vc_notify_link_state(struct i40e_pf *pf) 1936 { 1937 struct i40e_virtchnl_pf_event pfe; 1938 struct i40e_hw *hw = &pf->hw; 1939 struct i40e_vf *vf = pf->vf; 1940 struct i40e_link_status *ls = &pf->hw.phy.link_info; 1941 int i; 1942 1943 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; 1944 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; 1945 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { 1946 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 1947 if (vf->link_forced) { 1948 pfe.event_data.link_event.link_status = vf->link_up; 1949 pfe.event_data.link_event.link_speed = 1950 (vf->link_up ? I40E_LINK_SPEED_40GB : 0); 1951 } else { 1952 pfe.event_data.link_event.link_status = 1953 ls->link_info & I40E_AQ_LINK_UP; 1954 pfe.event_data.link_event.link_speed = ls->link_speed; 1955 } 1956 i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, 1957 0, (u8 *)&pfe, sizeof(pfe), 1958 NULL); 1959 } 1960 } 1961 1962 /** 1963 * i40e_vc_notify_reset 1964 * @pf: pointer to the pf structure 1965 * 1966 * indicate a pending reset to all VFs on a given PF 1967 **/ 1968 void i40e_vc_notify_reset(struct i40e_pf *pf) 1969 { 1970 struct i40e_virtchnl_pf_event pfe; 1971 1972 pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; 1973 pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; 1974 i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS, 1975 (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event)); 1976 } 1977 1978 /** 1979 * i40e_vc_notify_vf_reset 1980 * @vf: pointer to the vf structure 1981 * 1982 * indicate a pending reset to the given VF 1983 **/ 1984 void i40e_vc_notify_vf_reset(struct i40e_vf *vf) 1985 { 1986 struct i40e_virtchnl_pf_event pfe; 1987 int abs_vf_id; 1988 1989 /* validate the request */ 1990 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) 1991 return; 1992 1993 /* verify if the VF is in either init or active before proceeding */ 1994 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && 1995 !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) 1996 return; 1997 1998 abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id; 1999 2000 pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; 2001 pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; 2002 i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, 2003 I40E_SUCCESS, (u8 *)&pfe, 2004 sizeof(struct i40e_virtchnl_pf_event), NULL); 2005 } 2006 2007 /** 2008 * i40e_ndo_set_vf_mac 2009 * @netdev: network interface device structure 2010 * @vf_id: vf identifier 2011 * @mac: mac address 2012 * 2013 * program vf mac address 2014 **/ 2015 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 2016 { 2017 struct i40e_netdev_priv *np = netdev_priv(netdev); 2018 struct i40e_vsi *vsi = np->vsi; 2019 struct i40e_pf *pf = vsi->back; 2020 struct i40e_mac_filter *f; 2021 struct i40e_vf *vf; 2022 int ret = 0; 2023 2024 /* validate the request */ 2025 if (vf_id >= pf->num_alloc_vfs) { 2026 dev_err(&pf->pdev->dev, 2027 "Invalid VF Identifier %d\n", vf_id); 2028 ret = -EINVAL; 2029 goto error_param; 2030 } 2031 2032 vf = &(pf->vf[vf_id]); 2033 vsi = pf->vsi[vf->lan_vsi_index]; 2034 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 2035 dev_err(&pf->pdev->dev, 2036 "Uninitialized VF %d\n", vf_id); 2037 ret = -EINVAL; 2038 goto error_param; 2039 } 2040 2041 if (!is_valid_ether_addr(mac)) { 2042 dev_err(&pf->pdev->dev, 2043 "Invalid VF ethernet address\n"); 2044 ret = -EINVAL; 2045 goto error_param; 2046 } 2047 2048 /* delete the temporary mac address */ 2049 i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id, 2050 true, false); 2051 2052 /* Delete all the filters for this VSI - we're going to kill it 2053 * anyway. 2054 */ 2055 list_for_each_entry(f, &vsi->mac_filter_list, list) 2056 i40e_del_filter(vsi, f->macaddr, f->vlan, true, false); 2057 2058 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); 2059 /* program mac filter */ 2060 if (i40e_sync_vsi_filters(vsi)) { 2061 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 2062 ret = -EIO; 2063 goto error_param; 2064 } 2065 ether_addr_copy(vf->default_lan_addr.addr, mac); 2066 vf->pf_set_mac = true; 2067 /* Force the VF driver stop so it has to reload with new MAC address */ 2068 i40e_vc_disable_vf(pf, vf); 2069 dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); 2070 2071 error_param: 2072 return ret; 2073 } 2074 2075 /** 2076 * i40e_ndo_set_vf_port_vlan 2077 * @netdev: network interface device structure 2078 * @vf_id: vf identifier 2079 * @vlan_id: mac address 2080 * @qos: priority setting 2081 * 2082 * program vf vlan id and/or qos 2083 **/ 2084 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, 2085 int vf_id, u16 vlan_id, u8 qos) 2086 { 2087 struct i40e_netdev_priv *np = netdev_priv(netdev); 2088 struct i40e_pf *pf = np->vsi->back; 2089 struct i40e_vsi *vsi; 2090 struct i40e_vf *vf; 2091 int ret = 0; 2092 2093 /* validate the request */ 2094 if (vf_id >= pf->num_alloc_vfs) { 2095 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 2096 ret = -EINVAL; 2097 goto error_pvid; 2098 } 2099 2100 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) { 2101 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); 2102 ret = -EINVAL; 2103 goto error_pvid; 2104 } 2105 2106 vf = &(pf->vf[vf_id]); 2107 vsi = pf->vsi[vf->lan_vsi_index]; 2108 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 2109 dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); 2110 ret = -EINVAL; 2111 goto error_pvid; 2112 } 2113 2114 if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) { 2115 dev_err(&pf->pdev->dev, 2116 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", 2117 vf_id); 2118 /* Administrator Error - knock the VF offline until he does 2119 * the right thing by reconfiguring his network correctly 2120 * and then reloading the VF driver. 2121 */ 2122 i40e_vc_disable_vf(pf, vf); 2123 } 2124 2125 /* Check for condition where there was already a port VLAN ID 2126 * filter set and now it is being deleted by setting it to zero. 2127 * Additionally check for the condition where there was a port 2128 * VLAN but now there is a new and different port VLAN being set. 2129 * Before deleting all the old VLAN filters we must add new ones 2130 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our 2131 * MAC addresses deleted. 2132 */ 2133 if ((!(vlan_id || qos) || 2134 (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) && 2135 vsi->info.pvid) 2136 ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY); 2137 2138 if (vsi->info.pvid) { 2139 /* kill old VLAN */ 2140 ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) & 2141 VLAN_VID_MASK)); 2142 if (ret) { 2143 dev_info(&vsi->back->pdev->dev, 2144 "remove VLAN failed, ret=%d, aq_err=%d\n", 2145 ret, pf->hw.aq.asq_last_status); 2146 } 2147 } 2148 if (vlan_id || qos) 2149 ret = i40e_vsi_add_pvid(vsi, 2150 vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)); 2151 else 2152 i40e_vsi_remove_pvid(vsi); 2153 2154 if (vlan_id) { 2155 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", 2156 vlan_id, qos, vf_id); 2157 2158 /* add new VLAN filter */ 2159 ret = i40e_vsi_add_vlan(vsi, vlan_id); 2160 if (ret) { 2161 dev_info(&vsi->back->pdev->dev, 2162 "add VF VLAN failed, ret=%d aq_err=%d\n", ret, 2163 vsi->back->hw.aq.asq_last_status); 2164 goto error_pvid; 2165 } 2166 /* Kill non-vlan MAC filters - ignore error return since 2167 * there might not be any non-vlan MAC filters. 2168 */ 2169 i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY); 2170 } 2171 2172 if (ret) { 2173 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n"); 2174 goto error_pvid; 2175 } 2176 /* The Port VLAN needs to be saved across resets the same as the 2177 * default LAN MAC address. 2178 */ 2179 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); 2180 ret = 0; 2181 2182 error_pvid: 2183 return ret; 2184 } 2185 2186 #define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */ 2187 #define I40E_MAX_BW_INACTIVE_ACCUM 4 /* device can accumulate 4 credits max */ 2188 /** 2189 * i40e_ndo_set_vf_bw 2190 * @netdev: network interface device structure 2191 * @vf_id: vf identifier 2192 * @tx_rate: tx rate 2193 * 2194 * configure vf tx rate 2195 **/ 2196 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, 2197 int max_tx_rate) 2198 { 2199 struct i40e_netdev_priv *np = netdev_priv(netdev); 2200 struct i40e_pf *pf = np->vsi->back; 2201 struct i40e_vsi *vsi; 2202 struct i40e_vf *vf; 2203 int speed = 0; 2204 int ret = 0; 2205 2206 /* validate the request */ 2207 if (vf_id >= pf->num_alloc_vfs) { 2208 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id); 2209 ret = -EINVAL; 2210 goto error; 2211 } 2212 2213 if (min_tx_rate) { 2214 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for vf %d.\n", 2215 min_tx_rate, vf_id); 2216 return -EINVAL; 2217 } 2218 2219 vf = &(pf->vf[vf_id]); 2220 vsi = pf->vsi[vf->lan_vsi_index]; 2221 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 2222 dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id); 2223 ret = -EINVAL; 2224 goto error; 2225 } 2226 2227 switch (pf->hw.phy.link_info.link_speed) { 2228 case I40E_LINK_SPEED_40GB: 2229 speed = 40000; 2230 break; 2231 case I40E_LINK_SPEED_10GB: 2232 speed = 10000; 2233 break; 2234 case I40E_LINK_SPEED_1GB: 2235 speed = 1000; 2236 break; 2237 default: 2238 break; 2239 } 2240 2241 if (max_tx_rate > speed) { 2242 dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for vf %d.", 2243 max_tx_rate, vf->vf_id); 2244 ret = -EINVAL; 2245 goto error; 2246 } 2247 2248 if ((max_tx_rate < 50) && (max_tx_rate > 0)) { 2249 dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n"); 2250 max_tx_rate = 50; 2251 } 2252 2253 /* Tx rate credits are in values of 50Mbps, 0 is disabled*/ 2254 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, 2255 max_tx_rate / I40E_BW_CREDIT_DIVISOR, 2256 I40E_MAX_BW_INACTIVE_ACCUM, NULL); 2257 if (ret) { 2258 dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n", 2259 ret); 2260 ret = -EIO; 2261 goto error; 2262 } 2263 vf->tx_rate = max_tx_rate; 2264 error: 2265 return ret; 2266 } 2267 2268 /** 2269 * i40e_ndo_get_vf_config 2270 * @netdev: network interface device structure 2271 * @vf_id: vf identifier 2272 * @ivi: vf configuration structure 2273 * 2274 * return vf configuration 2275 **/ 2276 int i40e_ndo_get_vf_config(struct net_device *netdev, 2277 int vf_id, struct ifla_vf_info *ivi) 2278 { 2279 struct i40e_netdev_priv *np = netdev_priv(netdev); 2280 struct i40e_vsi *vsi = np->vsi; 2281 struct i40e_pf *pf = vsi->back; 2282 struct i40e_vf *vf; 2283 int ret = 0; 2284 2285 /* validate the request */ 2286 if (vf_id >= pf->num_alloc_vfs) { 2287 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 2288 ret = -EINVAL; 2289 goto error_param; 2290 } 2291 2292 vf = &(pf->vf[vf_id]); 2293 /* first vsi is always the LAN vsi */ 2294 vsi = pf->vsi[vf->lan_vsi_index]; 2295 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 2296 dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); 2297 ret = -EINVAL; 2298 goto error_param; 2299 } 2300 2301 ivi->vf = vf_id; 2302 2303 memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN); 2304 2305 ivi->max_tx_rate = vf->tx_rate; 2306 ivi->min_tx_rate = 0; 2307 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK; 2308 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >> 2309 I40E_VLAN_PRIORITY_SHIFT; 2310 if (vf->link_forced == false) 2311 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; 2312 else if (vf->link_up == true) 2313 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 2314 else 2315 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 2316 ivi->spoofchk = vf->spoofchk; 2317 ret = 0; 2318 2319 error_param: 2320 return ret; 2321 } 2322 2323 /** 2324 * i40e_ndo_set_vf_link_state 2325 * @netdev: network interface device structure 2326 * @vf_id: vf identifier 2327 * @link: required link state 2328 * 2329 * Set the link state of a specified VF, regardless of physical link state 2330 **/ 2331 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) 2332 { 2333 struct i40e_netdev_priv *np = netdev_priv(netdev); 2334 struct i40e_pf *pf = np->vsi->back; 2335 struct i40e_virtchnl_pf_event pfe; 2336 struct i40e_hw *hw = &pf->hw; 2337 struct i40e_vf *vf; 2338 int abs_vf_id; 2339 int ret = 0; 2340 2341 /* validate the request */ 2342 if (vf_id >= pf->num_alloc_vfs) { 2343 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 2344 ret = -EINVAL; 2345 goto error_out; 2346 } 2347 2348 vf = &pf->vf[vf_id]; 2349 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 2350 2351 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; 2352 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; 2353 2354 switch (link) { 2355 case IFLA_VF_LINK_STATE_AUTO: 2356 vf->link_forced = false; 2357 pfe.event_data.link_event.link_status = 2358 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; 2359 pfe.event_data.link_event.link_speed = 2360 pf->hw.phy.link_info.link_speed; 2361 break; 2362 case IFLA_VF_LINK_STATE_ENABLE: 2363 vf->link_forced = true; 2364 vf->link_up = true; 2365 pfe.event_data.link_event.link_status = true; 2366 pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB; 2367 break; 2368 case IFLA_VF_LINK_STATE_DISABLE: 2369 vf->link_forced = true; 2370 vf->link_up = false; 2371 pfe.event_data.link_event.link_status = false; 2372 pfe.event_data.link_event.link_speed = 0; 2373 break; 2374 default: 2375 ret = -EINVAL; 2376 goto error_out; 2377 } 2378 /* Notify the VF of its new link state */ 2379 i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, 2380 0, (u8 *)&pfe, sizeof(pfe), NULL); 2381 2382 error_out: 2383 return ret; 2384 } 2385 2386 /** 2387 * i40e_ndo_set_vf_spoofchk 2388 * @netdev: network interface device structure 2389 * @vf_id: vf identifier 2390 * @enable: flag to enable or disable feature 2391 * 2392 * Enable or disable VF spoof checking 2393 **/ 2394 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) 2395 { 2396 struct i40e_netdev_priv *np = netdev_priv(netdev); 2397 struct i40e_vsi *vsi = np->vsi; 2398 struct i40e_pf *pf = vsi->back; 2399 struct i40e_vsi_context ctxt; 2400 struct i40e_hw *hw = &pf->hw; 2401 struct i40e_vf *vf; 2402 int ret = 0; 2403 2404 /* validate the request */ 2405 if (vf_id >= pf->num_alloc_vfs) { 2406 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 2407 ret = -EINVAL; 2408 goto out; 2409 } 2410 2411 vf = &(pf->vf[vf_id]); 2412 2413 if (enable == vf->spoofchk) 2414 goto out; 2415 2416 vf->spoofchk = enable; 2417 memset(&ctxt, 0, sizeof(ctxt)); 2418 ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid; 2419 ctxt.pf_num = pf->hw.pf_id; 2420 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 2421 if (enable) 2422 ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK; 2423 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 2424 if (ret) { 2425 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", 2426 ret); 2427 ret = -EIO; 2428 } 2429 out: 2430 return ret; 2431 } 2432