/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e.h"

/***********************misc routines*****************************/

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the vf info
 * @vsi_id: vf relative vsi id
 *
 * check for the valid vsi id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
{
	struct i40e_pf *pf = vf->pf;

	return pf->vsi[vsi_id]->vf_id == vf->vf_id;
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the vf info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;

	return qid < pf->vsi[vsi_id]->num_queue_pairs;
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the vf info
 * @vector_id: vf relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue id
 *
 * return pf relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vsi_idx];
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}
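/* Worked example (illustrative, not from the original source): with a
 * contiguous mapping whose queue_mapping[0] is PF queue 64, VSI queue 3
 * resolves to PF queue 64 + 3 = 67; with the noncontiguous flag set,
 * each VSI queue is instead looked up individually in queue_mapping[].
 */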
/**
 * i40e_config_irq_link_list
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
				      struct i40e_virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
			((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
			(vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				 vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
				 + 1));
	}

	next_q = find_first_bit(&linklistmap,
				(I40E_MAX_VSI_QP *
				 I40E_VIRTCHNL_SUPPORTED_QTYPES));
	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap,
				       (I40E_MAX_VSI_QP *
					I40E_VIRTCHNL_SUPPORTED_QTYPES),
				       next_q + 1);
		if (next_q <
		    (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
							      vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

irq_list_done:
	i40e_flush(hw);
}
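/* Worked example (illustrative): assuming num_msix_vectors_vf = 5, each
 * VF owns four LNKLSTN slots, so for vf_id = 2 and vector_id = 3 the
 * list head above is I40E_VPINT_LNKLSTN((5 - 1) * 2 + (3 - 1)), i.e.
 * entry 10; vector 0 always uses the dedicated I40E_VPINT_LNKLST0(vf_id)
 * register instead.
 */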
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = 1;
	tx_ctx.head_wb_addr = info->dma_ring_addr +
			      (info->ring_len * sizeof(struct i40e_tx_desc));

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}
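/* Note (editorial): the Tx head write-back area configured above sits
 * immediately past the descriptor ring, so a 512-entry ring based at
 * DMA address B reports its head at
 * B + 512 * sizeof(struct i40e_tx_desc).
 */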
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
				  I40E_RX_SPLIT_IP |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set splitalways mode 10b */
		rx_ctx.dtype = 0x2;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the vf info
 * @type: type of VSI to allocate
 *
 * alloc vf vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for vf %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}
	if (type == I40E_VSI_SRIOV) {
		u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
		vf->lan_vsi_index = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		dev_info(&pf->pdev->dev,
			 "VF %d assigned LAN VSI index %d, VSI id %d\n",
			 vf->vf_id, vsi->idx, vsi->id);
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed. Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
		f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
				    vf->port_vlan_id, true, false);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF MAC addr\n");
		f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
				    true, false);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
	}

	/* program mac filter */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");

error_alloc_vsi_res:
	return ret;
}
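/* Note (editorial): I40E_VSILAN_QTABLE, programmed in
 * i40e_enable_vf_mappings() below, packs two queue ids per register
 * (low and high 16 bits); a VSI with 4 queue pairs therefore fills
 * registers 0 and 1 and leaves registers 2-6 at the 0x07FF07FF
 * "unused" pattern.
 */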
/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the vf info
 *
 * enable vf mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_queue_pairs = 0;
	int j;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	/* map PF queues to VF queues */
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
		total_queue_pairs++;
	}

	/* map PF queues to VSI */
	for (j = 0; j < 7; j++) {
		if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
			reg = 0x07FF07FF;	/* unused */
		} else {
			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
							  j * 2);
			reg = qid;
			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
						      (j * 2) + 1);
			reg |= qid << 16;
		}
		wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
	}

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the vf info
 *
 * disable vf mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the vf info
 *
 * free vf resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, msix_vf;

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_index) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
		vf->lan_vsi_index = 0;
		vf->lan_vsi_id = 0;
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						       (vf->vf_id))
						      + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping
	 * track of the resources
	 */
	vf->num_queue_pairs = 0;
	vf->vf_states = 0;
}
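/* Note (editorial): the DYN_CTLN/LNKLSTN index arithmetic used above
 * mirrors i40e_config_irq_link_list(): vector 0 lives in the per-VF
 * ..._0 register, while vectors 1..msix_vf-1 index a flat array as
 * (msix_vf - 1) * vf_id + (i - 1).
 */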
/**
 * i40e_alloc_vf_res
 * @vf: pointer to the vf info
 *
 * allocate vf resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
	set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * vf req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* vf is now completely initialized */
	set_bit(I40E_VF_STAT_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the vf structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

/**
 * i40e_reset_vf
 * @vf: pointer to the vf structure
 * @flr: VFLR was issued or not
 *
 * reset the vf
 **/
void i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	int i;
	u32 reg;

	/* warn the VF */
	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset vf using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 100; i++) {
		/* vf reset requires driver to first reset the
		 * vf & then poll the status register to make sure
		 * that the requested op was completed
		 * successfully
		 */
		udelay(10);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
	/* clear the reset bit in the VPGEN_VFRTRIG reg */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* On initial reset, we won't have any queues */
	if (vf->lan_vsi_index == 0)
		goto complete_reset;

	i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
complete_reset:
	/* reallocate vf resources to reset the VSI state */
	i40e_free_vf_res(vf);
	i40e_alloc_vf_res(vf);
	i40e_enable_vf_mappings(vf);
	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

	/* tell the VF the reset is done */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
	i40e_flush(hw);
}
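/* Note (editorial): the VPGEN_VFRSTAT poll in i40e_reset_vf() gives the
 * hardware up to 100 * 10us = 1ms to report VFRD before logging a reset
 * timeout, after the PCI quiesce loop has already allowed up to 100us
 * for pending transactions to drain.
 */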
/**
 * i40e_vfs_are_assigned
 * @pf: pointer to the pf structure
 *
 * Determine if any VFs are assigned to VMs
 **/
static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
{
	struct pci_dev *pdev = pf->pdev;
	struct pci_dev *vfdev;

	/* loop through all the VFs to see if we own any that are assigned */
	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF, NULL);
	while (vfdev) {
		/* if we don't own it we don't care */
		if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
			/* if it is assigned we cannot release it */
			if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				return true;
		}

		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				       I40E_DEV_ID_VF,
				       vfdev);
	}

	return false;
}
#ifdef CONFIG_PCI_IOV

/**
 * i40e_enable_pf_switch_lb
 * @pf: pointer to the pf structure
 *
 * enable switch loop back or die - no point in a return value
 **/
static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int aq_ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "%s couldn't get pf vsi config, err %d, aq_err %d\n",
			 __func__, aq_ret, pf->hw.aq.asq_last_status);
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "%s: update vsi switch failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}
#endif

/**
 * i40e_disable_pf_switch_lb
 * @pf: pointer to the pf structure
 *
 * disable switch loop back or die - no point in a return value
 **/
static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int aq_ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "%s couldn't get pf vsi config, err %d, aq_err %d\n",
			 __func__, aq_ret, pf->hw.aq.asq_last_status);
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "%s: update vsi switch failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}
/**
 * i40e_free_vfs
 * @pf: pointer to the pf structure
 *
 * free vf resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	mdelay(10); /* let any messages in transit get finished up */
	/* free up vf resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	if (!i40e_vfs_are_assigned(pf)) {
		pci_disable_sriov(pf->pdev);
		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
		}
		i40e_disable_pf_switch_lb(pf);
	} else {
		dev_warn(&pf->pdev->dev,
			 "unable to disable SR-IOV because VFs are assigned.\n");
	}

	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
}
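/* Worked example (illustrative): assuming vf_base_id = 64, VF 3 maps to
 * GLGEN_VFLRSTAT register (64 + 3) / 32 = 2, bit (64 + 3) % 32 = 3;
 * writing that bit in i40e_free_vfs() acknowledges the VFLR so SR-IOV
 * can later be re-enabled cleanly.
 */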
#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the pf structure
 * @num_alloc_vfs: number of vfs to allocate
 *
 * allocate vf resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			dev_err(&pf->pdev->dev,
				"Failed to enable SR-IOV, error %d.\n", ret);
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		/* vf resources get allocated during reset */
		i40e_reset_vf(&vfs[i], false);

		/* enable vf vplan_qtable mappings */
		i40e_enable_vf_mappings(&vfs[i]);
	}
	pf->vf = vfs;
	pf->num_alloc_vfs = num_alloc_vfs;

	i40e_enable_pf_switch_lb(pf);
err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		err = -EPERM;
		goto err_out;
	}

	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs)
		return i40e_pci_sriov_enable(pdev, num_vfs);

	i40e_free_vfs(pf);
	return 0;
}

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the vf info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to vf
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int true_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
	i40e_status aq_ret;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
			v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, true_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_err(&pf->pdev->dev,
			"Unable to send the message to VF %d aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the vf info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to vf
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum i40e_virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}
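/* Note (editorial): every failed reply funnels through
 * i40e_vc_send_msg_to_vf() above, which counts it in num_invalid_msgs;
 * once the count exceeds I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED the VF is
 * marked I40E_VF_STAT_DISABLED and must be re-enabled from the PF side.
 */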
/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_version_info info = {
		I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
	};

	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct
					     i40e_virtchnl_version_info));
}

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int i = 0, len = 0;
	int num_vsis = 1;
	int ret;

	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct i40e_virtchnl_vf_resource) +
	       sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}

	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!vsi->info.pvid)
		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	if (vf->lan_vsi_index) {
		vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
		vfres->vsi_res[i].num_queue_pairs =
			pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
		memcpy(vfres->vsi_res[i].default_mac_addr,
		       vf->default_lan_addr.addr, ETH_ALEN);
		i++;
	}
	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the vf */
	ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to reset itself,
 * unlike other virtchnl messages, pf driver
 * doesn't send the response back to the vf
 **/
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
		i40e_reset_vf(vf, false);
}
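/* Note (editorial): the GET_VF_RESOURCES reply built above is a
 * variable-length blob - one i40e_virtchnl_vf_resource header followed
 * by num_vsis i40e_virtchnl_vsi_resource entries - which is why len is
 * computed before the kzalloc() and sent along with the buffer.
 */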
/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the promiscuous mode of
 * vf vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_promisc_info *info =
		(struct i40e_virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool allmulti = false;
	bool promisc = false;
	i40e_status aq_ret;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
		promisc = true;
	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
						     promisc, NULL);
	if (aq_ret)
		goto error_param;

	if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
						       allmulti, NULL);

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf,
				       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}

/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the rx/tx
 * queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vsi_queue_config_info *qci =
		(struct i40e_virtchnl_vsi_queue_config_info *)msg;
	struct i40e_virtchnl_queue_pair_info *qpi;
	u16 vsi_id, vsi_queue_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi_id = qci->vsi_id;
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		vsi_queue_id = qpi->txq.queue_id;
		if ((qpi->txq.vsi_id != vsi_id) ||
		    (qpi->rxq.vsi_id != vsi_id) ||
		    (qpi->rxq.queue_id != vsi_queue_id) ||
		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}
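/* Note (editorial): CONFIG_VSI_QUEUES is accepted above only when each
 * pair's txq and rxq agree on both vsi_id and queue_id, so a VF cannot
 * point a Tx context at one queue and the matching Rx context at
 * another.
 */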
/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the irq to
 * queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_irq_map_info *irqmap_info =
		(struct i40e_virtchnl_irq_map_info *)msg;
	struct i40e_virtchnl_vector_map *map;
	u16 vsi_id, vsi_queue_id, vector_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		tempmap = map->rxq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		tempmap = map->txq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}

/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
		(struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	if (i40e_vsi_control_rings(pf->vsi[vsi_id], true))
		aq_ret = I40E_ERR_TIMEOUT;
error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to disable all or specific
 * queue(s)
 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
		(struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	if (i40e_vsi_control_rings(pf->vsi[vsi_id], false))
		aq_ret = I40E_ERR_TIMEOUT;

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}
/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to get vsi stats
 **/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
		(struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_eth_stats stats;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;

	memset(&stats, 0, sizeof(struct i40e_eth_stats));

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vqs->vsi_id];
	if (!vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	i40e_update_eth_stats(vsi);
	stats = vsi->eth_stats;

error_param:
	/* send the response back to the vf */
	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
				      (u8 *)&stats, sizeof(stats));
}

/**
 * i40e_check_vf_permission
 * @vf: pointer to the vf info
 * @macaddr: pointer to the MAC Address being checked
 *
 * Check if the VF has permission to add or delete unicast MAC address
 * filters and return error code -EPERM if not. Then check if the
 * address filter requested is broadcast or zero and if so return
 * an invalid MAC address error code.
 **/
static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
{
	struct i40e_pf *pf = vf->pf;
	int ret = 0;

	if (is_broadcast_ether_addr(macaddr) ||
	    is_zero_ether_addr(macaddr)) {
		dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
		ret = I40E_ERR_INVALID_MAC_ADDR;
	} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
		   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
		/* If the host VMM administrator has set the VF MAC address
		 * administratively via the ndo_set_vf_mac command then deny
		 * permission to the VF to add or delete unicast MAC addresses.
		 * The VF may request to set the MAC address filter already
		 * assigned to it so do not return an error in that case.
		 */
		dev_err(&pf->pdev->dev,
			"VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
		ret = -EPERM;
	}
	return ret;
}

/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * add guest mac address filter
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
		(struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		ret = i40e_check_vf_permission(vf, al->list[i].addr);
		if (ret)
			goto error_param;
	}
	vsi = pf->vsi[vsi_id];

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		f = i40e_find_mac(vsi, al->list[i].addr, true, false);
		if (!f) {
			if (i40e_is_vsi_in_vlan(vsi))
				f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
							 true, false);
			else
				f = i40e_add_filter(vsi, al->list[i].addr, -1,
						    true, false);
		}

		if (!f) {
			dev_err(&pf->pdev->dev,
				"Unable to add VF MAC filter\n");
			ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
				       ret);
}
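/* Note (editorial): i40e_check_vf_permission() is what enforces the
 * ndo_set_vf_mac policy in the add path above - once the host
 * administrator pins a MAC, the only unicast address the VF may (re)add
 * is that administratively set address itself.
 */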
/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove guest mac address filter
 **/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
		(struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		if (is_broadcast_ether_addr(al->list[i].addr) ||
		    is_zero_ether_addr(al->list[i].addr)) {
			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
				al->list[i].addr);
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto error_param;
		}
	}
	vsi = pf->vsi[vsi_id];

	/* delete addresses from the list */
	for (i = 0; i < al->num_elements; i++)
		i40e_del_filter(vsi, al->list[i].addr,
				I40E_VLAN_ANY, true, false);

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
				       ret);
}

/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * program guest vlan id
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
		(struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}
	vsi = pf->vsi[vsi_id];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
}
/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove programmed guest vlan id
 **/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
		(struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	vsi = pf->vsi[vsi_id];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to delete VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
}
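/* Worked example (illustrative): an ADD_VLAN message carrying three
 * ids must be exactly sizeof(struct i40e_virtchnl_vlan_filter_list) +
 * 3 * sizeof(u16) bytes; i40e_vc_validate_vf_msg() below rejects
 * anything shorter or longer with I40E_ERR_PARAM before the opcode
 * handler ever runs.
 */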
/**
 * i40e_vc_validate_vf_msg
 * @vf: pointer to the vf info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * validate msg
 **/
static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
				   u32 v_retval, u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	int valid_len;

	/* Check if VF is disabled. */
	if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
		return I40E_ERR_PARAM;

	/* Validate message length. */
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct i40e_virtchnl_version_info);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		valid_len = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_txq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vsi_queue_config_info *vqc =
				(struct i40e_virtchnl_vsi_queue_config_info *)msg;
			valid_len += (vqc->num_queue_pairs *
				      sizeof(struct
					     i40e_virtchnl_queue_pair_info));
			if (vqc->num_queue_pairs == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_irq_map_info *vimi =
				(struct i40e_virtchnl_irq_map_info *)msg;
			valid_len += (vimi->num_vectors *
				      sizeof(struct i40e_virtchnl_vector_map));
			if (vimi->num_vectors == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_ether_addr_list *veal =
				(struct i40e_virtchnl_ether_addr_list *)msg;
			valid_len += veal->num_elements *
				     sizeof(struct i40e_virtchnl_ether_addr);
			if (veal->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vlan_filter_list *vfl =
				(struct i40e_virtchnl_vlan_filter_list *)msg;
			valid_len += vfl->num_elements * sizeof(u16);
			if (vfl->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	/* These are always errors coming from the VF. */
	case I40E_VIRTCHNL_OP_EVENT:
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		return -EPERM;
		break;
	}
	/* few more checks */
	if ((valid_len != msglen) || (err_msg_format)) {
		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
		return -EINVAL;
	} else {
		return 0;
	}
}

/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the pf structure
 * @vf_id: source vf id
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process request from vf
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
			   u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
	struct i40e_vf *vf;
	int ret;

	pf->vf_aq_requests++;
	if (local_vf_id >= pf->num_alloc_vfs)
		return -EINVAL;
	vf = &(pf->vf[local_vf_id]);
	/* perform basic checks on the msg */
	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);

	if (ret) {
		dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n",
			local_vf_id, v_opcode, msglen);
		return ret;
	}

	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		i40e_vc_reset_vf_msg(vf);
		ret = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n",
			v_opcode, local_vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      I40E_ERR_NOT_IMPLEMENTED);
		break;
	}

	return ret;
}
/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the pf structure
 *
 * called from the vflr irq handler to
 * free up vf resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	u32 reg, reg_idx, bit_idx, vf_id;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		return 0;

	clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr vfs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & (1 << bit_idx)) {
			/* clear the bit in GLGEN_VFLRSTAT */
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));

			if (!test_bit(__I40E_DOWN, &pf->state))
				i40e_reset_vf(vf, true);
		}
	}

	/* re-enable vflr interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	return 0;
}

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the pf structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum i40e_virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
		vf++;
	}
}
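/* Note (editorial): the notify helpers below reuse the same AQ send
 * path as i40e_vc_vf_broadcast(); link events are built per VF so that
 * a forced link override (vf->link_forced) can diverge from the real
 * PHY state reported in hw.phy.link_info.
 */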
/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the pf structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	struct i40e_virtchnl_pf_event pfe;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int i;

	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (vf->link_forced) {
			pfe.event_data.link_event.link_status = vf->link_up;
			pfe.event_data.link_event.link_speed =
				(vf->link_up ? I40E_LINK_SPEED_40GB : 0);
		} else {
			pfe.event_data.link_event.link_status =
				ls->link_info & I40E_AQ_LINK_UP;
			pfe.event_data.link_event.link_speed = ls->link_speed;
		}
		i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
				       0, (u8 *)&pfe, sizeof(pfe),
				       NULL);
		vf++;
	}
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the pf structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the vf structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
			       I40E_SUCCESS, (u8 *)&pfe,
			       sizeof(struct i40e_virtchnl_pf_event), NULL);
}

/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @mac: mac address
 *
 * program vf mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev,
			"Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	if (!is_valid_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid VF ethernet address\n");
		ret = -EINVAL;
		goto error_param;
	}

	/* delete the temporary mac address */
	i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);

	/* add the new mac address */
	f = i40e_add_filter(vsi, mac, 0, true, false);
	if (!f) {
		dev_err(&pf->pdev->dev,
			"Unable to add VF ucast filter\n");
		ret = -ENOMEM;
		goto error_param;
	}

	dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
	vf->pf_set_mac = true;
	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
	ret = 0;

error_param:
	return ret;
}

/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @vlan_id: vlan id to be set on the vf
 * @qos: priority setting
 *
 * program vf vlan id and/or qos
 **/
/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @vlan_id: vlan id to be set on the vf
 * @qos: priority setting
 *
 * program vf vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
			      int vf_id, u16 vlan_id, u8 qos)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi))
		dev_err(&pf->pdev->dev,
			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
			vf_id);

	/* Check for condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if (!(vlan_id || qos) && vsi->info.pvid)
		ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);

	if (vsi->info.pvid) {
		/* kill old VLAN */
		ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
					       VLAN_VID_MASK));
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "remove VLAN failed, ret=%d, aq_err=%d\n",
				 ret, pf->hw.aq.asq_last_status);
		}
	}
	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi,
				vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
	else
		i40e_vsi_remove_pvid(vsi);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter */
		ret = i40e_vsi_add_vlan(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			goto error_pvid;
		}
		/* Kill non-vlan MAC filters - ignore error return since
		 * there might not be any non-vlan MAC filters.
		 */
		i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
	}

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}
	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
	ret = 0;

error_pvid:
	return ret;
}
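/* A small illustrative helper (not part of the upstream driver) that
 * makes the PVID packing used by i40e_ndo_set_vf_port_vlan() explicit:
 * the 12-bit VLAN ID occupies the low bits and the 3-bit QOS priority
 * sits at I40E_VLAN_PRIORITY_SHIFT, exactly the value handed to
 * i40e_vsi_add_pvid() above.
 */
static u16 __maybe_unused i40e_example_build_pvid(u16 vlan_id, u8 qos)
{
	return vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
}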
/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @tx_rate: tx rate
 *
 * configure vf tx rate (not supported yet; always returns -EOPNOTSUPP)
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
{
	return -EOPNOTSUPP;
}

/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @ivi: vf configuration structure
 *
 * return vf configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	ivi->vf = vf_id;

	memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);

	ivi->tx_rate = 0;
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	ret = 0;

error_param:
	return ret;
}
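/* A minimal debug sketch (not part of the upstream driver): a
 * hypothetical helper that reuses i40e_ndo_get_vf_config() above to
 * log one VF's current settings, touching only the fields the
 * function fills in (MAC, VLAN and QOS; tx_rate is always 0 here).
 */
static void __maybe_unused i40e_example_dump_vf(struct net_device *netdev,
						int vf_id)
{
	struct ifla_vf_info ivi;

	if (i40e_ndo_get_vf_config(netdev, vf_id, &ivi))
		return;
	netdev_info(netdev, "vf %u: mac %pM vlan %u qos %u\n",
		    ivi.vf, ivi.mac, ivi.vlan, ivi.qos);
}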
/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_virtchnl_pf_event pfe;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_out;
	}

	vf = &pf->vf[vf_id];

	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		pfe.event_data.link_event.link_status =
			pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			pf->hw.phy.link_info.link_speed;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		pfe.event_data.link_event.link_status = true;
		pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		pfe.event_data.link_event.link_status = false;
		pfe.event_data.link_event.link_speed = 0;
		break;
	default:
		ret = -EINVAL;
		goto error_out;
	}
	/* Notify the VF of its new link state */
	i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);

error_out:
	return ret;
}
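/* A minimal usage sketch (not part of the upstream driver): a
 * hypothetical helper that forces the same link state on every VF by
 * looping over i40e_ndo_set_vf_link_state() above, e.g. with
 * IFLA_VF_LINK_STATE_DISABLE to take all VFs down before maintenance.
 */
static int __maybe_unused i40e_example_force_all_vf_links(
					struct net_device *netdev, int link)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	int vf_id, ret;

	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		ret = i40e_ndo_set_vf_link_state(netdev, vf_id, link);
		if (ret)
			return ret;
	}
	return 0;
}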