/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e.h"

/***********************misc routines*****************************/

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the vf info
 * @vsi_id: vf relative vsi id
 *
 * check for the valid vsi id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
{
	struct i40e_pf *pf = vf->pf;

	return pf->vsi[vsi_id]->vf_id == vf->vf_id;
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the vf info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;

	return qid < pf->vsi[vsi_id]->num_queue_pairs;
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the vf info
 * @vector_id: vf relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id <= pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue id
 *
 * return pf relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vsi_idx];
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}
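/* Illustrative note (not in the original sources): with a contiguous
 * mapping the VSI context only stores the first PF queue, so e.g.
 * queue_mapping[0] == 64 makes VSI-relative queue 3 resolve to PF queue
 * 64 + 3 = 67.  With I40E_AQ_VSI_QUE_MAP_NONCONTIG each VSI queue is
 * looked up individually in queue_mapping[].
 */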
/**
 * i40e_config_irq_link_list
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
				      struct i40e_virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
			  (pf->hw.func_caps.num_msix_vectors_vf *
			   vf->vf_id) + (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				 vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
				 + 1));
	}

	next_q = find_first_bit(&linklistmap,
				(I40E_MAX_VSI_QP *
				 I40E_VIRTCHNL_SUPPORTED_QTYPES));
	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap,
				       (I40E_MAX_VSI_QP *
					I40E_VIRTCHNL_SUPPORTED_QTYPES),
				       next_q + 1);
		if (next_q <
		    (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
							      vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is the same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

irq_list_done:
	i40e_flush(hw);
}
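/* Illustrative note (not in the original sources): linklistmap interleaves
 * I40E_VIRTCHNL_SUPPORTED_QTYPES (two) bits per VSI queue pair - Rx at
 * the even offset, Tx at the odd one.  E.g. rxq_map = 0x3 and
 * txq_map = 0x1 set bits 0 (q0 Rx), 1 (q0 Tx) and 2 (q1 Rx), so walking
 * the set bits emits the interrupt link list q0-Rx -> q0-Tx -> q1-Rx,
 * each entry's NEXTQ field pointing at the next, terminated by
 * I40E_QUEUE_END_OF_LIST.
 */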
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: configuration info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: configuration info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set splitalways mode 10b */
		rx_ctx.dtype = 0x2;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
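/* Illustrative note (not in the original sources): the HMC context stores
 * buffer sizes in shifted units, so the checks above bound the raw byte
 * values before shifting.  E.g. a 2048-byte data buffer programs
 * rx_ctx.dbuff = 2048 >> I40E_RXQ_CTX_DBUFF_SHIFT and a 128-byte header
 * buffer programs rx_ctx.hbuff = 128 >> I40E_RXQ_CTX_HBUFF_SHIFT; the
 * header buffer tops out just under 2KB and the data buffer just under
 * 16KB.
 */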
/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the vf info
 * @type: type of VSI to allocate
 *
 * alloc vf vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for vf %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}
	if (type == I40E_VSI_SRIOV) {
		u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
		vf->lan_vsi_index = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		dev_info(&pf->pdev->dev,
			 "VF %d assigned LAN VSI index %d, VSI id %d\n",
			 vf->vf_id, vsi->idx, vsi->id);
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
		f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
				    vf->port_vlan_id, true, false);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF MAC addr\n");
		f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
				    true, false);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
	}

	if (!f) {
		dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
		ret = -ENOMEM;
		goto error_alloc_vsi_res;
	}

	/* program mac filter */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		goto error_alloc_vsi_res;
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the vf info
 *
 * enable vf mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_queue_pairs = 0;
	int j;

	/* Tell the hardware we're using noncontiguous mapping.  HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	/* map PF queues to VF queues */
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
		total_queue_pairs++;
	}

	/* map PF queues to VSI */
	for (j = 0; j < 7; j++) {
		if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
			reg = 0x07FF07FF;	/* unused */
		} else {
			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
							  j * 2);
			reg = qid;
			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
						      (j * 2) + 1);
			reg |= qid << 16;
		}
		wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
	}

	i40e_flush(hw);
}
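/* Illustrative note (not in the original sources): each VSILAN_QTABLE
 * register packs two PF queue indexes, the even queue in bits 15:0 and
 * the odd one in bits 31:16, so seven registers cover up to fourteen
 * queue pairs; 0x07FF in a half-word marks an unused slot.  E.g. PF
 * queues 64 and 65 backing VSI queues 0/1 are written as 0x00410040.
 */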
/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the vf info
 *
 * disable vf mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the vf info
 *
 * free vf resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, msix_vf;

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_index) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
		vf->lan_vsi_index = 0;
		vf->lan_vsi_id = 0;
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf + 1;
	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						       (vf->vf_id))
						      + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping
	 * track of the resources
	 */
	vf->num_queue_pairs = 0;
	vf->vf_states = 0;
}
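/* Illustrative note (not in the original sources): vector 0 of every VF
 * has its own per-VF register (VFINT_DYN_CTL0 / VPINT_LNKLST0), while
 * vectors 1..N-1 live in a flat array indexed by
 * (msix_vf - 1) * vf_id + (vector - 1).  E.g. with
 * num_msix_vectors_vf = 4 (msix_vf = 5), VF 2's vector 3 lands at
 * index 4 * 2 + 2 = 10.
 */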
/**
 * i40e_alloc_vf_res
 * @vf: pointer to the vf info
 *
 * allocate vf resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
	set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * vf req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* vf is now completely initialized */
	set_bit(I40E_VF_STAT_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the vf structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

/**
 * i40e_reset_vf
 * @vf: pointer to the vf structure
 * @flr: VFLR was issued or not
 *
 * reset the vf
 **/
void i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	int i;
	u32 reg;

	/* warn the VF */
	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset vf using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 100; i++) {
		/* vf reset requires driver to first reset the
		 * vf & then poll the status register to make sure
		 * that the requested op was completed
		 * successfully
		 */
		udelay(10);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
	/* clear the reset bit in the VPGEN_VFRTRIG reg */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* On initial reset, we won't have any queues */
	if (vf->lan_vsi_index == 0)
		goto complete_reset;

	i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
complete_reset:
	/* reallocate vf resources to reset the VSI state */
	i40e_free_vf_res(vf);
	mdelay(10);
	i40e_alloc_vf_res(vf);
	i40e_enable_vf_mappings(vf);

	/* tell the VF the reset is done */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
	i40e_flush(hw);
}
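/* Illustrative note (not in the original sources): from the VF's point of
 * view this is a two-step handshake - the PF posts I40E_VFR_COMPLETED
 * once VPGEN_VFRSTAT reports the HW reset done, then I40E_VFR_VFACTIVE
 * once the VSI and mappings have been rebuilt; a VF driver typically
 * polls its reset status register for VFACTIVE before reinitializing.
 */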
/**
 * i40e_vfs_are_assigned
 * @pf: pointer to the pf structure
 *
 * Determine if any VFs are assigned to VMs
 **/
static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
{
	struct pci_dev *pdev = pf->pdev;
	struct pci_dev *vfdev;

	/* loop through all the VFs to see if we own any that are assigned */
	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF, NULL);
	while (vfdev) {
		/* if we don't own it we don't care */
		if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
			/* if it is assigned we cannot release it */
			if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				return true;
		}

		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				       I40E_DEV_ID_VF,
				       vfdev);
	}

	return false;
}
#ifdef CONFIG_PCI_IOV

/**
 * i40e_enable_pf_switch_lb
 * @pf: pointer to the pf structure
 *
 * enable switch loop back or die - no point in a return value
 **/
static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int aq_ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "%s couldn't get pf vsi config, err %d, aq_err %d\n",
			 __func__, aq_ret, pf->hw.aq.asq_last_status);
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "%s: update vsi switch failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}
#endif

/**
 * i40e_disable_pf_switch_lb
 * @pf: pointer to the pf structure
 *
 * disable switch loop back or die - no point in a return value
 **/
static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int aq_ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "%s couldn't get pf vsi config, err %d, aq_err %d\n",
			 __func__, aq_ret, pf->hw.aq.asq_last_status);
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "%s: update vsi switch failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}
/**
 * i40e_free_vfs
 * @pf: pointer to the pf structure
 *
 * free vf resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	mdelay(10); /* let any messages in transit get finished up */
	/* free up vf resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	if (!i40e_vfs_are_assigned(pf)) {
		pci_disable_sriov(pf->pdev);
		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
		}
		i40e_disable_pf_switch_lb(pf);
	} else {
		dev_warn(&pf->pdev->dev,
			 "unable to disable SR-IOV because VFs are assigned.\n");
	}

	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
}
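/* Illustrative note (not in the original sources): GLGEN_VFLRSTAT is a
 * bit-per-VF array indexed by the absolute VF number, 32 VFs per
 * register.  E.g. vf_base_id = 64 and vf_id = 5 gives absolute VF 69,
 * i.e. register index 69 / 32 = 2, bit 69 % 32 = 5; the pending status
 * is acknowledged by writing that bit back.
 */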
#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the pf structure
 * @num_alloc_vfs: number of vfs to allocate
 *
 * allocate vf resources
 **/
static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"pci_enable_sriov failed with error %d!\n", ret);
		pf->num_alloc_vfs = 0;
		goto err_iov;
	}

	/* allocate memory */
	vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		/* vf resources get allocated during reset */
		i40e_reset_vf(&vfs[i], false);

		/* enable vf vplan_qtable mappings */
		i40e_enable_vf_mappings(&vfs[i]);
	}
	pf->vf = vfs;
	pf->num_alloc_vfs = num_alloc_vfs;

	i40e_enable_pf_switch_lb(pf);
err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		err = -EPERM;
		goto err_out;
	}

	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs)
		return i40e_pci_sriov_enable(pdev, num_vfs);

	i40e_free_vfs(pf);
	return 0;
}
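/* Illustrative usage (not in the original sources): this entry point is
 * reached through the standard PCI sysfs knob, e.g.
 *
 *	# echo 4 > /sys/class/net/eth2/device/sriov_numvfs   (enable 4 VFs)
 *	# echo 0 > /sys/class/net/eth2/device/sriov_numvfs   (free them)
 *
 * where eth2 is a hypothetical PF netdev name.
 */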
/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the vf info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to vf
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int true_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
	i40e_status aq_ret;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
			v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, true_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_err(&pf->pdev->dev,
			"Unable to send the message to VF %d aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the vf info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to vf
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum i40e_virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_version_info info = {
		I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
	};

	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct
					     i40e_virtchnl_version_info));
}

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int i = 0, len = 0;
	int num_vsis = 1;
	int ret;

	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct i40e_virtchnl_vf_resource) +
	       sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}

	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!vsi->info.pvid)
		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	if (vf->lan_vsi_index) {
		vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
		vfres->vsi_res[i].num_queue_pairs =
		    pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
		memcpy(vfres->vsi_res[i].default_mac_addr,
		       vf->default_lan_addr.addr, ETH_ALEN);
		i++;
	}
	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the vf */
	ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to reset itself,
 * unlike other virtchnl messages, pf driver
 * doesn't send the response back to the vf
 **/
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
		i40e_reset_vf(vf, false);
}
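/* Illustrative note (not in the original sources): the GET_VF_RESOURCES
 * reply built above is a variable-length blob - a struct
 * i40e_virtchnl_vf_resource header followed by num_vsis struct
 * i40e_virtchnl_vsi_resource entries (here always one, the LAN VSI),
 * which is why len is computed as sizeof(header) +
 * num_vsis * sizeof(vsi entry) before the kzalloc().
 */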
/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the promiscuous mode of
 * vf vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_promisc_info *info =
	    (struct i40e_virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool allmulti = false;
	bool promisc = false;
	i40e_status aq_ret;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
		promisc = true;
	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
						     promisc, NULL);
	if (aq_ret)
		goto error_param;

	if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
						       allmulti, NULL);

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf,
				       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}

/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the rx/tx
 * queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vsi_queue_config_info *qci =
	    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
	struct i40e_virtchnl_queue_pair_info *qpi;
	u16 vsi_id, vsi_queue_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi_id = qci->vsi_id;
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		vsi_queue_id = qpi->txq.queue_id;
		if ((qpi->txq.vsi_id != vsi_id) ||
		    (qpi->rxq.vsi_id != vsi_id) ||
		    (qpi->rxq.queue_id != vsi_queue_id) ||
		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}
/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the irq to
 * queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_irq_map_info *irqmap_info =
	    (struct i40e_virtchnl_irq_map_info *)msg;
	struct i40e_virtchnl_vector_map *map;
	u16 vsi_id, vsi_queue_id, vector_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		tempmap = map->rxq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		tempmap = map->txq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}

/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	if (i40e_vsi_control_rings(pf->vsi[vsi_id], true))
		aq_ret = I40E_ERR_TIMEOUT;
error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to disable all or specific
 * queue(s)
 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	if (i40e_vsi_control_rings(pf->vsi[vsi_id], false))
		aq_ret = I40E_ERR_TIMEOUT;

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}
/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to get vsi stats
 **/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_eth_stats stats;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;

	memset(&stats, 0, sizeof(struct i40e_eth_stats));

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vqs->vsi_id];
	if (!vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	i40e_update_eth_stats(vsi);
	stats = vsi->eth_stats;

error_param:
	/* send the response back to the vf */
	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
				      (u8 *)&stats, sizeof(stats));
}

/**
 * i40e_check_vf_permission
 * @vf: pointer to the vf info
 * @macaddr: pointer to the MAC Address being checked
 *
 * Check if the VF has permission to add or delete unicast MAC address
 * filters and return error code -EPERM if not.  Then check if the
 * address filter requested is broadcast or zero and if so return
 * an invalid MAC address error code.
 **/
static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
{
	struct i40e_pf *pf = vf->pf;
	int ret = 0;

	if (is_broadcast_ether_addr(macaddr) ||
	    is_zero_ether_addr(macaddr)) {
		dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
		ret = I40E_ERR_INVALID_MAC_ADDR;
	} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
		   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
		/* If the host VMM administrator has set the VF MAC address
		 * administratively via the ndo_set_vf_mac command then deny
		 * permission to the VF to add or delete unicast MAC addresses.
		 * The VF may request to set the MAC address filter already
		 * assigned to it so do not return an error in that case.
		 */
		dev_err(&pf->pdev->dev,
			"VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
		ret = -EPERM;
	}
	return ret;
}

/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * add guest mac address filter
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
	    (struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		ret = i40e_check_vf_permission(vf, al->list[i].addr);
		if (ret)
			goto error_param;
	}
	vsi = pf->vsi[vsi_id];

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		f = i40e_find_mac(vsi, al->list[i].addr, true, false);
		if (!f) {
			if (i40e_is_vsi_in_vlan(vsi))
				f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
							 true, false);
			else
				f = i40e_add_filter(vsi, al->list[i].addr, -1,
						    true, false);
		}

		if (!f) {
			dev_err(&pf->pdev->dev,
				"Unable to add VF MAC filter\n");
			ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
				       ret);
}
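/* Illustrative note (not in the original sources): the -1 passed to
 * i40e_add_filter() above is the "any VLAN" wildcard (the same value
 * i40e_del_filter() takes as I40E_VLAN_ANY below), so a MAC added while
 * the VSI has no VLANs matches regardless of VLAN tag; once VLANs
 * exist, i40e_put_mac_in_vlan() instead instantiates the MAC per VLAN.
 */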
/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove guest mac address filter
 **/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
	    (struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		if (is_broadcast_ether_addr(al->list[i].addr) ||
		    is_zero_ether_addr(al->list[i].addr)) {
			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
				al->list[i].addr);
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto error_param;
		}
	}
	vsi = pf->vsi[vsi_id];

	/* delete addresses from the list */
	for (i = 0; i < al->num_elements; i++)
		i40e_del_filter(vsi, al->list[i].addr,
				I40E_VLAN_ANY, true, false);

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
				       ret);
}

/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * program guest vlan id
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
	    (struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}
	vsi = pf->vsi[vsi_id];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
}
/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove programmed guest vlan id
 **/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
	    (struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	vsi = pf->vsi[vsi_id];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to delete VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
}

/**
 * i40e_vc_validate_vf_msg
 * @vf: pointer to the vf info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * validate msg
 **/
static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
				   u32 v_retval, u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	int valid_len;

	/* Check if VF is disabled. */
	if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
		return I40E_ERR_PARAM;

	/* Validate message length. */
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct i40e_virtchnl_version_info);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		valid_len = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_txq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vsi_queue_config_info *vqc =
			    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
			valid_len += (vqc->num_queue_pairs *
				      sizeof(struct
					     i40e_virtchnl_queue_pair_info));
			if (vqc->num_queue_pairs == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_irq_map_info *vimi =
			    (struct i40e_virtchnl_irq_map_info *)msg;
			valid_len += (vimi->num_vectors *
				      sizeof(struct i40e_virtchnl_vector_map));
			if (vimi->num_vectors == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_ether_addr_list *veal =
			    (struct i40e_virtchnl_ether_addr_list *)msg;
			valid_len += veal->num_elements *
			    sizeof(struct i40e_virtchnl_ether_addr);
			if (veal->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vlan_filter_list *vfl =
			    (struct i40e_virtchnl_vlan_filter_list *)msg;
			valid_len += vfl->num_elements * sizeof(u16);
			if (vfl->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	/* These are always errors coming from the VF. */
	case I40E_VIRTCHNL_OP_EVENT:
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		return -EPERM;
	}
	/* few more checks */
	if ((valid_len != msglen) || (err_msg_format)) {
		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
		return -EINVAL;
	} else {
		return 0;
	}
}
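/* Illustrative note (not in the original sources): for the list-carrying
 * opcodes valid_len is the header size plus num_elements payload
 * entries, and the final check requires an exact match.  E.g. an
 * ADD_VLAN message carrying 3 VLAN ids must be exactly
 * sizeof(struct i40e_virtchnl_vlan_filter_list) + 3 * sizeof(u16)
 * bytes; anything longer or shorter is rejected with I40E_ERR_PARAM.
 */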
/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the pf structure
 * @vf_id: source vf id
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process request from vf
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
			   u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	int local_vf_id = vf_id - hw->func_caps.vf_base_id;
	struct i40e_vf *vf;
	int ret;

	pf->vf_aq_requests++;
	if (local_vf_id >= pf->num_alloc_vfs)
		return -EINVAL;
	vf = &(pf->vf[local_vf_id]);
	/* perform basic checks on the msg */
	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);

	if (ret) {
		dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n",
			local_vf_id, v_opcode, msglen);
		return ret;
	}

	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		i40e_vc_reset_vf_msg(vf);
		ret = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n",
			v_opcode, local_vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      I40E_ERR_NOT_IMPLEMENTED);
		break;
	}

	return ret;
}
/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the pf structure
 *
 * called from the vflr irq handler to
 * free up vf resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	u32 reg, reg_idx, bit_idx, vf_id;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		return 0;

	clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr vfs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & (1 << bit_idx)) {
			/* clear the bit in GLGEN_VFLRSTAT */
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));

			i40e_reset_vf(vf, true);
		}
	}

	/* re-enable vflr interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	return 0;
}

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the pf structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum i40e_virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
		vf++;
	}
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the pf structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
	pfe.event_data.link_event.link_status =
	    pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
	pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;

	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the pf structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the vf structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
			       I40E_SUCCESS, (u8 *)&pfe,
			       sizeof(struct i40e_virtchnl_pf_event), NULL);
}
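/* Illustrative usage (not in the original sources): the ndo callbacks
 * below are what iproute2 ends up invoking, e.g.
 *
 *	# ip link set eth2 vf 0 mac 52:54:00:12:34:56
 *	# ip link set eth2 vf 0 vlan 100 qos 3
 *	# ip link show eth2            (reads back via ndo_get_vf_config)
 *
 * where eth2 is a hypothetical PF netdev name.
 */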
/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @mac: mac address
 *
 * program vf mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev,
			"Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	if (!is_valid_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid VF ethernet address\n");
		ret = -EINVAL;
		goto error_param;
	}

	/* delete the temporary mac address */
	i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);

	/* add the new mac address */
	f = i40e_add_filter(vsi, mac, 0, true, false);
	if (!f) {
		dev_err(&pf->pdev->dev,
			"Unable to add VF ucast filter\n");
		ret = -ENOMEM;
		goto error_param;
	}

	dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
	vf->pf_set_mac = true;
	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
	ret = 0;

error_param:
	return ret;
}
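/* Usage note (informational): the i40e_ndo_* callbacks in this file are
 * reached through the rtnetlink SR-IOV attributes rather than called
 * directly; an administrator typically exercises i40e_ndo_set_vf_mac()
 * and i40e_ndo_set_vf_port_vlan() with iproute2, e.g.:
 *
 *	ip link set <pf-netdev> vf 0 mac 00:11:22:33:44:55
 *	ip link set <pf-netdev> vf 0 vlan 100 qos 5
 *
 * where <pf-netdev> stands in for the PF's interface name. As the
 * message above notes, the VF only picks up a PF-set MAC address on its
 * next reset/init cycle.
 */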
/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @vlan_id: vlan identifier
 * @qos: priority setting
 *
 * program vf vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
			      int vf_id, u16 vlan_id, u8 qos)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi))
		dev_err(&pf->pdev->dev,
			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
			vf_id);

	/* Check for condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if (!(vlan_id || qos) && vsi->info.pvid)
		ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);

	if (vsi->info.pvid) {
		/* kill old VLAN */
		ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
					       VLAN_VID_MASK));
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "remove VLAN failed, ret=%d, aq_err=%d\n",
				 ret, pf->hw.aq.asq_last_status);
		}
	}
	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi,
				vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
	else
		i40e_vsi_remove_pvid(vsi);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter */
		ret = i40e_vsi_add_vlan(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			goto error_pvid;
		}
		/* Kill non-vlan MAC filters - ignore error return since
		 * there might not be any non-vlan MAC filters.
		 */
		i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
	}

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}
	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
	ret = 0;

error_pvid:
	return ret;
}

/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @tx_rate: tx rate
 *
 * configure vf tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
{
	return -EOPNOTSUPP;
}

/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @ivi: vf configuration structure
 *
 * return vf configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	ivi->vf = vf_id;

	memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);

	ivi->tx_rate = 0;
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	ret = 0;

error_param:
	return ret;
}
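/* Illustrative sketch, not part of the driver: the PVID word programmed
 * by i40e_vsi_add_pvid() above combines the 12-bit VLAN ID with the QoS
 * priority shifted up by I40E_VLAN_PRIORITY_SHIFT, and
 * i40e_ndo_get_vf_config() unpacks the same word with I40E_VLAN_MASK and
 * I40E_PRIORITY_MASK. A hypothetical helper making the packing explicit:
 */
static inline u16 i40e_example_build_pvid(u16 vlan_id, u8 qos)
{
	/* e.g. vlan_id 100, qos 5 -> 100 | (5 << I40E_VLAN_PRIORITY_SHIFT) */
	return vlan_id | ((u16)qos << I40E_VLAN_PRIORITY_SHIFT);
}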