/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e.h"

/***********************misc routines*****************************/

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the vf info
 * @vsi_id: vf relative vsi id
 *
 * check for the valid vsi id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
{
	struct i40e_pf *pf = vf->pf;

	return pf->vsi[vsi_id]->vf_id == vf->vf_id;
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the vf info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;

	return qid < pf->vsi[vsi_id]->num_queue_pairs;
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the vf info
 * @vector_id: vf relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue id
 *
 * return pf relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vsi_idx];
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}
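
/* Worked example for the mapping above (illustrative values only): with a
 * contiguous mapping and queue_mapping[0] == 64, a request for VSI-relative
 * queue 3 resolves to absolute PF queue 64 + 3 = 67.  With a noncontiguous
 * mapping the id is instead read straight out of queue_mapping[3].
 */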

/**
 * i40e_ctrl_vsi_tx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @ctrl: control flags
 *
 * enable/disable/enable check/disable check
 **/
static int i40e_ctrl_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
				  u16 vsi_queue_id,
				  enum i40e_queue_ctrl ctrl)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool writeback = false;
	u16 pf_queue_id;
	int ret = 0;
	u32 reg;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
	reg = rd32(hw, I40E_QTX_ENA(pf_queue_id));

	switch (ctrl) {
	case I40E_QUEUE_CTRL_ENABLE:
		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_ENABLECHECK:
		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
		break;
	case I40E_QUEUE_CTRL_DISABLE:
		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_DISABLECHECK:
		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
		break;
	case I40E_QUEUE_CTRL_FASTDISABLE:
		reg |= I40E_QTX_ENA_FAST_QDIS_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_FASTDISABLECHECK:
		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
		if (!ret) {
			reg &= ~I40E_QTX_ENA_FAST_QDIS_MASK;
			writeback = true;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (writeback) {
		wr32(hw, I40E_QTX_ENA(pf_queue_id), reg);
		i40e_flush(hw);
	}

	return ret;
}
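
/* Note on the enable/disable handshake above: the driver only sets or clears
 * the QENA_REQ request bit; the hardware acknowledges by updating the
 * QENA_STAT status bit, which is what the *CHECK operations read back.  The
 * Rx variant below follows the same protocol with the QRX register set.
 */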

/**
 * i40e_ctrl_vsi_rx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @ctrl: control flags
 *
 * enable/disable/enable check/disable check
 **/
static int i40e_ctrl_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
				  u16 vsi_queue_id,
				  enum i40e_queue_ctrl ctrl)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool writeback = false;
	u16 pf_queue_id;
	int ret = 0;
	u32 reg;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
	reg = rd32(hw, I40E_QRX_ENA(pf_queue_id));

	switch (ctrl) {
	case I40E_QUEUE_CTRL_ENABLE:
		reg |= I40E_QRX_ENA_QENA_REQ_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_ENABLECHECK:
		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
		break;
	case I40E_QUEUE_CTRL_DISABLE:
		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_DISABLECHECK:
		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
		break;
	case I40E_QUEUE_CTRL_FASTDISABLE:
		reg |= I40E_QRX_ENA_FAST_QDIS_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_FASTDISABLECHECK:
		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
		if (!ret) {
			reg &= ~I40E_QRX_ENA_FAST_QDIS_MASK;
			writeback = true;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (writeback) {
		wr32(hw, I40E_QRX_ENA(pf_queue_id), reg);
		i40e_flush(hw);
	}

	return ret;
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
				      struct i40e_virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
			((pf->hw.func_caps.num_msix_vectors_vf - 1)
			 * vf->vf_id) + (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (vsi_queue_id < I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				 vsi_queue_id));
		vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					     vsi_queue_id + 1);
	}

	tempmap = vecmap->txq_map;
	vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (vsi_queue_id < I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
				 + 1));
		vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					     vsi_queue_id + 1);
	}

	next_q = find_first_bit(&linklistmap,
				(I40E_MAX_VSI_QP *
				 I40E_VIRTCHNL_SUPPORTED_QTYPES));
	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap,
				       (I40E_MAX_VSI_QP *
					I40E_VIRTCHNL_SUPPORTED_QTYPES),
				       next_q + 1);
		if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
							      vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

irq_list_done:
	i40e_flush(hw);
}
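
/* Two notes on the linked-list setup above.  First, the register index:
 * vector 0 always lives in VPINT_LNKLST0, while each VF's remaining
 * (num_msix_vectors_vf - 1) vectors are packed linearly into VPINT_LNKLSTN;
 * with 4 "N" vectors per VF (illustrative value), VF 2, vector 3 lands in
 * VPINT_LNKLSTN(4 * 2 + 2) = VPINT_LNKLSTN(10).  Second, linklistmap
 * interleaves the supported queue types, so each VSI queue pair owns
 * I40E_VIRTCHNL_SUPPORTED_QTYPES adjacent bits: the even bit is the Rx
 * queue and the odd bit the Tx queue.  Dividing a bit position by the
 * qtype count recovers the queue id; the remainder recovers the type.
 */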

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}
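
/* The HMC queue context stores the ring base address in 128-byte units,
 * which is why dma_ring_addr is divided by 128 above (the rings themselves
 * must therefore be 128-byte aligned); the Rx context below uses the same
 * convention.
 */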

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set splitalways mode 10b */
		rx_ctx.dtype = 0x2;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
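
/* The buffer size fields above are likewise stored in hardware units: hbuff
 * and dbuff are right-shifted by I40E_RXQ_CTX_HBUFF_SHIFT and
 * I40E_RXQ_CTX_DBUFF_SHIFT before being written, which matches the
 * (2KB - 64) header and (16KB - 128) data buffer upper bounds enforced by
 * the validation checks.
 */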

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the vf info
 * @type: type of VSI to allocate
 *
 * alloc vf vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi *vsi;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for vf %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}
	if (type == I40E_VSI_SRIOV) {
		vf->lan_vsi_index = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		dev_info(&pf->pdev->dev,
			 "LAN VSI index %d, VSI id %d\n",
			 vsi->idx, vsi->id);
		f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
				    0, true, false);
	}
	if (!f) {
		dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
		ret = -ENOMEM;
		goto error_alloc_vsi_res;
	}

	/* program mac filter */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		goto error_alloc_vsi_res;
	}

	/* accept bcast pkts. by default */
	ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
			vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status);
		ret = -EINVAL;
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_reset_vf
 * @vf: pointer to the vf structure
 * @flr: VFLR was issued or not
 *
 * reset the vf
 **/
int i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	int ret = -ENOENT;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, msix_vf;
	bool rsd = false;
	u16 pf_queue_id;
	int i, j;

	/* warn the VF */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_INPROGRESS);

	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

	/* PF triggers VFR only when VF requests, in case of
	 * VFLR, HW triggers VFR
	 */
	if (!flr) {
		/* reset vf using VPGEN_VFRTRIG reg */
		reg = I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 4; i++) {
		/* vf reset requires driver to first reset the
		 * vf & then poll the status register to make sure
		 * that the requested op was completed
		 * successfully
		 */
		udelay(10);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout %d\n",
			vf->vf_id);

	/* fast disable qps */
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
					     I40E_QUEUE_CTRL_FASTDISABLE);
		ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
					     I40E_QUEUE_CTRL_FASTDISABLE);
	}

	/* Queue enable/disable requires driver to
	 * first reset the vf & then poll the status register
	 * to make sure that the requested op was completed
	 * successfully
	 */
	udelay(10);
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
					     I40E_QUEUE_CTRL_FASTDISABLECHECK);
		if (ret)
			dev_info(&pf->pdev->dev,
				 "Queue control check failed on Tx queue %d of VSI %d VF %d\n",
				 j, vf->lan_vsi_index, vf->vf_id);
		ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
					     I40E_QUEUE_CTRL_FASTDISABLECHECK);
		if (ret)
			dev_info(&pf->pdev->dev,
				 "Queue control check failed on Rx queue %d of VSI %d VF %d\n",
				 j, vf->lan_vsi_index, vf->vf_id);
	}

	/* clear the irq settings */
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						       (vf->vf_id))
						      + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}
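
	/* Writing all-ones NEXTQ_INDX (the end-of-list marker) together with
	 * the NEXTQ_TYPE and ITR_INDX fields below unhooks every queue from
	 * the interrupt linked lists, so no stale mapping can fire once the
	 * VF comes back up.
	 */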
	/* set the defaults for the rqctl & tqctl registers */
	reg = (I40E_QINT_RQCTL_NEXTQ_INDX_MASK | I40E_QINT_RQCTL_ITR_INDX_MASK |
	       I40E_QINT_RQCTL_NEXTQ_TYPE_MASK);
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		pf_queue_id = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
		wr32(hw, I40E_QINT_RQCTL(pf_queue_id), reg);
		wr32(hw, I40E_QINT_TQCTL(pf_queue_id), reg);
	}

	/* clear the reset bit in the VPGEN_VFRTRIG reg */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
	/* tell the VF the reset is done */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
	i40e_flush(hw);

	return ret;
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the vf info
 *
 * enable vf mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_queue_pairs = 0;
	int j;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	/* map PF queues to VF queues */
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
		total_queue_pairs++;
	}

	/* map PF queues to VSI */
	for (j = 0; j < 7; j++) {
		if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
			reg = 0x07FF07FF;	/* unused */
		} else {
			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
							  j * 2);
			reg = qid;
			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
						      (j * 2) + 1);
			reg |= qid << 16;
		}
		wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
	}

	i40e_flush(hw);
}
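
/* VSILAN_QTABLE packs two queue ids per register: the even-numbered queue in
 * the low 16 bits and the odd-numbered queue in the high 16 bits, which is
 * why the loop above walks 7 registers (14 queues) and writes 0x07FF07FF --
 * the end-of-list marker in both halves -- for unused slots.
 */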

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the vf info
 *
 * disable vf mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the vf info
 *
 * free vf resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_index) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
		vf->lan_vsi_index = 0;
		vf->lan_vsi_id = 0;
	}
	/* reset some of the state variables keeping
	 * track of the resources
	 */
	vf->num_queue_pairs = 0;
	vf->vf_states = 0;
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the vf info
 *
 * allocate vf resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
	set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * vf req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* vf is now completely initialized */
	set_bit(I40E_VF_STAT_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

/**
 * i40e_vfs_are_assigned
 * @pf: pointer to the pf structure
 *
 * Determine if any VFs are assigned to VMs
 **/
static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
{
	struct pci_dev *pdev = pf->pdev;
	struct pci_dev *vfdev;

	/* loop through all the VFs to see if we own any that are assigned */
	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID, NULL);
	while (vfdev) {
		/* if we don't own it we don't care */
		if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
			/* if it is assigned we cannot release it */
			if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				return true;
		}

		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				       I40E_VF_DEVICE_ID,
				       vfdev);
	}

	return false;
}

/**
 * i40e_free_vfs
 * @pf: pointer to the pf structure
 *
 * free vf resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int i;

	if (!pf->vf)
		return;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, I40E_PFINT_DYN_CTL0, 0);
	i40e_flush(hw);

	/* free up vf resources */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;
	pf->num_alloc_vfs = 0;

	if (!i40e_vfs_are_assigned(pf))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev,
			 "unable to disable SR-IOV because VFs are assigned.\n");

	/* Re-enable interrupt 0. */
	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_PFINT_DYN_CTL0_INTENA_MASK |
	     I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	     (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
	i40e_flush(hw);
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the pf structure
 * @num_alloc_vfs: number of vfs to allocate
 *
 * allocate vf resources
 **/
static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"pci_enable_sriov failed with error %d!\n", ret);
		pf->num_alloc_vfs = 0;
		goto err_iov;
	}

	/* allocate memory */
	vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);

		ret = i40e_alloc_vf_res(&vfs[i]);
		i40e_reset_vf(&vfs[i], true);
		if (ret)
			break;

		/* enable vf vplan_qtable mappings */
		i40e_enable_vf_mappings(&vfs[i]);
	}
	pf->vf = vfs;
	pf->num_alloc_vfs = num_alloc_vfs;

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	return ret;
}

#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		err = -EPERM;
		goto err_out;
	}

	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs)
		return i40e_pci_sriov_enable(pdev, num_vfs);

	i40e_free_vfs(pf);
	return 0;
}
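
/* Sketch of the expected call path (standard PCI sysfs behavior, not
 * i40e-specific): writing to the PF's sriov_numvfs attribute, e.g.
 *
 *	echo 4 > /sys/bus/pci/devices/<pf-bdf>/sriov_numvfs
 *
 * invokes the sriov_configure hook above with num_vfs = 4, and writing 0
 * disables SR-IOV again via i40e_free_vfs().
 */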

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the vf info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to vf
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
			v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_err(&pf->pdev->dev,
			"Unable to send the message to VF %d aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the vf info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to vf
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum i40e_virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_version_info info = {
		I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
	};

	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct i40e_virtchnl_version_info));
}

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int i = 0, len = 0;
	int num_vsis = 1;
	int ret;

	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct i40e_virtchnl_vf_resource) +
	       sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}

	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!vsi->info.pvid)
		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	if (vf->lan_vsi_index) {
		vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
		vfres->vsi_res[i].num_queue_pairs =
			pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
		memcpy(vfres->vsi_res[i].default_mac_addr,
		       vf->default_lan_addr.addr, ETH_ALEN);
		i++;
	}
	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the vf */
	ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to reset itself,
 * unlike other virtchnl messages, pf driver
 * doesn't send the response back to the vf
 **/
static int i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
		return -ENOENT;

	return i40e_reset_vf(vf, false);
}

/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the promiscuous mode of
 * vf vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_promisc_info *info =
		(struct i40e_virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool allmulti = false;
	bool promisc = false;
	i40e_status aq_ret;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
		promisc = true;
	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
						     promisc, NULL);
	if (aq_ret)
		goto error_param;

	if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
						       allmulti, NULL);

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf,
				       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}

/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the rx/tx
 * queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vsi_queue_config_info *qci =
		(struct i40e_virtchnl_vsi_queue_config_info *)msg;
	struct i40e_virtchnl_queue_pair_info *qpi;
	u16 vsi_id, vsi_queue_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi_id = qci->vsi_id;
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		vsi_queue_id = qpi->txq.queue_id;
		if ((qpi->txq.vsi_id != vsi_id) ||
		    (qpi->rxq.vsi_id != vsi_id) ||
		    (qpi->rxq.queue_id != vsi_queue_id) ||
		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the irq to
 * queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_irq_map_info *irqmap_info =
		(struct i40e_virtchnl_irq_map_info *)msg;
	struct i40e_virtchnl_vector_map *map;
	u16 vsi_id, vsi_queue_id, vector_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		tempmap = map->rxq_map;
		vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
		while (vsi_queue_id < I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
			vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
						     vsi_queue_id + 1);
		}

		tempmap = map->txq_map;
		vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
		while (vsi_queue_id < I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
			vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
						     vsi_queue_id + 1);
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}

/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
		(struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	u16 queue_id;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_ENABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_ENABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	/* Poll the status register to make sure that the
	 * requested op was completed successfully
	 */
	udelay(10);

	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_ENABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on RX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_ENABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on TX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}
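
/* In the queue_select messages handled above and below, rx_queues and
 * tx_queues are bitmaps of VSI-relative queue ids: bit n selects queue n,
 * so e.g. rx_queues = 0x5 requests Rx queues 0 and 2 (illustrative value).
 */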

/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to disable all or specific
 * queue(s)
 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
		(struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	u16 queue_id;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_DISABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_DISABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	/* Poll the status register to make sure that the
	 * requested op was completed successfully
	 */
	udelay(10);

	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_DISABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on RX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_DISABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on TX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to get vsi stats
 **/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
		(struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_eth_stats stats;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;

	memset(&stats, 0, sizeof(struct i40e_eth_stats));

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vqs->vsi_id];
	if (!vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	i40e_update_eth_stats(vsi);
	memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));

error_param:
	/* send the response back to the vf */
	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
				      (u8 *)&stats, sizeof(stats));
}

/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * add guest mac address filter
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
		(struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		if (is_broadcast_ether_addr(al->list[i].addr) ||
		    is_zero_ether_addr(al->list[i].addr)) {
			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
				al->list[i].addr);
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}
	vsi = pf->vsi[vsi_id];

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		f = i40e_find_mac(vsi, al->list[i].addr, true, false);
		if (!f) {
			if (i40e_is_vsi_in_vlan(vsi))
				f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
							 true, false);
			else
				f = i40e_add_filter(vsi, al->list[i].addr, -1,
						    true, false);
		}

		if (!f) {
			dev_err(&pf->pdev->dev,
				"Unable to add VF MAC filter\n");
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
				       aq_ret);
}

/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove guest mac address filter
 **/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
		(struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	vsi = pf->vsi[vsi_id];

	/* delete addresses from the list */
	for (i = 0; i < al->num_elements; i++)
		i40e_del_filter(vsi, al->list[i].addr,
				I40E_VLAN_ANY, true, false);

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
				       aq_ret);
}

/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * program guest vlan id
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
		(struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}
	vsi = pf->vsi[vsi_id];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
}

/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove programmed guest vlan id
 **/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
		(struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	vsi = pf->vsi[vsi_id];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to delete VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
}

/**
 * i40e_vc_fcoe_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf for the fcoe msgs
 **/
static int i40e_vc_fcoe_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VF_STAT_FCOEENA, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	aq_ret = I40E_ERR_NOT_IMPLEMENTED;

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_FCOE, aq_ret);
}

/**
 * i40e_vc_validate_vf_msg
 * @vf: pointer to the vf info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * validate msg
 **/
static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
				   u32 v_retval, u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	int valid_len;

	/* Check if VF is disabled. */
	if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
		return I40E_ERR_PARAM;

	/* Validate message length. */
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct i40e_virtchnl_version_info);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		valid_len = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_txq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vsi_queue_config_info *vqc =
				(struct i40e_virtchnl_vsi_queue_config_info *)msg;
			valid_len += (vqc->num_queue_pairs *
				      sizeof(struct
					     i40e_virtchnl_queue_pair_info));
			if (vqc->num_queue_pairs == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_irq_map_info *vimi =
				(struct i40e_virtchnl_irq_map_info *)msg;
			valid_len += (vimi->num_vectors *
				      sizeof(struct i40e_virtchnl_vector_map));
			if (vimi->num_vectors == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_ether_addr_list *veal =
				(struct i40e_virtchnl_ether_addr_list *)msg;
			valid_len += veal->num_elements *
				     sizeof(struct i40e_virtchnl_ether_addr);
			if (veal->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vlan_filter_list *vfl =
				(struct i40e_virtchnl_vlan_filter_list *)msg;
			valid_len += vfl->num_elements * sizeof(u16);
			if (vfl->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	/* These are always errors coming from the VF. */
	case I40E_VIRTCHNL_OP_EVENT:
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		return -EPERM;
	}
	/* few more checks */
	if ((valid_len != msglen) || (err_msg_format)) {
		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
		return -EINVAL;
	} else {
		return 0;
	}
}

/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the pf structure
 * @vf_id: source vf id
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process request from vf
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
			   u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_vf *vf = &(pf->vf[vf_id]);
	struct i40e_hw *hw = &pf->hw;
	int ret;

	pf->vf_aq_requests++;
	/* perform basic checks on the msg */
	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);

	if (ret) {
		dev_err(&pf->pdev->dev, "invalid message from vf %d\n", vf_id);
		return ret;
	}
	wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		ret = i40e_vc_reset_vf_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_FCOE:
		ret = i40e_vc_fcoe_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev,
			"Unsupported opcode %d from vf %d\n", v_opcode, vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      I40E_ERR_NOT_IMPLEMENTED);
		break;
	}

	return ret;
}
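
/* Worked example for the VFLR bookkeeping below (illustrative values only):
 * with vf_base_id = 64 and vf_id = 5, the absolute VF id is 69, so its
 * status bit lives in GLGEN_VFLRSTAT register 69 / 32 = 2, bit 69 % 32 = 5.
 */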
/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the pf structure
 *
 * called from the vflr irq handler to
 * free up vf resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	u32 reg, reg_idx, bit_idx, vf_id;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		return 0;

	clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		vf = &pf->vf[vf_id];
		/* read GLGEN_VFLRSTAT register to find out the flr vfs */
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & (1 << bit_idx)) {
			/* clear the bit in GLGEN_VFLRSTAT */
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));

			if (i40e_reset_vf(vf, true))
				dev_err(&pf->pdev->dev,
					"Unable to reset the VF %d\n", vf_id);
			/* free up vf resources to destroy vsi state */
			i40e_free_vf_res(vf);

			/* allocate new vf resources with the default state */
			if (i40e_alloc_vf_res(vf))
				dev_err(&pf->pdev->dev,
					"Unable to allocate resources for VF %d\n",
					vf_id);

			i40e_enable_vf_mappings(vf);
		}
	}

	/* re-enable vflr interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	return 0;
}
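/* Worked example of the VFLR bit arithmetic used above: the absolute VF
 * number indexes a bank of 32-bit GLGEN_VFLRSTAT registers.  With the
 * illustrative values vf_base_id = 64 and vf_id = 5:
 *
 *	abs_vf_id = 64 + 5 = 69
 *	reg_idx   = 69 / 32 = 2		->  I40E_GLGEN_VFLRSTAT(2)
 *	bit_idx   = 69 % 32 = 5		->  mask (1 << 5) = 0x20
 *
 * The status bit is write-one-to-clear, which is why the handler writes
 * (1 << bit_idx) back rather than the full register value.
 */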
/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the pf structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum i40e_virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
		vf++;
	}
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the pf structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
	pfe.event_data.link_event.link_status =
	    pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
	pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;

	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the pf structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the vf structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
			       I40E_SUCCESS, (u8 *)&pfe,
			       sizeof(struct i40e_virtchnl_pf_event), NULL);
}

/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @mac: mac address
 *
 * program vf mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	if (!is_valid_ether_addr(mac)) {
		dev_err(&pf->pdev->dev, "Invalid VF ethernet address\n");
		ret = -EINVAL;
		goto error_param;
	}

	/* delete the temporary mac address */
	i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);

	/* add the new mac address */
	f = i40e_add_filter(vsi, mac, 0, true, false);
	if (!f) {
		dev_err(&pf->pdev->dev, "Unable to add VF ucast filter\n");
		ret = -ENOMEM;
		goto error_param;
	}

	dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
	ret = 0;

error_param:
	return ret;
}
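/* For reference, this ndo is reached through the rtnetlink VF attributes
 * (IFLA_VF_MAC); the equivalent iproute2 invocation from userspace is
 * (device name, VF number and address are illustrative):
 *
 *	ip link set dev eth0 vf 0 mac 52:54:00:12:34:56
 *
 * As the dev_info() above notes, the VF is not told that its address
 * changed, so its driver must be reloaded to pick up the new MAC.
 */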
/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @vlan_id: vlan identifier
 * @qos: priority setting
 *
 * program vf vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
			      int vf_id, u16 vlan_id, u8 qos)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vsi->info.pvid) {
		/* kill old VLAN */
		ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
					       VLAN_VID_MASK));
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "remove VLAN failed, ret=%d, aq_err=%d\n",
				 ret, pf->hw.aq.asq_last_status);
		}
	}
	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi,
				vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
	else
		i40e_vlan_stripping_disable(vsi);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter */
		ret = i40e_vsi_add_vlan(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			goto error_pvid;
		}
	}

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}
	ret = 0;

error_pvid:
	return ret;
}

/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @tx_rate: tx rate
 *
 * configure vf tx rate (not yet implemented; always reports -EOPNOTSUPP)
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
{
	return -EOPNOTSUPP;
}

/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @ivi: vf configuration structure
 *
 * return vf configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &pf->vf[vf_id];
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	ivi->vf = vf_id;

	/* first entry of the list is the default ethernet address */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS);
		break;
	}

	/* tx rate configuration is unsupported, see i40e_ndo_set_vf_bw() */
	ivi->tx_rate = 0;
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	ret = 0;

error_param:
	return ret;
}
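/* Worked example of the port VLAN encoding shared by the two routines
 * above: the pvid packs the VLAN id into the low bits and the QoS
 * priority into the top of the 16-bit field, matching an 802.1Q tag
 * (shift value 13 assumed from the 802.1Q PCP position).  For
 * vlan_id = 100 and qos = 3:
 *
 *	pvid = 100 | (3 << I40E_VLAN_PRIORITY_SHIFT)
 *	     = 0x0064 | 0x6000
 *	     = 0x6064
 *
 * i40e_ndo_get_vf_config() recovers the two fields from that value with
 * I40E_VLAN_MASK and I40E_PRIORITY_MASK.
 */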