1 /******************************************************************************* 2 * 3 * Intel Ethernet Controller XL710 Family Linux Driver 4 * Copyright(c) 2013 - 2015 Intel Corporation. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms and conditions of the GNU General Public License, 8 * version 2, as published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 * more details. 14 * 15 * You should have received a copy of the GNU General Public License along 16 * with this program. If not, see <http://www.gnu.org/licenses/>. 17 * 18 * The full GNU General Public License is included in this distribution in 19 * the file called "COPYING". 20 * 21 * Contact Information: 22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 * 25 ******************************************************************************/ 26 27 #include "i40e.h" 28 29 /***********************misc routines*****************************/ 30 31 /** 32 * i40e_vc_disable_vf 33 * @pf: pointer to the PF info 34 * @vf: pointer to the VF info 35 * 36 * Disable the VF through a SW reset 37 **/ 38 static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf) 39 { 40 struct i40e_hw *hw = &pf->hw; 41 u32 reg; 42 43 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); 44 reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; 45 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 46 i40e_flush(hw); 47 } 48 49 /** 50 * i40e_vc_isvalid_vsi_id 51 * @vf: pointer to the VF info 52 * @vsi_id: VF relative VSI id 53 * 54 * check for the valid VSI id 55 **/ 56 static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id) 57 { 58 struct i40e_pf *pf = vf->pf; 59 60 return pf->vsi[vsi_id]->vf_id == vf->vf_id; 61 } 62 63 /** 64 * i40e_vc_isvalid_queue_id 65 * @vf: pointer to the VF info 66 * @vsi_id: vsi id 67 * @qid: vsi relative queue id 68 * 69 * check for the valid queue id 70 **/ 71 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id, 72 u8 qid) 73 { 74 struct i40e_pf *pf = vf->pf; 75 76 return qid < pf->vsi[vsi_id]->alloc_queue_pairs; 77 } 78 79 /** 80 * i40e_vc_isvalid_vector_id 81 * @vf: pointer to the VF info 82 * @vector_id: VF relative vector id 83 * 84 * check for the valid vector id 85 **/ 86 static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id) 87 { 88 struct i40e_pf *pf = vf->pf; 89 90 return vector_id < pf->hw.func_caps.num_msix_vectors_vf; 91 } 92 93 /***********************vf resource mgmt routines*****************/ 94 95 /** 96 * i40e_vc_get_pf_queue_id 97 * @vf: pointer to the VF info 98 * @vsi_idx: index of VSI in PF struct 99 * @vsi_queue_id: vsi relative queue id 100 * 101 * return PF relative queue id 102 **/ 103 static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx, 104 u8 vsi_queue_id) 105 { 106 struct i40e_pf *pf = vf->pf; 107 struct i40e_vsi *vsi = pf->vsi[vsi_idx]; 108 u16 pf_queue_id = I40E_QUEUE_END_OF_LIST; 109 110 if (le16_to_cpu(vsi->info.mapping_flags) & 111 I40E_AQ_VSI_QUE_MAP_NONCONTIG) 112 pf_queue_id = 113 le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]); 114 else 115 pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) + 116 vsi_queue_id; 117 118 return pf_queue_id; 119 } 120 121 /** 122 * i40e_config_irq_link_list 123 * @vf: pointer to the VF info 124 * @vsi_idx: index of VSI in PF struct 125 * @vecmap: irq map info 126 * 127 * configure irq link list from the map 128 **/ 129 static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx, 130 struct i40e_virtchnl_vector_map *vecmap) 131 { 132 unsigned long linklistmap = 0, tempmap; 133 struct i40e_pf *pf = vf->pf; 134 struct i40e_hw *hw = &pf->hw; 135 u16 vsi_queue_id, pf_queue_id; 136 enum i40e_queue_type qtype; 137 u16 next_q, vector_id; 138 u32 reg, reg_idx; 139 u16 itr_idx = 0; 140 141 vector_id = vecmap->vector_id; 142 /* setup the head */ 143 if (0 == vector_id) 144 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); 145 else 146 reg_idx = I40E_VPINT_LNKLSTN( 147 ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) + 148 (vector_id - 1)); 149 150 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) { 151 /* Special case - No queues mapped on this vector */ 152 wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK); 153 goto irq_list_done; 154 } 155 tempmap = vecmap->rxq_map; 156 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 157 linklistmap |= (1 << 158 (I40E_VIRTCHNL_SUPPORTED_QTYPES * 159 vsi_queue_id)); 160 } 161 162 tempmap = vecmap->txq_map; 163 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 164 linklistmap |= (1 << 165 (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id 166 + 1)); 167 } 168 169 next_q = find_first_bit(&linklistmap, 170 (I40E_MAX_VSI_QP * 171 I40E_VIRTCHNL_SUPPORTED_QTYPES)); 172 vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES; 173 qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES; 174 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id); 175 reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id); 176 177 wr32(hw, reg_idx, reg); 178 179 while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) { 180 switch (qtype) { 181 case I40E_QUEUE_TYPE_RX: 182 reg_idx = I40E_QINT_RQCTL(pf_queue_id); 183 itr_idx = vecmap->rxitr_idx; 184 break; 185 case I40E_QUEUE_TYPE_TX: 186 reg_idx = I40E_QINT_TQCTL(pf_queue_id); 187 itr_idx = vecmap->txitr_idx; 188 break; 189 default: 190 break; 191 } 192 193 next_q = find_next_bit(&linklistmap, 194 (I40E_MAX_VSI_QP * 195 I40E_VIRTCHNL_SUPPORTED_QTYPES), 196 next_q + 1); 197 if (next_q < 198 (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) { 199 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; 200 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; 201 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, 202 vsi_queue_id); 203 } else { 204 pf_queue_id = I40E_QUEUE_END_OF_LIST; 205 qtype = 0; 206 } 207 208 /* format for the RQCTL & TQCTL regs is same */ 209 reg = (vector_id) | 210 (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | 211 (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | 212 (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | 213 (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT); 214 wr32(hw, reg_idx, reg); 215 } 216 217 irq_list_done: 218 i40e_flush(hw); 219 } 220 221 /** 222 * i40e_config_vsi_tx_queue 223 * @vf: pointer to the VF info 224 * @vsi_idx: index of VSI in PF struct 225 * @vsi_queue_id: vsi relative queue index 226 * @info: config. info 227 * 228 * configure tx queue 229 **/ 230 static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx, 231 u16 vsi_queue_id, 232 struct i40e_virtchnl_txq_info *info) 233 { 234 struct i40e_pf *pf = vf->pf; 235 struct i40e_hw *hw = &pf->hw; 236 struct i40e_hmc_obj_txq tx_ctx; 237 u16 pf_queue_id; 238 u32 qtx_ctl; 239 int ret = 0; 240 241 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id); 242 243 /* clear the context structure first */ 244 memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq)); 245 246 /* only set the required fields */ 247 tx_ctx.base = info->dma_ring_addr / 128; 248 tx_ctx.qlen = info->ring_len; 249 tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]); 250 tx_ctx.rdylist_act = 0; 251 tx_ctx.head_wb_ena = info->headwb_enabled; 252 tx_ctx.head_wb_addr = info->dma_headwb_addr; 253 254 /* clear the context in the HMC */ 255 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); 256 if (ret) { 257 dev_err(&pf->pdev->dev, 258 "Failed to clear VF LAN Tx queue context %d, error: %d\n", 259 pf_queue_id, ret); 260 ret = -ENOENT; 261 goto error_context; 262 } 263 264 /* set the context in the HMC */ 265 ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx); 266 if (ret) { 267 dev_err(&pf->pdev->dev, 268 "Failed to set VF LAN Tx queue context %d error: %d\n", 269 pf_queue_id, ret); 270 ret = -ENOENT; 271 goto error_context; 272 } 273 274 /* associate this queue with the PCI VF function */ 275 qtx_ctl = I40E_QTX_CTL_VF_QUEUE; 276 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) 277 & I40E_QTX_CTL_PF_INDX_MASK); 278 qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id) 279 << I40E_QTX_CTL_VFVM_INDX_SHIFT) 280 & I40E_QTX_CTL_VFVM_INDX_MASK); 281 wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl); 282 i40e_flush(hw); 283 284 error_context: 285 return ret; 286 } 287 288 /** 289 * i40e_config_vsi_rx_queue 290 * @vf: pointer to the VF info 291 * @vsi_idx: index of VSI in PF struct 292 * @vsi_queue_id: vsi relative queue index 293 * @info: config. info 294 * 295 * configure rx queue 296 **/ 297 static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx, 298 u16 vsi_queue_id, 299 struct i40e_virtchnl_rxq_info *info) 300 { 301 struct i40e_pf *pf = vf->pf; 302 struct i40e_hw *hw = &pf->hw; 303 struct i40e_hmc_obj_rxq rx_ctx; 304 u16 pf_queue_id; 305 int ret = 0; 306 307 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id); 308 309 /* clear the context structure first */ 310 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); 311 312 /* only set the required fields */ 313 rx_ctx.base = info->dma_ring_addr / 128; 314 rx_ctx.qlen = info->ring_len; 315 316 if (info->splithdr_enabled) { 317 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | 318 I40E_RX_SPLIT_IP | 319 I40E_RX_SPLIT_TCP_UDP | 320 I40E_RX_SPLIT_SCTP; 321 /* header length validation */ 322 if (info->hdr_size > ((2 * 1024) - 64)) { 323 ret = -EINVAL; 324 goto error_param; 325 } 326 rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; 327 328 /* set splitalways mode 10b */ 329 rx_ctx.dtype = 0x2; 330 } 331 332 /* databuffer length validation */ 333 if (info->databuffer_size > ((16 * 1024) - 128)) { 334 ret = -EINVAL; 335 goto error_param; 336 } 337 rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT; 338 339 /* max pkt. length validation */ 340 if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) { 341 ret = -EINVAL; 342 goto error_param; 343 } 344 rx_ctx.rxmax = info->max_pkt_size; 345 346 /* enable 32bytes desc always */ 347 rx_ctx.dsize = 1; 348 349 /* default values */ 350 rx_ctx.lrxqthresh = 2; 351 rx_ctx.crcstrip = 1; 352 rx_ctx.prefena = 1; 353 rx_ctx.l2tsel = 1; 354 355 /* clear the context in the HMC */ 356 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id); 357 if (ret) { 358 dev_err(&pf->pdev->dev, 359 "Failed to clear VF LAN Rx queue context %d, error: %d\n", 360 pf_queue_id, ret); 361 ret = -ENOENT; 362 goto error_param; 363 } 364 365 /* set the context in the HMC */ 366 ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx); 367 if (ret) { 368 dev_err(&pf->pdev->dev, 369 "Failed to set VF LAN Rx queue context %d error: %d\n", 370 pf_queue_id, ret); 371 ret = -ENOENT; 372 goto error_param; 373 } 374 375 error_param: 376 return ret; 377 } 378 379 /** 380 * i40e_alloc_vsi_res 381 * @vf: pointer to the VF info 382 * @type: type of VSI to allocate 383 * 384 * alloc VF vsi context & resources 385 **/ 386 static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) 387 { 388 struct i40e_mac_filter *f = NULL; 389 struct i40e_pf *pf = vf->pf; 390 struct i40e_vsi *vsi; 391 int ret = 0; 392 393 vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id); 394 395 if (!vsi) { 396 dev_err(&pf->pdev->dev, 397 "add vsi failed for VF %d, aq_err %d\n", 398 vf->vf_id, pf->hw.aq.asq_last_status); 399 ret = -ENOENT; 400 goto error_alloc_vsi_res; 401 } 402 if (type == I40E_VSI_SRIOV) { 403 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 404 vf->lan_vsi_index = vsi->idx; 405 vf->lan_vsi_id = vsi->id; 406 /* If the port VLAN has been configured and then the 407 * VF driver was removed then the VSI port VLAN 408 * configuration was destroyed. Check if there is 409 * a port VLAN and restore the VSI configuration if 410 * needed. 411 */ 412 if (vf->port_vlan_id) 413 i40e_vsi_add_pvid(vsi, vf->port_vlan_id); 414 f = i40e_add_filter(vsi, vf->default_lan_addr.addr, 415 vf->port_vlan_id, true, false); 416 if (!f) 417 dev_info(&pf->pdev->dev, 418 "Could not allocate VF MAC addr\n"); 419 f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id, 420 true, false); 421 if (!f) 422 dev_info(&pf->pdev->dev, 423 "Could not allocate VF broadcast filter\n"); 424 } 425 426 /* program mac filter */ 427 ret = i40e_sync_vsi_filters(vsi); 428 if (ret) 429 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 430 431 /* Set VF bandwidth if specified */ 432 if (vf->tx_rate) { 433 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, 434 vf->tx_rate / 50, 0, NULL); 435 if (ret) 436 dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n", 437 vf->vf_id, ret); 438 } 439 440 error_alloc_vsi_res: 441 return ret; 442 } 443 444 /** 445 * i40e_enable_vf_mappings 446 * @vf: pointer to the VF info 447 * 448 * enable VF mappings 449 **/ 450 static void i40e_enable_vf_mappings(struct i40e_vf *vf) 451 { 452 struct i40e_pf *pf = vf->pf; 453 struct i40e_hw *hw = &pf->hw; 454 u32 reg, total_queue_pairs = 0; 455 int j; 456 457 /* Tell the hardware we're using noncontiguous mapping. HW requires 458 * that VF queues be mapped using this method, even when they are 459 * contiguous in real life 460 */ 461 wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id), 462 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); 463 464 /* enable VF vplan_qtable mappings */ 465 reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK; 466 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg); 467 468 /* map PF queues to VF queues */ 469 for (j = 0; j < pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; j++) { 470 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j); 471 reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); 472 wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg); 473 total_queue_pairs++; 474 } 475 476 /* map PF queues to VSI */ 477 for (j = 0; j < 7; j++) { 478 if (j * 2 >= pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs) { 479 reg = 0x07FF07FF; /* unused */ 480 } else { 481 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, 482 j * 2); 483 reg = qid; 484 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, 485 (j * 2) + 1); 486 reg |= qid << 16; 487 } 488 wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg); 489 } 490 491 i40e_flush(hw); 492 } 493 494 /** 495 * i40e_disable_vf_mappings 496 * @vf: pointer to the VF info 497 * 498 * disable VF mappings 499 **/ 500 static void i40e_disable_vf_mappings(struct i40e_vf *vf) 501 { 502 struct i40e_pf *pf = vf->pf; 503 struct i40e_hw *hw = &pf->hw; 504 int i; 505 506 /* disable qp mappings */ 507 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0); 508 for (i = 0; i < I40E_MAX_VSI_QP; i++) 509 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id), 510 I40E_QUEUE_END_OF_LIST); 511 i40e_flush(hw); 512 } 513 514 /** 515 * i40e_free_vf_res 516 * @vf: pointer to the VF info 517 * 518 * free VF resources 519 **/ 520 static void i40e_free_vf_res(struct i40e_vf *vf) 521 { 522 struct i40e_pf *pf = vf->pf; 523 struct i40e_hw *hw = &pf->hw; 524 u32 reg_idx, reg; 525 int i, msix_vf; 526 527 /* free vsi & disconnect it from the parent uplink */ 528 if (vf->lan_vsi_index) { 529 i40e_vsi_release(pf->vsi[vf->lan_vsi_index]); 530 vf->lan_vsi_index = 0; 531 vf->lan_vsi_id = 0; 532 } 533 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 534 535 /* disable interrupts so the VF starts in a known state */ 536 for (i = 0; i < msix_vf; i++) { 537 /* format is same for both registers */ 538 if (0 == i) 539 reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id); 540 else 541 reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) * 542 (vf->vf_id)) 543 + (i - 1)); 544 wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); 545 i40e_flush(hw); 546 } 547 548 /* clear the irq settings */ 549 for (i = 0; i < msix_vf; i++) { 550 /* format is same for both registers */ 551 if (0 == i) 552 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); 553 else 554 reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) * 555 (vf->vf_id)) 556 + (i - 1)); 557 reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK | 558 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); 559 wr32(hw, reg_idx, reg); 560 i40e_flush(hw); 561 } 562 /* reset some of the state varibles keeping 563 * track of the resources 564 */ 565 vf->num_queue_pairs = 0; 566 vf->vf_states = 0; 567 } 568 569 /** 570 * i40e_alloc_vf_res 571 * @vf: pointer to the VF info 572 * 573 * allocate VF resources 574 **/ 575 static int i40e_alloc_vf_res(struct i40e_vf *vf) 576 { 577 struct i40e_pf *pf = vf->pf; 578 int total_queue_pairs = 0; 579 int ret; 580 581 /* allocate hw vsi context & associated resources */ 582 ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV); 583 if (ret) 584 goto error_alloc; 585 total_queue_pairs += pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; 586 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 587 588 /* store the total qps number for the runtime 589 * VF req validation 590 */ 591 vf->num_queue_pairs = total_queue_pairs; 592 593 /* VF is now completely initialized */ 594 set_bit(I40E_VF_STAT_INIT, &vf->vf_states); 595 596 error_alloc: 597 if (ret) 598 i40e_free_vf_res(vf); 599 600 return ret; 601 } 602 603 #define VF_DEVICE_STATUS 0xAA 604 #define VF_TRANS_PENDING_MASK 0x20 605 /** 606 * i40e_quiesce_vf_pci 607 * @vf: pointer to the VF structure 608 * 609 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO 610 * if the transactions never clear. 611 **/ 612 static int i40e_quiesce_vf_pci(struct i40e_vf *vf) 613 { 614 struct i40e_pf *pf = vf->pf; 615 struct i40e_hw *hw = &pf->hw; 616 int vf_abs_id, i; 617 u32 reg; 618 619 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; 620 621 wr32(hw, I40E_PF_PCI_CIAA, 622 VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); 623 for (i = 0; i < 100; i++) { 624 reg = rd32(hw, I40E_PF_PCI_CIAD); 625 if ((reg & VF_TRANS_PENDING_MASK) == 0) 626 return 0; 627 udelay(1); 628 } 629 return -EIO; 630 } 631 632 /** 633 * i40e_reset_vf 634 * @vf: pointer to the VF structure 635 * @flr: VFLR was issued or not 636 * 637 * reset the VF 638 **/ 639 void i40e_reset_vf(struct i40e_vf *vf, bool flr) 640 { 641 struct i40e_pf *pf = vf->pf; 642 struct i40e_hw *hw = &pf->hw; 643 bool rsd = false; 644 int i; 645 u32 reg; 646 647 if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) 648 return; 649 650 /* warn the VF */ 651 clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); 652 653 /* In the case of a VFLR, the HW has already reset the VF and we 654 * just need to clean up, so don't hit the VFRTRIG register. 655 */ 656 if (!flr) { 657 /* reset VF using VPGEN_VFRTRIG reg */ 658 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); 659 reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; 660 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 661 i40e_flush(hw); 662 } 663 664 if (i40e_quiesce_vf_pci(vf)) 665 dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", 666 vf->vf_id); 667 668 /* poll VPGEN_VFRSTAT reg to make sure 669 * that reset is complete 670 */ 671 for (i = 0; i < 10; i++) { 672 /* VF reset requires driver to first reset the VF and then 673 * poll the status register to make sure that the reset 674 * completed successfully. Due to internal HW FIFO flushes, 675 * we must wait 10ms before the register will be valid. 676 */ 677 usleep_range(10000, 20000); 678 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); 679 if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { 680 rsd = true; 681 break; 682 } 683 } 684 685 if (!rsd) 686 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", 687 vf->vf_id); 688 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED); 689 /* clear the reset bit in the VPGEN_VFRTRIG reg */ 690 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); 691 reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; 692 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 693 694 /* On initial reset, we won't have any queues */ 695 if (vf->lan_vsi_index == 0) 696 goto complete_reset; 697 698 i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false); 699 complete_reset: 700 /* reallocate VF resources to reset the VSI state */ 701 i40e_free_vf_res(vf); 702 i40e_alloc_vf_res(vf); 703 i40e_enable_vf_mappings(vf); 704 set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); 705 706 /* tell the VF the reset is done */ 707 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); 708 i40e_flush(hw); 709 clear_bit(__I40E_VF_DISABLE, &pf->state); 710 } 711 712 /** 713 * i40e_free_vfs 714 * @pf: pointer to the PF structure 715 * 716 * free VF resources 717 **/ 718 void i40e_free_vfs(struct i40e_pf *pf) 719 { 720 struct i40e_hw *hw = &pf->hw; 721 u32 reg_idx, bit_idx; 722 int i, tmp, vf_id; 723 724 if (!pf->vf) 725 return; 726 while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) 727 usleep_range(1000, 2000); 728 729 /* Disable IOV before freeing resources. This lets any VF drivers 730 * running in the host get themselves cleaned up before we yank 731 * the carpet out from underneath their feet. 732 */ 733 if (!pci_vfs_assigned(pf->pdev)) 734 pci_disable_sriov(pf->pdev); 735 736 msleep(20); /* let any messages in transit get finished up */ 737 738 /* free up VF resources */ 739 tmp = pf->num_alloc_vfs; 740 pf->num_alloc_vfs = 0; 741 for (i = 0; i < tmp; i++) { 742 if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) 743 i40e_free_vf_res(&pf->vf[i]); 744 /* disable qp mappings */ 745 i40e_disable_vf_mappings(&pf->vf[i]); 746 } 747 748 kfree(pf->vf); 749 pf->vf = NULL; 750 751 /* This check is for when the driver is unloaded while VFs are 752 * assigned. Setting the number of VFs to 0 through sysfs is caught 753 * before this function ever gets called. 754 */ 755 if (!pci_vfs_assigned(pf->pdev)) { 756 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to 757 * work correctly when SR-IOV gets re-enabled. 758 */ 759 for (vf_id = 0; vf_id < tmp; vf_id++) { 760 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; 761 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 762 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); 763 } 764 } else { 765 dev_warn(&pf->pdev->dev, 766 "unable to disable SR-IOV because VFs are assigned.\n"); 767 } 768 clear_bit(__I40E_VF_DISABLE, &pf->state); 769 } 770 771 #ifdef CONFIG_PCI_IOV 772 /** 773 * i40e_alloc_vfs 774 * @pf: pointer to the PF structure 775 * @num_alloc_vfs: number of VFs to allocate 776 * 777 * allocate VF resources 778 **/ 779 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) 780 { 781 struct i40e_vf *vfs; 782 int i, ret = 0; 783 784 /* Disable interrupt 0 so we don't try to handle the VFLR. */ 785 i40e_irq_dynamic_disable_icr0(pf); 786 787 /* Check to see if we're just allocating resources for extant VFs */ 788 if (pci_num_vf(pf->pdev) != num_alloc_vfs) { 789 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); 790 if (ret) { 791 dev_err(&pf->pdev->dev, 792 "Failed to enable SR-IOV, error %d.\n", ret); 793 pf->num_alloc_vfs = 0; 794 goto err_iov; 795 } 796 } 797 /* allocate memory */ 798 vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL); 799 if (!vfs) { 800 ret = -ENOMEM; 801 goto err_alloc; 802 } 803 pf->vf = vfs; 804 805 /* apply default profile */ 806 for (i = 0; i < num_alloc_vfs; i++) { 807 vfs[i].pf = pf; 808 vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB; 809 vfs[i].vf_id = i; 810 811 /* assign default capabilities */ 812 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); 813 vfs[i].spoofchk = true; 814 /* VF resources get allocated during reset */ 815 i40e_reset_vf(&vfs[i], false); 816 817 /* enable VF vplan_qtable mappings */ 818 i40e_enable_vf_mappings(&vfs[i]); 819 } 820 pf->num_alloc_vfs = num_alloc_vfs; 821 822 err_alloc: 823 if (ret) 824 i40e_free_vfs(pf); 825 err_iov: 826 /* Re-enable interrupt 0. */ 827 i40e_irq_dynamic_enable_icr0(pf); 828 return ret; 829 } 830 831 #endif 832 /** 833 * i40e_pci_sriov_enable 834 * @pdev: pointer to a pci_dev structure 835 * @num_vfs: number of VFs to allocate 836 * 837 * Enable or change the number of VFs 838 **/ 839 static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) 840 { 841 #ifdef CONFIG_PCI_IOV 842 struct i40e_pf *pf = pci_get_drvdata(pdev); 843 int pre_existing_vfs = pci_num_vf(pdev); 844 int err = 0; 845 846 dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs); 847 if (pre_existing_vfs && pre_existing_vfs != num_vfs) 848 i40e_free_vfs(pf); 849 else if (pre_existing_vfs && pre_existing_vfs == num_vfs) 850 goto out; 851 852 if (num_vfs > pf->num_req_vfs) { 853 err = -EPERM; 854 goto err_out; 855 } 856 857 err = i40e_alloc_vfs(pf, num_vfs); 858 if (err) { 859 dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err); 860 goto err_out; 861 } 862 863 out: 864 return num_vfs; 865 866 err_out: 867 return err; 868 #endif 869 return 0; 870 } 871 872 /** 873 * i40e_pci_sriov_configure 874 * @pdev: pointer to a pci_dev structure 875 * @num_vfs: number of VFs to allocate 876 * 877 * Enable or change the number of VFs. Called when the user updates the number 878 * of VFs in sysfs. 879 **/ 880 int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 881 { 882 struct i40e_pf *pf = pci_get_drvdata(pdev); 883 884 if (num_vfs) 885 return i40e_pci_sriov_enable(pdev, num_vfs); 886 887 if (!pci_vfs_assigned(pf->pdev)) { 888 i40e_free_vfs(pf); 889 } else { 890 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); 891 return -EINVAL; 892 } 893 return 0; 894 } 895 896 /***********************virtual channel routines******************/ 897 898 /** 899 * i40e_vc_send_msg_to_vf 900 * @vf: pointer to the VF info 901 * @v_opcode: virtual channel opcode 902 * @v_retval: virtual channel return value 903 * @msg: pointer to the msg buffer 904 * @msglen: msg length 905 * 906 * send msg to VF 907 **/ 908 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, 909 u32 v_retval, u8 *msg, u16 msglen) 910 { 911 struct i40e_pf *pf; 912 struct i40e_hw *hw; 913 int abs_vf_id; 914 i40e_status aq_ret; 915 916 /* validate the request */ 917 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) 918 return -EINVAL; 919 920 pf = vf->pf; 921 hw = &pf->hw; 922 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 923 924 /* single place to detect unsuccessful return values */ 925 if (v_retval) { 926 vf->num_invalid_msgs++; 927 dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n", 928 v_opcode, v_retval); 929 if (vf->num_invalid_msgs > 930 I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { 931 dev_err(&pf->pdev->dev, 932 "Number of invalid messages exceeded for VF %d\n", 933 vf->vf_id); 934 dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); 935 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); 936 } 937 } else { 938 vf->num_valid_msgs++; 939 } 940 941 aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, 942 msg, msglen, NULL); 943 if (aq_ret) { 944 dev_err(&pf->pdev->dev, 945 "Unable to send the message to VF %d aq_err %d\n", 946 vf->vf_id, pf->hw.aq.asq_last_status); 947 return -EIO; 948 } 949 950 return 0; 951 } 952 953 /** 954 * i40e_vc_send_resp_to_vf 955 * @vf: pointer to the VF info 956 * @opcode: operation code 957 * @retval: return value 958 * 959 * send resp msg to VF 960 **/ 961 static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, 962 enum i40e_virtchnl_ops opcode, 963 i40e_status retval) 964 { 965 return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0); 966 } 967 968 /** 969 * i40e_vc_get_version_msg 970 * @vf: pointer to the VF info 971 * 972 * called from the VF to request the API version used by the PF 973 **/ 974 static int i40e_vc_get_version_msg(struct i40e_vf *vf) 975 { 976 struct i40e_virtchnl_version_info info = { 977 I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR 978 }; 979 980 return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION, 981 I40E_SUCCESS, (u8 *)&info, 982 sizeof(struct 983 i40e_virtchnl_version_info)); 984 } 985 986 /** 987 * i40e_vc_get_vf_resources_msg 988 * @vf: pointer to the VF info 989 * @msg: pointer to the msg buffer 990 * @msglen: msg length 991 * 992 * called from the VF to request its resources 993 **/ 994 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) 995 { 996 struct i40e_virtchnl_vf_resource *vfres = NULL; 997 struct i40e_pf *pf = vf->pf; 998 i40e_status aq_ret = 0; 999 struct i40e_vsi *vsi; 1000 int i = 0, len = 0; 1001 int num_vsis = 1; 1002 int ret; 1003 1004 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 1005 aq_ret = I40E_ERR_PARAM; 1006 goto err; 1007 } 1008 1009 len = (sizeof(struct i40e_virtchnl_vf_resource) + 1010 sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis); 1011 1012 vfres = kzalloc(len, GFP_KERNEL); 1013 if (!vfres) { 1014 aq_ret = I40E_ERR_NO_MEMORY; 1015 len = 0; 1016 goto err; 1017 } 1018 1019 vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2; 1020 vsi = pf->vsi[vf->lan_vsi_index]; 1021 if (!vsi->info.pvid) 1022 vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN; 1023 1024 vfres->num_vsis = num_vsis; 1025 vfres->num_queue_pairs = vf->num_queue_pairs; 1026 vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; 1027 if (vf->lan_vsi_index) { 1028 vfres->vsi_res[i].vsi_id = vf->lan_vsi_index; 1029 vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV; 1030 vfres->vsi_res[i].num_queue_pairs = 1031 pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; 1032 memcpy(vfres->vsi_res[i].default_mac_addr, 1033 vf->default_lan_addr.addr, ETH_ALEN); 1034 i++; 1035 } 1036 set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); 1037 1038 err: 1039 /* send the response back to the VF */ 1040 ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, 1041 aq_ret, (u8 *)vfres, len); 1042 1043 kfree(vfres); 1044 return ret; 1045 } 1046 1047 /** 1048 * i40e_vc_reset_vf_msg 1049 * @vf: pointer to the VF info 1050 * @msg: pointer to the msg buffer 1051 * @msglen: msg length 1052 * 1053 * called from the VF to reset itself, 1054 * unlike other virtchnl messages, PF driver 1055 * doesn't send the response back to the VF 1056 **/ 1057 static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) 1058 { 1059 if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) 1060 i40e_reset_vf(vf, false); 1061 } 1062 1063 /** 1064 * i40e_vc_config_promiscuous_mode_msg 1065 * @vf: pointer to the VF info 1066 * @msg: pointer to the msg buffer 1067 * @msglen: msg length 1068 * 1069 * called from the VF to configure the promiscuous mode of 1070 * VF vsis 1071 **/ 1072 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, 1073 u8 *msg, u16 msglen) 1074 { 1075 struct i40e_virtchnl_promisc_info *info = 1076 (struct i40e_virtchnl_promisc_info *)msg; 1077 struct i40e_pf *pf = vf->pf; 1078 struct i40e_hw *hw = &pf->hw; 1079 struct i40e_vsi *vsi; 1080 bool allmulti = false; 1081 i40e_status aq_ret; 1082 1083 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 1084 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || 1085 !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) || 1086 (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) { 1087 aq_ret = I40E_ERR_PARAM; 1088 goto error_param; 1089 } 1090 vsi = pf->vsi[info->vsi_id]; 1091 if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC) 1092 allmulti = true; 1093 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, 1094 allmulti, NULL); 1095 1096 error_param: 1097 /* send the response to the VF */ 1098 return i40e_vc_send_resp_to_vf(vf, 1099 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 1100 aq_ret); 1101 } 1102 1103 /** 1104 * i40e_vc_config_queues_msg 1105 * @vf: pointer to the VF info 1106 * @msg: pointer to the msg buffer 1107 * @msglen: msg length 1108 * 1109 * called from the VF to configure the rx/tx 1110 * queues 1111 **/ 1112 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1113 { 1114 struct i40e_virtchnl_vsi_queue_config_info *qci = 1115 (struct i40e_virtchnl_vsi_queue_config_info *)msg; 1116 struct i40e_virtchnl_queue_pair_info *qpi; 1117 struct i40e_pf *pf = vf->pf; 1118 u16 vsi_id, vsi_queue_id; 1119 i40e_status aq_ret = 0; 1120 int i; 1121 1122 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { 1123 aq_ret = I40E_ERR_PARAM; 1124 goto error_param; 1125 } 1126 1127 vsi_id = qci->vsi_id; 1128 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 1129 aq_ret = I40E_ERR_PARAM; 1130 goto error_param; 1131 } 1132 for (i = 0; i < qci->num_queue_pairs; i++) { 1133 qpi = &qci->qpair[i]; 1134 vsi_queue_id = qpi->txq.queue_id; 1135 if ((qpi->txq.vsi_id != vsi_id) || 1136 (qpi->rxq.vsi_id != vsi_id) || 1137 (qpi->rxq.queue_id != vsi_queue_id) || 1138 !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) { 1139 aq_ret = I40E_ERR_PARAM; 1140 goto error_param; 1141 } 1142 1143 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id, 1144 &qpi->rxq) || 1145 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id, 1146 &qpi->txq)) { 1147 aq_ret = I40E_ERR_PARAM; 1148 goto error_param; 1149 } 1150 } 1151 /* set vsi num_queue_pairs in use to num configured by VF */ 1152 pf->vsi[vf->lan_vsi_index]->num_queue_pairs = qci->num_queue_pairs; 1153 1154 error_param: 1155 /* send the response to the VF */ 1156 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, 1157 aq_ret); 1158 } 1159 1160 /** 1161 * i40e_vc_config_irq_map_msg 1162 * @vf: pointer to the VF info 1163 * @msg: pointer to the msg buffer 1164 * @msglen: msg length 1165 * 1166 * called from the VF to configure the irq to 1167 * queue map 1168 **/ 1169 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1170 { 1171 struct i40e_virtchnl_irq_map_info *irqmap_info = 1172 (struct i40e_virtchnl_irq_map_info *)msg; 1173 struct i40e_virtchnl_vector_map *map; 1174 u16 vsi_id, vsi_queue_id, vector_id; 1175 i40e_status aq_ret = 0; 1176 unsigned long tempmap; 1177 int i; 1178 1179 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { 1180 aq_ret = I40E_ERR_PARAM; 1181 goto error_param; 1182 } 1183 1184 for (i = 0; i < irqmap_info->num_vectors; i++) { 1185 map = &irqmap_info->vecmap[i]; 1186 1187 vector_id = map->vector_id; 1188 vsi_id = map->vsi_id; 1189 /* validate msg params */ 1190 if (!i40e_vc_isvalid_vector_id(vf, vector_id) || 1191 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 1192 aq_ret = I40E_ERR_PARAM; 1193 goto error_param; 1194 } 1195 1196 /* lookout for the invalid queue index */ 1197 tempmap = map->rxq_map; 1198 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 1199 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, 1200 vsi_queue_id)) { 1201 aq_ret = I40E_ERR_PARAM; 1202 goto error_param; 1203 } 1204 } 1205 1206 tempmap = map->txq_map; 1207 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 1208 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, 1209 vsi_queue_id)) { 1210 aq_ret = I40E_ERR_PARAM; 1211 goto error_param; 1212 } 1213 } 1214 1215 i40e_config_irq_link_list(vf, vsi_id, map); 1216 } 1217 error_param: 1218 /* send the response to the VF */ 1219 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, 1220 aq_ret); 1221 } 1222 1223 /** 1224 * i40e_vc_enable_queues_msg 1225 * @vf: pointer to the VF info 1226 * @msg: pointer to the msg buffer 1227 * @msglen: msg length 1228 * 1229 * called from the VF to enable all or specific queue(s) 1230 **/ 1231 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1232 { 1233 struct i40e_virtchnl_queue_select *vqs = 1234 (struct i40e_virtchnl_queue_select *)msg; 1235 struct i40e_pf *pf = vf->pf; 1236 u16 vsi_id = vqs->vsi_id; 1237 i40e_status aq_ret = 0; 1238 1239 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { 1240 aq_ret = I40E_ERR_PARAM; 1241 goto error_param; 1242 } 1243 1244 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 1245 aq_ret = I40E_ERR_PARAM; 1246 goto error_param; 1247 } 1248 1249 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { 1250 aq_ret = I40E_ERR_PARAM; 1251 goto error_param; 1252 } 1253 if (i40e_vsi_control_rings(pf->vsi[vsi_id], true)) 1254 aq_ret = I40E_ERR_TIMEOUT; 1255 error_param: 1256 /* send the response to the VF */ 1257 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, 1258 aq_ret); 1259 } 1260 1261 /** 1262 * i40e_vc_disable_queues_msg 1263 * @vf: pointer to the VF info 1264 * @msg: pointer to the msg buffer 1265 * @msglen: msg length 1266 * 1267 * called from the VF to disable all or specific 1268 * queue(s) 1269 **/ 1270 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1271 { 1272 struct i40e_virtchnl_queue_select *vqs = 1273 (struct i40e_virtchnl_queue_select *)msg; 1274 struct i40e_pf *pf = vf->pf; 1275 u16 vsi_id = vqs->vsi_id; 1276 i40e_status aq_ret = 0; 1277 1278 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { 1279 aq_ret = I40E_ERR_PARAM; 1280 goto error_param; 1281 } 1282 1283 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 1284 aq_ret = I40E_ERR_PARAM; 1285 goto error_param; 1286 } 1287 1288 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { 1289 aq_ret = I40E_ERR_PARAM; 1290 goto error_param; 1291 } 1292 if (i40e_vsi_control_rings(pf->vsi[vsi_id], false)) 1293 aq_ret = I40E_ERR_TIMEOUT; 1294 1295 error_param: 1296 /* send the response to the VF */ 1297 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, 1298 aq_ret); 1299 } 1300 1301 /** 1302 * i40e_vc_get_stats_msg 1303 * @vf: pointer to the VF info 1304 * @msg: pointer to the msg buffer 1305 * @msglen: msg length 1306 * 1307 * called from the VF to get vsi stats 1308 **/ 1309 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1310 { 1311 struct i40e_virtchnl_queue_select *vqs = 1312 (struct i40e_virtchnl_queue_select *)msg; 1313 struct i40e_pf *pf = vf->pf; 1314 struct i40e_eth_stats stats; 1315 i40e_status aq_ret = 0; 1316 struct i40e_vsi *vsi; 1317 1318 memset(&stats, 0, sizeof(struct i40e_eth_stats)); 1319 1320 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { 1321 aq_ret = I40E_ERR_PARAM; 1322 goto error_param; 1323 } 1324 1325 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 1326 aq_ret = I40E_ERR_PARAM; 1327 goto error_param; 1328 } 1329 1330 vsi = pf->vsi[vqs->vsi_id]; 1331 if (!vsi) { 1332 aq_ret = I40E_ERR_PARAM; 1333 goto error_param; 1334 } 1335 i40e_update_eth_stats(vsi); 1336 stats = vsi->eth_stats; 1337 1338 error_param: 1339 /* send the response back to the VF */ 1340 return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret, 1341 (u8 *)&stats, sizeof(stats)); 1342 } 1343 1344 /** 1345 * i40e_check_vf_permission 1346 * @vf: pointer to the VF info 1347 * @macaddr: pointer to the MAC Address being checked 1348 * 1349 * Check if the VF has permission to add or delete unicast MAC address 1350 * filters and return error code -EPERM if not. Then check if the 1351 * address filter requested is broadcast or zero and if so return 1352 * an invalid MAC address error code. 1353 **/ 1354 static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr) 1355 { 1356 struct i40e_pf *pf = vf->pf; 1357 int ret = 0; 1358 1359 if (is_broadcast_ether_addr(macaddr) || 1360 is_zero_ether_addr(macaddr)) { 1361 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr); 1362 ret = I40E_ERR_INVALID_MAC_ADDR; 1363 } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) && 1364 !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) { 1365 /* If the host VMM administrator has set the VF MAC address 1366 * administratively via the ndo_set_vf_mac command then deny 1367 * permission to the VF to add or delete unicast MAC addresses. 1368 * The VF may request to set the MAC address filter already 1369 * assigned to it so do not return an error in that case. 1370 */ 1371 dev_err(&pf->pdev->dev, 1372 "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n"); 1373 ret = -EPERM; 1374 } 1375 return ret; 1376 } 1377 1378 /** 1379 * i40e_vc_add_mac_addr_msg 1380 * @vf: pointer to the VF info 1381 * @msg: pointer to the msg buffer 1382 * @msglen: msg length 1383 * 1384 * add guest mac address filter 1385 **/ 1386 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1387 { 1388 struct i40e_virtchnl_ether_addr_list *al = 1389 (struct i40e_virtchnl_ether_addr_list *)msg; 1390 struct i40e_pf *pf = vf->pf; 1391 struct i40e_vsi *vsi = NULL; 1392 u16 vsi_id = al->vsi_id; 1393 i40e_status ret = 0; 1394 int i; 1395 1396 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 1397 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || 1398 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 1399 ret = I40E_ERR_PARAM; 1400 goto error_param; 1401 } 1402 1403 for (i = 0; i < al->num_elements; i++) { 1404 ret = i40e_check_vf_permission(vf, al->list[i].addr); 1405 if (ret) 1406 goto error_param; 1407 } 1408 vsi = pf->vsi[vsi_id]; 1409 1410 /* add new addresses to the list */ 1411 for (i = 0; i < al->num_elements; i++) { 1412 struct i40e_mac_filter *f; 1413 1414 f = i40e_find_mac(vsi, al->list[i].addr, true, false); 1415 if (!f) { 1416 if (i40e_is_vsi_in_vlan(vsi)) 1417 f = i40e_put_mac_in_vlan(vsi, al->list[i].addr, 1418 true, false); 1419 else 1420 f = i40e_add_filter(vsi, al->list[i].addr, -1, 1421 true, false); 1422 } 1423 1424 if (!f) { 1425 dev_err(&pf->pdev->dev, 1426 "Unable to add VF MAC filter\n"); 1427 ret = I40E_ERR_PARAM; 1428 goto error_param; 1429 } 1430 } 1431 1432 /* program the updated filter list */ 1433 if (i40e_sync_vsi_filters(vsi)) 1434 dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); 1435 1436 error_param: 1437 /* send the response to the VF */ 1438 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, 1439 ret); 1440 } 1441 1442 /** 1443 * i40e_vc_del_mac_addr_msg 1444 * @vf: pointer to the VF info 1445 * @msg: pointer to the msg buffer 1446 * @msglen: msg length 1447 * 1448 * remove guest mac address filter 1449 **/ 1450 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1451 { 1452 struct i40e_virtchnl_ether_addr_list *al = 1453 (struct i40e_virtchnl_ether_addr_list *)msg; 1454 struct i40e_pf *pf = vf->pf; 1455 struct i40e_vsi *vsi = NULL; 1456 u16 vsi_id = al->vsi_id; 1457 i40e_status ret = 0; 1458 int i; 1459 1460 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 1461 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || 1462 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 1463 ret = I40E_ERR_PARAM; 1464 goto error_param; 1465 } 1466 1467 for (i = 0; i < al->num_elements; i++) { 1468 if (is_broadcast_ether_addr(al->list[i].addr) || 1469 is_zero_ether_addr(al->list[i].addr)) { 1470 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", 1471 al->list[i].addr); 1472 ret = I40E_ERR_INVALID_MAC_ADDR; 1473 goto error_param; 1474 } 1475 } 1476 vsi = pf->vsi[vsi_id]; 1477 1478 /* delete addresses from the list */ 1479 for (i = 0; i < al->num_elements; i++) 1480 i40e_del_filter(vsi, al->list[i].addr, 1481 I40E_VLAN_ANY, true, false); 1482 1483 /* program the updated filter list */ 1484 if (i40e_sync_vsi_filters(vsi)) 1485 dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); 1486 1487 error_param: 1488 /* send the response to the VF */ 1489 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, 1490 ret); 1491 } 1492 1493 /** 1494 * i40e_vc_add_vlan_msg 1495 * @vf: pointer to the VF info 1496 * @msg: pointer to the msg buffer 1497 * @msglen: msg length 1498 * 1499 * program guest vlan id 1500 **/ 1501 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1502 { 1503 struct i40e_virtchnl_vlan_filter_list *vfl = 1504 (struct i40e_virtchnl_vlan_filter_list *)msg; 1505 struct i40e_pf *pf = vf->pf; 1506 struct i40e_vsi *vsi = NULL; 1507 u16 vsi_id = vfl->vsi_id; 1508 i40e_status aq_ret = 0; 1509 int i; 1510 1511 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 1512 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || 1513 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 1514 aq_ret = I40E_ERR_PARAM; 1515 goto error_param; 1516 } 1517 1518 for (i = 0; i < vfl->num_elements; i++) { 1519 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 1520 aq_ret = I40E_ERR_PARAM; 1521 dev_err(&pf->pdev->dev, 1522 "invalid VF VLAN id %d\n", vfl->vlan_id[i]); 1523 goto error_param; 1524 } 1525 } 1526 vsi = pf->vsi[vsi_id]; 1527 if (vsi->info.pvid) { 1528 aq_ret = I40E_ERR_PARAM; 1529 goto error_param; 1530 } 1531 1532 i40e_vlan_stripping_enable(vsi); 1533 for (i = 0; i < vfl->num_elements; i++) { 1534 /* add new VLAN filter */ 1535 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); 1536 if (ret) 1537 dev_err(&pf->pdev->dev, 1538 "Unable to add VF vlan filter %d, error %d\n", 1539 vfl->vlan_id[i], ret); 1540 } 1541 1542 error_param: 1543 /* send the response to the VF */ 1544 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret); 1545 } 1546 1547 /** 1548 * i40e_vc_remove_vlan_msg 1549 * @vf: pointer to the VF info 1550 * @msg: pointer to the msg buffer 1551 * @msglen: msg length 1552 * 1553 * remove programmed guest vlan id 1554 **/ 1555 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1556 { 1557 struct i40e_virtchnl_vlan_filter_list *vfl = 1558 (struct i40e_virtchnl_vlan_filter_list *)msg; 1559 struct i40e_pf *pf = vf->pf; 1560 struct i40e_vsi *vsi = NULL; 1561 u16 vsi_id = vfl->vsi_id; 1562 i40e_status aq_ret = 0; 1563 int i; 1564 1565 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 1566 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || 1567 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 1568 aq_ret = I40E_ERR_PARAM; 1569 goto error_param; 1570 } 1571 1572 for (i = 0; i < vfl->num_elements; i++) { 1573 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 1574 aq_ret = I40E_ERR_PARAM; 1575 goto error_param; 1576 } 1577 } 1578 1579 vsi = pf->vsi[vsi_id]; 1580 if (vsi->info.pvid) { 1581 aq_ret = I40E_ERR_PARAM; 1582 goto error_param; 1583 } 1584 1585 for (i = 0; i < vfl->num_elements; i++) { 1586 int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); 1587 if (ret) 1588 dev_err(&pf->pdev->dev, 1589 "Unable to delete VF vlan filter %d, error %d\n", 1590 vfl->vlan_id[i], ret); 1591 } 1592 1593 error_param: 1594 /* send the response to the VF */ 1595 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret); 1596 } 1597 1598 /** 1599 * i40e_vc_validate_vf_msg 1600 * @vf: pointer to the VF info 1601 * @msg: pointer to the msg buffer 1602 * @msglen: msg length 1603 * @msghndl: msg handle 1604 * 1605 * validate msg 1606 **/ 1607 static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, 1608 u32 v_retval, u8 *msg, u16 msglen) 1609 { 1610 bool err_msg_format = false; 1611 int valid_len; 1612 1613 /* Check if VF is disabled. */ 1614 if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states)) 1615 return I40E_ERR_PARAM; 1616 1617 /* Validate message length. */ 1618 switch (v_opcode) { 1619 case I40E_VIRTCHNL_OP_VERSION: 1620 valid_len = sizeof(struct i40e_virtchnl_version_info); 1621 break; 1622 case I40E_VIRTCHNL_OP_RESET_VF: 1623 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: 1624 valid_len = 0; 1625 break; 1626 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: 1627 valid_len = sizeof(struct i40e_virtchnl_txq_info); 1628 break; 1629 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE: 1630 valid_len = sizeof(struct i40e_virtchnl_rxq_info); 1631 break; 1632 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: 1633 valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info); 1634 if (msglen >= valid_len) { 1635 struct i40e_virtchnl_vsi_queue_config_info *vqc = 1636 (struct i40e_virtchnl_vsi_queue_config_info *)msg; 1637 valid_len += (vqc->num_queue_pairs * 1638 sizeof(struct 1639 i40e_virtchnl_queue_pair_info)); 1640 if (vqc->num_queue_pairs == 0) 1641 err_msg_format = true; 1642 } 1643 break; 1644 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: 1645 valid_len = sizeof(struct i40e_virtchnl_irq_map_info); 1646 if (msglen >= valid_len) { 1647 struct i40e_virtchnl_irq_map_info *vimi = 1648 (struct i40e_virtchnl_irq_map_info *)msg; 1649 valid_len += (vimi->num_vectors * 1650 sizeof(struct i40e_virtchnl_vector_map)); 1651 if (vimi->num_vectors == 0) 1652 err_msg_format = true; 1653 } 1654 break; 1655 case I40E_VIRTCHNL_OP_ENABLE_QUEUES: 1656 case I40E_VIRTCHNL_OP_DISABLE_QUEUES: 1657 valid_len = sizeof(struct i40e_virtchnl_queue_select); 1658 break; 1659 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: 1660 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: 1661 valid_len = sizeof(struct i40e_virtchnl_ether_addr_list); 1662 if (msglen >= valid_len) { 1663 struct i40e_virtchnl_ether_addr_list *veal = 1664 (struct i40e_virtchnl_ether_addr_list *)msg; 1665 valid_len += veal->num_elements * 1666 sizeof(struct i40e_virtchnl_ether_addr); 1667 if (veal->num_elements == 0) 1668 err_msg_format = true; 1669 } 1670 break; 1671 case I40E_VIRTCHNL_OP_ADD_VLAN: 1672 case I40E_VIRTCHNL_OP_DEL_VLAN: 1673 valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list); 1674 if (msglen >= valid_len) { 1675 struct i40e_virtchnl_vlan_filter_list *vfl = 1676 (struct i40e_virtchnl_vlan_filter_list *)msg; 1677 valid_len += vfl->num_elements * sizeof(u16); 1678 if (vfl->num_elements == 0) 1679 err_msg_format = true; 1680 } 1681 break; 1682 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 1683 valid_len = sizeof(struct i40e_virtchnl_promisc_info); 1684 break; 1685 case I40E_VIRTCHNL_OP_GET_STATS: 1686 valid_len = sizeof(struct i40e_virtchnl_queue_select); 1687 break; 1688 /* These are always errors coming from the VF. */ 1689 case I40E_VIRTCHNL_OP_EVENT: 1690 case I40E_VIRTCHNL_OP_UNKNOWN: 1691 default: 1692 return -EPERM; 1693 break; 1694 } 1695 /* few more checks */ 1696 if ((valid_len != msglen) || (err_msg_format)) { 1697 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); 1698 return -EINVAL; 1699 } else { 1700 return 0; 1701 } 1702 } 1703 1704 /** 1705 * i40e_vc_process_vf_msg 1706 * @pf: pointer to the PF structure 1707 * @vf_id: source VF id 1708 * @msg: pointer to the msg buffer 1709 * @msglen: msg length 1710 * @msghndl: msg handle 1711 * 1712 * called from the common aeq/arq handler to 1713 * process request from VF 1714 **/ 1715 int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, 1716 u32 v_retval, u8 *msg, u16 msglen) 1717 { 1718 struct i40e_hw *hw = &pf->hw; 1719 unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id; 1720 struct i40e_vf *vf; 1721 int ret; 1722 1723 pf->vf_aq_requests++; 1724 if (local_vf_id >= pf->num_alloc_vfs) 1725 return -EINVAL; 1726 vf = &(pf->vf[local_vf_id]); 1727 /* perform basic checks on the msg */ 1728 ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen); 1729 1730 if (ret) { 1731 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", 1732 local_vf_id, v_opcode, msglen); 1733 return ret; 1734 } 1735 1736 switch (v_opcode) { 1737 case I40E_VIRTCHNL_OP_VERSION: 1738 ret = i40e_vc_get_version_msg(vf); 1739 break; 1740 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: 1741 ret = i40e_vc_get_vf_resources_msg(vf); 1742 break; 1743 case I40E_VIRTCHNL_OP_RESET_VF: 1744 i40e_vc_reset_vf_msg(vf); 1745 ret = 0; 1746 break; 1747 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 1748 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen); 1749 break; 1750 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: 1751 ret = i40e_vc_config_queues_msg(vf, msg, msglen); 1752 break; 1753 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: 1754 ret = i40e_vc_config_irq_map_msg(vf, msg, msglen); 1755 break; 1756 case I40E_VIRTCHNL_OP_ENABLE_QUEUES: 1757 ret = i40e_vc_enable_queues_msg(vf, msg, msglen); 1758 break; 1759 case I40E_VIRTCHNL_OP_DISABLE_QUEUES: 1760 ret = i40e_vc_disable_queues_msg(vf, msg, msglen); 1761 break; 1762 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: 1763 ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen); 1764 break; 1765 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: 1766 ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen); 1767 break; 1768 case I40E_VIRTCHNL_OP_ADD_VLAN: 1769 ret = i40e_vc_add_vlan_msg(vf, msg, msglen); 1770 break; 1771 case I40E_VIRTCHNL_OP_DEL_VLAN: 1772 ret = i40e_vc_remove_vlan_msg(vf, msg, msglen); 1773 break; 1774 case I40E_VIRTCHNL_OP_GET_STATS: 1775 ret = i40e_vc_get_stats_msg(vf, msg, msglen); 1776 break; 1777 case I40E_VIRTCHNL_OP_UNKNOWN: 1778 default: 1779 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", 1780 v_opcode, local_vf_id); 1781 ret = i40e_vc_send_resp_to_vf(vf, v_opcode, 1782 I40E_ERR_NOT_IMPLEMENTED); 1783 break; 1784 } 1785 1786 return ret; 1787 } 1788 1789 /** 1790 * i40e_vc_process_vflr_event 1791 * @pf: pointer to the PF structure 1792 * 1793 * called from the vlfr irq handler to 1794 * free up VF resources and state variables 1795 **/ 1796 int i40e_vc_process_vflr_event(struct i40e_pf *pf) 1797 { 1798 u32 reg, reg_idx, bit_idx, vf_id; 1799 struct i40e_hw *hw = &pf->hw; 1800 struct i40e_vf *vf; 1801 1802 if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) 1803 return 0; 1804 1805 /* re-enable vflr interrupt cause */ 1806 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 1807 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; 1808 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 1809 i40e_flush(hw); 1810 1811 clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); 1812 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { 1813 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; 1814 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 1815 /* read GLGEN_VFLRSTAT register to find out the flr VFs */ 1816 vf = &pf->vf[vf_id]; 1817 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); 1818 if (reg & (1 << bit_idx)) { 1819 /* clear the bit in GLGEN_VFLRSTAT */ 1820 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); 1821 1822 if (!test_bit(__I40E_DOWN, &pf->state)) 1823 i40e_reset_vf(vf, true); 1824 } 1825 } 1826 1827 return 0; 1828 } 1829 1830 /** 1831 * i40e_vc_vf_broadcast 1832 * @pf: pointer to the PF structure 1833 * @opcode: operation code 1834 * @retval: return value 1835 * @msg: pointer to the msg buffer 1836 * @msglen: msg length 1837 * 1838 * send a message to all VFs on a given PF 1839 **/ 1840 static void i40e_vc_vf_broadcast(struct i40e_pf *pf, 1841 enum i40e_virtchnl_ops v_opcode, 1842 i40e_status v_retval, u8 *msg, 1843 u16 msglen) 1844 { 1845 struct i40e_hw *hw = &pf->hw; 1846 struct i40e_vf *vf = pf->vf; 1847 int i; 1848 1849 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { 1850 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 1851 /* Not all VFs are enabled so skip the ones that are not */ 1852 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && 1853 !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) 1854 continue; 1855 1856 /* Ignore return value on purpose - a given VF may fail, but 1857 * we need to keep going and send to all of them 1858 */ 1859 i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, 1860 msg, msglen, NULL); 1861 } 1862 } 1863 1864 /** 1865 * i40e_vc_notify_link_state 1866 * @pf: pointer to the PF structure 1867 * 1868 * send a link status message to all VFs on a given PF 1869 **/ 1870 void i40e_vc_notify_link_state(struct i40e_pf *pf) 1871 { 1872 struct i40e_virtchnl_pf_event pfe; 1873 struct i40e_hw *hw = &pf->hw; 1874 struct i40e_vf *vf = pf->vf; 1875 struct i40e_link_status *ls = &pf->hw.phy.link_info; 1876 int i; 1877 1878 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; 1879 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; 1880 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { 1881 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 1882 if (vf->link_forced) { 1883 pfe.event_data.link_event.link_status = vf->link_up; 1884 pfe.event_data.link_event.link_speed = 1885 (vf->link_up ? I40E_LINK_SPEED_40GB : 0); 1886 } else { 1887 pfe.event_data.link_event.link_status = 1888 ls->link_info & I40E_AQ_LINK_UP; 1889 pfe.event_data.link_event.link_speed = ls->link_speed; 1890 } 1891 i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, 1892 0, (u8 *)&pfe, sizeof(pfe), 1893 NULL); 1894 } 1895 } 1896 1897 /** 1898 * i40e_vc_notify_reset 1899 * @pf: pointer to the PF structure 1900 * 1901 * indicate a pending reset to all VFs on a given PF 1902 **/ 1903 void i40e_vc_notify_reset(struct i40e_pf *pf) 1904 { 1905 struct i40e_virtchnl_pf_event pfe; 1906 1907 pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; 1908 pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; 1909 i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS, 1910 (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event)); 1911 } 1912 1913 /** 1914 * i40e_vc_notify_vf_reset 1915 * @vf: pointer to the VF structure 1916 * 1917 * indicate a pending reset to the given VF 1918 **/ 1919 void i40e_vc_notify_vf_reset(struct i40e_vf *vf) 1920 { 1921 struct i40e_virtchnl_pf_event pfe; 1922 int abs_vf_id; 1923 1924 /* validate the request */ 1925 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) 1926 return; 1927 1928 /* verify if the VF is in either init or active before proceeding */ 1929 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && 1930 !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) 1931 return; 1932 1933 abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id; 1934 1935 pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; 1936 pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; 1937 i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, 1938 I40E_SUCCESS, (u8 *)&pfe, 1939 sizeof(struct i40e_virtchnl_pf_event), NULL); 1940 } 1941 1942 /** 1943 * i40e_ndo_set_vf_mac 1944 * @netdev: network interface device structure 1945 * @vf_id: VF identifier 1946 * @mac: mac address 1947 * 1948 * program VF mac address 1949 **/ 1950 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 1951 { 1952 struct i40e_netdev_priv *np = netdev_priv(netdev); 1953 struct i40e_vsi *vsi = np->vsi; 1954 struct i40e_pf *pf = vsi->back; 1955 struct i40e_mac_filter *f; 1956 struct i40e_vf *vf; 1957 int ret = 0; 1958 1959 /* validate the request */ 1960 if (vf_id >= pf->num_alloc_vfs) { 1961 dev_err(&pf->pdev->dev, 1962 "Invalid VF Identifier %d\n", vf_id); 1963 ret = -EINVAL; 1964 goto error_param; 1965 } 1966 1967 vf = &(pf->vf[vf_id]); 1968 vsi = pf->vsi[vf->lan_vsi_index]; 1969 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 1970 dev_err(&pf->pdev->dev, 1971 "Uninitialized VF %d\n", vf_id); 1972 ret = -EINVAL; 1973 goto error_param; 1974 } 1975 1976 if (!is_valid_ether_addr(mac)) { 1977 dev_err(&pf->pdev->dev, 1978 "Invalid VF ethernet address\n"); 1979 ret = -EINVAL; 1980 goto error_param; 1981 } 1982 1983 /* delete the temporary mac address */ 1984 i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id, 1985 true, false); 1986 1987 /* Delete all the filters for this VSI - we're going to kill it 1988 * anyway. 1989 */ 1990 list_for_each_entry(f, &vsi->mac_filter_list, list) 1991 i40e_del_filter(vsi, f->macaddr, f->vlan, true, false); 1992 1993 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); 1994 /* program mac filter */ 1995 if (i40e_sync_vsi_filters(vsi)) { 1996 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 1997 ret = -EIO; 1998 goto error_param; 1999 } 2000 ether_addr_copy(vf->default_lan_addr.addr, mac); 2001 vf->pf_set_mac = true; 2002 /* Force the VF driver stop so it has to reload with new MAC address */ 2003 i40e_vc_disable_vf(pf, vf); 2004 dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); 2005 2006 error_param: 2007 return ret; 2008 } 2009 2010 /** 2011 * i40e_ndo_set_vf_port_vlan 2012 * @netdev: network interface device structure 2013 * @vf_id: VF identifier 2014 * @vlan_id: mac address 2015 * @qos: priority setting 2016 * 2017 * program VF vlan id and/or qos 2018 **/ 2019 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, 2020 int vf_id, u16 vlan_id, u8 qos) 2021 { 2022 struct i40e_netdev_priv *np = netdev_priv(netdev); 2023 struct i40e_pf *pf = np->vsi->back; 2024 struct i40e_vsi *vsi; 2025 struct i40e_vf *vf; 2026 int ret = 0; 2027 2028 /* validate the request */ 2029 if (vf_id >= pf->num_alloc_vfs) { 2030 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 2031 ret = -EINVAL; 2032 goto error_pvid; 2033 } 2034 2035 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) { 2036 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); 2037 ret = -EINVAL; 2038 goto error_pvid; 2039 } 2040 2041 vf = &(pf->vf[vf_id]); 2042 vsi = pf->vsi[vf->lan_vsi_index]; 2043 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 2044 dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); 2045 ret = -EINVAL; 2046 goto error_pvid; 2047 } 2048 2049 if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) { 2050 dev_err(&pf->pdev->dev, 2051 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", 2052 vf_id); 2053 /* Administrator Error - knock the VF offline until he does 2054 * the right thing by reconfiguring his network correctly 2055 * and then reloading the VF driver. 2056 */ 2057 i40e_vc_disable_vf(pf, vf); 2058 } 2059 2060 /* Check for condition where there was already a port VLAN ID 2061 * filter set and now it is being deleted by setting it to zero. 2062 * Additionally check for the condition where there was a port 2063 * VLAN but now there is a new and different port VLAN being set. 2064 * Before deleting all the old VLAN filters we must add new ones 2065 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our 2066 * MAC addresses deleted. 2067 */ 2068 if ((!(vlan_id || qos) || 2069 (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) && 2070 vsi->info.pvid) 2071 ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY); 2072 2073 if (vsi->info.pvid) { 2074 /* kill old VLAN */ 2075 ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) & 2076 VLAN_VID_MASK)); 2077 if (ret) { 2078 dev_info(&vsi->back->pdev->dev, 2079 "remove VLAN failed, ret=%d, aq_err=%d\n", 2080 ret, pf->hw.aq.asq_last_status); 2081 } 2082 } 2083 if (vlan_id || qos) 2084 ret = i40e_vsi_add_pvid(vsi, 2085 vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)); 2086 else 2087 i40e_vsi_remove_pvid(vsi); 2088 2089 if (vlan_id) { 2090 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", 2091 vlan_id, qos, vf_id); 2092 2093 /* add new VLAN filter */ 2094 ret = i40e_vsi_add_vlan(vsi, vlan_id); 2095 if (ret) { 2096 dev_info(&vsi->back->pdev->dev, 2097 "add VF VLAN failed, ret=%d aq_err=%d\n", ret, 2098 vsi->back->hw.aq.asq_last_status); 2099 goto error_pvid; 2100 } 2101 /* Kill non-vlan MAC filters - ignore error return since 2102 * there might not be any non-vlan MAC filters. 2103 */ 2104 i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY); 2105 } 2106 2107 if (ret) { 2108 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n"); 2109 goto error_pvid; 2110 } 2111 /* The Port VLAN needs to be saved across resets the same as the 2112 * default LAN MAC address. 2113 */ 2114 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); 2115 ret = 0; 2116 2117 error_pvid: 2118 return ret; 2119 } 2120 2121 #define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */ 2122 #define I40E_MAX_BW_INACTIVE_ACCUM 4 /* device can accumulate 4 credits max */ 2123 /** 2124 * i40e_ndo_set_vf_bw 2125 * @netdev: network interface device structure 2126 * @vf_id: VF identifier 2127 * @tx_rate: Tx rate 2128 * 2129 * configure VF Tx rate 2130 **/ 2131 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, 2132 int max_tx_rate) 2133 { 2134 struct i40e_netdev_priv *np = netdev_priv(netdev); 2135 struct i40e_pf *pf = np->vsi->back; 2136 struct i40e_vsi *vsi; 2137 struct i40e_vf *vf; 2138 int speed = 0; 2139 int ret = 0; 2140 2141 /* validate the request */ 2142 if (vf_id >= pf->num_alloc_vfs) { 2143 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id); 2144 ret = -EINVAL; 2145 goto error; 2146 } 2147 2148 if (min_tx_rate) { 2149 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", 2150 min_tx_rate, vf_id); 2151 return -EINVAL; 2152 } 2153 2154 vf = &(pf->vf[vf_id]); 2155 vsi = pf->vsi[vf->lan_vsi_index]; 2156 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 2157 dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id); 2158 ret = -EINVAL; 2159 goto error; 2160 } 2161 2162 switch (pf->hw.phy.link_info.link_speed) { 2163 case I40E_LINK_SPEED_40GB: 2164 speed = 40000; 2165 break; 2166 case I40E_LINK_SPEED_10GB: 2167 speed = 10000; 2168 break; 2169 case I40E_LINK_SPEED_1GB: 2170 speed = 1000; 2171 break; 2172 default: 2173 break; 2174 } 2175 2176 if (max_tx_rate > speed) { 2177 dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.", 2178 max_tx_rate, vf->vf_id); 2179 ret = -EINVAL; 2180 goto error; 2181 } 2182 2183 if ((max_tx_rate < 50) && (max_tx_rate > 0)) { 2184 dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n"); 2185 max_tx_rate = 50; 2186 } 2187 2188 /* Tx rate credits are in values of 50Mbps, 0 is disabled*/ 2189 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, 2190 max_tx_rate / I40E_BW_CREDIT_DIVISOR, 2191 I40E_MAX_BW_INACTIVE_ACCUM, NULL); 2192 if (ret) { 2193 dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n", 2194 ret); 2195 ret = -EIO; 2196 goto error; 2197 } 2198 vf->tx_rate = max_tx_rate; 2199 error: 2200 return ret; 2201 } 2202 2203 /** 2204 * i40e_ndo_get_vf_config 2205 * @netdev: network interface device structure 2206 * @vf_id: VF identifier 2207 * @ivi: VF configuration structure 2208 * 2209 * return VF configuration 2210 **/ 2211 int i40e_ndo_get_vf_config(struct net_device *netdev, 2212 int vf_id, struct ifla_vf_info *ivi) 2213 { 2214 struct i40e_netdev_priv *np = netdev_priv(netdev); 2215 struct i40e_vsi *vsi = np->vsi; 2216 struct i40e_pf *pf = vsi->back; 2217 struct i40e_vf *vf; 2218 int ret = 0; 2219 2220 /* validate the request */ 2221 if (vf_id >= pf->num_alloc_vfs) { 2222 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 2223 ret = -EINVAL; 2224 goto error_param; 2225 } 2226 2227 vf = &(pf->vf[vf_id]); 2228 /* first vsi is always the LAN vsi */ 2229 vsi = pf->vsi[vf->lan_vsi_index]; 2230 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 2231 dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); 2232 ret = -EINVAL; 2233 goto error_param; 2234 } 2235 2236 ivi->vf = vf_id; 2237 2238 memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN); 2239 2240 ivi->max_tx_rate = vf->tx_rate; 2241 ivi->min_tx_rate = 0; 2242 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK; 2243 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >> 2244 I40E_VLAN_PRIORITY_SHIFT; 2245 if (vf->link_forced == false) 2246 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; 2247 else if (vf->link_up == true) 2248 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 2249 else 2250 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 2251 ivi->spoofchk = vf->spoofchk; 2252 ret = 0; 2253 2254 error_param: 2255 return ret; 2256 } 2257 2258 /** 2259 * i40e_ndo_set_vf_link_state 2260 * @netdev: network interface device structure 2261 * @vf_id: VF identifier 2262 * @link: required link state 2263 * 2264 * Set the link state of a specified VF, regardless of physical link state 2265 **/ 2266 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) 2267 { 2268 struct i40e_netdev_priv *np = netdev_priv(netdev); 2269 struct i40e_pf *pf = np->vsi->back; 2270 struct i40e_virtchnl_pf_event pfe; 2271 struct i40e_hw *hw = &pf->hw; 2272 struct i40e_vf *vf; 2273 int abs_vf_id; 2274 int ret = 0; 2275 2276 /* validate the request */ 2277 if (vf_id >= pf->num_alloc_vfs) { 2278 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 2279 ret = -EINVAL; 2280 goto error_out; 2281 } 2282 2283 vf = &pf->vf[vf_id]; 2284 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 2285 2286 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; 2287 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; 2288 2289 switch (link) { 2290 case IFLA_VF_LINK_STATE_AUTO: 2291 vf->link_forced = false; 2292 pfe.event_data.link_event.link_status = 2293 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; 2294 pfe.event_data.link_event.link_speed = 2295 pf->hw.phy.link_info.link_speed; 2296 break; 2297 case IFLA_VF_LINK_STATE_ENABLE: 2298 vf->link_forced = true; 2299 vf->link_up = true; 2300 pfe.event_data.link_event.link_status = true; 2301 pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB; 2302 break; 2303 case IFLA_VF_LINK_STATE_DISABLE: 2304 vf->link_forced = true; 2305 vf->link_up = false; 2306 pfe.event_data.link_event.link_status = false; 2307 pfe.event_data.link_event.link_speed = 0; 2308 break; 2309 default: 2310 ret = -EINVAL; 2311 goto error_out; 2312 } 2313 /* Notify the VF of its new link state */ 2314 i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, 2315 0, (u8 *)&pfe, sizeof(pfe), NULL); 2316 2317 error_out: 2318 return ret; 2319 } 2320 2321 /** 2322 * i40e_ndo_set_vf_spoofchk 2323 * @netdev: network interface device structure 2324 * @vf_id: VF identifier 2325 * @enable: flag to enable or disable feature 2326 * 2327 * Enable or disable VF spoof checking 2328 **/ 2329 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) 2330 { 2331 struct i40e_netdev_priv *np = netdev_priv(netdev); 2332 struct i40e_vsi *vsi = np->vsi; 2333 struct i40e_pf *pf = vsi->back; 2334 struct i40e_vsi_context ctxt; 2335 struct i40e_hw *hw = &pf->hw; 2336 struct i40e_vf *vf; 2337 int ret = 0; 2338 2339 /* validate the request */ 2340 if (vf_id >= pf->num_alloc_vfs) { 2341 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 2342 ret = -EINVAL; 2343 goto out; 2344 } 2345 2346 vf = &(pf->vf[vf_id]); 2347 2348 if (enable == vf->spoofchk) 2349 goto out; 2350 2351 vf->spoofchk = enable; 2352 memset(&ctxt, 0, sizeof(ctxt)); 2353 ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid; 2354 ctxt.pf_num = pf->hw.pf_id; 2355 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 2356 if (enable) 2357 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | 2358 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); 2359 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 2360 if (ret) { 2361 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", 2362 ret); 2363 ret = -EIO; 2364 } 2365 out: 2366 return ret; 2367 } 2368