/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;
	if (vf->link_forced) {
		pfe.event_data.link_event.link_status = vf->link_up;
		pfe.event_data.link_event.link_speed =
			(vf->link_up ? I40E_LINK_SPEED_40GB : 0);
	} else {
		pfe.event_data.link_event.link_status =
			ls->link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			(enum virtchnl_link_speed)ls->link_speed;
	}
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}
/***********************misc routines*****************************/

/**
 * i40e_vc_disable_vf
 * @pf: pointer to the PF info
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset
 **/
static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
{
	i40e_vc_notify_vf_reset(vf);
	i40e_reset_vf(vf, false);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}
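
/* Note on the encoding used by i40e_config_irq_link_list() below: the VF's
 * rxq_map and txq_map are folded into one bitmap in which (with
 * I40E_VIRTCHNL_SUPPORTED_QTYPES == 2) VSI queue pair N contributes bit
 * (2 * N) for its RX queue and bit (2 * N + 1) for its TX queue. Walking the
 * set bits in order then yields exactly the sequence in which the queues
 * must be chained through the LNKLST head and the QINT_RQCTL/QINT_TQCTL
 * "next queue" fields, terminated by I40E_QUEUE_END_OF_LIST.
 */
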
/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	next_q = find_first_bit(&linklistmap,
				(I40E_MAX_VSI_QP *
				 I40E_VIRTCHNL_SUPPORTED_QTYPES));
	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap,
				       (I40E_MAX_VSI_QP *
					I40E_VIRTCHNL_SUPPORTED_QTYPES),
				       next_q + 1);
		if (next_q <
		    (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the vf is running in polling mode and using interrupt zero,
	 * need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}

/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
 *
 **/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}
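
/* The VPINT_LNKLSTN register block is shared by all VFs: vector v (v > 0) of
 * VF n lives at index ((num_msix_vectors_vf - 1) * n) + (v - 1), while
 * vector 0 uses the per-VF VPINT_LNKLST0 register instead. As a worked
 * example, assuming num_msix_vectors_vf == 5, vector 3 of VF 2 maps to
 * I40E_VPINT_LNKLSTN((4 * 2) + 2) = I40E_VPINT_LNKLSTN(10). The same
 * indexing is used by i40e_config_irq_link_list() above and by
 * i40e_config_iwarp_qvlist() below.
 */
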
/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf, size;

	size = sizeof(struct virtchnl_iwarp_qvlist_info) +
	       (sizeof(struct virtchnl_iwarp_qv_info) *
						(qvlist_info->num_vectors - 1));
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info)
		return -ENOMEM;
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, v_idx))
			goto err;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
	return -EINVAL;
}
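
/* The two queue-config helpers below program per-queue context that lives in
 * host memory (HMC) rather than in registers. Both follow the same pattern:
 * build the context structure, clear the stale HMC context, then write the
 * new one. The ring base address is programmed in 128-byte units (hence the
 * dma_ring_addr / 128), and the RX header/data buffer sizes are likewise
 * scaled down by I40E_RXQ_CTX_HBUFF_SHIFT and I40E_RXQ_CTX_DBUFF_SHIFT.
 */
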
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
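
/* Note for i40e_alloc_vsi_res() below: i40e_aq_config_vsi_bw_limit() takes
 * its limit in credits of 50 Mbps each, which is why the VF's tx_rate
 * (in Mbps) is divided by 50 before being handed to firmware.
 */
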
/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @type: type of VSI to allocate
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}
	if (type == I40E_VSI_SRIOV) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
	}

	/* program mac filter */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  vf->tx_rate / 50, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}
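
/* In i40e_enable_vf_mappings() below, each VSILAN_QTABLE register packs two
 * 11-bit PF queue indices (one per 16-bit half), so the seven registers
 * cover up to 14 queue pairs; writing 0x07FF07FF puts the all-ones "unused"
 * marker into both halves.
 */
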
/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_queue_pairs = 0;
	int j;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	/* map PF queues to VF queues */
	for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);

		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
		total_queue_pairs++;
	}

	/* map PF queues to VSI */
	for (j = 0; j < 7; j++) {
		if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
			reg = 0x07FF07FF;	/* unused */
		} else {
			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
							  j * 2);
			reg = qid;
			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
						      (j * 2) + 1);
			reg |= qid << 16;
		}
		i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id),
				  reg);
	}

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
		vf->num_mac = 0;
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						   + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						   + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	vf->vf_states = 0;
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

/* Config space address of the VF's PCIe Device Status register, read via the
 * I40E_PF_PCI_CIAA/CIAD indirect interface; 0x20 is its Transactions Pending
 * bit (bit 5).
 */
#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}
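
/* VF reset is split into three stages shared by the single-VF and the
 * all-VFs paths: i40e_trigger_vf_reset() kicks (or, for VFLR, acknowledges)
 * the hardware reset, the caller waits for VPGEN_VFRSTAT to report
 * completion and stops the VF's rings, and i40e_cleanup_reset_vf() frees
 * and reallocates resources before telling the VF driver the reset is done.
 * See i40e_reset_vf() and i40e_reset_all_vfs() below.
 */
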
/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;

	/* warn the VF */
	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give some time to finish to any VF config
	 * functions that may still be running at this point.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}

/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
				       &vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * reset the VF
 **/
void i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);
}
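
/* Note that the polling loop in i40e_reset_all_vfs() shares its ten
 * 10-20 ms sleep slots across all VFs: the iterator v only advances once a
 * VF reports done, so the total wait is bounded at roughly 100-200 ms for
 * the whole set instead of per VF.
 */
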
/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 **/
void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF that
		 * failed the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* HW may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);
}
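
/* GLGEN_VFLRSTAT is a bank of 32-bit registers indexed by absolute VF id:
 * the VF sits in register (abs_vf_id / 32), bit (abs_vf_id % 32). For
 * example, with vf_base_id == 64, VF 5 has absolute id 69 and is acked
 * through bit 5 of GLGEN_VFLRSTAT(2). Both i40e_trigger_vf_reset() above
 * and i40e_free_vfs() below rely on this layout to acknowledge VFLR.
 */
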
/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf, false);
	return ret;
}

#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}
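
/* i40e_pci_sriov_configure() below is wired up as the sriov_configure hook
 * in the driver's struct pci_driver, so it runs when the administrator
 * writes the sriov_numvfs sysfs attribute, e.g. (device path hypothetical):
 *
 *   echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * Writing 0 releases the VFs again, provided none are assigned to a VM.
 */
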
/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf,
					   BIT_ULL(__I40E_PF_RESET_REQUESTED));
		}
		return i40e_pci_sriov_enable(pdev, num_vfs);
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		return -EINVAL;
	}
	return 0;
}

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received. */
		vf->num_invalid_msgs = 0;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}
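
/* Version negotiation: VIRTCHNL_OP_VERSION is the first message a VF sends.
 * A 1.0 VF does not understand the capability flags introduced with 1.1, so
 * i40e_vc_get_version_msg() below reports minor version
 * VIRTCHNL_VERSION_MINOR_NO_VF_CAPS to such VFs; newer VFs get the PF's
 * real major/minor pair.
 */
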
/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	int len = 0;
	int ret;

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct virtchnl_vf_resource) +
	       sizeof(struct virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
		vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_offload_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				vf->vf_id);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}
		vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_offload_flags |=
					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;

	if (vf->lan_vsi_idx) {
		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
		/* VFs only use TC 0 */
		vfres->vsi_res[0].qset_handle
					  = le16_to_cpu(vsi->info.qs_handle[0]);
		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
				vf->default_lan_addr.addr);
	}
	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself,
 * unlike other virtchnl messages, PF driver
 * doesn't send the response back to the VF
 **/
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		i40e_reset_vf(vf, false);
}

/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * called to get the number of VLANs offloaded on this VF
 **/
static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	int num_vlans = 0, bkt;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
			num_vlans++;
	}

	return num_vlans;
}

/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct virtchnl_promisc_info *info =
	    (struct virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_mac_filter *f;
	i40e_status aq_ret = 0;
	bool allmulti = false;
	struct i40e_vsi *vsi;
	bool alluni = false;
	int aq_err = 0;
	int bkt;

	vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    !vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);
		/* Lie to the VF on purpose. */
		aq_ret = 0;
		goto error_param;
	}
	/* Multicast promiscuous handling */
	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
							    allmulti,
							    vf->port_vlan_id,
							    NULL);
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
								    vsi->seid,
								    allmulti,
								    f->vlan,
								    NULL);
			aq_err = pf->hw.aq.asq_last_status;
			if (aq_ret) {
				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
				break;
			}
		}
	} else {
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
							       allmulti, NULL);
		aq_err = pf->hw.aq.asq_last_status;
		if (aq_ret) {
			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			goto error_param;
		}
	}

	if (!aq_ret) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully set multicast promiscuous mode\n",
			 vf->vf_id);
		if (allmulti)
			set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
		else
			clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	}

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;
	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
							    alluni,
							    vf->port_vlan_id,
							    NULL);
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			aq_ret = 0;
			if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) {
				aq_ret =
				    i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
								   vsi->seid,
								   alluni,
								   f->vlan,
								   NULL);
				aq_err = pf->hw.aq.asq_last_status;
			}
			if (aq_ret)
				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
		}
	} else {
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
							     alluni, NULL,
							     true);
		aq_err = pf->hw.aq.asq_last_status;
		if (aq_ret) {
			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
				vf->vf_id, info->flags,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			goto error_param;
		}
	}

	if (!aq_ret) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully set unicast promiscuous mode\n",
			 vf->vf_id);
		if (alluni)
			set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
		else
			clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}
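
/* VIRTCHNL_OP_CONFIG_VSI_QUEUES carries one virtchnl_queue_pair_info per
 * queue pair, each holding both the TX and the RX ring parameters. The
 * handler below validates every pair against the VF's own VSI and queue
 * range before any HMC context is written, so a single bad entry rejects
 * the whole message.
 */
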
/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the rx/tx
 * queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_vsi_queue_config_info *qci =
	    (struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *qpi;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id, vsi_queue_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi_id = qci->vsi_id;
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		vsi_queue_id = qpi->txq.queue_id;
		if ((qpi->txq.vsi_id != vsi_id) ||
		    (qpi->rxq.vsi_id != vsi_id) ||
		    (qpi->rxq.queue_id != vsi_queue_id) ||
		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}
	/* set vsi num_queue_pairs in use to num configured by VF */
	pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the irq to
 * queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_irq_map_info *irqmap_info =
	    (struct virtchnl_irq_map_info *)msg;
	struct virtchnl_vector_map *map;
	u16 vsi_id, vsi_queue_id, vector_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		tempmap = map->rxq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		tempmap = map->txq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}
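
/* The enable/disable handlers below validate the VF's queue selection but
 * then act on every ring of the VF's LAN VSI through i40e_vsi_start_rings()
 * and i40e_vsi_stop_rings(); the vqs->rx_queues and vqs->tx_queues bitmaps
 * are only checked for being non-empty, not honored per queue.
 */
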
/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
		aq_ret = I40E_ERR_TIMEOUT;
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to disable all or specific
 * queue(s)
 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}
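/* Reader's note: although virtchnl_queue_select carries per-queue
 * rx_queues/tx_queues bitmaps, the two handlers above only verify
 * that at least one bit is set and then start or stop every ring of
 * the VF's LAN VSI via i40e_vsi_start_rings()/i40e_vsi_stop_rings().
 */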
/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to get vsi stats
 **/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_eth_stats stats;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;

	memset(&stats, 0, sizeof(struct i40e_eth_stats));

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	i40e_update_eth_stats(vsi);
	stats = vsi->eth_stats;

error_param:
	/* send the response back to the VF */
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
				      (u8 *)&stats, sizeof(stats));
}

/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
#define I40E_VC_MAX_VLAN_PER_VF 8

/**
 * i40e_check_vf_permission
 * @vf: pointer to the VF info
 * @macaddr: pointer to the MAC Address being checked
 *
 * Check if the VF has permission to add or delete unicast MAC address
 * filters and return error code -EPERM if not. Then check if the
 * address filter requested is broadcast or zero and if so return
 * an invalid MAC address error code.
 **/
static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
{
	struct i40e_pf *pf = vf->pf;
	int ret = 0;

	if (is_broadcast_ether_addr(macaddr) ||
	    is_zero_ether_addr(macaddr)) {
		dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
		ret = I40E_ERR_INVALID_MAC_ADDR;
	} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
		   !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
		   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
		/* If the host VMM administrator has set the VF MAC address
		 * administratively via the ndo_set_vf_mac command then deny
		 * permission to the VF to add or delete unicast MAC
		 * addresses, unless the VF is privileged and may do as it
		 * wishes. The VF may request to set the MAC address filter
		 * already assigned to it, so do not return an error in that
		 * case.
		 */
		dev_err(&pf->pdev->dev,
			"VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
		ret = -EPERM;
	} else if ((vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF) &&
		   !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"VF is not trusted, switch the VF to trusted to add more functionality\n");
		ret = -EPERM;
	}
	return ret;
}
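/* Illustrative scenario (hypothetical interface name): after the host
 * admin runs "ip link set eth0 vf 0 mac 02:00:00:00:00:01", pf_set_mac
 * is true, so a later VF request to add a different unicast address is
 * rejected with -EPERM, while re-adding 02:00:00:00:00:01 itself (the
 * VF's default_lan_addr) is still allowed.
 */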
/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * add guest mac address filter
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		ret = i40e_check_vf_permission(vf, al->list[i].addr);
		if (ret)
			goto error_param;
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	/* Lock once, because every function called inside the loop below
	 * accesses the VSI's MAC filter list, which must be protected by
	 * the same lock.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		f = i40e_find_mac(vsi, al->list[i].addr);
		if (!f)
			f = i40e_add_mac_filter(vsi, al->list[i].addr);

		if (!f) {
			dev_err(&pf->pdev->dev,
				"Unable to add MAC filter %pM for VF %d\n",
				al->list[i].addr, vf->vf_id);
			ret = I40E_ERR_PARAM;
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_param;
		} else {
			vf->num_mac++;
		}
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program the updated filter list */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
			vf->vf_id, ret);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
				       ret);
}

/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove guest mac address filter
 **/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		if (is_broadcast_ether_addr(al->list[i].addr) ||
		    is_zero_ether_addr(al->list[i].addr)) {
			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
				al->list[i].addr, vf->vf_id);
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	/* delete addresses from the list */
	for (i = 0; i < al->num_elements; i++)
		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
			ret = I40E_ERR_INVALID_MAC_ADDR;
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_param;
		} else {
			vf->num_mac--;
		}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program the updated filter list */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
			vf->vf_id, ret);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
				       ret);
}
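/* Design note: both handlers above take mac_filter_hash_lock once
 * around the whole list walk instead of per address, and only call
 * i40e_sync_vsi_filters() - which talks to the admin queue - after
 * the lock has been dropped.
 */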
/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * program guest vlan id
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_vlan_filter_list *vfl =
	    (struct virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
		goto error_param;
	}
	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);

		if (!ret)
			vf->num_vlan++;

		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
							   true,
							   vfl->vlan_id[i],
							   NULL);
		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
							   true,
							   vfl->vlan_id[i],
							   NULL);

		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VLAN filter %d for VF %d, error %d\n",
				vfl->vlan_id[i], vf->vf_id, ret);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
}

/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove programmed guest vlan id
 **/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_vlan_filter_list *vfl =
	    (struct virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
		vf->num_vlan--;

		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
							   false,
							   vfl->vlan_id[i],
							   NULL);
		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
							   false,
							   vfl->vlan_id[i],
							   NULL);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
}
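/* Reader's note: when the VF is already in unicast or multicast
 * promiscuous mode, the two handlers above also toggle
 * promiscuous-on-VLAN in the same direction for each VLAN touched,
 * so promiscuous reception tracks the VF's current VLAN set.
 */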
/**
 * i40e_vc_iwarp_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF for the iwarp msgs
 **/
static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf = vf->pf;
	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
				     msg, msglen);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
				       aq_ret);
}

/**
 * i40e_vc_iwarp_qvmap_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 * @config: config qvmap or release it
 *
 * called from the VF for the iwarp msgs
 **/
static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
				   bool config)
{
	struct virtchnl_iwarp_qvlist_info *qvlist_info =
	    (struct virtchnl_iwarp_qvlist_info *)msg;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (config) {
		if (i40e_config_iwarp_qvlist(vf, qvlist_info))
			aq_ret = I40E_ERR_PARAM;
	} else {
		i40e_release_iwarp_qvlist(vf);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
				       VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
				       aq_ret);
}

/**
 * i40e_vc_config_rss_key
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Configure the VF's RSS key
 **/
static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_rss_key *vrk =
	    (struct virtchnl_rss_key *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vrk->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
	    (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
err:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
				       aq_ret);
}

/**
 * i40e_vc_config_rss_lut
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Configure the VF's RSS LUT
 **/
static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_rss_lut *vrl =
	    (struct virtchnl_rss_lut *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vrl->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
	    (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
err:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
				       aq_ret);
}
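/* Reader's note: i40e_config_rss() is called with only one of key/lut
 * non-NULL, so the two opcodes above update the RSS key and the RSS
 * LUT independently; a key that is not I40E_HKEY_ARRAY_SIZE bytes or
 * a LUT that is not I40E_VF_HLUT_ARRAY_SIZE entries is rejected as a
 * parameter error.
 */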
/**
 * i40e_vc_get_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Return the RSS HENA bits allowed by the hardware
 **/
static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_rss_hena *vrh = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	int len = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}
	len = sizeof(struct virtchnl_rss_hena);

	vrh = kzalloc(len, GFP_KERNEL);
	if (!vrh) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	vrh->hena = i40e_pf_get_default_rss_hena(pf);
err:
	/* send the response back to the VF */
	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
					aq_ret, (u8 *)vrh, len);
	kfree(vrh);
	return aq_ret;
}

/**
 * i40e_vc_set_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Set the RSS HENA bits for the VF
 **/
static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_rss_hena *vrh =
	    (struct virtchnl_rss_hena *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
			  (u32)(vrh->hena >> 32));

err:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
}
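/* Reader's note: the 64-bit HENA value is split across two 32-bit
 * VFQF_HENA1 registers per VF - the low word goes to index 0 and the
 * high word to index 1 - because each register write can carry only
 * 32 bits.
 */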
/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the PF structure
 * @vf_id: source VF id
 * @v_opcode: operation code
 * @v_retval: unused return value code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process request from VF
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
			   u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
	struct i40e_vf *vf;
	int ret;

	pf->vf_aq_requests++;
	if (local_vf_id >= pf->num_alloc_vfs)
		return -EINVAL;
	vf = &(pf->vf[local_vf_id]);

	/* Check if VF is disabled. */
	if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
		return I40E_ERR_PARAM;

	/* perform basic checks on the msg */
	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);

	/* perform additional checks specific to this driver */
	if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
		struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;

		if (vrk->key_len != I40E_HKEY_ARRAY_SIZE)
			ret = -EINVAL;
	} else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
		struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;

		if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)
			ret = -EINVAL;
	}

	if (ret) {
		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
			local_vf_id, v_opcode, msglen);
		switch (ret) {
		case VIRTCHNL_ERR_PARAM:
			return -EPERM;
		default:
			return -EINVAL;
		}
	}

	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf, msg);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf, msg);
		break;
	case VIRTCHNL_OP_RESET_VF:
		i40e_vc_reset_vf_msg(vf);
		ret = 0;
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
		i40e_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_IWARP:
		ret = i40e_vc_iwarp_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
		break;
	case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		ret = i40e_vc_config_rss_key(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		ret = i40e_vc_config_rss_lut(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
		ret = i40e_vc_get_rss_hena(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		ret = i40e_vc_set_rss_hena(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
			v_opcode, local_vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      I40E_ERR_NOT_IMPLEMENTED);
		break;
	}

	return ret;
}
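/* Reader's note: validation failures answer the VF with
 * I40E_ERR_PARAM before returning, and any opcode not listed in the
 * switch above is answered with I40E_ERR_NOT_IMPLEMENTED, so a VF
 * always receives a response even for malformed or unknown requests.
 */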
/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the PF structure
 *
 * called from the VFLR irq handler to
 * free up VF resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;
	struct i40e_vf *vf;
	int vf_id;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		return 0;

	/* Re-enable the VFLR interrupt cause here, before looking for which
	 * VF got reset. Otherwise, if another VF gets a reset while the
	 * first one is being processed, that interrupt will be lost, and
	 * that VF will be stuck in reset forever.
	 */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
			i40e_reset_vf(vf, true);
	}

	return 0;
}
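/* Worked example (hypothetical vf_base_id of 64): VF 3 has absolute
 * id 67, so reg_idx = 67 / 32 = 2 and bit_idx = 67 % 32 = 3, and the
 * loop above tests bit 3 of GLGEN_VFLRSTAT(2) to see whether that VF
 * experienced a function-level reset.
 */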
/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: mac address
 *
 * program VF mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;
	int bkt;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_param;
	}

	if (is_multicast_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	/* Lock once, because the add/del filter functions invoked below
	 * require mac_filter_hash_lock to be held.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* delete the temporary mac address */
	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);

	/* Delete all the filters for this VSI - we're going to kill it
	 * anyway.
	 */
	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	ether_addr_copy(vf->default_lan_addr.addr, mac);
	vf->pf_set_mac = true;
	/* Force the VF driver stop so it has to reload with new MAC address */
	i40e_vc_disable_vf(pf, vf);
	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");

error_param:
	return ret;
}
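/* Typical invocation (hypothetical interface name), which reaches
 * this handler through the .ndo_set_vf_mac netdev op:
 *
 *   ip link set eth0 vf 0 mac 02:00:00:00:00:01
 */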
/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID being set
 * @qos: priority setting
 * @vlan_proto: vlan protocol
 *
 * program VF vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
			      u16 vlan_id, u8 qos, __be16 vlan_proto)
{
	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vlan_proto != htons(ETH_P_8021Q)) {
		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
		ret = -EPROTONOSUPPORT;
		goto error_pvid;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_pvid;
	}

	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
		/* duplicate request, so just return success */
		goto error_pvid;

	/* Lock once, because multiple functions below iterate the
	 * filter list.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) {
		dev_err(&pf->pdev->dev,
			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
			vf_id);
		/* Administrator error - knock the VF offline until the
		 * administrator reconfigures the network correctly and
		 * then reloads the VF driver.
		 */
		i40e_vc_disable_vf(pf, vf);
		/* During reset the VF got a new VSI, so refresh the pointer. */
		vsi = pf->vsi[vf->lan_vsi_idx];
	}

	/* Check for condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Additionally check for the condition where there was a port
	 * VLAN but now there is a new and different port VLAN being set.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if ((!(vlan_id || qos) ||
	     vlanprio != le16_to_cpu(vsi->info.pvid)) &&
	    vsi->info.pvid) {
		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}
	}

	if (vsi->info.pvid) {
		/* remove all filters on the old VLAN */
		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
					   VLAN_VID_MASK));
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi, vlanprio);
	else
		i40e_vsi_remove_pvid(vsi);
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter for each MAC */
		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}

		/* remove the previously added non-VLAN MAC filters */
		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* Schedule the worker thread to take care of applying changes */
	i40e_service_event_schedule(vsi->back);

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}

	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
	ret = 0;

error_pvid:
	return ret;
}
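/* Typical invocation (hypothetical interface name):
 *
 *   ip link set eth0 vf 0 vlan 100 qos 3
 *
 * which arrives here as vlan_id = 100, qos = 3; the two are packed
 * into one pvid value as vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT),
 * i.e. the priority bits sit above the 12-bit VLAN ID.
 */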
#define I40E_BW_CREDIT_DIVISOR 50	/* 50Mbps per BW credit */
#define I40E_MAX_BW_INACTIVE_ACCUM 4	/* device can accumulate 4 credits max */
/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate
 * @max_tx_rate: Maximum Tx rate
 *
 * configure VF Tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
		       int max_tx_rate)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int speed = 0;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
		ret = -EINVAL;
		goto error;
	}

	if (min_tx_rate) {
		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
			min_tx_rate, vf_id);
		return -EINVAL;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error;
	}

	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = 40000;
		break;
	case I40E_LINK_SPEED_25GB:
		speed = 25000;
		break;
	case I40E_LINK_SPEED_20GB:
		speed = 20000;
		break;
	case I40E_LINK_SPEED_10GB:
		speed = 10000;
		break;
	case I40E_LINK_SPEED_1GB:
		speed = 1000;
		break;
	default:
		break;
	}

	if (max_tx_rate > speed) {
		dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.\n",
			max_tx_rate, vf->vf_id);
		ret = -EINVAL;
		goto error;
	}

	if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
		dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
		max_tx_rate = 50;
	}

	/* Tx rate credits are in values of 50Mbps, 0 is disabled */
	ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
					  max_tx_rate / I40E_BW_CREDIT_DIVISOR,
					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
			ret);
		ret = -EIO;
		goto error;
	}
	vf->tx_rate = max_tx_rate;
error:
	return ret;
}
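/* Worked example (interface name hypothetical):
 * "ip link set eth0 vf 0 max_tx_rate 300" requests 300 Mbps, which
 * becomes 300 / I40E_BW_CREDIT_DIVISOR = 6 scheduler credits of
 * 50 Mbps each; rates between 1 and 49 Mbps are rounded up to the
 * 50 Mbps minimum above.
 */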
/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_param;
	}

	ivi->vf = vf_id;

	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);

	ivi->max_tx_rate = vf->tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->spoofchk = vf->spoofchk;
	ivi->trusted = vf->trusted;
	ret = 0;

error_param:
	return ret;
}

/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct virtchnl_pf_event pfe;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int abs_vf_id;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_out;
	}

	vf = &pf->vf[vf_id];
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		pfe.event_data.link_event.link_status =
			pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			pf->hw.phy.link_info.link_speed;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		pfe.event_data.link_event.link_status = true;
		pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		pfe.event_data.link_event.link_status = false;
		pfe.event_data.link_event.link_speed = 0;
		break;
	default:
		ret = -EINVAL;
		goto error_out;
	}
	/* Notify the VF of its new link state */
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);

error_out:
	return ret;
}
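/* Typical invocations (hypothetical interface name):
 *
 *   ip link set eth0 vf 0 state auto     - follow physical link
 *   ip link set eth0 vf 0 state enable   - force link up (reported
 *                                          to the VF as 40 Gbps)
 *   ip link set eth0 vf 0 state disable  - force link down
 */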
/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	vf = &(pf->vf[vf_id]);
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto out;
	}

	if (enable == vf->spoofchk)
		goto out;

	vf->spoofchk = enable;
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
			ret);
		ret = -EIO;
	}
out:
	return ret;
}

/**
 * i40e_ndo_set_vf_trust
 * @netdev: network interface device structure of the pf
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Enable or disable VF trust setting
 **/
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		return -EINVAL;
	}

	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
		return -EINVAL;
	}

	vf = &pf->vf[vf_id];

	if (!vf)
		return -EINVAL;
	if (setting == vf->trusted)
		goto out;

	vf->trusted = setting;
	i40e_vc_notify_vf_reset(vf);
	i40e_reset_vf(vf, false);
	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
		 vf_id, setting ? "" : "un");
out:
	return ret;
}
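/* Typical invocation (hypothetical interface name):
 *
 *   ip link set eth0 vf 0 trust on
 *
 * Note that flipping the trust setting resets the VF, so its driver
 * renegotiates resources with the new privilege level.
 */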