/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}
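
/* Illustrative note, not upstream code: virtchnl messages identify VFs by an
 * id relative to this PF, while the admin queue addresses them by absolute
 * id. The recurring conversion is:
 *
 *	abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
 *
 * e.g. with a hypothetical vf_base_id of 64, relative VF 3 is absolute VF 67.
 */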

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;
	if (vf->link_forced) {
		pfe.event_data.link_event.link_status = vf->link_up;
		pfe.event_data.link_event.link_speed =
			(vf->link_up ? I40E_LINK_SPEED_40GB : 0);
	} else {
		pfe.event_data.link_event.link_status =
			ls->link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			(enum virtchnl_link_speed)ls->link_speed;
	}
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}

/***********************misc routines*****************************/

/**
 * i40e_vc_disable_vf
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset.
 **/
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
	int i;

	i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset is initiated after this
	 * function was called. However, we do not want to wait forever, so
	 * we'll give a reasonable time and print a message if we failed to
	 * ensure a reset.
	 */
	for (i = 0; i < 20; i++) {
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	dev_warn(&vf->pf->pdev->dev,
		 "Failed to initiate reset for VF %d after 200 milliseconds\n",
		 vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
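
	/* Illustrative note, not upstream code: with
	 * I40E_VIRTCHNL_SUPPORTED_QTYPES == 2, the two loops below interleave
	 * the queue types in linklistmap - rx queue n lands on bit 2n and tx
	 * queue n on bit 2n + 1. E.g. rxq_map = 0x3 and txq_map = 0x1 give
	 * linklistmap = 0b0111 (rx0, tx0, rx1).
	 */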

	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the VF is running in polling mode and using interrupt zero,
	 * need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}
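
/* Illustrative sketch, not upstream code: the per-VF vector registers
 * (VPINT_LNKLSTN, VPINT_CEQCTL) are laid out as num_msix_vectors_vf - 1
 * consecutive entries per VF, with vector 0 going through the *LNKLST0/CTL0
 * registers instead. The index computation the functions above and below
 * keep repeating could be summarized by a hypothetical helper:
 *
 *	static u32 i40e_vf_vector_reg_idx(struct i40e_hw *hw, u16 vf_id,
 *					  u16 v_idx)	// assumes v_idx >= 1
 *	{
 *		return (hw->func_caps.num_msix_vectors_vf - 1) * vf_id +
 *		       (v_idx - 1);
 *	}
 *
 * e.g. with 5 vectors per VF, vector 3 of VF 2 maps to entry 4 * 2 + 2 = 10.
 */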

/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
 *
 **/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}

/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf, size;

	/* The struct declares one trailing qv_info element, hence the
	 * (num_vectors - 1) sizing for the remainder.
	 */
	size = sizeof(struct virtchnl_iwarp_qvlist_info) +
	       (sizeof(struct virtchnl_iwarp_qv_info) *
	       (qvlist_info->num_vectors - 1));
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info)
		return -ENOMEM;

	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, v_idx))
			goto err;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
			      I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			      (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			      (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
	return -EINVAL;
}
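
/* Illustrative note, not upstream code: the two queue-context helpers below
 * follow the same HMC programming pattern - validate the request, clear the
 * stale context, then write the new one; the tx path additionally binds the
 * queue to the owning VF through QTX_CTL before flushing posted writes.
 */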

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
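
/* Illustrative note, not upstream code: the rx context encodes buffer sizes
 * in hardware units via the HBUFF/DBUFF shifts, which is where the caps of
 * 2KB - 64 (header) and 16KB - 128 (data) above come from. Assuming a
 * 128-byte data-buffer granularity (shift of 7), a 2048-byte buffer would be
 * programmed as rx_ctx.dbuff = 2048 >> 7 = 16.
 */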

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @type: type of VSI to allocate
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}
	if (type == I40E_VSI_SRIOV) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
	}

	/* program mac filter */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  vf->tx_rate / 50, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_queue_pairs = 0;
	int j;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	/* map PF queues to VF queues */
	for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);

		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
		total_queue_pairs++;
	}

	/* map PF queues to VSI */
	for (j = 0; j < 7; j++) {
		if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
			reg = 0x07FF07FF;	/* unused */
		} else {
			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
							  j * 2);
			reg = qid;
			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
						      (j * 2) + 1);
			reg |= qid << 16;
		}
		i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id),
				  reg);
	}

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}
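
/* Illustrative note, not upstream code: i40e_enable_vf_mappings() packs two
 * PF queue ids per VSILAN_QTABLE register (low and high 16 bits). A VF with
 * four queue pairs mapped to hypothetical PF queues 8..11 would end up with:
 *
 *	QTABLE[0] = 8 | (9 << 16)
 *	QTABLE[1] = 10 | (11 << 16)
 *	QTABLE[2..6] = 0x07FF07FF	(unused slots)
 */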

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
		vf->num_mac = 0;
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						     (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* We account for each VF to get a default number of queue pairs.  If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * HW.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
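
/* Illustrative note, not upstream code: I40E_PF_PCI_CIAA/CIAD form an
 * indirect window into VF PCI config space. The address/mask above appear to
 * select the PCIe Device Status register and its Transactions Pending bit,
 * which i40e_quiesce_vf_pci() below polls until it clears.
 */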

/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;

	/* warn the VF */
	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give any VF config functions that may still
	 * be running at this point some time to finish.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}

/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is reset, false otherwise.
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	/* If the VFs have been disabled, this means something else is
	 * resetting the VF, so we shouldn't continue.
	 */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF that
		 * failed the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* HW may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}
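
/* Usage note, not upstream code: this path is driven by the standard SR-IOV
 * sysfs attribute on the PF's PCI device, e.g. (hypothetical BDF):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * which lands in i40e_pci_sriov_configure() below.
 */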

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
		}
		return i40e_pci_sriov_enable(pdev, num_vfs);
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		return -EINVAL;
	}
	return 0;
}

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received. */
		vf->num_invalid_msgs = 0;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}
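
/* Illustrative note, not upstream code: every virtchnl handler below replies
 * through i40e_vc_send_msg_to_vf(), so a non-zero v_retval both reports the
 * failure to the VF and counts toward the invalid-message threshold that
 * eventually sets I40E_VF_STATE_DISABLED for a misbehaving VF.
 */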

/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	int len = 0;
	int ret;

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct virtchnl_vf_resource) +
	       sizeof(struct virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	} else {
		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				vf->vf_id);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;

	if (vf->lan_vsi_idx) {
		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
		/* VFs only use TC 0 */
		vfres->vsi_res[0].qset_handle
					  = le16_to_cpu(vsi->info.qs_handle[0]);
		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
				vf->default_lan_addr.addr);
	}
	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself;
 * unlike other virtchnl messages, the PF driver
 * doesn't send the response back to the VF
 **/
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		i40e_reset_vf(vf, false);
}

/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * called to get the number of VLANs offloaded on this VF
 **/
static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	int num_vlans = 0, bkt;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
			num_vlans++;
	}

	return num_vlans;
}

/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct virtchnl_promisc_info *info =
	    (struct virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_mac_filter *f;
	i40e_status aq_ret = 0;
	bool allmulti = false;
	struct i40e_vsi *vsi;
	bool alluni = false;
	int aq_err = 0;
	int bkt;

	vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    !vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);
		/* Lie to the VF on purpose. */
		aq_ret = 0;
		goto error_param;
	}
	/* Multicast promiscuous handling */
	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
							    allmulti,
							    vf->port_vlan_id,
							    NULL);
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
								    vsi->seid,
								    allmulti,
								    f->vlan,
								    NULL);
			aq_err = pf->hw.aq.asq_last_status;
			if (aq_ret) {
				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
				break;
			}
		}
	} else {
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
							       allmulti, NULL);
		aq_err = pf->hw.aq.asq_last_status;
		if (aq_ret) {
			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			goto error_param;
		}
	}

	if (!aq_ret) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully set multicast promiscuous mode\n",
			 vf->vf_id);
		if (allmulti)
			set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
		else
			clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	}

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;
	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
							    alluni,
							    vf->port_vlan_id,
							    NULL);
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
								    vsi->seid,
								    alluni,
								    f->vlan,
								    NULL);
			aq_err = pf->hw.aq.asq_last_status;
			if (aq_ret)
				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
		}
	} else {
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
							     alluni, NULL,
							     true);
		aq_err = pf->hw.aq.asq_last_status;
		if (aq_ret) {
			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
				vf->vf_id, info->flags,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			goto error_param;
		}
	}

	if (!aq_ret) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully set unicast promiscuous mode\n",
			 vf->vf_id);
		if (alluni)
			set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
		else
			clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}
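
/* Illustrative note, not upstream code: as the handler above shows, when the
 * VF VSI carries VLAN filters, promiscuous mode is applied per VLAN id rather
 * than VSI-wide - a VF with VLANs 10 and 20 triggers one
 * i40e_aq_set_vsi_*_promisc_on_vlan() call per VLAN instead of a single
 * VSI-wide AQ call.
 */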

/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the rx/tx
 * queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_vsi_queue_config_info *qci =
	    (struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *qpi;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id, vsi_queue_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi_id = qci->vsi_id;
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		vsi_queue_id = qpi->txq.queue_id;
		if ((qpi->txq.vsi_id != vsi_id) ||
		    (qpi->rxq.vsi_id != vsi_id) ||
		    (qpi->rxq.queue_id != vsi_queue_id) ||
		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}
	/* set vsi num_queue_pairs in use to num configured by VF */
	pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the irq to
 * queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_irq_map_info *irqmap_info =
	    (struct virtchnl_irq_map_info *)msg;
	struct virtchnl_vector_map *map;
	u16 vsi_id, vsi_queue_id, vector_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* look out for invalid queue indexes */
		tempmap = map->rxq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		tempmap = map->txq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}

/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
1963 /**
1964 * i40e_vc_enable_queues_msg
1965 * @vf: pointer to the VF info
1966 * @msg: pointer to the msg buffer
1967 * @msglen: msg length
1968 *
1969 * called from the VF to enable all or specific queue(s)
1970 **/
1971 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1972 {
1973 struct virtchnl_queue_select *vqs =
1974 (struct virtchnl_queue_select *)msg;
1975 struct i40e_pf *pf = vf->pf;
1976 u16 vsi_id = vqs->vsi_id;
1977 i40e_status aq_ret = 0;
1978 
1979 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
1980 aq_ret = I40E_ERR_PARAM;
1981 goto error_param;
1982 }
1983 
1984 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1985 aq_ret = I40E_ERR_PARAM;
1986 goto error_param;
1987 }
1988 
1989 if (!vqs->rx_queues && !vqs->tx_queues) {
1990 aq_ret = I40E_ERR_PARAM;
1991 goto error_param;
1992 }
1993 
1994 if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
1995 aq_ret = I40E_ERR_TIMEOUT;
1996 error_param:
1997 /* send the response to the VF */
1998 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
1999 aq_ret);
2000 }
2001 
2002 /**
2003 * i40e_vc_disable_queues_msg
2004 * @vf: pointer to the VF info
2005 * @msg: pointer to the msg buffer
2006 * @msglen: msg length
2007 *
2008 * called from the VF to disable all or specific
2009 * queue(s)
2010 **/
2011 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2012 {
2013 struct virtchnl_queue_select *vqs =
2014 (struct virtchnl_queue_select *)msg;
2015 struct i40e_pf *pf = vf->pf;
2016 i40e_status aq_ret = 0;
2017 
2018 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2019 aq_ret = I40E_ERR_PARAM;
2020 goto error_param;
2021 }
2022 
2023 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2024 aq_ret = I40E_ERR_PARAM;
2025 goto error_param;
2026 }
2027 
2028 if (!vqs->rx_queues && !vqs->tx_queues) {
2029 aq_ret = I40E_ERR_PARAM;
2030 goto error_param;
2031 }
2032 
2033 i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
2034 
2035 error_param:
2036 /* send the response to the VF */
2037 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2038 aq_ret);
2039 }
2040 
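/* Illustrative sketch (not part of this file): the queue_select payload
 * used by both handlers above is a pair of bitmaps, one bit per queue.
 * Enabling the first four queue pairs of a VSI could look like this on
 * the VF side, with vf_send_msg() again a hypothetical mailbox helper:
 *
 *	struct virtchnl_queue_select vqs = {
 *		.vsi_id    = vsi_id,
 *		.rx_queues = 0xf,
 *		.tx_queues = 0xf,
 *	};
 *
 *	err = vf_send_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
 *			  (u8 *)&vqs, sizeof(vqs));
 *
 * Note that this PF implementation starts or stops all rings of the VSI
 * as long as at least one bit is set, regardless of which bits they are.
 */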
2041 /**
2042 * i40e_vc_request_queues_msg
2043 * @vf: pointer to the VF info
2044 * @msg: pointer to the msg buffer
2045 * @msglen: msg length
2046 *
2047 * VFs get a default number of queues but can use this message to request a
2048 * different number. If the request is successful, PF will reset the VF and
2049 * return 0. If unsuccessful, PF will send message informing VF of number of
2050 * available queues and return result of sending VF a message.
2051 **/
2052 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
2053 {
2054 struct virtchnl_vf_res_request *vfres =
2055 (struct virtchnl_vf_res_request *)msg;
2056 int req_pairs = vfres->num_queue_pairs;
2057 int cur_pairs = vf->num_queue_pairs;
2058 struct i40e_pf *pf = vf->pf;
2059 
2060 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
2061 return -EINVAL;
2062 
2063 if (req_pairs <= 0) {
2064 dev_err(&pf->pdev->dev,
2065 "VF %d tried to request %d queues. Ignoring.\n",
2066 vf->vf_id, req_pairs);
2067 } else if (req_pairs > I40E_MAX_VF_QUEUES) {
2068 dev_err(&pf->pdev->dev,
2069 "VF %d tried to request more than %d queues.\n",
2070 vf->vf_id,
2071 I40E_MAX_VF_QUEUES);
2072 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2073 } else if (req_pairs - cur_pairs > pf->queues_left) {
2074 dev_warn(&pf->pdev->dev,
2075 "VF %d requested %d more queues, but only %d left.\n",
2076 vf->vf_id,
2077 req_pairs - cur_pairs,
2078 pf->queues_left);
2079 vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2080 } else {
2081 /* successful request */
2082 vf->num_req_queues = req_pairs;
2083 i40e_vc_notify_vf_reset(vf);
2084 i40e_reset_vf(vf, false);
2085 return 0;
2086 }
2087 
2088 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2089 (u8 *)vfres, sizeof(*vfres));
2090 }
2091 
2092 /**
2093 * i40e_vc_get_stats_msg
2094 * @vf: pointer to the VF info
2095 * @msg: pointer to the msg buffer
2096 * @msglen: msg length
2097 *
2098 * called from the VF to get vsi stats
2099 **/
2100 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2101 {
2102 struct virtchnl_queue_select *vqs =
2103 (struct virtchnl_queue_select *)msg;
2104 struct i40e_pf *pf = vf->pf;
2105 struct i40e_eth_stats stats;
2106 i40e_status aq_ret = 0;
2107 struct i40e_vsi *vsi;
2108 
2109 memset(&stats, 0, sizeof(struct i40e_eth_stats));
2110 
2111 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2112 aq_ret = I40E_ERR_PARAM;
2113 goto error_param;
2114 }
2115 
2116 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2117 aq_ret = I40E_ERR_PARAM;
2118 goto error_param;
2119 }
2120 
2121 vsi = pf->vsi[vf->lan_vsi_idx];
2122 if (!vsi) {
2123 aq_ret = I40E_ERR_PARAM;
2124 goto error_param;
2125 }
2126 i40e_update_eth_stats(vsi);
2127 stats = vsi->eth_stats;
2128 
2129 error_param:
2130 /* send the response back to the VF */
2131 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2132 (u8 *)&stats, sizeof(stats));
2133 }
2134 
2135 /* If the VF is not trusted, restrict the number of MAC/VLAN filters it can program */
2136 #define I40E_VC_MAX_MAC_ADDR_PER_VF 12
2137 #define I40E_VC_MAX_VLAN_PER_VF 8
2138 
2139 /**
2140 * i40e_check_vf_permission
2141 * @vf: pointer to the VF info
2142 * @macaddr: pointer to the MAC Address being checked
2143 *
2144 * Check if the VF has permission to add or delete unicast MAC address
2145 * filters and return error code -EPERM if not. Then check if the
2146 * address filter requested is broadcast or zero and if so return
2147 * an invalid MAC address error code.
2148 **/
2149 static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
2150 {
2151 struct i40e_pf *pf = vf->pf;
2152 int ret = 0;
2153 
2154 if (is_broadcast_ether_addr(macaddr) ||
2155 is_zero_ether_addr(macaddr)) {
2156 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
2157 ret = I40E_ERR_INVALID_MAC_ADDR;
2158 } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
2159 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2160 !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
2161 /* If the host VMM administrator has set the VF MAC address
2162 * administratively via the ndo_set_vf_mac command then deny
2163 * permission to the VF to add or delete unicast MAC addresses,
2164 * unless the VF is privileged, in which case it may do so.
2165 * The VF may request to set the MAC address filter already
2166 * assigned to it, so do not return an error in that case.
2167 */
2168 dev_err(&pf->pdev->dev,
2169 "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
2170 ret = -EPERM;
2171 } else if ((vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF) &&
2172 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2173 dev_err(&pf->pdev->dev,
2174 "VF is not trusted, switch the VF to trusted to add more functionality\n");
2175 ret = -EPERM;
2176 }
2177 return ret;
2178 }
2179 
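/* Illustrative sketch (not part of this file): the handler below consumes
 * a variable-length virtchnl_ether_addr_list. A VF adding a single MAC
 * filter could build the request like this, with vf_send_msg() a
 * hypothetical mailbox helper:
 *
 *	struct virtchnl_ether_addr_list *al;
 *	u16 len = sizeof(*al);
 *
 *	al = kzalloc(len, GFP_KERNEL);
 *	if (!al)
 *		return -ENOMEM;
 *	al->vsi_id = vsi_id;
 *	al->num_elements = 1;
 *	ether_addr_copy(al->list[0].addr, mac);
 *	err = vf_send_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)al, len);
 *	kfree(al);
 *
 * Each additional address grows the message by
 * sizeof(struct virtchnl_ether_addr).
 */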
2180 /**
2181 * i40e_vc_add_mac_addr_msg
2182 * @vf: pointer to the VF info
2183 * @msg: pointer to the msg buffer
2184 * @msglen: msg length
2185 *
2186 * add guest mac address filter
2187 **/
2188 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2189 {
2190 struct virtchnl_ether_addr_list *al =
2191 (struct virtchnl_ether_addr_list *)msg;
2192 struct i40e_pf *pf = vf->pf;
2193 struct i40e_vsi *vsi = NULL;
2194 u16 vsi_id = al->vsi_id;
2195 i40e_status ret = 0;
2196 int i;
2197 
2198 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2199 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2200 ret = I40E_ERR_PARAM;
2201 goto error_param;
2202 }
2203 
2204 for (i = 0; i < al->num_elements; i++) {
2205 ret = i40e_check_vf_permission(vf, al->list[i].addr);
2206 if (ret)
2207 goto error_param;
2208 }
2209 vsi = pf->vsi[vf->lan_vsi_idx];
2210 
2211 /* Lock once, because every function called inside the loop below
2212 * accesses the VSI's MAC filter list, which must be protected by
2213 * the same lock.
2214 */
2214 spin_lock_bh(&vsi->mac_filter_hash_lock);
2215 
2216 /* add new addresses to the list */
2217 for (i = 0; i < al->num_elements; i++) {
2218 struct i40e_mac_filter *f;
2219 
2220 f = i40e_find_mac(vsi, al->list[i].addr);
2221 if (!f) {
2222 f = i40e_add_mac_filter(vsi, al->list[i].addr);
2223 
2224 if (!f) {
2225 dev_err(&pf->pdev->dev,
2226 "Unable to add MAC filter %pM for VF %d\n",
2227 al->list[i].addr, vf->vf_id);
2228 ret = I40E_ERR_PARAM;
2229 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2230 goto error_param;
2231 } else {
2232 vf->num_mac++;
2233 }
2234 }
2235 }
2236 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2237 
2238 /* program the updated filter list */
2239 ret = i40e_sync_vsi_filters(vsi);
2240 if (ret)
2241 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2242 vf->vf_id, ret);
2243 
2244 error_param:
2245 /* send the response to the VF */
2246 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
2247 ret);
2248 }
2249 
2250 /**
2251 * i40e_vc_del_mac_addr_msg
2252 * @vf: pointer to the VF info
2253 * @msg: pointer to the msg buffer
2254 * @msglen: msg length
2255 *
2256 * remove guest mac address filter
2257 **/
2258 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2259 {
2260 struct virtchnl_ether_addr_list *al =
2261 (struct virtchnl_ether_addr_list *)msg;
2262 struct i40e_pf *pf = vf->pf;
2263 struct i40e_vsi *vsi = NULL;
2264 u16 vsi_id = al->vsi_id;
2265 i40e_status ret = 0;
2266 int i;
2267 
2268 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2269 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2270 ret = I40E_ERR_PARAM;
2271 goto error_param;
2272 }
2273 
2274 for (i = 0; i < al->num_elements; i++) {
2275 if (is_broadcast_ether_addr(al->list[i].addr) ||
2276 is_zero_ether_addr(al->list[i].addr)) {
2277 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
2278 al->list[i].addr, vf->vf_id);
2279 ret = I40E_ERR_INVALID_MAC_ADDR;
2280 goto error_param;
2281 }
2282 }
2283 vsi = pf->vsi[vf->lan_vsi_idx];
2284 
2285 spin_lock_bh(&vsi->mac_filter_hash_lock);
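/* As in the add path above, the hash lock is held around the whole
 * loop: i40e_del_mac_filter() expects mac_filter_hash_lock to be taken
 * by its caller, so the lock is released on every exit path below.
 */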
2286 /* delete addresses from the list */
2287 for (i = 0; i < al->num_elements; i++)
2288 if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
2289 ret = I40E_ERR_INVALID_MAC_ADDR;
2290 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2291 goto error_param;
2292 } else {
2293 vf->num_mac--;
2294 }
2295 
2296 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2297 
2298 /* program the updated filter list */
2299 ret = i40e_sync_vsi_filters(vsi);
2300 if (ret)
2301 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2302 vf->vf_id, ret);
2303 
2304 error_param:
2305 /* send the response to the VF */
2306 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
2307 ret);
2308 }
2309 
2310 /**
2311 * i40e_vc_add_vlan_msg
2312 * @vf: pointer to the VF info
2313 * @msg: pointer to the msg buffer
2314 * @msglen: msg length
2315 *
2316 * program guest vlan id
2317 **/
2318 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2319 {
2320 struct virtchnl_vlan_filter_list *vfl =
2321 (struct virtchnl_vlan_filter_list *)msg;
2322 struct i40e_pf *pf = vf->pf;
2323 struct i40e_vsi *vsi = NULL;
2324 u16 vsi_id = vfl->vsi_id;
2325 i40e_status aq_ret = 0;
2326 int i;
2327 
2328 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
2329 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2330 dev_err(&pf->pdev->dev,
2331 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
2332 aq_ret = I40E_ERR_PARAM;
goto error_param;
2333 }
2334 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2335 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2336 aq_ret = I40E_ERR_PARAM;
2337 goto error_param;
2338 }
2339 
2340 for (i = 0; i < vfl->num_elements; i++) {
2341 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2342 aq_ret = I40E_ERR_PARAM;
2343 dev_err(&pf->pdev->dev,
2344 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2345 goto error_param;
2346 }
2347 }
2348 vsi = pf->vsi[vf->lan_vsi_idx];
2349 if (vsi->info.pvid) {
2350 aq_ret = I40E_ERR_PARAM;
2351 goto error_param;
2352 }
2353 
2354 i40e_vlan_stripping_enable(vsi);
2355 for (i = 0; i < vfl->num_elements; i++) {
2356 /* add new VLAN filter */
2357 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
2358 if (!ret)
2359 vf->num_vlan++;
2360 
2361 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2362 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2363 true,
2364 vfl->vlan_id[i],
2365 NULL);
2366 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2367 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2368 true,
2369 vfl->vlan_id[i],
2370 NULL);
2371 
2372 if (ret)
2373 dev_err(&pf->pdev->dev,
2374 "Unable to add VLAN filter %d for VF %d, error %d\n",
2375 vfl->vlan_id[i], vf->vf_id, ret);
2376 }
2377 
2378 error_param:
2379 /* send the response to the VF */
2380 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
2381 }
2382 
2383 /**
2384 * i40e_vc_remove_vlan_msg
2385 * @vf: pointer to the VF info
2386 * @msg: pointer to the msg buffer
2387 * @msglen: msg length
2388 *
2389 * remove programmed guest vlan id
2390 **/
2391 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2392 {
2393 struct virtchnl_vlan_filter_list *vfl =
2394 (struct virtchnl_vlan_filter_list *)msg;
2395 struct i40e_pf *pf = vf->pf;
2396 struct i40e_vsi *vsi = NULL;
2397 u16 vsi_id = vfl->vsi_id;
2398 i40e_status aq_ret = 0;
2399 int i;
2400 
2401 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2402 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2403 aq_ret = I40E_ERR_PARAM;
2404 goto error_param;
2405 }
2406 
2407 for (i = 0; i < 
vfl->num_elements; i++) { 2408 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 2409 aq_ret = I40E_ERR_PARAM; 2410 goto error_param; 2411 } 2412 } 2413 2414 vsi = pf->vsi[vf->lan_vsi_idx]; 2415 if (vsi->info.pvid) { 2416 aq_ret = I40E_ERR_PARAM; 2417 goto error_param; 2418 } 2419 2420 for (i = 0; i < vfl->num_elements; i++) { 2421 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); 2422 vf->num_vlan--; 2423 2424 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 2425 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 2426 false, 2427 vfl->vlan_id[i], 2428 NULL); 2429 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 2430 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 2431 false, 2432 vfl->vlan_id[i], 2433 NULL); 2434 } 2435 2436 error_param: 2437 /* send the response to the VF */ 2438 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret); 2439 } 2440 2441 /** 2442 * i40e_vc_iwarp_msg 2443 * @vf: pointer to the VF info 2444 * @msg: pointer to the msg buffer 2445 * @msglen: msg length 2446 * 2447 * called from the VF for the iwarp msgs 2448 **/ 2449 static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 2450 { 2451 struct i40e_pf *pf = vf->pf; 2452 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; 2453 i40e_status aq_ret = 0; 2454 2455 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2456 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 2457 aq_ret = I40E_ERR_PARAM; 2458 goto error_param; 2459 } 2460 2461 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id, 2462 msg, msglen); 2463 2464 error_param: 2465 /* send the response to the VF */ 2466 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP, 2467 aq_ret); 2468 } 2469 2470 /** 2471 * i40e_vc_iwarp_qvmap_msg 2472 * @vf: pointer to the VF info 2473 * @msg: pointer to the msg buffer 2474 * @msglen: msg length 2475 * @config: config qvmap or release it 2476 * 2477 * called from the VF for the iwarp msgs 2478 **/ 2479 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen, 2480 bool config) 2481 { 2482 struct virtchnl_iwarp_qvlist_info *qvlist_info = 2483 (struct virtchnl_iwarp_qvlist_info *)msg; 2484 i40e_status aq_ret = 0; 2485 2486 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2487 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 2488 aq_ret = I40E_ERR_PARAM; 2489 goto error_param; 2490 } 2491 2492 if (config) { 2493 if (i40e_config_iwarp_qvlist(vf, qvlist_info)) 2494 aq_ret = I40E_ERR_PARAM; 2495 } else { 2496 i40e_release_iwarp_qvlist(vf); 2497 } 2498 2499 error_param: 2500 /* send the response to the VF */ 2501 return i40e_vc_send_resp_to_vf(vf, 2502 config ? 
VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP : 2503 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, 2504 aq_ret); 2505 } 2506 2507 /** 2508 * i40e_vc_config_rss_key 2509 * @vf: pointer to the VF info 2510 * @msg: pointer to the msg buffer 2511 * @msglen: msg length 2512 * 2513 * Configure the VF's RSS key 2514 **/ 2515 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen) 2516 { 2517 struct virtchnl_rss_key *vrk = 2518 (struct virtchnl_rss_key *)msg; 2519 struct i40e_pf *pf = vf->pf; 2520 struct i40e_vsi *vsi = NULL; 2521 u16 vsi_id = vrk->vsi_id; 2522 i40e_status aq_ret = 0; 2523 2524 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2525 !i40e_vc_isvalid_vsi_id(vf, vsi_id) || 2526 (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { 2527 aq_ret = I40E_ERR_PARAM; 2528 goto err; 2529 } 2530 2531 vsi = pf->vsi[vf->lan_vsi_idx]; 2532 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0); 2533 err: 2534 /* send the response to the VF */ 2535 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, 2536 aq_ret); 2537 } 2538 2539 /** 2540 * i40e_vc_config_rss_lut 2541 * @vf: pointer to the VF info 2542 * @msg: pointer to the msg buffer 2543 * @msglen: msg length 2544 * 2545 * Configure the VF's RSS LUT 2546 **/ 2547 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen) 2548 { 2549 struct virtchnl_rss_lut *vrl = 2550 (struct virtchnl_rss_lut *)msg; 2551 struct i40e_pf *pf = vf->pf; 2552 struct i40e_vsi *vsi = NULL; 2553 u16 vsi_id = vrl->vsi_id; 2554 i40e_status aq_ret = 0; 2555 2556 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2557 !i40e_vc_isvalid_vsi_id(vf, vsi_id) || 2558 (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { 2559 aq_ret = I40E_ERR_PARAM; 2560 goto err; 2561 } 2562 2563 vsi = pf->vsi[vf->lan_vsi_idx]; 2564 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); 2565 /* send the response to the VF */ 2566 err: 2567 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, 2568 aq_ret); 2569 } 2570 2571 /** 2572 * i40e_vc_get_rss_hena 2573 * @vf: pointer to the VF info 2574 * @msg: pointer to the msg buffer 2575 * @msglen: msg length 2576 * 2577 * Return the RSS HENA bits allowed by the hardware 2578 **/ 2579 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) 2580 { 2581 struct virtchnl_rss_hena *vrh = NULL; 2582 struct i40e_pf *pf = vf->pf; 2583 i40e_status aq_ret = 0; 2584 int len = 0; 2585 2586 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2587 aq_ret = I40E_ERR_PARAM; 2588 goto err; 2589 } 2590 len = sizeof(struct virtchnl_rss_hena); 2591 2592 vrh = kzalloc(len, GFP_KERNEL); 2593 if (!vrh) { 2594 aq_ret = I40E_ERR_NO_MEMORY; 2595 len = 0; 2596 goto err; 2597 } 2598 vrh->hena = i40e_pf_get_default_rss_hena(pf); 2599 err: 2600 /* send the response back to the VF */ 2601 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, 2602 aq_ret, (u8 *)vrh, len); 2603 kfree(vrh); 2604 return aq_ret; 2605 } 2606 2607 /** 2608 * i40e_vc_set_rss_hena 2609 * @vf: pointer to the VF info 2610 * @msg: pointer to the msg buffer 2611 * @msglen: msg length 2612 * 2613 * Set the RSS HENA bits for the VF 2614 **/ 2615 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) 2616 { 2617 struct virtchnl_rss_hena *vrh = 2618 (struct virtchnl_rss_hena *)msg; 2619 struct i40e_pf *pf = vf->pf; 2620 struct i40e_hw *hw = &pf->hw; 2621 i40e_status aq_ret = 0; 2622 2623 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2624 aq_ret = I40E_ERR_PARAM; 2625 goto err; 2626 } 2627 i40e_write_rx_ctl(hw, 
I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
2628 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
2629 (u32)(vrh->hena >> 32));
2630 
2631 /* send the response to the VF */
2632 err:
2633 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
2634 }
2635 
2636 /**
2637 * i40e_vc_enable_vlan_stripping
2638 * @vf: pointer to the VF info
2639 * @msg: pointer to the msg buffer
2640 * @msglen: msg length
2641 *
2642 * Enable vlan header stripping for the VF
2643 **/
2644 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
2645 u16 msglen)
2646 {
2647 struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
2648 i40e_status aq_ret = 0;
2649 
2650 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2651 aq_ret = I40E_ERR_PARAM;
2652 goto err;
2653 }
2654 
2655 i40e_vlan_stripping_enable(vsi);
2656 
2657 /* send the response to the VF */
2658 err:
2659 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
2660 aq_ret);
2661 }
2662 
2663 /**
2664 * i40e_vc_disable_vlan_stripping
2665 * @vf: pointer to the VF info
2666 * @msg: pointer to the msg buffer
2667 * @msglen: msg length
2668 *
2669 * Disable vlan header stripping for the VF
2670 **/
2671 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
2672 u16 msglen)
2673 {
2674 struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
2675 i40e_status aq_ret = 0;
2676 
2677 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2678 aq_ret = I40E_ERR_PARAM;
2679 goto err;
2680 }
2681 
2682 i40e_vlan_stripping_disable(vsi);
2683 
2684 /* send the response to the VF */
2685 err:
2686 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
2687 aq_ret);
2688 }
2689 
2690 /**
2691 * i40e_vc_process_vf_msg
2692 * @pf: pointer to the PF structure
2693 * @vf_id: source VF id
2694 * @v_opcode: operation code
* @v_retval: unused return value code
2695 * @msg: pointer to the msg buffer
2696 * @msglen: msg length
2697 *
2698 * called from the common aeq/arq handler to
2699 * process request from VF
2700 **/
2701 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
2702 u32 v_retval, u8 *msg, u16 msglen)
2703 {
2704 struct i40e_hw *hw = &pf->hw;
2705 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
2706 struct i40e_vf *vf;
2707 int ret;
2708 
2709 pf->vf_aq_requests++;
2710 if (local_vf_id >= pf->num_alloc_vfs)
2711 return -EINVAL;
2712 vf = &(pf->vf[local_vf_id]);
2713 
2714 /* Check if VF is disabled. 
*/ 2715 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states)) 2716 return I40E_ERR_PARAM; 2717 2718 /* perform basic checks on the msg */ 2719 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); 2720 2721 /* perform additional checks specific to this driver */ 2722 if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) { 2723 struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; 2724 2725 if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) 2726 ret = -EINVAL; 2727 } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) { 2728 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; 2729 2730 if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) 2731 ret = -EINVAL; 2732 } 2733 2734 if (ret) { 2735 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); 2736 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", 2737 local_vf_id, v_opcode, msglen); 2738 switch (ret) { 2739 case VIRTCHNL_ERR_PARAM: 2740 return -EPERM; 2741 default: 2742 return -EINVAL; 2743 } 2744 } 2745 2746 switch (v_opcode) { 2747 case VIRTCHNL_OP_VERSION: 2748 ret = i40e_vc_get_version_msg(vf, msg); 2749 break; 2750 case VIRTCHNL_OP_GET_VF_RESOURCES: 2751 ret = i40e_vc_get_vf_resources_msg(vf, msg); 2752 break; 2753 case VIRTCHNL_OP_RESET_VF: 2754 i40e_vc_reset_vf_msg(vf); 2755 ret = 0; 2756 break; 2757 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 2758 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen); 2759 break; 2760 case VIRTCHNL_OP_CONFIG_VSI_QUEUES: 2761 ret = i40e_vc_config_queues_msg(vf, msg, msglen); 2762 break; 2763 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 2764 ret = i40e_vc_config_irq_map_msg(vf, msg, msglen); 2765 break; 2766 case VIRTCHNL_OP_ENABLE_QUEUES: 2767 ret = i40e_vc_enable_queues_msg(vf, msg, msglen); 2768 i40e_vc_notify_vf_link_state(vf); 2769 break; 2770 case VIRTCHNL_OP_DISABLE_QUEUES: 2771 ret = i40e_vc_disable_queues_msg(vf, msg, msglen); 2772 break; 2773 case VIRTCHNL_OP_ADD_ETH_ADDR: 2774 ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen); 2775 break; 2776 case VIRTCHNL_OP_DEL_ETH_ADDR: 2777 ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen); 2778 break; 2779 case VIRTCHNL_OP_ADD_VLAN: 2780 ret = i40e_vc_add_vlan_msg(vf, msg, msglen); 2781 break; 2782 case VIRTCHNL_OP_DEL_VLAN: 2783 ret = i40e_vc_remove_vlan_msg(vf, msg, msglen); 2784 break; 2785 case VIRTCHNL_OP_GET_STATS: 2786 ret = i40e_vc_get_stats_msg(vf, msg, msglen); 2787 break; 2788 case VIRTCHNL_OP_IWARP: 2789 ret = i40e_vc_iwarp_msg(vf, msg, msglen); 2790 break; 2791 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 2792 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true); 2793 break; 2794 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: 2795 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false); 2796 break; 2797 case VIRTCHNL_OP_CONFIG_RSS_KEY: 2798 ret = i40e_vc_config_rss_key(vf, msg, msglen); 2799 break; 2800 case VIRTCHNL_OP_CONFIG_RSS_LUT: 2801 ret = i40e_vc_config_rss_lut(vf, msg, msglen); 2802 break; 2803 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: 2804 ret = i40e_vc_get_rss_hena(vf, msg, msglen); 2805 break; 2806 case VIRTCHNL_OP_SET_RSS_HENA: 2807 ret = i40e_vc_set_rss_hena(vf, msg, msglen); 2808 break; 2809 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 2810 ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen); 2811 break; 2812 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 2813 ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen); 2814 break; 2815 case VIRTCHNL_OP_REQUEST_QUEUES: 2816 ret = i40e_vc_request_queues_msg(vf, msg, msglen); 2817 break; 2818 2819 case VIRTCHNL_OP_UNKNOWN: 2820 default: 2821 dev_err(&pf->pdev->dev, "Unsupported opcode %d 
from VF %d\n",
2822 v_opcode, local_vf_id);
2823 ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
2824 I40E_ERR_NOT_IMPLEMENTED);
2825 break;
2826 }
2827 
2828 return ret;
2829 }
2830 
2831 /**
2832 * i40e_vc_process_vflr_event
2833 * @pf: pointer to the PF structure
2834 *
2835 * called from the VFLR irq handler to
2836 * free up VF resources and state variables
2837 **/
2838 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
2839 {
2840 struct i40e_hw *hw = &pf->hw;
2841 u32 reg, reg_idx, bit_idx;
2842 struct i40e_vf *vf;
2843 int vf_id;
2844 
2845 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
2846 return 0;
2847 
2848 /* Re-enable the VFLR interrupt cause here, before looking for which
2849 * VF got reset. Otherwise, if another VF gets a reset while the
2850 * first one is being processed, that interrupt will be lost, and
2851 * that VF will be stuck in reset forever.
2852 */
2853 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2854 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
2855 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2856 i40e_flush(hw);
2857 
2858 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
2859 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
2860 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
2861 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
2862 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
2863 vf = &pf->vf[vf_id];
2864 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
2865 if (reg & BIT(bit_idx))
2866 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
2867 i40e_reset_vf(vf, true);
2868 }
2869 
2870 return 0;
2871 }
2872 
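/* The ndo hooks below are reached through the PF's netdev, typically via
 * iproute2. For example (with eth0 standing in for the PF interface):
 *
 *	ip link set dev eth0 vf 0 mac 52:54:00:12:34:56
 *
 * drives i40e_ndo_set_vf_mac for VF 0.
 */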
2873 /**
2874 * i40e_ndo_set_vf_mac
2875 * @netdev: network interface device structure
2876 * @vf_id: VF identifier
2877 * @mac: mac address
2878 *
2879 * program VF mac address
2880 **/
2881 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2882 {
2883 struct i40e_netdev_priv *np = netdev_priv(netdev);
2884 struct i40e_vsi *vsi = np->vsi;
2885 struct i40e_pf *pf = vsi->back;
2886 struct i40e_mac_filter *f;
2887 struct i40e_vf *vf;
2888 int ret = 0;
2889 struct hlist_node *h;
2890 int bkt;
2891 
2892 /* validate the request */
2893 if (vf_id >= pf->num_alloc_vfs) {
2894 dev_err(&pf->pdev->dev,
2895 "Invalid VF Identifier %d\n", vf_id);
2896 ret = -EINVAL;
2897 goto error_param;
2898 }
2899 
2900 vf = &(pf->vf[vf_id]);
2901 vsi = pf->vsi[vf->lan_vsi_idx];
2902 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
2903 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
2904 vf_id);
2905 ret = -EAGAIN;
2906 goto error_param;
2907 }
2908 
2909 if (is_multicast_ether_addr(mac)) {
2910 dev_err(&pf->pdev->dev,
2911 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
2912 ret = -EINVAL;
2913 goto error_param;
2914 }
2915 
2916 /* Lock once, because the add/del filter functions invoked below
2917 * require mac_filter_hash_lock to be held.
2918 */
2919 spin_lock_bh(&vsi->mac_filter_hash_lock);
2920 
2921 /* delete the temporary mac address */
2922 if (!is_zero_ether_addr(vf->default_lan_addr.addr))
2923 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
2924 
2925 /* Delete all the filters for this VSI - we're going to kill it
2926 * anyway.
2927 */
2928 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
2929 __i40e_del_filter(vsi, f);
2930 
2931 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2932 
2933 /* program mac filter */
2934 if (i40e_sync_vsi_filters(vsi)) {
2935 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
2936 ret = -EIO;
2937 goto error_param;
2938 }
2939 ether_addr_copy(vf->default_lan_addr.addr, mac);
2940 
2941 if (is_zero_ether_addr(mac)) {
2942 vf->pf_set_mac = false;
2943 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
2944 } else {
2945 vf->pf_set_mac = true;
2946 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
2947 mac, vf_id);
2948 }
2949 
2950 /* Force the VF driver stop so it has to reload with new MAC address */
2951 i40e_vc_disable_vf(vf);
2952 dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
2953 
2954 error_param:
2955 return ret;
2956 }
2957 
2958 /**
2959 * i40e_vsi_has_vlans - True if VSI has configured VLANs
2960 * @vsi: pointer to the vsi
2961 *
2962 * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
2963 * we have no configured VLANs. Do not call while holding the
2964 * mac_filter_hash_lock.
2965 */
2966 static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
2967 {
2968 bool have_vlans;
2969 
2970 /* If we have a port VLAN, then the VSI cannot have any VLANs
2971 * configured, as all MAC/VLAN filters will be assigned to the PVID.
2972 */
2973 if (vsi->info.pvid)
2974 return false;
2975 
2976 /* Since we don't have a PVID, we know that if the device is in VLAN
2977 * mode it must be because of a VLAN filter configured on this VSI.
2978 */
2979 spin_lock_bh(&vsi->mac_filter_hash_lock);
2980 have_vlans = i40e_is_vsi_in_vlan(vsi);
2981 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2982 
2983 return have_vlans;
2984 }
2985 
2986 /**
2987 * i40e_ndo_set_vf_port_vlan
2988 * @netdev: network interface device structure
2989 * @vf_id: VF identifier
2990 * @vlan_id: VLAN ID
2991 * @qos: priority setting
2992 * @vlan_proto: vlan protocol
2993 *
2994 * program VF vlan id and/or qos
2995 **/
2996 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
2997 u16 vlan_id, u8 qos, __be16 vlan_proto)
2998 {
2999 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
3000 struct i40e_netdev_priv *np = netdev_priv(netdev);
3001 struct i40e_pf *pf = np->vsi->back;
3002 struct i40e_vsi *vsi;
3003 struct i40e_vf *vf;
3004 int ret = 0;
3005 
3006 /* validate the request */
3007 if (vf_id >= pf->num_alloc_vfs) {
3008 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3009 ret = -EINVAL;
3010 goto error_pvid;
3011 }
3012 
3013 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
3014 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
3015 ret = -EINVAL;
3016 goto error_pvid;
3017 }
3018 
3019 if (vlan_proto != htons(ETH_P_8021Q)) {
3020 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
3021 ret = -EPROTONOSUPPORT;
3022 goto error_pvid;
3023 }
3024 
3025 vf = &(pf->vf[vf_id]);
3026 vsi = pf->vsi[vf->lan_vsi_idx];
3027 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3028 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3029 vf_id);
3030 ret = -EAGAIN;
3031 goto error_pvid;
3032 }
3033 
3034 if (le16_to_cpu(vsi->info.pvid) == vlanprio)
3035 /* duplicate request, so just return success */
3036 goto error_pvid;
3037 
3038 if (i40e_vsi_has_vlans(vsi)) {
3039 dev_err(&pf->pdev->dev,
3040 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
3041 vf_id);
3042 /* Administrator error - knock the VF offline until the
3043 * network is reconfigured correctly and the VF driver is
3044 * reloaded.
3045 */
3046 i40e_vc_disable_vf(vf);
3047 /* During reset the VF got a new VSI, so refresh the pointer. */
3048 vsi = pf->vsi[vf->lan_vsi_idx];
3049 }
3050 
3051 /* Locked once because multiple functions below iterate list */
3052 spin_lock_bh(&vsi->mac_filter_hash_lock);
3053 
3054 /* Check for condition where there was already a port VLAN ID
3055 * filter set and now it is being deleted by setting it to zero.
3056 * Additionally check for the condition where there was a port
3057 * VLAN but now there is a new and different port VLAN being set.
3058 * Before deleting all the old VLAN filters we must add new ones
3059 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
3060 * MAC addresses deleted.
3061 */
3062 if ((!(vlan_id || qos) ||
3063 vlanprio != le16_to_cpu(vsi->info.pvid)) &&
3064 vsi->info.pvid) {
3065 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
3066 if (ret) {
3067 dev_info(&vsi->back->pdev->dev,
3068 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
3069 vsi->back->hw.aq.asq_last_status);
3070 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3071 goto error_pvid;
3072 }
3073 }
3074 
3075 if (vsi->info.pvid) {
3076 /* remove all filters on the old VLAN */
3077 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
3078 VLAN_VID_MASK));
3079 }
3080 
3081 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3082 if (vlan_id || qos)
3083 ret = i40e_vsi_add_pvid(vsi, vlanprio);
3084 else
3085 i40e_vsi_remove_pvid(vsi);
3086 spin_lock_bh(&vsi->mac_filter_hash_lock);
3087 
3088 if (vlan_id) {
3089 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
3090 vlan_id, qos, vf_id);
3091 
3092 /* add new VLAN filter for each MAC */
3093 ret = i40e_add_vlan_all_mac(vsi, vlan_id);
3094 if (ret) {
3095 dev_info(&vsi->back->pdev->dev,
3096 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
3097 vsi->back->hw.aq.asq_last_status);
3098 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3099 goto error_pvid;
3100 }
3101 
3102 /* remove the previously added non-VLAN MAC filters */
3103 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
3104 }
3105 
3106 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3107 
3108 /* Schedule the worker thread to take care of applying changes */
3109 i40e_service_event_schedule(vsi->back);
3110 
3111 if (ret) {
3112 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
3113 goto error_pvid;
3114 }
3115 
3116 /* The Port VLAN needs to be saved across resets the same as the
3117 * default LAN MAC address.
3118 */
3119 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
3120 ret = 0;
3121 
3122 error_pvid:
3123 return ret;
3124 }
3125 
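/* Example (illustrative): the port VLAN handler above is driven from the
 * host, e.g. tagging all of VF 0's traffic with VLAN 100, priority 3:
 *
 *	ip link set dev eth0 vf 0 vlan 100 qos 3
 *
 * Setting "vlan 0 qos 0" removes the port VLAN again.
 */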
3126 /**
3127 * i40e_ndo_set_vf_bw
3128 * @netdev: network interface device structure
3129 * @vf_id: VF identifier
3130 * @min_tx_rate: minimum Tx rate in Mbps (must be 0; not supported)
* @max_tx_rate: maximum Tx rate in Mbps
3131 *
3132 * configure VF Tx rate
3133 **/
3134 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
3135 int max_tx_rate)
3136 {
3137 struct i40e_netdev_priv *np = netdev_priv(netdev);
3138 struct i40e_pf *pf = np->vsi->back;
3139 struct i40e_vsi *vsi;
3140 struct i40e_vf *vf;
3141 int ret = 0;
3142 
3143 /* validate the request */
3144 if (vf_id >= pf->num_alloc_vfs) {
3145 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
3146 ret = -EINVAL;
3147 goto error;
3148 }
3149 
3150 if (min_tx_rate) {
3151 dev_err(&pf->pdev->dev, "Invalid min Tx rate (%d) specified for VF %d; only 0 is supported.\n",
3152 min_tx_rate, vf_id);
3153 return -EINVAL;
3154 }
3155 
3156 vf = &(pf->vf[vf_id]);
3157 vsi = pf->vsi[vf->lan_vsi_idx];
3158 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3159 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3160 vf_id);
3161 ret = -EAGAIN;
3162 goto error;
3163 }
3164 
3165 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
3166 if (ret)
3167 goto error;
3168 
3169 vf->tx_rate = max_tx_rate;
3170 error:
3171 return ret;
3172 }
3173 
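/* Example (illustrative): with a reasonably recent iproute2, the Tx rate
 * limit handler above corresponds to:
 *
 *	ip link set dev eth0 vf 0 max_tx_rate 1000
 *
 * min_tx_rate must be left at 0, as this driver does not support it.
 */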
3174 /**
3175 * i40e_ndo_get_vf_config
3176 * @netdev: network interface device structure
3177 * @vf_id: VF identifier
3178 * @ivi: VF configuration structure
3179 *
3180 * return VF configuration
3181 **/
3182 int i40e_ndo_get_vf_config(struct net_device *netdev,
3183 int vf_id, struct ifla_vf_info *ivi)
3184 {
3185 struct i40e_netdev_priv *np = netdev_priv(netdev);
3186 struct i40e_vsi *vsi = np->vsi;
3187 struct i40e_pf *pf = vsi->back;
3188 struct i40e_vf *vf;
3189 int ret = 0;
3190 
3191 /* validate the request */
3192 if (vf_id >= pf->num_alloc_vfs) {
3193 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3194 ret = -EINVAL;
3195 goto error_param;
3196 }
3197 
3198 vf = &(pf->vf[vf_id]);
3199 /* first vsi is always the LAN vsi */
3200 vsi = pf->vsi[vf->lan_vsi_idx];
3201 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3202 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3203 vf_id);
3204 ret = -EAGAIN;
3205 goto error_param;
3206 }
3207 
3208 ivi->vf = vf_id;
3209 
3210 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
3211 
3212 ivi->max_tx_rate = vf->tx_rate;
3213 ivi->min_tx_rate = 0;
3214 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
3215 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
3216 I40E_VLAN_PRIORITY_SHIFT;
3217 if (!vf->link_forced)
3218 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
3219 else if (vf->link_up)
3220 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
3221 else
3222 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
3223 ivi->spoofchk = vf->spoofchk;
3224 ivi->trusted = vf->trusted;
3225 ret = 0;
3226 
3227 error_param:
3228 return ret;
3229 }
3230 
3231 /**
3232 * i40e_ndo_set_vf_link_state
3233 * @netdev: network interface device structure
3234 * @vf_id: VF identifier
3235 * @link: required link state
3236 *
3237 * Set the link state of a specified VF, regardless of physical link state
3238 **/
3239 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
3240 {
3241 struct i40e_netdev_priv *np = netdev_priv(netdev);
3242 struct i40e_pf *pf = np->vsi->back;
3243 struct virtchnl_pf_event pfe;
3244 struct i40e_hw *hw = &pf->hw;
3245 struct i40e_vf *vf;
3246 int abs_vf_id;
3247 int ret = 0;
3248 
3249 /* validate the request */
3250 if (vf_id >= pf->num_alloc_vfs) {
3251 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3252 ret = -EINVAL;
3253 goto error_out;
3254 }
3255 
3256 vf = &pf->vf[vf_id];
3257 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
3258 
3259 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
3260 pfe.severity = PF_EVENT_SEVERITY_INFO;
3261 
3262 switch (link) {
3263 case IFLA_VF_LINK_STATE_AUTO:
3264 vf->link_forced = false;
3265 pfe.event_data.link_event.link_status =
3266 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
3267 pfe.event_data.link_event.link_speed =
3268 (enum virtchnl_link_speed)
3269 pf->hw.phy.link_info.link_speed;
3270 break;
3271 case IFLA_VF_LINK_STATE_ENABLE:
3272 vf->link_forced = true;
3273 vf->link_up = true;
3274 pfe.event_data.link_event.link_status = true;
3275 pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
3276 break;
3277 case IFLA_VF_LINK_STATE_DISABLE:
3278 vf->link_forced = true;
3279 vf->link_up = false;
3280 pfe.event_data.link_event.link_status = false;
3281 pfe.event_data.link_event.link_speed = 0;
3282 break;
3283 default:
3284 ret = -EINVAL;
3285 goto error_out;
3286 }
3287 /* Notify the VF of its new link state */
3288 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
3289 0, (u8 *)&pfe, sizeof(pfe), NULL);
3290 
3291 error_out:
3292 return ret;
3293 }
3294 
3295 /**
3296 * i40e_ndo_set_vf_spoofchk
3297 * @netdev: network interface device structure
3298 * @vf_id: VF identifier
3299 * @enable: flag to enable or disable feature
3300 *
3301 * Enable or disable VF spoof checking
3302 **/
3303 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
3304 {
3305 struct i40e_netdev_priv *np = netdev_priv(netdev);
3306 struct i40e_vsi *vsi = np->vsi;
3307 struct i40e_pf *pf = vsi->back;
3308 struct i40e_vsi_context ctxt;
3309 struct i40e_hw *hw = &pf->hw;
3310 struct i40e_vf *vf;
3311 int ret = 0;
3312 
3313 /* validate the request */
3314 if (vf_id >= pf->num_alloc_vfs) {
3315 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3316 ret = -EINVAL;
3317 goto out;
3318 }
3319 
3320 vf = &(pf->vf[vf_id]);
3321 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3322 
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", 3323 vf_id); 3324 ret = -EAGAIN; 3325 goto out; 3326 } 3327 3328 if (enable == vf->spoofchk) 3329 goto out; 3330 3331 vf->spoofchk = enable; 3332 memset(&ctxt, 0, sizeof(ctxt)); 3333 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid; 3334 ctxt.pf_num = pf->hw.pf_id; 3335 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 3336 if (enable) 3337 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | 3338 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); 3339 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 3340 if (ret) { 3341 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", 3342 ret); 3343 ret = -EIO; 3344 } 3345 out: 3346 return ret; 3347 } 3348 3349 /** 3350 * i40e_ndo_set_vf_trust 3351 * @netdev: network interface device structure of the pf 3352 * @vf_id: VF identifier 3353 * @setting: trust setting 3354 * 3355 * Enable or disable VF trust setting 3356 **/ 3357 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) 3358 { 3359 struct i40e_netdev_priv *np = netdev_priv(netdev); 3360 struct i40e_pf *pf = np->vsi->back; 3361 struct i40e_vf *vf; 3362 int ret = 0; 3363 3364 /* validate the request */ 3365 if (vf_id >= pf->num_alloc_vfs) { 3366 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 3367 return -EINVAL; 3368 } 3369 3370 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 3371 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n"); 3372 return -EINVAL; 3373 } 3374 3375 vf = &pf->vf[vf_id]; 3376 3377 if (setting == vf->trusted) 3378 goto out; 3379 3380 vf->trusted = setting; 3381 i40e_vc_disable_vf(vf); 3382 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", 3383 vf_id, setting ? "" : "un"); 3384 out: 3385 return ret; 3386 } 3387
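/* Example (illustrative): the remaining ndo hooks in this file map onto
 * iproute2 as follows, with eth0 standing in for the PF interface:
 *
 *	ip link set dev eth0 vf 0 state auto	(i40e_ndo_set_vf_link_state)
 *	ip link set dev eth0 vf 0 spoofchk on	(i40e_ndo_set_vf_spoofchk)
 *	ip link set dev eth0 vf 0 trust on	(i40e_ndo_set_vf_trust)
 *
 * "ip link show dev eth0" reports the resulting per-VF configuration via
 * i40e_ndo_get_vf_config.
 */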