// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}
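
/*
 * Note on addressing (illustrative): virtchnl messages are sent using the
 * absolute VF id, i.e. vf_id offset by the function's vf_base_id.  For
 * example, with vf_base_id 64, messages for VF 3 are addressed to absolute
 * VF 67.  The base value is hardware/NVM dependent.
 */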

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;
	if (vf->link_forced) {
		pfe.event_data.link_event.link_status = vf->link_up;
		pfe.event_data.link_event.link_speed =
			(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
	} else {
		pfe.event_data.link_event.link_status =
			ls->link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			i40e_virtchnl_link_speed(ls->link_speed);
	}
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}
/***********************misc routines*****************************/
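
/*
 * Context (illustrative): i40e_vc_disable_vf() below is the PF's "kick the
 * VF" path.  It is typically reached from ndo callbacks that change VF
 * properties behind the VF driver's back (for example "ip link set <pf>
 * vf <n> ..." operations), where the VF must be reset to pick up the new
 * configuration.
 */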

/**
 * i40e_vc_disable_vf
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset.
 **/
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
	int i;

	i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset occurs after this
	 * function is called. However, we do not want to wait forever, so
	 * we'll give a reasonable time and print a message if we failed to
	 * ensure a reset.
	 */
	for (i = 0; i < 20; i++) {
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	dev_warn(&vf->pf->pdev->dev,
		 "Failed to initiate reset for VF %d after 200 milliseconds\n",
		 vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u16 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}
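
/*
 * Mapping example (illustrative, values made up): with a contiguous queue
 * map whose queue_mapping[0] is 128, VSI-relative queue 3 resolves to PF
 * queue 131.  With I40E_AQ_VSI_QUE_MAP_NONCONTIG set, each entry of
 * queue_mapping[] is an independent PF queue id and is looked up directly.
 */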

/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all the queues (1 to 16) as
		 * its own, they may actually belong to different VSIs (up
		 * to 4). We need to find out which queue belongs to which
		 * VSI; e.g. with four TCs of four queues each, queue id 6
		 * is queue 2 of the second VSI.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find the right queue id which is relative to a
			 * given VSI.
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* If the VF is running in polling mode and using interrupt zero,
	 * we need to disable auto-masking when enabling interrupt zero
	 * for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}
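
/*
 * Bitmap layout used above (illustrative): with
 * I40E_VIRTCHNL_SUPPORTED_QTYPES == 2, RX queue q occupies linklistmap
 * bit 2*q and TX queue q bit 2*q + 1.  So rxq_map = 0x5 (queues 0 and 2)
 * and txq_map = 0x1 (queue 0) produce linklistmap = 0b10011, i.e. the
 * chain RX0 -> TX0 -> RX2 terminated by I40E_QUEUE_END_OF_LIST.
 */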

/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
 *
 **/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}
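
/*
 * Register indexing used by the qvlist code (illustrative): vector v of
 * VF n (v >= 1; vector 0 uses the LNKLST0/DYN_CTL0 registers instead)
 * maps to VPINT_LNKLSTN index (msix_vf - 1) * n + (v - 1).  Assuming the
 * usual five MSI-X vectors per VF, vector 1 of VF 2 lands at index 8.
 */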

/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf;
	int ret = 0;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	if (qvlist_info->num_vectors > msix_vf) {
		dev_warn(&pf->pdev->dev,
			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
			 qvlist_info->num_vectors,
			 msix_vf);
		ret = -EINVAL;
		goto err_out;
	}

	kfree(vf->qvlist_info);
	vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
					      qvlist_info->num_vectors - 1),
				  GFP_KERNEL);
	if (!vf->qvlist_info) {
		ret = -ENOMEM;
		goto err_out;
	}
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
			ret = -EINVAL;
			goto err_free;
		}

		v_idx = qv_info->v_idx;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
			      I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			       (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			       (qv_info->itr_idx <<
				I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err_free:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
err_out:
	return ret;
}
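
/*
 * List manipulation note (illustrative): the loop above inserts the CEQ at
 * the head of the vector's queue list.  If VPINT_LNKLSTN previously
 * pointed at {queue 5, type RX}, the new CEQCTL entry carries {5, RX} in
 * its NEXTQ fields and VPINT_LNKLSTN is rewritten to point at the CEQ, so
 * any queues already sharing the vector remain reachable.
 */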

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
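
/*
 * Unit conventions in the two helpers above (illustrative, assuming the
 * usual i40e granularities): ring base addresses are programmed in
 * 128-byte units, so a DMA address of 0x10000 becomes base 0x200.
 * Likewise rx_ctx.dbuff is in 128-byte units (a 2048-byte data buffer
 * gives dbuff 16) and rx_ctx.hbuff in 64-byte units (a 256-byte header
 * buffer gives hbuff 4).
 */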

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed. Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* store the VSI index and id for ADq; the MAC filter is not
	 * applied for these additional VSIs
	 */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the first part, VSILAN_QTABLE, mapping PF queues
 * to the VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}
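
/*
 * VSILAN_QTABLE packing (illustrative, assuming contiguous PF queues
 * starting at 128 and qps == 4): each register holds two PF queue ids,
 * the even queue in bits 0-15 and the odd queue in bits 16-31, so
 * QTABLE(0) = 0x00810080, QTABLE(1) = 0x00830082, and the remaining
 * entries are 0x07FF07FF (end-of-list in both halves).
 */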

/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part, VPLAN_QTABLE, and completes
 * the VF mappings.
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
		vf->num_mac = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again and only clear their values in
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						      + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						      + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}
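
/*
 * Queue accounting example (illustrative): every VF is pre-charged
 * I40E_DEFAULT_QUEUES_PER_VF queue pairs when it is created.  A VF that
 * later grew to 16 queue pairs via VIRTCHNL_OP_REQUEST_QUEUES returns the
 * extra 16 - I40E_DEFAULT_QUEUES_PER_VF pairs to pf->queues_left in
 * i40e_free_vf_res() above, and i40e_alloc_vf_res() below performs the
 * matching subtraction.
 */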

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs. If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * HW.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi);
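
/*
 * Note on the constants used by i40e_quiesce_vf_pci() above (illustrative
 * reading, not taken from a datasheet): CIAA/CIAD give the PF a window
 * into VF config space; VF_DEVICE_STATUS (0xAA) appears to be the
 * config-space offset of the PCIe Device Status register and
 * VF_TRANS_PENDING_MASK (0x20) its Transactions Pending bit, so the loop
 * polls until the VF has no outstanding non-posted requests.
 */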

/**
 * i40e_config_vf_promiscuous_mode
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
 *
 * Called from the VF to configure the promiscuous mode of
 * VF vsis and from the VF reset path to reset promiscuous mode.
 **/
static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
						   u16 vsi_id,
						   bool allmulti,
						   bool alluni)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_mac_filter *f;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int bkt;

	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
		return I40E_ERR_PARAM;

	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
							    allmulti,
							    vf->port_vlan_id,
							    NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			return aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
							    alluni,
							    vf->port_vlan_id,
							    NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
		}
		return aq_ret;
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
								    vsi->seid,
								    allmulti,
								    f->vlan,
								    NULL);
			if (aq_ret) {
				int aq_err = pf->hw.aq.asq_last_status;

				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
			}

			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
								    vsi->seid,
								    alluni,
								    f->vlan,
								    NULL);
			if (aq_ret) {
				int aq_err = pf->hw.aq.asq_last_status;

				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
			}
		}
		return aq_ret;
	}
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti,
						       NULL);
	if (aq_ret) {
		int aq_err = pf->hw.aq.asq_last_status;

		dev_err(&pf->pdev->dev,
			"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
			vf->vf_id,
			i40e_stat_str(&pf->hw, aq_ret),
			i40e_aq_str(&pf->hw, aq_err));
		return aq_ret;
	}

	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, alluni,
						     NULL, true);
	if (aq_ret) {
		int aq_err = pf->hw.aq.asq_last_status;

		dev_err(&pf->pdev->dev,
			"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
			vf->vf_id,
			i40e_stat_str(&pf->hw, aq_ret),
			i40e_aq_str(&pf->hw, aq_err));
	}

	return aq_ret;
}

/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;

	/* warn the VF */
	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer to
	 * do it earlier to give any VF config functions that may still be
	 * running at this point some time to finish.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}
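
/*
 * GLGEN_VFLRSTAT indexing (illustrative): the VFLR status bit for an
 * absolute VF id n lives in register n / 32, bit n % 32.  For example,
 * with vf_base_id 64 and vf_id 3, absolute VF 67 maps to
 * GLGEN_VFLRSTAT(2), bit 3.
 */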

/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* disable promisc modes in case they were enabled */
	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is reset, false otherwise.
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	/* If the VFs have been disabled, this means something else is
	 * resetting the VF, so we shouldn't continue.
	 */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF to fail
		 * the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* HW may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}
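
/*
 * Polling pattern above (illustrative): with four VFs, the outer loop
 * performs at most ten 10-20 ms sleeps in total, not per VF.  The inner
 * cursor v only advances past a VF once its VFRD bit is set, so a single
 * slow VF pins v in place and is also the VF named in the timeout message.
 */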

/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

#endif
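
/*
 * Typical trigger path (illustrative): writing the VF count to sysfs,
 * e.g. "echo 4 > /sys/bus/pci/devices/<bdf>/sriov_numvfs", invokes the
 * driver's sriov_configure callback, i40e_pci_sriov_configure(), which
 * switches the PF into VEB mode if needed and then reaches
 * i40e_pci_sriov_enable()/i40e_alloc_vfs() below.
 */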

/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
		}
		ret = i40e_pci_sriov_enable(pdev, num_vfs);
		goto sriov_configure_out;
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		ret = -EINVAL;
		goto sriov_configure_out;
	}
sriov_configure_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received. */
		vf->num_invalid_msgs = 0;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}
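
/*
 * Version negotiation example (illustrative): the PF advertises
 * VIRTCHNL_VERSION_MAJOR.MINOR (1.1), but if the VF announced itself as
 * 1.0 the reply is downgraded to minor VIRTCHNL_VERSION_MINOR_NO_VF_CAPS
 * so that legacy VF drivers, which cannot parse capability flags, still
 * get an answer they understand.
 */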

/**
 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
 * @vf: pointer to VF structure
 **/
static void i40e_del_qch(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* first element in the array belongs to primary VF VSI and we
	 * shouldn't delete it. We should however delete the rest of the
	 * VSIs created.
	 */
	for (i = 1; i < vf->num_tc; i++) {
		if (vf->ch[i].vsi_idx) {
			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
			vf->ch[i].vsi_idx = 0;
			vf->ch[i].vsi_id = 0;
		}
	}
}

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	size_t len = 0;
	int ret;

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = struct_size(vfres, vsi_res, num_vsis);
	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	} else {
		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				vf->vf_id);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
1932 vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; 1933 vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; 1934 1935 if (vf->lan_vsi_idx) { 1936 vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; 1937 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; 1938 vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs; 1939 /* VFs only use TC 0 */ 1940 vfres->vsi_res[0].qset_handle 1941 = le16_to_cpu(vsi->info.qs_handle[0]); 1942 ether_addr_copy(vfres->vsi_res[0].default_mac_addr, 1943 vf->default_lan_addr.addr); 1944 } 1945 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 1946 1947 err: 1948 /* send the response back to the VF */ 1949 ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, 1950 aq_ret, (u8 *)vfres, len); 1951 1952 kfree(vfres); 1953 return ret; 1954 } 1955 1956 /** 1957 * i40e_vc_reset_vf_msg 1958 * @vf: pointer to the VF info 1959 * 1960 * called from the VF to reset itself, 1961 * unlike other virtchnl messages, PF driver 1962 * doesn't send the response back to the VF 1963 **/ 1964 static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) 1965 { 1966 if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 1967 i40e_reset_vf(vf, false); 1968 } 1969 1970 /** 1971 * i40e_getnum_vf_vsi_vlan_filters 1972 * @vsi: pointer to the vsi 1973 * 1974 * called to get the number of VLANs offloaded on this VF 1975 **/ 1976 static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) 1977 { 1978 struct i40e_mac_filter *f; 1979 int num_vlans = 0, bkt; 1980 1981 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { 1982 if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) 1983 num_vlans++; 1984 } 1985 1986 return num_vlans; 1987 } 1988 1989 /** 1990 * i40e_vc_config_promiscuous_mode_msg 1991 * @vf: pointer to the VF info 1992 * @msg: pointer to the msg buffer 1993 * 1994 * called from the VF to configure the promiscuous mode of 1995 * VF vsis 1996 **/ 1997 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) 1998 { 1999 struct virtchnl_promisc_info *info = 2000 (struct virtchnl_promisc_info *)msg; 2001 struct i40e_pf *pf = vf->pf; 2002 i40e_status aq_ret = 0; 2003 bool allmulti = false; 2004 bool alluni = false; 2005 2006 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2007 aq_ret = I40E_ERR_PARAM; 2008 goto err_out; 2009 } 2010 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 2011 dev_err(&pf->pdev->dev, 2012 "Unprivileged VF %d is attempting to configure promiscuous mode\n", 2013 vf->vf_id); 2014 2015 /* Lie to the VF on purpose, because this is an error we can 2016 * ignore. Unprivileged VF is not a virtual channel error. 
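* In practice the unprivileged VF simply receives a success (0)
* response while its promiscuous request is dropped; only the host
* log above records that the request was refused.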
2017 */ 2018 aq_ret = 0; 2019 goto err_out; 2020 } 2021 2022 if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) { 2023 aq_ret = I40E_ERR_PARAM; 2024 goto err_out; 2025 } 2026 2027 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) { 2028 aq_ret = I40E_ERR_PARAM; 2029 goto err_out; 2030 } 2031 2032 /* Multicast promiscuous handling*/ 2033 if (info->flags & FLAG_VF_MULTICAST_PROMISC) 2034 allmulti = true; 2035 2036 if (info->flags & FLAG_VF_UNICAST_PROMISC) 2037 alluni = true; 2038 aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti, 2039 alluni); 2040 if (!aq_ret) { 2041 if (allmulti) { 2042 dev_info(&pf->pdev->dev, 2043 "VF %d successfully set multicast promiscuous mode\n", 2044 vf->vf_id); 2045 set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); 2046 } else { 2047 dev_info(&pf->pdev->dev, 2048 "VF %d successfully unset multicast promiscuous mode\n", 2049 vf->vf_id); 2050 clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); 2051 } 2052 if (alluni) { 2053 dev_info(&pf->pdev->dev, 2054 "VF %d successfully set unicast promiscuous mode\n", 2055 vf->vf_id); 2056 set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); 2057 } else { 2058 dev_info(&pf->pdev->dev, 2059 "VF %d successfully unset unicast promiscuous mode\n", 2060 vf->vf_id); 2061 clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); 2062 } 2063 } 2064 err_out: 2065 /* send the response to the VF */ 2066 return i40e_vc_send_resp_to_vf(vf, 2067 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 2068 aq_ret); 2069 } 2070 2071 /** 2072 * i40e_vc_config_queues_msg 2073 * @vf: pointer to the VF info 2074 * @msg: pointer to the msg buffer 2075 * 2076 * called from the VF to configure the rx/tx 2077 * queues 2078 **/ 2079 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) 2080 { 2081 struct virtchnl_vsi_queue_config_info *qci = 2082 (struct virtchnl_vsi_queue_config_info *)msg; 2083 struct virtchnl_queue_pair_info *qpi; 2084 struct i40e_pf *pf = vf->pf; 2085 u16 vsi_id, vsi_queue_id = 0; 2086 u16 num_qps_all = 0; 2087 i40e_status aq_ret = 0; 2088 int i, j = 0, idx = 0; 2089 2090 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2091 aq_ret = I40E_ERR_PARAM; 2092 goto error_param; 2093 } 2094 2095 if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) { 2096 aq_ret = I40E_ERR_PARAM; 2097 goto error_param; 2098 } 2099 2100 if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) { 2101 aq_ret = I40E_ERR_PARAM; 2102 goto error_param; 2103 } 2104 2105 if (vf->adq_enabled) { 2106 for (i = 0; i < I40E_MAX_VF_VSI; i++) 2107 num_qps_all += vf->ch[i].num_qps; 2108 if (num_qps_all != qci->num_queue_pairs) { 2109 aq_ret = I40E_ERR_PARAM; 2110 goto error_param; 2111 } 2112 } 2113 2114 vsi_id = qci->vsi_id; 2115 2116 for (i = 0; i < qci->num_queue_pairs; i++) { 2117 qpi = &qci->qpair[i]; 2118 2119 if (!vf->adq_enabled) { 2120 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, 2121 qpi->txq.queue_id)) { 2122 aq_ret = I40E_ERR_PARAM; 2123 goto error_param; 2124 } 2125 2126 vsi_queue_id = qpi->txq.queue_id; 2127 2128 if (qpi->txq.vsi_id != qci->vsi_id || 2129 qpi->rxq.vsi_id != qci->vsi_id || 2130 qpi->rxq.queue_id != vsi_queue_id) { 2131 aq_ret = I40E_ERR_PARAM; 2132 goto error_param; 2133 } 2134 } 2135 2136 if (vf->adq_enabled) { 2137 if (idx >= ARRAY_SIZE(vf->ch)) { 2138 aq_ret = I40E_ERR_NO_AVAILABLE_VSI; 2139 goto error_param; 2140 } 2141 vsi_id = vf->ch[idx].vsi_id; 2142 } 2143 2144 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id, 2145 &qpi->rxq) || 2146 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id, 2147 &qpi->txq)) { 2148 aq_ret = I40E_ERR_PARAM; 2149 goto 
error_param;
2150 }
2151
2152 /* For ADq there can be up to 4 VSIs with max 4 queues each.
2153 * VF does not know about these additional VSIs and all
2154 * it cares about is its own queues. PF configures these queues
2155 * to its appropriate VSIs based on TC mapping
2156 **/
2157 if (vf->adq_enabled) {
2158 if (idx >= ARRAY_SIZE(vf->ch)) {
2159 aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2160 goto error_param;
2161 }
2162 if (j == (vf->ch[idx].num_qps - 1)) {
2163 idx++;
2164 j = 0; /* resetting the queue count */
2165 vsi_queue_id = 0;
2166 } else {
2167 j++;
2168 vsi_queue_id++;
2169 }
2170 }
2171 }
2172 /* set vsi num_queue_pairs in use to num configured by VF */
2173 if (!vf->adq_enabled) {
2174 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2175 qci->num_queue_pairs;
2176 } else {
2177 for (i = 0; i < vf->num_tc; i++)
2178 pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
2179 vf->ch[i].num_qps;
2180 }
2181
2182 error_param:
2183 /* send the response to the VF */
2184 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2185 aq_ret);
2186 }
2187
2188 /**
2189 * i40e_validate_queue_map
* @vf: pointer to the VF info
2190 * @vsi_id: vsi id
2191 * @queuemap: Tx or Rx queue map
2192 *
2193 * check if Tx or Rx queue map is valid
2194 **/
2195 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2196 unsigned long queuemap)
2197 {
2198 u16 vsi_queue_id, queue_id;
2199
2200 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
2201 if (vf->adq_enabled) {
2202 vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2203 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2204 } else {
2205 queue_id = vsi_queue_id;
2206 }
2207
2208 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2209 return -EINVAL;
2210 }
2211
2212 return 0;
2213 }
2214
2215 /**
2216 * i40e_vc_config_irq_map_msg
2217 * @vf: pointer to the VF info
2218 * @msg: pointer to the msg buffer
2219 *
2220 * called from the VF to configure the irq to
2221 * queue map
2222 **/
2223 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
2224 {
2225 struct virtchnl_irq_map_info *irqmap_info =
2226 (struct virtchnl_irq_map_info *)msg;
2227 struct virtchnl_vector_map *map;
2228 u16 vsi_id;
2229 i40e_status aq_ret = 0;
2230 int i;
2231
2232 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2233 aq_ret = I40E_ERR_PARAM;
2234 goto error_param;
2235 }
2236
2237 if (irqmap_info->num_vectors >
2238 vf->pf->hw.func_caps.num_msix_vectors_vf) {
2239 aq_ret = I40E_ERR_PARAM;
2240 goto error_param;
2241 }
2242
2243 for (i = 0; i < irqmap_info->num_vectors; i++) {
2244 map = &irqmap_info->vecmap[i];
2245 /* validate msg params */
2246 if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
2247 !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
2248 aq_ret = I40E_ERR_PARAM;
2249 goto error_param;
2250 }
2251 vsi_id = map->vsi_id;
2252
2253 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2254 aq_ret = I40E_ERR_PARAM;
2255 goto error_param;
2256 }
2257
2258 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2259 aq_ret = I40E_ERR_PARAM;
2260 goto error_param;
2261 }
2262
2263 i40e_config_irq_link_list(vf, vsi_id, map);
2264 }
2265 error_param:
2266 /* send the response to the VF */
2267 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2268 aq_ret);
2269 }
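/* Illustrative sketch (not driver code): how a vector's queue bitmap
 * is decoded by i40e_validate_queue_map() above when ADq is enabled.
 * Assuming the values implied by the ADq comments in this file
 * (I40E_MAX_VF_VSI = 4 channel VSIs with I40E_DEFAULT_QUEUES_PER_VF =
 * 4 queues each), bit 6 of map->rxq_map resolves to:
 *
 *	vsi_id   = vf->ch[6 / 4].vsi_id;	// channel VSI 1
 *	queue_id = 6 % 4;			// its queue 2
 *
 * i.e. the VF numbers its queues linearly while the PF spreads them
 * across the per-TC VSIs created on the VF's behalf.
 */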
2270
2271 /**
2272 * i40e_ctrl_vf_tx_rings
2273 * @vsi: the SRIOV VSI being configured
2274 * @q_map: bit map of the queues to be enabled
2275 * @enable: start or stop the queue
2276 **/
2277 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2278 bool enable)
2279 {
2280 struct i40e_pf *pf = vsi->back;
2281 int ret = 0;
2282 u16 q_id;
2283
2284 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2285 ret = i40e_control_wait_tx_q(vsi->seid, pf,
2286 vsi->base_queue + q_id,
2287 false /*is xdp*/, enable);
2288 if (ret)
2289 break;
2290 }
2291 return ret;
2292 }
2293
2294 /**
2295 * i40e_ctrl_vf_rx_rings
2296 * @vsi: the SRIOV VSI being configured
2297 * @q_map: bit map of the queues to be enabled
2298 * @enable: start or stop the queue
2299 **/
2300 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2301 bool enable)
2302 {
2303 struct i40e_pf *pf = vsi->back;
2304 int ret = 0;
2305 u16 q_id;
2306
2307 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2308 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2309 enable);
2310 if (ret)
2311 break;
2312 }
2313 return ret;
2314 }
2315
2316 /**
2317 * i40e_vc_enable_queues_msg
2318 * @vf: pointer to the VF info
2319 * @msg: pointer to the msg buffer
2320 *
2321 * called from the VF to enable all or specific queue(s)
2322 **/
2323 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2324 {
2325 struct virtchnl_queue_select *vqs =
2326 (struct virtchnl_queue_select *)msg;
2327 struct i40e_pf *pf = vf->pf;
2328 i40e_status aq_ret = 0;
2329 int i;
2330
2331 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2332 aq_ret = I40E_ERR_PARAM;
2333 goto error_param;
2334 }
2335
2336 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2337 aq_ret = I40E_ERR_PARAM;
2338 goto error_param;
2339 }
2340
2341 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
2342 aq_ret = I40E_ERR_PARAM;
2343 goto error_param;
2344 }
2345
2346 /* Use the queue bit map sent by the VF */
2347 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2348 true)) {
2349 aq_ret = I40E_ERR_TIMEOUT;
2350 goto error_param;
2351 }
2352 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2353 true)) {
2354 aq_ret = I40E_ERR_TIMEOUT;
2355 goto error_param;
2356 }
2357
2358 /* need to start the rings for additional ADq VSIs as well */
2359 if (vf->adq_enabled) {
2360 /* zero belongs to LAN VSI */
2361 for (i = 1; i < vf->num_tc; i++) {
2362 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2363 aq_ret = I40E_ERR_TIMEOUT;
2364 }
2365 }
2366
2367 error_param:
2368 /* send the response to the VF */
2369 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2370 aq_ret);
2371 }
2372
2373 /**
2374 * i40e_vc_disable_queues_msg
2375 * @vf: pointer to the VF info
2376 * @msg: pointer to the msg buffer
2377 *
2378 * called from the VF to disable all or specific
2379 * queue(s)
2380 **/
2381 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2382 {
2383 struct virtchnl_queue_select *vqs =
2384 (struct virtchnl_queue_select *)msg;
2385 struct i40e_pf *pf = vf->pf;
2386 i40e_status aq_ret = 0;
2387
2388 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2389 aq_ret = I40E_ERR_PARAM;
2390 goto error_param;
2391 }
2392
2393 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2394 aq_ret = I40E_ERR_PARAM;
2395 goto error_param;
2396 }
2397
2398 if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) ||
2399 vqs->rx_queues > I40E_MAX_VF_QUEUES ||
2400 vqs->tx_queues > I40E_MAX_VF_QUEUES) {
2401 aq_ret = I40E_ERR_PARAM;
2402 goto error_param;
2403 }
2404
2405 /* Use the queue bit map sent by the VF */
2406 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2407 false)) {
2408 aq_ret = I40E_ERR_TIMEOUT;
2409 goto error_param;
2410 }
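/* Example of the bitmap convention used here (illustrative): with
 * vqs->tx_queues == 0x0005, the helper above walks set bits 0 and 2
 * only, each addressed in hardware as vsi->base_queue + q_id.
 */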
2411 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2412 false)) { 2413 aq_ret = I40E_ERR_TIMEOUT; 2414 goto error_param; 2415 } 2416 error_param: 2417 /* send the response to the VF */ 2418 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, 2419 aq_ret); 2420 } 2421 2422 /** 2423 * i40e_vc_request_queues_msg 2424 * @vf: pointer to the VF info 2425 * @msg: pointer to the msg buffer 2426 * 2427 * VFs get a default number of queues but can use this message to request a 2428 * different number. If the request is successful, PF will reset the VF and 2429 * return 0. If unsuccessful, PF will send message informing VF of number of 2430 * available queues and return result of sending VF a message. 2431 **/ 2432 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) 2433 { 2434 struct virtchnl_vf_res_request *vfres = 2435 (struct virtchnl_vf_res_request *)msg; 2436 u16 req_pairs = vfres->num_queue_pairs; 2437 u8 cur_pairs = vf->num_queue_pairs; 2438 struct i40e_pf *pf = vf->pf; 2439 2440 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 2441 return -EINVAL; 2442 2443 if (req_pairs > I40E_MAX_VF_QUEUES) { 2444 dev_err(&pf->pdev->dev, 2445 "VF %d tried to request more than %d queues.\n", 2446 vf->vf_id, 2447 I40E_MAX_VF_QUEUES); 2448 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES; 2449 } else if (req_pairs - cur_pairs > pf->queues_left) { 2450 dev_warn(&pf->pdev->dev, 2451 "VF %d requested %d more queues, but only %d left.\n", 2452 vf->vf_id, 2453 req_pairs - cur_pairs, 2454 pf->queues_left); 2455 vfres->num_queue_pairs = pf->queues_left + cur_pairs; 2456 } else { 2457 /* successful request */ 2458 vf->num_req_queues = req_pairs; 2459 i40e_vc_notify_vf_reset(vf); 2460 i40e_reset_vf(vf, false); 2461 return 0; 2462 } 2463 2464 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0, 2465 (u8 *)vfres, sizeof(*vfres)); 2466 } 2467 2468 /** 2469 * i40e_vc_get_stats_msg 2470 * @vf: pointer to the VF info 2471 * @msg: pointer to the msg buffer 2472 * 2473 * called from the VF to get vsi stats 2474 **/ 2475 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) 2476 { 2477 struct virtchnl_queue_select *vqs = 2478 (struct virtchnl_queue_select *)msg; 2479 struct i40e_pf *pf = vf->pf; 2480 struct i40e_eth_stats stats; 2481 i40e_status aq_ret = 0; 2482 struct i40e_vsi *vsi; 2483 2484 memset(&stats, 0, sizeof(struct i40e_eth_stats)); 2485 2486 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2487 aq_ret = I40E_ERR_PARAM; 2488 goto error_param; 2489 } 2490 2491 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2492 aq_ret = I40E_ERR_PARAM; 2493 goto error_param; 2494 } 2495 2496 vsi = pf->vsi[vf->lan_vsi_idx]; 2497 if (!vsi) { 2498 aq_ret = I40E_ERR_PARAM; 2499 goto error_param; 2500 } 2501 i40e_update_eth_stats(vsi); 2502 stats = vsi->eth_stats; 2503 2504 error_param: 2505 /* send the response back to the VF */ 2506 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret, 2507 (u8 *)&stats, sizeof(stats)); 2508 } 2509 2510 /* If the VF is not trusted restrict the number of MAC/VLAN it can program 2511 * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast 2512 */ 2513 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1) 2514 #define I40E_VC_MAX_VLAN_PER_VF 16 2515 2516 /** 2517 * i40e_check_vf_permission 2518 * @vf: pointer to the VF info 2519 * @al: MAC address list from virtchnl 2520 * 2521 * Check that the given list of MAC addresses is allowed. Will return -EPERM 2522 * if any address in the list is not valid. 
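* For example, an untrusted VF that already holds its full allowance
* of I40E_VC_MAX_MAC_ADDR_PER_VF (16 multicast + 1 LAN + 1 broadcast
* = 18) filters will have any further add request rejected here.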
Checks the following conditions:
2523 *
2524 * 1) broadcast and zero addresses are never valid
2525 * 2) unicast addresses are not allowed if the VMM has administratively set
2526 * the VF MAC address, unless the VF is marked as privileged.
2527 * 3) There is enough space to add all the addresses.
2528 *
2529 * Note that to guarantee consistency, it is expected this function be called
2530 * while holding the mac_filter_hash_lock, as otherwise the current number of
2531 * addresses might not be accurate.
2532 **/
2533 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2534 struct virtchnl_ether_addr_list *al)
2535 {
2536 struct i40e_pf *pf = vf->pf;
2537 int i;
2538
2539 /* If this VF is not privileged, then we can't add more than a limited
2540 * number of addresses. Check to make sure that the additions do not
2541 * push us over the limit.
2542 */
2543 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2544 (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) {
2545 dev_err(&pf->pdev->dev,
2546 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2547 return -EPERM;
2548 }
2549
2550 for (i = 0; i < al->num_elements; i++) {
2551 u8 *addr = al->list[i].addr;
2552
2553 if (is_broadcast_ether_addr(addr) ||
2554 is_zero_ether_addr(addr)) {
2555 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2556 addr);
2557 return I40E_ERR_INVALID_MAC_ADDR;
2558 }
2559
2560 /* If the host VMM administrator has set the VF MAC address
2561 * administratively via the ndo_set_vf_mac command then deny
2562 * permission to the VF to add or delete unicast MAC addresses.
2563 * Unless the VF is privileged and then it can do whatever.
2564 * The VF may request to set the MAC address filter already
2565 * assigned to it so do not return an error in that case.
2566 */
2567 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2568 !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2569 !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2570 dev_err(&pf->pdev->dev,
2571 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2572 return -EPERM;
2573 }
2574 }
2575
2576 return 0;
2577 }
2578
2579 /**
2580 * i40e_vc_add_mac_addr_msg
2581 * @vf: pointer to the VF info
2582 * @msg: pointer to the msg buffer
2583 *
2584 * add guest mac address filter
2585 **/
2586 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2587 {
2588 struct virtchnl_ether_addr_list *al =
2589 (struct virtchnl_ether_addr_list *)msg;
2590 struct i40e_pf *pf = vf->pf;
2591 struct i40e_vsi *vsi = NULL;
2592 i40e_status ret = 0;
2593 int i;
2594
2595 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2596 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2597 ret = I40E_ERR_PARAM;
2598 goto error_param;
2599 }
2600
2601 vsi = pf->vsi[vf->lan_vsi_idx];
2602
2603 /* Lock once, because all functions inside the loop below access the
2604 * VSI's MAC filter list, which needs to be protected by the same lock.
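* Holding it across both i40e_check_vf_permission() and the
* i40e_find_mac()/i40e_add_mac_filter() calls below keeps the filter
* count seen by the permission check consistent with the adds that
* follow.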
2605 */ 2606 spin_lock_bh(&vsi->mac_filter_hash_lock); 2607 2608 ret = i40e_check_vf_permission(vf, al); 2609 if (ret) { 2610 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2611 goto error_param; 2612 } 2613 2614 /* add new addresses to the list */ 2615 for (i = 0; i < al->num_elements; i++) { 2616 struct i40e_mac_filter *f; 2617 2618 f = i40e_find_mac(vsi, al->list[i].addr); 2619 if (!f) { 2620 f = i40e_add_mac_filter(vsi, al->list[i].addr); 2621 2622 if (!f) { 2623 dev_err(&pf->pdev->dev, 2624 "Unable to add MAC filter %pM for VF %d\n", 2625 al->list[i].addr, vf->vf_id); 2626 ret = I40E_ERR_PARAM; 2627 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2628 goto error_param; 2629 } else { 2630 vf->num_mac++; 2631 } 2632 } 2633 } 2634 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2635 2636 /* program the updated filter list */ 2637 ret = i40e_sync_vsi_filters(vsi); 2638 if (ret) 2639 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 2640 vf->vf_id, ret); 2641 2642 error_param: 2643 /* send the response to the VF */ 2644 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, 2645 ret); 2646 } 2647 2648 /** 2649 * i40e_vc_del_mac_addr_msg 2650 * @vf: pointer to the VF info 2651 * @msg: pointer to the msg buffer 2652 * 2653 * remove guest mac address filter 2654 **/ 2655 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) 2656 { 2657 struct virtchnl_ether_addr_list *al = 2658 (struct virtchnl_ether_addr_list *)msg; 2659 struct i40e_pf *pf = vf->pf; 2660 struct i40e_vsi *vsi = NULL; 2661 i40e_status ret = 0; 2662 int i; 2663 2664 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2665 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { 2666 ret = I40E_ERR_PARAM; 2667 goto error_param; 2668 } 2669 2670 for (i = 0; i < al->num_elements; i++) { 2671 if (is_broadcast_ether_addr(al->list[i].addr) || 2672 is_zero_ether_addr(al->list[i].addr)) { 2673 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n", 2674 al->list[i].addr, vf->vf_id); 2675 ret = I40E_ERR_INVALID_MAC_ADDR; 2676 goto error_param; 2677 } 2678 2679 if (vf->pf_set_mac && 2680 ether_addr_equal(al->list[i].addr, 2681 vf->default_lan_addr.addr)) { 2682 dev_err(&pf->pdev->dev, 2683 "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n", 2684 vf->default_lan_addr.addr, vf->vf_id); 2685 ret = I40E_ERR_PARAM; 2686 goto error_param; 2687 } 2688 } 2689 vsi = pf->vsi[vf->lan_vsi_idx]; 2690 2691 spin_lock_bh(&vsi->mac_filter_hash_lock); 2692 /* delete addresses from the list */ 2693 for (i = 0; i < al->num_elements; i++) 2694 if (i40e_del_mac_filter(vsi, al->list[i].addr)) { 2695 ret = I40E_ERR_INVALID_MAC_ADDR; 2696 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2697 goto error_param; 2698 } else { 2699 vf->num_mac--; 2700 } 2701 2702 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2703 2704 /* program the updated filter list */ 2705 ret = i40e_sync_vsi_filters(vsi); 2706 if (ret) 2707 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 2708 vf->vf_id, ret); 2709 2710 error_param: 2711 /* send the response to the VF */ 2712 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, 2713 ret); 2714 } 2715 2716 /** 2717 * i40e_vc_add_vlan_msg 2718 * @vf: pointer to the VF info 2719 * @msg: pointer to the msg buffer 2720 * 2721 * program guest vlan id 2722 **/ 2723 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) 2724 { 2725 struct virtchnl_vlan_filter_list *vfl = 2726 (struct virtchnl_vlan_filter_list *)msg; 2727 struct i40e_pf *pf = vf->pf; 2728 
struct i40e_vsi *vsi = NULL; 2729 i40e_status aq_ret = 0; 2730 int i; 2731 2732 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) && 2733 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 2734 dev_err(&pf->pdev->dev, 2735 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n"); 2736 goto error_param; 2737 } 2738 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2739 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { 2740 aq_ret = I40E_ERR_PARAM; 2741 goto error_param; 2742 } 2743 2744 for (i = 0; i < vfl->num_elements; i++) { 2745 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 2746 aq_ret = I40E_ERR_PARAM; 2747 dev_err(&pf->pdev->dev, 2748 "invalid VF VLAN id %d\n", vfl->vlan_id[i]); 2749 goto error_param; 2750 } 2751 } 2752 vsi = pf->vsi[vf->lan_vsi_idx]; 2753 if (vsi->info.pvid) { 2754 aq_ret = I40E_ERR_PARAM; 2755 goto error_param; 2756 } 2757 2758 i40e_vlan_stripping_enable(vsi); 2759 for (i = 0; i < vfl->num_elements; i++) { 2760 /* add new VLAN filter */ 2761 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); 2762 if (!ret) 2763 vf->num_vlan++; 2764 2765 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 2766 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 2767 true, 2768 vfl->vlan_id[i], 2769 NULL); 2770 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 2771 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 2772 true, 2773 vfl->vlan_id[i], 2774 NULL); 2775 2776 if (ret) 2777 dev_err(&pf->pdev->dev, 2778 "Unable to add VLAN filter %d for VF %d, error %d\n", 2779 vfl->vlan_id[i], vf->vf_id, ret); 2780 } 2781 2782 error_param: 2783 /* send the response to the VF */ 2784 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret); 2785 } 2786 2787 /** 2788 * i40e_vc_remove_vlan_msg 2789 * @vf: pointer to the VF info 2790 * @msg: pointer to the msg buffer 2791 * 2792 * remove programmed guest vlan id 2793 **/ 2794 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) 2795 { 2796 struct virtchnl_vlan_filter_list *vfl = 2797 (struct virtchnl_vlan_filter_list *)msg; 2798 struct i40e_pf *pf = vf->pf; 2799 struct i40e_vsi *vsi = NULL; 2800 i40e_status aq_ret = 0; 2801 int i; 2802 2803 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2804 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { 2805 aq_ret = I40E_ERR_PARAM; 2806 goto error_param; 2807 } 2808 2809 for (i = 0; i < vfl->num_elements; i++) { 2810 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 2811 aq_ret = I40E_ERR_PARAM; 2812 goto error_param; 2813 } 2814 } 2815 2816 vsi = pf->vsi[vf->lan_vsi_idx]; 2817 if (vsi->info.pvid) { 2818 if (vfl->num_elements > 1 || vfl->vlan_id[0]) 2819 aq_ret = I40E_ERR_PARAM; 2820 goto error_param; 2821 } 2822 2823 for (i = 0; i < vfl->num_elements; i++) { 2824 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); 2825 vf->num_vlan--; 2826 2827 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 2828 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 2829 false, 2830 vfl->vlan_id[i], 2831 NULL); 2832 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 2833 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 2834 false, 2835 vfl->vlan_id[i], 2836 NULL); 2837 } 2838 2839 error_param: 2840 /* send the response to the VF */ 2841 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret); 2842 } 2843 2844 /** 2845 * i40e_vc_iwarp_msg 2846 * @vf: pointer to the VF info 2847 * @msg: pointer to the msg buffer 2848 * @msglen: msg length 2849 * 2850 * called from the VF for the iwarp msgs 2851 **/ 2852 static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, 
u16 msglen) 2853 { 2854 struct i40e_pf *pf = vf->pf; 2855 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; 2856 i40e_status aq_ret = 0; 2857 2858 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2859 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 2860 aq_ret = I40E_ERR_PARAM; 2861 goto error_param; 2862 } 2863 2864 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id, 2865 msg, msglen); 2866 2867 error_param: 2868 /* send the response to the VF */ 2869 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP, 2870 aq_ret); 2871 } 2872 2873 /** 2874 * i40e_vc_iwarp_qvmap_msg 2875 * @vf: pointer to the VF info 2876 * @msg: pointer to the msg buffer 2877 * @config: config qvmap or release it 2878 * 2879 * called from the VF for the iwarp msgs 2880 **/ 2881 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) 2882 { 2883 struct virtchnl_iwarp_qvlist_info *qvlist_info = 2884 (struct virtchnl_iwarp_qvlist_info *)msg; 2885 i40e_status aq_ret = 0; 2886 2887 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2888 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 2889 aq_ret = I40E_ERR_PARAM; 2890 goto error_param; 2891 } 2892 2893 if (config) { 2894 if (i40e_config_iwarp_qvlist(vf, qvlist_info)) 2895 aq_ret = I40E_ERR_PARAM; 2896 } else { 2897 i40e_release_iwarp_qvlist(vf); 2898 } 2899 2900 error_param: 2901 /* send the response to the VF */ 2902 return i40e_vc_send_resp_to_vf(vf, 2903 config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP : 2904 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, 2905 aq_ret); 2906 } 2907 2908 /** 2909 * i40e_vc_config_rss_key 2910 * @vf: pointer to the VF info 2911 * @msg: pointer to the msg buffer 2912 * 2913 * Configure the VF's RSS key 2914 **/ 2915 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) 2916 { 2917 struct virtchnl_rss_key *vrk = 2918 (struct virtchnl_rss_key *)msg; 2919 struct i40e_pf *pf = vf->pf; 2920 struct i40e_vsi *vsi = NULL; 2921 i40e_status aq_ret = 0; 2922 2923 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2924 !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) || 2925 (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { 2926 aq_ret = I40E_ERR_PARAM; 2927 goto err; 2928 } 2929 2930 vsi = pf->vsi[vf->lan_vsi_idx]; 2931 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0); 2932 err: 2933 /* send the response to the VF */ 2934 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, 2935 aq_ret); 2936 } 2937 2938 /** 2939 * i40e_vc_config_rss_lut 2940 * @vf: pointer to the VF info 2941 * @msg: pointer to the msg buffer 2942 * 2943 * Configure the VF's RSS LUT 2944 **/ 2945 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) 2946 { 2947 struct virtchnl_rss_lut *vrl = 2948 (struct virtchnl_rss_lut *)msg; 2949 struct i40e_pf *pf = vf->pf; 2950 struct i40e_vsi *vsi = NULL; 2951 i40e_status aq_ret = 0; 2952 u16 i; 2953 2954 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2955 !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) || 2956 (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { 2957 aq_ret = I40E_ERR_PARAM; 2958 goto err; 2959 } 2960 2961 for (i = 0; i < vrl->lut_entries; i++) 2962 if (vrl->lut[i] >= vf->num_queue_pairs) { 2963 aq_ret = I40E_ERR_PARAM; 2964 goto err; 2965 } 2966 2967 vsi = pf->vsi[vf->lan_vsi_idx]; 2968 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); 2969 /* send the response to the VF */ 2970 err: 2971 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, 2972 aq_ret); 2973 } 2974 2975 /** 2976 * i40e_vc_get_rss_hena 2977 * @vf: pointer to the VF info 2978 * 
@msg: pointer to the msg buffer
2979 *
2980 * Return the RSS HENA bits allowed by the hardware
2981 **/
2982 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
2983 {
2984 struct virtchnl_rss_hena *vrh = NULL;
2985 struct i40e_pf *pf = vf->pf;
2986 i40e_status aq_ret = 0;
2987 int len = 0;
2988
2989 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2990 aq_ret = I40E_ERR_PARAM;
2991 goto err;
2992 }
2993 len = sizeof(struct virtchnl_rss_hena);
2994
2995 vrh = kzalloc(len, GFP_KERNEL);
2996 if (!vrh) {
2997 aq_ret = I40E_ERR_NO_MEMORY;
2998 len = 0;
2999 goto err;
3000 }
3001 vrh->hena = i40e_pf_get_default_rss_hena(pf);
3002 err:
3003 /* send the response back to the VF */
3004 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3005 aq_ret, (u8 *)vrh, len);
3006 kfree(vrh);
3007 return aq_ret;
3008 }
3009
3010 /**
3011 * i40e_vc_set_rss_hena
3012 * @vf: pointer to the VF info
3013 * @msg: pointer to the msg buffer
3014 *
3015 * Set the RSS HENA bits for the VF
3016 **/
3017 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3018 {
3019 struct virtchnl_rss_hena *vrh =
3020 (struct virtchnl_rss_hena *)msg;
3021 struct i40e_pf *pf = vf->pf;
3022 struct i40e_hw *hw = &pf->hw;
3023 i40e_status aq_ret = 0;
3024
3025 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3026 aq_ret = I40E_ERR_PARAM;
3027 goto err;
3028 }
3029 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3030 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3031 (u32)(vrh->hena >> 32));
3032
3033 /* send the response to the VF */
3034 err:
3035 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
3036 }
3037
3038 /**
3039 * i40e_vc_enable_vlan_stripping
3040 * @vf: pointer to the VF info
3041 * @msg: pointer to the msg buffer
3042 *
3043 * Enable vlan header stripping for the VF
3044 **/
3045 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3046 {
3047 i40e_status aq_ret = 0;
3048 struct i40e_vsi *vsi;
3049
3050 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3051 aq_ret = I40E_ERR_PARAM;
3052 goto err;
3053 }
3054
3055 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3056 i40e_vlan_stripping_enable(vsi);
3057
3058 /* send the response to the VF */
3059 err:
3060 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3061 aq_ret);
3062 }
3063
3064 /**
3065 * i40e_vc_disable_vlan_stripping
3066 * @vf: pointer to the VF info
3067 * @msg: pointer to the msg buffer
3068 *
3069 * Disable vlan header stripping for the VF
3070 **/
3071 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3072 {
3073 i40e_status aq_ret = 0;
3074 struct i40e_vsi *vsi;
3075
3076 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3077 aq_ret = I40E_ERR_PARAM;
3078 goto err;
3079 }
3080
3081 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3082 i40e_vlan_stripping_disable(vsi);
3083
3084 /* send the response to the VF */
3085 err:
3086 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3087 aq_ret);
3088 }
3089
3090 /**
3091 * i40e_validate_cloud_filter
3092 * @vf: pointer to the VF info
3093 * @tc_filter: pointer to the virtchnl TC filter
3094 *
3095 * This function validates a cloud filter programmed as a TC filter for ADq
3096 **/
3097 static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3098 struct virtchnl_filter *tc_filter)
3099 {
3100 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3101 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3102 struct i40e_pf *pf = vf->pf;
3103 struct i40e_vsi *vsi = NULL;
3104
struct i40e_mac_filter *f; 3105 struct hlist_node *h; 3106 bool found = false; 3107 int bkt; 3108 3109 if (!tc_filter->action) { 3110 dev_info(&pf->pdev->dev, 3111 "VF %d: Currently ADq doesn't support Drop Action\n", 3112 vf->vf_id); 3113 goto err; 3114 } 3115 3116 /* action_meta is TC number here to which the filter is applied */ 3117 if (!tc_filter->action_meta || 3118 tc_filter->action_meta > I40E_MAX_VF_VSI) { 3119 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", 3120 vf->vf_id, tc_filter->action_meta); 3121 goto err; 3122 } 3123 3124 /* Check filter if it's programmed for advanced mode or basic mode. 3125 * There are two ADq modes (for VF only), 3126 * 1. Basic mode: intended to allow as many filter options as possible 3127 * to be added to a VF in Non-trusted mode. Main goal is 3128 * to add filters to its own MAC and VLAN id. 3129 * 2. Advanced mode: is for allowing filters to be applied other than 3130 * its own MAC or VLAN. This mode requires the VF to be 3131 * Trusted. 3132 */ 3133 if (mask.dst_mac[0] && !mask.dst_ip[0]) { 3134 vsi = pf->vsi[vf->lan_vsi_idx]; 3135 f = i40e_find_mac(vsi, data.dst_mac); 3136 3137 if (!f) { 3138 dev_info(&pf->pdev->dev, 3139 "Destination MAC %pM doesn't belong to VF %d\n", 3140 data.dst_mac, vf->vf_id); 3141 goto err; 3142 } 3143 3144 if (mask.vlan_id) { 3145 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, 3146 hlist) { 3147 if (f->vlan == ntohs(data.vlan_id)) { 3148 found = true; 3149 break; 3150 } 3151 } 3152 if (!found) { 3153 dev_info(&pf->pdev->dev, 3154 "VF %d doesn't have any VLAN id %u\n", 3155 vf->vf_id, ntohs(data.vlan_id)); 3156 goto err; 3157 } 3158 } 3159 } else { 3160 /* Check if VF is trusted */ 3161 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 3162 dev_err(&pf->pdev->dev, 3163 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n", 3164 vf->vf_id); 3165 return I40E_ERR_CONFIG; 3166 } 3167 } 3168 3169 if (mask.dst_mac[0] & data.dst_mac[0]) { 3170 if (is_broadcast_ether_addr(data.dst_mac) || 3171 is_zero_ether_addr(data.dst_mac)) { 3172 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n", 3173 vf->vf_id, data.dst_mac); 3174 goto err; 3175 } 3176 } 3177 3178 if (mask.src_mac[0] & data.src_mac[0]) { 3179 if (is_broadcast_ether_addr(data.src_mac) || 3180 is_zero_ether_addr(data.src_mac)) { 3181 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n", 3182 vf->vf_id, data.src_mac); 3183 goto err; 3184 } 3185 } 3186 3187 if (mask.dst_port & data.dst_port) { 3188 if (!data.dst_port) { 3189 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n", 3190 vf->vf_id); 3191 goto err; 3192 } 3193 } 3194 3195 if (mask.src_port & data.src_port) { 3196 if (!data.src_port) { 3197 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n", 3198 vf->vf_id); 3199 goto err; 3200 } 3201 } 3202 3203 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW && 3204 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) { 3205 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n", 3206 vf->vf_id); 3207 goto err; 3208 } 3209 3210 if (mask.vlan_id & data.vlan_id) { 3211 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) { 3212 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n", 3213 vf->vf_id); 3214 goto err; 3215 } 3216 } 3217 3218 return I40E_SUCCESS; 3219 err: 3220 return I40E_ERR_CONFIG; 3221 } 3222 3223 /** 3224 * i40e_find_vsi_from_seid - searches for the vsi with the given seid 3225 * @vf: pointer to the VF info 3226 * @seid - seid of the vsi it is searching for 3227 **/ 3228 static struct i40e_vsi 
*i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3229 {
3230 struct i40e_pf *pf = vf->pf;
3231 struct i40e_vsi *vsi = NULL;
3232 int i;
3233
3234 for (i = 0; i < vf->num_tc; i++) {
3235 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3236 if (vsi && vsi->seid == seid)
3237 return vsi;
3238 }
3239 return NULL;
3240 }
3241
3242 /**
3243 * i40e_del_all_cloud_filters
3244 * @vf: pointer to the VF info
3245 *
3246 * This function deletes all cloud filters
3247 **/
3248 static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3249 {
3250 struct i40e_cloud_filter *cfilter = NULL;
3251 struct i40e_pf *pf = vf->pf;
3252 struct i40e_vsi *vsi = NULL;
3253 struct hlist_node *node;
3254 int ret;
3255
3256 hlist_for_each_entry_safe(cfilter, node,
3257 &vf->cloud_filter_list, cloud_node) {
3258 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3259
3260 if (!vsi) {
3261 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3262 vf->vf_id, cfilter->seid);
3263 continue;
3264 }
3265
3266 if (cfilter->dst_port)
3267 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3268 false);
3269 else
3270 ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3271 if (ret)
3272 dev_err(&pf->pdev->dev,
3273 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3274 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3275 i40e_aq_str(&pf->hw,
3276 pf->hw.aq.asq_last_status));
3277
3278 hlist_del(&cfilter->cloud_node);
3279 kfree(cfilter);
3280 vf->num_cloud_filters--;
3281 }
3282 }
3283
3284 /**
3285 * i40e_vc_del_cloud_filter
3286 * @vf: pointer to the VF info
3287 * @msg: pointer to the msg buffer
3288 *
3289 * This function deletes a cloud filter programmed as TC filter for ADq
3290 **/
3291 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3292 {
3293 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3294 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3295 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3296 struct i40e_cloud_filter cfilter, *cf = NULL;
3297 struct i40e_pf *pf = vf->pf;
3298 struct i40e_vsi *vsi = NULL;
3299 struct hlist_node *node;
3300 i40e_status aq_ret = 0;
3301 int i, ret;
3302
3303 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3304 aq_ret = I40E_ERR_PARAM;
3305 goto err;
3306 }
3307
3308 if (!vf->adq_enabled) {
3309 dev_info(&pf->pdev->dev,
3310 "VF %d: ADq not enabled, can't apply cloud filter\n",
3311 vf->vf_id);
3312 aq_ret = I40E_ERR_PARAM;
3313 goto err;
3314 }
3315
3316 if (i40e_validate_cloud_filter(vf, vcf)) {
3317 dev_info(&pf->pdev->dev,
3318 "VF %d: Invalid input, can't apply cloud filter\n",
3319 vf->vf_id);
3320 aq_ret = I40E_ERR_PARAM;
3321 goto err;
3322 }
3323
3324 memset(&cfilter, 0, sizeof(cfilter));
3325 /* parse destination mac address */
3326 for (i = 0; i < ETH_ALEN; i++)
3327 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3328
3329 /* parse source mac address */
3330 for (i = 0; i < ETH_ALEN; i++)
3331 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3332
3333 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3334 cfilter.dst_port = mask.dst_port & tcf.dst_port;
3335 cfilter.src_port = mask.src_port & tcf.src_port;
3336
3337 switch (vcf->flow_type) {
3338 case VIRTCHNL_TCP_V4_FLOW:
3339 cfilter.n_proto = ETH_P_IP;
3340 if (mask.dst_ip[0] & tcf.dst_ip[0])
3341 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3342 ARRAY_SIZE(tcf.dst_ip));
3343 else if (mask.src_ip[0] & tcf.src_ip[0])
3344 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3345 ARRAY_SIZE(tcf.dst_ip));
3346 break;
3347 case
VIRTCHNL_TCP_V6_FLOW: 3348 cfilter.n_proto = ETH_P_IPV6; 3349 if (mask.dst_ip[3] & tcf.dst_ip[3]) 3350 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip, 3351 sizeof(cfilter.ip.v6.dst_ip6)); 3352 if (mask.src_ip[3] & tcf.src_ip[3]) 3353 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip, 3354 sizeof(cfilter.ip.v6.src_ip6)); 3355 break; 3356 default: 3357 /* TC filter can be configured based on different combinations 3358 * and in this case IP is not a part of filter config 3359 */ 3360 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 3361 vf->vf_id); 3362 } 3363 3364 /* get the vsi to which the tc belongs to */ 3365 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 3366 cfilter.seid = vsi->seid; 3367 cfilter.flags = vcf->field_flags; 3368 3369 /* Deleting TC filter */ 3370 if (tcf.dst_port) 3371 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false); 3372 else 3373 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false); 3374 if (ret) { 3375 dev_err(&pf->pdev->dev, 3376 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", 3377 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3378 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 3379 goto err; 3380 } 3381 3382 hlist_for_each_entry_safe(cf, node, 3383 &vf->cloud_filter_list, cloud_node) { 3384 if (cf->seid != cfilter.seid) 3385 continue; 3386 if (mask.dst_port) 3387 if (cfilter.dst_port != cf->dst_port) 3388 continue; 3389 if (mask.dst_mac[0]) 3390 if (!ether_addr_equal(cf->src_mac, cfilter.src_mac)) 3391 continue; 3392 /* for ipv4 data to be valid, only first byte of mask is set */ 3393 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0]) 3394 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip, 3395 ARRAY_SIZE(tcf.dst_ip))) 3396 continue; 3397 /* for ipv6, mask is set for all sixteen bytes (4 words) */ 3398 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3]) 3399 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6, 3400 sizeof(cfilter.ip.v6.src_ip6))) 3401 continue; 3402 if (mask.vlan_id) 3403 if (cfilter.vlan_id != cf->vlan_id) 3404 continue; 3405 3406 hlist_del(&cf->cloud_node); 3407 kfree(cf); 3408 vf->num_cloud_filters--; 3409 } 3410 3411 err: 3412 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER, 3413 aq_ret); 3414 } 3415 3416 /** 3417 * i40e_vc_add_cloud_filter 3418 * @vf: pointer to the VF info 3419 * @msg: pointer to the msg buffer 3420 * 3421 * This function adds a cloud filter programmed as TC filter for ADq 3422 **/ 3423 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) 3424 { 3425 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 3426 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3427 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3428 struct i40e_cloud_filter *cfilter = NULL; 3429 struct i40e_pf *pf = vf->pf; 3430 struct i40e_vsi *vsi = NULL; 3431 i40e_status aq_ret = 0; 3432 int i, ret; 3433 3434 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3435 aq_ret = I40E_ERR_PARAM; 3436 goto err_out; 3437 } 3438 3439 if (!vf->adq_enabled) { 3440 dev_info(&pf->pdev->dev, 3441 "VF %d: ADq is not enabled, can't apply cloud filter\n", 3442 vf->vf_id); 3443 aq_ret = I40E_ERR_PARAM; 3444 goto err_out; 3445 } 3446 3447 if (i40e_validate_cloud_filter(vf, vcf)) { 3448 dev_info(&pf->pdev->dev, 3449 "VF %d: Invalid input/s, can't apply cloud filter\n", 3450 vf->vf_id); 3451 aq_ret = I40E_ERR_PARAM; 3452 goto err_out; 3453 } 3454 3455 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); 3456 if (!cfilter) 3457 return -ENOMEM; 3458 3459 /* parse destination mac address */ 3460 for (i = 0; i < 
ETH_ALEN; i++)
3461 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3462
3463 /* parse source mac address */
3464 for (i = 0; i < ETH_ALEN; i++)
3465 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3466
3467 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3468 cfilter->dst_port = mask.dst_port & tcf.dst_port;
3469 cfilter->src_port = mask.src_port & tcf.src_port;
3470
3471 switch (vcf->flow_type) {
3472 case VIRTCHNL_TCP_V4_FLOW:
3473 cfilter->n_proto = ETH_P_IP;
3474 if (mask.dst_ip[0] & tcf.dst_ip[0])
3475 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3476 ARRAY_SIZE(tcf.dst_ip));
3477 else if (mask.src_ip[0] & tcf.src_ip[0])
3478 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3479 ARRAY_SIZE(tcf.dst_ip));
3480 break;
3481 case VIRTCHNL_TCP_V6_FLOW:
3482 cfilter->n_proto = ETH_P_IPV6;
3483 if (mask.dst_ip[3] & tcf.dst_ip[3])
3484 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3485 sizeof(cfilter->ip.v6.dst_ip6));
3486 if (mask.src_ip[3] & tcf.src_ip[3])
3487 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3488 sizeof(cfilter->ip.v6.src_ip6));
3489 break;
3490 default:
3491 /* TC filter can be configured based on different combinations
3492 * and in this case IP is not a part of filter config
3493 */
3494 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3495 vf->vf_id);
3496 }
3497
3498 /* get the VSI to which the TC belongs to */
3499 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3500 cfilter->seid = vsi->seid;
3501 cfilter->flags = vcf->field_flags;
3502
3503 /* Adding cloud filter programmed as TC filter */
3504 if (tcf.dst_port)
3505 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3506 else
3507 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3508 if (ret) {
3509 dev_err(&pf->pdev->dev,
3510 "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
3511 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3512 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3513 goto err_free;
3514 }
3515
3516 INIT_HLIST_NODE(&cfilter->cloud_node);
3517 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3518 /* release the pointer passing it to the collection */
3519 cfilter = NULL;
3520 vf->num_cloud_filters++;
3521 err_free:
3522 kfree(cfilter);
3523 err_out:
3524 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3525 aq_ret);
3526 }
3527
3528 /**
3529 * i40e_vc_add_qch_msg: Add queue channel and enable ADq
3530 * @vf: pointer to the VF info
3531 * @msg: pointer to the msg buffer
3532 **/
3533 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3534 {
3535 struct virtchnl_tc_info *tci =
3536 (struct virtchnl_tc_info *)msg;
3537 struct i40e_pf *pf = vf->pf;
3538 struct i40e_link_status *ls = &pf->hw.phy.link_info;
3539 int i, adq_request_qps = 0;
3540 i40e_status aq_ret = 0;
3541 u64 speed = 0;
3542
3543 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3544 aq_ret = I40E_ERR_PARAM;
3545 goto err;
3546 }
3547
3548 /* ADq cannot be applied if spoof check is ON */
3549 if (vf->spoofchk) {
3550 dev_err(&pf->pdev->dev,
3551 "Spoof check is ON, turn it OFF to enable ADq\n");
3552 aq_ret = I40E_ERR_PARAM;
3553 goto err;
3554 }
3555
3556 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3557 dev_err(&pf->pdev->dev,
3558 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3559 vf->vf_id);
3560 aq_ret = I40E_ERR_PARAM;
3561 goto err;
3562 }
3563
3564 /* max number of traffic classes for VF currently capped at 4 */
3565 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3566 dev_err(&pf->pdev->dev,
3567 "VF %d
trying to set %u TCs, valid range 1-%u TCs per VF\n",
3568 vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
3569 aq_ret = I40E_ERR_PARAM;
3570 goto err;
3571 }
3572
3573 /* validate queues for each TC */
3574 for (i = 0; i < tci->num_tc; i++)
3575 if (!tci->list[i].count ||
3576 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3577 dev_err(&pf->pdev->dev,
3578 "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
3579 vf->vf_id, i, tci->list[i].count,
3580 I40E_DEFAULT_QUEUES_PER_VF);
3581 aq_ret = I40E_ERR_PARAM;
3582 goto err;
3583 }
3584
3585 /* need Max VF queues but already have default number of queues */
3586 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
3587
3588 if (pf->queues_left < adq_request_qps) {
3589 dev_err(&pf->pdev->dev,
3590 "No queues left to allocate to VF %d\n",
3591 vf->vf_id);
3592 aq_ret = I40E_ERR_PARAM;
3593 goto err;
3594 } else {
3595 /* we need to allocate max VF queues to enable ADq so as to
3596 * make sure ADq enabled VF always gets back queues when it
3597 * goes through a reset.
3598 */
3599 vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
3600 }
3601
3602 /* get link speed in Mbps to validate rate limit */
3603 switch (ls->link_speed) {
3604 case VIRTCHNL_LINK_SPEED_100MB:
3605 speed = SPEED_100;
3606 break;
3607 case VIRTCHNL_LINK_SPEED_1GB:
3608 speed = SPEED_1000;
3609 break;
3610 case VIRTCHNL_LINK_SPEED_10GB:
3611 speed = SPEED_10000;
3612 break;
3613 case VIRTCHNL_LINK_SPEED_20GB:
3614 speed = SPEED_20000;
3615 break;
3616 case VIRTCHNL_LINK_SPEED_25GB:
3617 speed = SPEED_25000;
3618 break;
3619 case VIRTCHNL_LINK_SPEED_40GB:
3620 speed = SPEED_40000;
3621 break;
3622 default:
3623 dev_err(&pf->pdev->dev,
3624 "Cannot detect link speed\n");
3625 aq_ret = I40E_ERR_PARAM;
3626 goto err;
3627 }
3628
3629 /* parse data from the queue channel info */
3630 vf->num_tc = tci->num_tc;
3631 for (i = 0; i < vf->num_tc; i++) {
3632 if (tci->list[i].max_tx_rate) {
3633 if (tci->list[i].max_tx_rate > speed) {
3634 dev_err(&pf->pdev->dev,
3635 "Invalid max tx rate %llu specified for VF %d.",
3636 tci->list[i].max_tx_rate,
3637 vf->vf_id);
3638 aq_ret = I40E_ERR_PARAM;
3639 goto err;
3640 } else {
3641 vf->ch[i].max_tx_rate =
3642 tci->list[i].max_tx_rate;
3643 }
3644 }
3645 vf->ch[i].num_qps = tci->list[i].count;
3646 }
3647
3648 /* set this flag only after making sure all inputs are sane */
3649 vf->adq_enabled = true;
3650 /* num_req_queues is set when the user changes the number of queues
3651 * via ethtool, and this causes an issue for the default VSI (which
3652 * depends on this variable) when ADq is enabled, hence reset it.
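* (Left non-zero, it would make the post-reset VSI sizing follow the
* stale ethtool request instead of the I40E_MAX_VF_QUEUES allocation
* chosen above.)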
3653 */ 3654 vf->num_req_queues = 0; 3655 3656 /* reset the VF in order to allocate resources */ 3657 i40e_vc_notify_vf_reset(vf); 3658 i40e_reset_vf(vf, false); 3659 3660 return I40E_SUCCESS; 3661 3662 /* send the response to the VF */ 3663 err: 3664 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS, 3665 aq_ret); 3666 } 3667 3668 /** 3669 * i40e_vc_del_qch_msg 3670 * @vf: pointer to the VF info 3671 * @msg: pointer to the msg buffer 3672 **/ 3673 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) 3674 { 3675 struct i40e_pf *pf = vf->pf; 3676 i40e_status aq_ret = 0; 3677 3678 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3679 aq_ret = I40E_ERR_PARAM; 3680 goto err; 3681 } 3682 3683 if (vf->adq_enabled) { 3684 i40e_del_all_cloud_filters(vf); 3685 i40e_del_qch(vf); 3686 vf->adq_enabled = false; 3687 vf->num_tc = 0; 3688 dev_info(&pf->pdev->dev, 3689 "Deleting Queue Channels and cloud filters for ADq on VF %d\n", 3690 vf->vf_id); 3691 } else { 3692 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n", 3693 vf->vf_id); 3694 aq_ret = I40E_ERR_PARAM; 3695 } 3696 3697 /* reset the VF in order to allocate resources */ 3698 i40e_vc_notify_vf_reset(vf); 3699 i40e_reset_vf(vf, false); 3700 3701 return I40E_SUCCESS; 3702 3703 err: 3704 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS, 3705 aq_ret); 3706 } 3707 3708 /** 3709 * i40e_vc_process_vf_msg 3710 * @pf: pointer to the PF structure 3711 * @vf_id: source VF id 3712 * @v_opcode: operation code 3713 * @v_retval: unused return value code 3714 * @msg: pointer to the msg buffer 3715 * @msglen: msg length 3716 * 3717 * called from the common aeq/arq handler to 3718 * process request from VF 3719 **/ 3720 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, 3721 u32 __always_unused v_retval, u8 *msg, u16 msglen) 3722 { 3723 struct i40e_hw *hw = &pf->hw; 3724 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id; 3725 struct i40e_vf *vf; 3726 int ret; 3727 3728 pf->vf_aq_requests++; 3729 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs) 3730 return -EINVAL; 3731 vf = &(pf->vf[local_vf_id]); 3732 3733 /* Check if VF is disabled. 
*/ 3734 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states)) 3735 return I40E_ERR_PARAM; 3736 3737 /* perform basic checks on the msg */ 3738 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); 3739 3740 if (ret) { 3741 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); 3742 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", 3743 local_vf_id, v_opcode, msglen); 3744 switch (ret) { 3745 case VIRTCHNL_STATUS_ERR_PARAM: 3746 return -EPERM; 3747 default: 3748 return -EINVAL; 3749 } 3750 } 3751 3752 switch (v_opcode) { 3753 case VIRTCHNL_OP_VERSION: 3754 ret = i40e_vc_get_version_msg(vf, msg); 3755 break; 3756 case VIRTCHNL_OP_GET_VF_RESOURCES: 3757 ret = i40e_vc_get_vf_resources_msg(vf, msg); 3758 i40e_vc_notify_vf_link_state(vf); 3759 break; 3760 case VIRTCHNL_OP_RESET_VF: 3761 i40e_vc_reset_vf_msg(vf); 3762 ret = 0; 3763 break; 3764 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 3765 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg); 3766 break; 3767 case VIRTCHNL_OP_CONFIG_VSI_QUEUES: 3768 ret = i40e_vc_config_queues_msg(vf, msg); 3769 break; 3770 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 3771 ret = i40e_vc_config_irq_map_msg(vf, msg); 3772 break; 3773 case VIRTCHNL_OP_ENABLE_QUEUES: 3774 ret = i40e_vc_enable_queues_msg(vf, msg); 3775 i40e_vc_notify_vf_link_state(vf); 3776 break; 3777 case VIRTCHNL_OP_DISABLE_QUEUES: 3778 ret = i40e_vc_disable_queues_msg(vf, msg); 3779 break; 3780 case VIRTCHNL_OP_ADD_ETH_ADDR: 3781 ret = i40e_vc_add_mac_addr_msg(vf, msg); 3782 break; 3783 case VIRTCHNL_OP_DEL_ETH_ADDR: 3784 ret = i40e_vc_del_mac_addr_msg(vf, msg); 3785 break; 3786 case VIRTCHNL_OP_ADD_VLAN: 3787 ret = i40e_vc_add_vlan_msg(vf, msg); 3788 break; 3789 case VIRTCHNL_OP_DEL_VLAN: 3790 ret = i40e_vc_remove_vlan_msg(vf, msg); 3791 break; 3792 case VIRTCHNL_OP_GET_STATS: 3793 ret = i40e_vc_get_stats_msg(vf, msg); 3794 break; 3795 case VIRTCHNL_OP_IWARP: 3796 ret = i40e_vc_iwarp_msg(vf, msg, msglen); 3797 break; 3798 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 3799 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true); 3800 break; 3801 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: 3802 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false); 3803 break; 3804 case VIRTCHNL_OP_CONFIG_RSS_KEY: 3805 ret = i40e_vc_config_rss_key(vf, msg); 3806 break; 3807 case VIRTCHNL_OP_CONFIG_RSS_LUT: 3808 ret = i40e_vc_config_rss_lut(vf, msg); 3809 break; 3810 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: 3811 ret = i40e_vc_get_rss_hena(vf, msg); 3812 break; 3813 case VIRTCHNL_OP_SET_RSS_HENA: 3814 ret = i40e_vc_set_rss_hena(vf, msg); 3815 break; 3816 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 3817 ret = i40e_vc_enable_vlan_stripping(vf, msg); 3818 break; 3819 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 3820 ret = i40e_vc_disable_vlan_stripping(vf, msg); 3821 break; 3822 case VIRTCHNL_OP_REQUEST_QUEUES: 3823 ret = i40e_vc_request_queues_msg(vf, msg); 3824 break; 3825 case VIRTCHNL_OP_ENABLE_CHANNELS: 3826 ret = i40e_vc_add_qch_msg(vf, msg); 3827 break; 3828 case VIRTCHNL_OP_DISABLE_CHANNELS: 3829 ret = i40e_vc_del_qch_msg(vf, msg); 3830 break; 3831 case VIRTCHNL_OP_ADD_CLOUD_FILTER: 3832 ret = i40e_vc_add_cloud_filter(vf, msg); 3833 break; 3834 case VIRTCHNL_OP_DEL_CLOUD_FILTER: 3835 ret = i40e_vc_del_cloud_filter(vf, msg); 3836 break; 3837 case VIRTCHNL_OP_UNKNOWN: 3838 default: 3839 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", 3840 v_opcode, local_vf_id); 3841 ret = i40e_vc_send_resp_to_vf(vf, v_opcode, 3842 I40E_ERR_NOT_IMPLEMENTED); 3843 break; 3844 } 3845 3846 return ret; 3847 } 3848 3849 /** 3850 * 
i40e_vc_process_vflr_event
3851 * @pf: pointer to the PF structure
3852 *
3853 * called from the VFLR irq handler to
3854 * free up VF resources and state variables
3855 **/
3856 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
3857 {
3858 struct i40e_hw *hw = &pf->hw;
3859 u32 reg, reg_idx, bit_idx;
3860 struct i40e_vf *vf;
3861 int vf_id;
3862
3863 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
3864 return 0;
3865
3866 /* Re-enable the VFLR interrupt cause here, before looking for which
3867 * VF got reset. Otherwise, if another VF gets a reset while the
3868 * first one is being processed, that interrupt will be lost, and
3869 * that VF will be stuck in reset forever.
3870 */
3871 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3872 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
3873 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3874 i40e_flush(hw);
3875
3876 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3877 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
3878 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
3879 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
3880 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
3881 vf = &pf->vf[vf_id];
3882 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
3883 if (reg & BIT(bit_idx))
3884 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
3885 i40e_reset_vf(vf, true);
3886 }
3887
3888 return 0;
3889 }
3890
3891 /**
3892 * i40e_validate_vf
3893 * @pf: the physical function
3894 * @vf_id: VF identifier
3895 *
3896 * Check that the VF is enabled and the VSI exists.
3897 *
3898 * Returns 0 on success, negative on failure
3899 **/
3900 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
3901 {
3902 struct i40e_vsi *vsi;
3903 struct i40e_vf *vf;
3904 int ret = 0;
3905
3906 if (vf_id >= pf->num_alloc_vfs) {
3907 dev_err(&pf->pdev->dev,
3908 "Invalid VF Identifier %d\n", vf_id);
3909 ret = -EINVAL;
3910 goto err_out;
3911 }
3912 vf = &pf->vf[vf_id];
3913 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
3914 if (!vsi)
3915 ret = -EINVAL;
3916 err_out:
3917 return ret;
3918 }
3919
3920 /**
3921 * i40e_ndo_set_vf_mac
3922 * @netdev: network interface device structure
3923 * @vf_id: VF identifier
3924 * @mac: mac address
3925 *
3926 * program VF mac address
3927 **/
3928 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3929 {
3930 struct i40e_netdev_priv *np = netdev_priv(netdev);
3931 struct i40e_vsi *vsi = np->vsi;
3932 struct i40e_pf *pf = vsi->back;
3933 struct i40e_mac_filter *f;
3934 struct i40e_vf *vf;
3935 int ret = 0;
3936 struct hlist_node *h;
3937 int bkt;
3938 u8 i;
3939
3940 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
3941 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
3942 return -EAGAIN;
3943 }
3944
3945 /* validate the request */
3946 ret = i40e_validate_vf(pf, vf_id);
3947 if (ret)
3948 goto error_param;
3949
3950 vf = &pf->vf[vf_id];
3951 vsi = pf->vsi[vf->lan_vsi_idx];
3952
3953 /* When the VF is resetting, wait until it is done.
3954 * It can take up to 200 milliseconds,
3955 * but wait for up to 300 milliseconds to be safe.
3956 */
3957 for (i = 0; i < 15; i++) {
3958 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
3959 break;
3960 msleep(20);
3961 }
3962 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3963 dev_err(&pf->pdev->dev, "VF %d still in reset.

/**
 * i40e_validate_vf
 * @pf: the physical function
 * @vf_id: VF identifier
 *
 * Check that the VF is enabled and the VSI exists.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
{
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto err_out;
	}
	vf = &pf->vf[vf_id];
	vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
	if (!vsi)
		ret = -EINVAL;
err_out:
	return ret;
}

/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: mac address
 *
 * program VF mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;
	struct hlist_node *h;
	int bkt;
	u8 i;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_param;

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];

	/* When the VF is resetting wait until it is done.
	 * It can take up to 200 milliseconds,
	 * but wait for up to 300 milliseconds to be safe.
	 */
	for (i = 0; i < 15; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
			break;
		msleep(20);
	}
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_param;
	}

	if (is_multicast_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	/* Lock once here because the add/del-filter helpers invoked below
	 * require mac_filter_hash_lock to be held
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* delete the temporary mac address */
	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);

	/* Delete all the filters for this VSI - we're going to kill it
	 * anyway.
	 */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	ether_addr_copy(vf->default_lan_addr.addr, mac);

	if (is_zero_ether_addr(mac)) {
		vf->pf_set_mac = false;
		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
	} else {
		vf->pf_set_mac = true;
		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
			 mac, vf_id);
	}

	/* Force the VF interface down so it has to bring up with new MAC
	 * address
	 */
	i40e_vc_disable_vf(vf);
	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");

error_param:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
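
/* Usage sketch (illustrative): this ndo is reached from userspace via
 * iproute2, e.g.:
 *
 *	ip link set <pf-netdev> vf 0 mac 00:11:22:33:44:55
 *
 * Passing an all-zero address clears pf_set_mac, so the VF may choose
 * its own MAC again after the reset triggered by i40e_vc_disable_vf()
 * above.
 */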

/**
 * i40e_vsi_has_vlans - True if VSI has configured VLANs
 * @vsi: pointer to the vsi
 *
 * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
 * we have no configured VLANs. Do not call while holding the
 * mac_filter_hash_lock.
 */
static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
{
	bool have_vlans;

	/* If we have a port VLAN, then the VSI cannot have any VLANs
	 * configured, as all MAC/VLAN filters will be assigned to the PVID.
	 */
	if (vsi->info.pvid)
		return false;

	/* Since we don't have a PVID, we know that if the device is in VLAN
	 * mode it must be because of a VLAN filter configured on this VSI.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	have_vlans = i40e_is_vsi_in_vlan(vsi);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	return have_vlans;
}

/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN identifier
 * @qos: priority setting
 * @vlan_proto: vlan protocol
 *
 * program VF vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
			      u16 vlan_id, u8 qos, __be16 vlan_proto)
{
	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	bool allmulti = false, alluni = false;
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_pvid;

	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vlan_proto != htons(ETH_P_8021Q)) {
		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
		ret = -EPROTONOSUPPORT;
		goto error_pvid;
	}

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_pvid;
	}

	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
		/* duplicate request, so just return success */
		goto error_pvid;

	if (i40e_vsi_has_vlans(vsi)) {
		dev_err(&pf->pdev->dev,
			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
			vf_id);
		/* Administrator error - knock the VF offline until the
		 * network is reconfigured correctly and the VF driver is
		 * reloaded.
		 */
		i40e_vc_disable_vf(vf);
		/* During reset the VF got a new VSI, so refresh the pointer. */
		vsi = pf->vsi[vf->lan_vsi_idx];
	}

	/* Locked once because multiple functions below iterate list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* Check for condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Additionally check for the condition where there was a port
	 * VLAN but now there is a new and different port VLAN being set.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if ((!(vlan_id || qos) ||
	     vlanprio != le16_to_cpu(vsi->info.pvid)) &&
	    vsi->info.pvid) {
		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}
	}

	if (vsi->info.pvid) {
		/* remove all filters on the old VLAN */
		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
					   VLAN_VID_MASK));
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* disable promisc modes in case they were enabled */
	ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
					      allmulti, alluni);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
		goto error_pvid;
	}

	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi, vlanprio);
	else
		i40e_vsi_remove_pvid(vsi);
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter for each MAC */
		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}

		/* remove the previously added non-VLAN MAC filters */
		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
		alluni = true;

	if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
		allmulti = true;

	/* Schedule the worker thread to take care of applying changes */
	i40e_service_event_schedule(vsi->back);

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}

	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);

	ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
		goto error_pvid;
	}

	ret = 0;

error_pvid:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
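
/* Worked example (illustrative): "ip link set <pf-netdev> vf 0 vlan 100
 * qos 3" arrives here as vlan_id = 100 and qos = 3, and vlanprio above
 * packs them VLAN-TCI style: the 12-bit VLAN ID in the low bits with
 * the 3-bit priority shifted up by I40E_VLAN_PRIORITY_SHIFT, i.e.
 * 100 | (3 << I40E_VLAN_PRIORITY_SHIFT).
 */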

/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate
 * @max_tx_rate: Maximum Tx rate
 *
 * configure VF Tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
		       int max_tx_rate)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error;

	if (min_tx_rate) {
		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d): minimum Tx rate is not supported, must be 0 for VF %d.\n",
			min_tx_rate, vf_id);
		/* take the common exit path so the
		 * __I40E_VIRTCHNL_OP_PENDING bit is cleared; returning
		 * directly here would leave it set forever
		 */
		ret = -EINVAL;
		goto error;
	}

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error;
	}

	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
	if (ret)
		goto error;

	vf->tx_rate = max_tx_rate;
error:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
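
/* Usage sketch (illustrative): only a maximum rate is accepted, e.g.
 *
 *	ip link set <pf-netdev> vf 0 max_tx_rate 1000
 *
 * caps the VF at 1000 Mbps via i40e_set_bw_limit(); any non-zero
 * min_tx_rate is rejected above because minimum-rate enforcement is not
 * supported by this driver.
 */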

/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_param;

	vf = &pf->vf[vf_id];
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		ret = -ENOENT;
		goto error_param;
	}

	ivi->vf = vf_id;

	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);

	ivi->max_tx_rate = vf->tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->spoofchk = vf->spoofchk;
	ivi->trusted = vf->trusted;
	ret = 0;

error_param:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
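
/* Usage sketch (illustrative): this is the read side of the ndos in
 * this block; "ip link show <pf-netdev>" renders one line per VF from
 * the ifla_vf_info filled in above (MAC, VLAN/QoS, max_tx_rate, link
 * state, spoofchk and trust).
 */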

/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct virtchnl_pf_event pfe;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int abs_vf_id;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_out;
	}

	vf = &pf->vf[vf_id];
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		pfe.event_data.link_event.link_status =
			pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			pf->hw.phy.link_info.link_speed;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		pfe.event_data.link_event.link_status = true;
		pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		pfe.event_data.link_event.link_status = false;
		pfe.event_data.link_event.link_speed = 0;
		break;
	default:
		ret = -EINVAL;
		goto error_out;
	}
	/* Notify the VF of its new link state */
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);

error_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}

/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	vf = &pf->vf[vf_id];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto out;
	}

	if (enable == vf->spoofchk)
		goto out;

	vf->spoofchk = enable;
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
			ret);
		ret = -EIO;
	}
out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
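
/* Usage sketch (illustrative):
 *
 *	ip link set <pf-netdev> vf 0 spoofchk on
 *
 * lands in i40e_ndo_set_vf_spoofchk() above; note that enabling it sets
 * both I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK and
 * I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK, so MAC and VLAN anti-spoof
 * checks are always toggled together in this driver.
 */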

/**
 * i40e_ndo_set_vf_trust
 * @netdev: network interface device structure of the pf
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Enable or disable VF trust setting
 **/
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
		ret = -EINVAL;
		goto out;
	}

	vf = &pf->vf[vf_id];

	if (setting == vf->trusted)
		goto out;

	vf->trusted = setting;
	i40e_vc_disable_vf(vf);
	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
		 vf_id, setting ? "" : "un");

	if (vf->adq_enabled && !vf->trusted) {
		dev_info(&pf->pdev->dev,
			 "VF %u no longer Trusted, deleting all cloud filters\n",
			 vf_id);
		i40e_del_all_cloud_filters(vf);
	}

out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
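
/* Usage sketch (illustrative):
 *
 *	ip link set <pf-netdev> vf 0 trust on
 *
 * Revoking trust ("trust off") resets the VF and, when ADq is enabled,
 * deletes its cloud filters above; privileged requests such as entering
 * promiscuous mode are only honored for trusted VFs elsewhere in this
 * file.
 */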