1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2013 - 2018 Intel Corporation. */ 3 4 #include "i40e.h" 5 6 /*********************notification routines***********************/ 7 8 /** 9 * i40e_vc_vf_broadcast 10 * @pf: pointer to the PF structure 11 * @v_opcode: operation code 12 * @v_retval: return value 13 * @msg: pointer to the msg buffer 14 * @msglen: msg length 15 * 16 * send a message to all VFs on a given PF 17 **/ 18 static void i40e_vc_vf_broadcast(struct i40e_pf *pf, 19 enum virtchnl_ops v_opcode, 20 i40e_status v_retval, u8 *msg, 21 u16 msglen) 22 { 23 struct i40e_hw *hw = &pf->hw; 24 struct i40e_vf *vf = pf->vf; 25 int i; 26 27 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { 28 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; 29 /* Not all vfs are enabled so skip the ones that are not */ 30 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) && 31 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 32 continue; 33 34 /* Ignore return value on purpose - a given VF may fail, but 35 * we need to keep going and send to all of them 36 */ 37 i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, 38 msg, msglen, NULL); 39 } 40 } 41 42 /** 43 * i40e_vc_notify_vf_link_state 44 * @vf: pointer to the VF structure 45 * 46 * send a link status message to a single VF 47 **/ 48 static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) 49 { 50 struct virtchnl_pf_event pfe; 51 struct i40e_pf *pf = vf->pf; 52 struct i40e_hw *hw = &pf->hw; 53 struct i40e_link_status *ls = &pf->hw.phy.link_info; 54 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; 55 56 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; 57 pfe.severity = PF_EVENT_SEVERITY_INFO; 58 if (vf->link_forced) { 59 pfe.event_data.link_event.link_status = vf->link_up; 60 pfe.event_data.link_event.link_speed = 61 (vf->link_up ? i40e_virtchnl_link_speed(ls->link_speed) : 0); 62 } else { 63 pfe.event_data.link_event.link_status = 64 ls->link_info & I40E_AQ_LINK_UP; 65 pfe.event_data.link_event.link_speed = 66 i40e_virtchnl_link_speed(ls->link_speed); 67 } 68 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 69 0, (u8 *)&pfe, sizeof(pfe), NULL); 70 } 71 72 /** 73 * i40e_vc_notify_link_state 74 * @pf: pointer to the PF structure 75 * 76 * send a link status message to all VFs on a given PF 77 **/ 78 void i40e_vc_notify_link_state(struct i40e_pf *pf) 79 { 80 int i; 81 82 for (i = 0; i < pf->num_alloc_vfs; i++) 83 i40e_vc_notify_vf_link_state(&pf->vf[i]); 84 } 85 86 /** 87 * i40e_vc_notify_reset 88 * @pf: pointer to the PF structure 89 * 90 * indicate a pending reset to all VFs on a given PF 91 **/ 92 void i40e_vc_notify_reset(struct i40e_pf *pf) 93 { 94 struct virtchnl_pf_event pfe; 95 96 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; 97 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; 98 i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0, 99 (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); 100 } 101 102 /** 103 * i40e_vc_notify_vf_reset 104 * @vf: pointer to the VF structure 105 * 106 * indicate a pending reset to the given VF 107 **/ 108 void i40e_vc_notify_vf_reset(struct i40e_vf *vf) 109 { 110 struct virtchnl_pf_event pfe; 111 int abs_vf_id; 112 113 /* validate the request */ 114 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) 115 return; 116 117 /* verify if the VF is in either init or active before proceeding */ 118 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) && 119 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 120 return; 121 122 abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id; 123 124 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; 125 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; 126 i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT, 127 0, (u8 *)&pfe, 128 sizeof(struct virtchnl_pf_event), NULL); 129 } 130 /***********************misc routines*****************************/ 131 132 /** 133 * i40e_vc_disable_vf 134 * @vf: pointer to the VF info 135 * 136 * Disable the VF through a SW reset. 137 **/ 138 static inline void i40e_vc_disable_vf(struct i40e_vf *vf) 139 { 140 int i; 141 142 i40e_vc_notify_vf_reset(vf); 143 144 /* We want to ensure that an actual reset occurs initiated after this 145 * function was called. However, we do not want to wait forever, so 146 * we'll give a reasonable time and print a message if we failed to 147 * ensure a reset. 148 */ 149 for (i = 0; i < 20; i++) { 150 if (i40e_reset_vf(vf, false)) 151 return; 152 usleep_range(10000, 20000); 153 } 154 155 dev_warn(&vf->pf->pdev->dev, 156 "Failed to initiate reset for VF %d after 200 milliseconds\n", 157 vf->vf_id); 158 } 159 160 /** 161 * i40e_vc_isvalid_vsi_id 162 * @vf: pointer to the VF info 163 * @vsi_id: VF relative VSI id 164 * 165 * check for the valid VSI id 166 **/ 167 static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id) 168 { 169 struct i40e_pf *pf = vf->pf; 170 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); 171 172 return (vsi && (vsi->vf_id == vf->vf_id)); 173 } 174 175 /** 176 * i40e_vc_isvalid_queue_id 177 * @vf: pointer to the VF info 178 * @vsi_id: vsi id 179 * @qid: vsi relative queue id 180 * 181 * check for the valid queue id 182 **/ 183 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id, 184 u16 qid) 185 { 186 struct i40e_pf *pf = vf->pf; 187 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); 188 189 return (vsi && (qid < vsi->alloc_queue_pairs)); 190 } 191 192 /** 193 * i40e_vc_isvalid_vector_id 194 * @vf: pointer to the VF info 195 * @vector_id: VF relative vector id 196 * 197 * check for the valid vector id 198 **/ 199 static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id) 200 { 201 struct i40e_pf *pf = vf->pf; 202 203 return vector_id < pf->hw.func_caps.num_msix_vectors_vf; 204 } 205 206 /***********************vf resource mgmt routines*****************/ 207 208 /** 209 * i40e_vc_get_pf_queue_id 210 * @vf: pointer to the VF info 211 * @vsi_id: id of VSI as provided by the FW 212 * @vsi_queue_id: vsi relative queue id 213 * 214 * return PF relative queue id 215 **/ 216 static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id, 217 u8 vsi_queue_id) 218 { 219 struct i40e_pf *pf = vf->pf; 220 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); 221 u16 pf_queue_id = I40E_QUEUE_END_OF_LIST; 222 223 if (!vsi) 224 return pf_queue_id; 225 226 if (le16_to_cpu(vsi->info.mapping_flags) & 227 I40E_AQ_VSI_QUE_MAP_NONCONTIG) 228 pf_queue_id = 229 le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]); 230 else 231 pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) + 232 vsi_queue_id; 233 234 return pf_queue_id; 235 } 236 237 /** 238 * i40e_get_real_pf_qid 239 * @vf: pointer to the VF info 240 * @vsi_id: vsi id 241 * @queue_id: queue number 242 * 243 * wrapper function to get pf_queue_id handling ADq code as well 244 **/ 245 static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id) 246 { 247 int i; 248 249 if (vf->adq_enabled) { 250 /* Although VF considers all the queues(can be 1 to 16) as its 251 * own but they may actually belong to different VSIs(up to 4). 252 * We need to find which queues belongs to which VSI. 253 */ 254 for (i = 0; i < vf->num_tc; i++) { 255 if (queue_id < vf->ch[i].num_qps) { 256 vsi_id = vf->ch[i].vsi_id; 257 break; 258 } 259 /* find right queue id which is relative to a 260 * given VSI. 261 */ 262 queue_id -= vf->ch[i].num_qps; 263 } 264 } 265 266 return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id); 267 } 268 269 /** 270 * i40e_config_irq_link_list 271 * @vf: pointer to the VF info 272 * @vsi_id: id of VSI as given by the FW 273 * @vecmap: irq map info 274 * 275 * configure irq link list from the map 276 **/ 277 static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, 278 struct virtchnl_vector_map *vecmap) 279 { 280 unsigned long linklistmap = 0, tempmap; 281 struct i40e_pf *pf = vf->pf; 282 struct i40e_hw *hw = &pf->hw; 283 u16 vsi_queue_id, pf_queue_id; 284 enum i40e_queue_type qtype; 285 u16 next_q, vector_id, size; 286 u32 reg, reg_idx; 287 u16 itr_idx = 0; 288 289 vector_id = vecmap->vector_id; 290 /* setup the head */ 291 if (0 == vector_id) 292 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); 293 else 294 reg_idx = I40E_VPINT_LNKLSTN( 295 ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) + 296 (vector_id - 1)); 297 298 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) { 299 /* Special case - No queues mapped on this vector */ 300 wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK); 301 goto irq_list_done; 302 } 303 tempmap = vecmap->rxq_map; 304 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 305 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES * 306 vsi_queue_id)); 307 } 308 309 tempmap = vecmap->txq_map; 310 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 311 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES * 312 vsi_queue_id + 1)); 313 } 314 315 size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES; 316 next_q = find_first_bit(&linklistmap, size); 317 if (unlikely(next_q == size)) 318 goto irq_list_done; 319 320 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; 321 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; 322 pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id); 323 reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id); 324 325 wr32(hw, reg_idx, reg); 326 327 while (next_q < size) { 328 switch (qtype) { 329 case I40E_QUEUE_TYPE_RX: 330 reg_idx = I40E_QINT_RQCTL(pf_queue_id); 331 itr_idx = vecmap->rxitr_idx; 332 break; 333 case I40E_QUEUE_TYPE_TX: 334 reg_idx = I40E_QINT_TQCTL(pf_queue_id); 335 itr_idx = vecmap->txitr_idx; 336 break; 337 default: 338 break; 339 } 340 341 next_q = find_next_bit(&linklistmap, size, next_q + 1); 342 if (next_q < size) { 343 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; 344 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; 345 pf_queue_id = i40e_get_real_pf_qid(vf, 346 vsi_id, 347 vsi_queue_id); 348 } else { 349 pf_queue_id = I40E_QUEUE_END_OF_LIST; 350 qtype = 0; 351 } 352 353 /* format for the RQCTL & TQCTL regs is same */ 354 reg = (vector_id) | 355 (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | 356 (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | 357 BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | 358 (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT); 359 wr32(hw, reg_idx, reg); 360 } 361 362 /* if the vf is running in polling mode and using interrupt zero, 363 * need to disable auto-mask on enabling zero interrupt for VFs. 364 */ 365 if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) && 366 (vector_id == 0)) { 367 reg = rd32(hw, I40E_GLINT_CTL); 368 if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) { 369 reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK; 370 wr32(hw, I40E_GLINT_CTL, reg); 371 } 372 } 373 374 irq_list_done: 375 i40e_flush(hw); 376 } 377 378 /** 379 * i40e_release_iwarp_qvlist 380 * @vf: pointer to the VF. 381 * 382 **/ 383 static void i40e_release_iwarp_qvlist(struct i40e_vf *vf) 384 { 385 struct i40e_pf *pf = vf->pf; 386 struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info; 387 u32 msix_vf; 388 u32 i; 389 390 if (!vf->qvlist_info) 391 return; 392 393 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 394 for (i = 0; i < qvlist_info->num_vectors; i++) { 395 struct virtchnl_iwarp_qv_info *qv_info; 396 u32 next_q_index, next_q_type; 397 struct i40e_hw *hw = &pf->hw; 398 u32 v_idx, reg_idx, reg; 399 400 qv_info = &qvlist_info->qv_info[i]; 401 if (!qv_info) 402 continue; 403 v_idx = qv_info->v_idx; 404 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) { 405 /* Figure out the queue after CEQ and make that the 406 * first queue. 407 */ 408 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx; 409 reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx)); 410 next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK) 411 >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT; 412 next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK) 413 >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT; 414 415 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); 416 reg = (next_q_index & 417 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) | 418 (next_q_type << 419 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); 420 421 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg); 422 } 423 } 424 kfree(vf->qvlist_info); 425 vf->qvlist_info = NULL; 426 } 427 428 /** 429 * i40e_config_iwarp_qvlist 430 * @vf: pointer to the VF info 431 * @qvlist_info: queue and vector list 432 * 433 * Return 0 on success or < 0 on error 434 **/ 435 static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, 436 struct virtchnl_iwarp_qvlist_info *qvlist_info) 437 { 438 struct i40e_pf *pf = vf->pf; 439 struct i40e_hw *hw = &pf->hw; 440 struct virtchnl_iwarp_qv_info *qv_info; 441 u32 v_idx, i, reg_idx, reg; 442 u32 next_q_idx, next_q_type; 443 u32 msix_vf; 444 int ret = 0; 445 446 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 447 448 if (qvlist_info->num_vectors > msix_vf) { 449 dev_warn(&pf->pdev->dev, 450 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n", 451 qvlist_info->num_vectors, 452 msix_vf); 453 ret = -EINVAL; 454 goto err_out; 455 } 456 457 kfree(vf->qvlist_info); 458 vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info, 459 qvlist_info->num_vectors - 1), 460 GFP_KERNEL); 461 if (!vf->qvlist_info) { 462 ret = -ENOMEM; 463 goto err_out; 464 } 465 vf->qvlist_info->num_vectors = qvlist_info->num_vectors; 466 467 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 468 for (i = 0; i < qvlist_info->num_vectors; i++) { 469 qv_info = &qvlist_info->qv_info[i]; 470 if (!qv_info) 471 continue; 472 473 /* Validate vector id belongs to this vf */ 474 if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) { 475 ret = -EINVAL; 476 goto err_free; 477 } 478 479 v_idx = qv_info->v_idx; 480 481 vf->qvlist_info->qv_info[i] = *qv_info; 482 483 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); 484 /* We might be sharing the interrupt, so get the first queue 485 * index and type, push it down the list by adding the new 486 * queue on top. Also link it with the new queue in CEQCTL. 487 */ 488 reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx)); 489 next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >> 490 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT); 491 next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >> 492 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); 493 494 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) { 495 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx; 496 reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK | 497 (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) | 498 (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) | 499 (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) | 500 (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)); 501 wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg); 502 503 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); 504 reg = (qv_info->ceq_idx & 505 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) | 506 (I40E_QUEUE_TYPE_PE_CEQ << 507 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); 508 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg); 509 } 510 511 if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) { 512 reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK | 513 (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) | 514 (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)); 515 516 wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg); 517 } 518 } 519 520 return 0; 521 err_free: 522 kfree(vf->qvlist_info); 523 vf->qvlist_info = NULL; 524 err_out: 525 return ret; 526 } 527 528 /** 529 * i40e_config_vsi_tx_queue 530 * @vf: pointer to the VF info 531 * @vsi_id: id of VSI as provided by the FW 532 * @vsi_queue_id: vsi relative queue index 533 * @info: config. info 534 * 535 * configure tx queue 536 **/ 537 static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id, 538 u16 vsi_queue_id, 539 struct virtchnl_txq_info *info) 540 { 541 struct i40e_pf *pf = vf->pf; 542 struct i40e_hw *hw = &pf->hw; 543 struct i40e_hmc_obj_txq tx_ctx; 544 struct i40e_vsi *vsi; 545 u16 pf_queue_id; 546 u32 qtx_ctl; 547 int ret = 0; 548 549 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) { 550 ret = -ENOENT; 551 goto error_context; 552 } 553 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); 554 vsi = i40e_find_vsi_from_id(pf, vsi_id); 555 if (!vsi) { 556 ret = -ENOENT; 557 goto error_context; 558 } 559 560 /* clear the context structure first */ 561 memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq)); 562 563 /* only set the required fields */ 564 tx_ctx.base = info->dma_ring_addr / 128; 565 tx_ctx.qlen = info->ring_len; 566 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]); 567 tx_ctx.rdylist_act = 0; 568 tx_ctx.head_wb_ena = info->headwb_enabled; 569 tx_ctx.head_wb_addr = info->dma_headwb_addr; 570 571 /* clear the context in the HMC */ 572 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); 573 if (ret) { 574 dev_err(&pf->pdev->dev, 575 "Failed to clear VF LAN Tx queue context %d, error: %d\n", 576 pf_queue_id, ret); 577 ret = -ENOENT; 578 goto error_context; 579 } 580 581 /* set the context in the HMC */ 582 ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx); 583 if (ret) { 584 dev_err(&pf->pdev->dev, 585 "Failed to set VF LAN Tx queue context %d error: %d\n", 586 pf_queue_id, ret); 587 ret = -ENOENT; 588 goto error_context; 589 } 590 591 /* associate this queue with the PCI VF function */ 592 qtx_ctl = I40E_QTX_CTL_VF_QUEUE; 593 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) 594 & I40E_QTX_CTL_PF_INDX_MASK); 595 qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id) 596 << I40E_QTX_CTL_VFVM_INDX_SHIFT) 597 & I40E_QTX_CTL_VFVM_INDX_MASK); 598 wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl); 599 i40e_flush(hw); 600 601 error_context: 602 return ret; 603 } 604 605 /** 606 * i40e_config_vsi_rx_queue 607 * @vf: pointer to the VF info 608 * @vsi_id: id of VSI as provided by the FW 609 * @vsi_queue_id: vsi relative queue index 610 * @info: config. info 611 * 612 * configure rx queue 613 **/ 614 static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, 615 u16 vsi_queue_id, 616 struct virtchnl_rxq_info *info) 617 { 618 struct i40e_pf *pf = vf->pf; 619 struct i40e_hw *hw = &pf->hw; 620 struct i40e_hmc_obj_rxq rx_ctx; 621 u16 pf_queue_id; 622 int ret = 0; 623 624 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); 625 626 /* clear the context structure first */ 627 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); 628 629 /* only set the required fields */ 630 rx_ctx.base = info->dma_ring_addr / 128; 631 rx_ctx.qlen = info->ring_len; 632 633 if (info->splithdr_enabled) { 634 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | 635 I40E_RX_SPLIT_IP | 636 I40E_RX_SPLIT_TCP_UDP | 637 I40E_RX_SPLIT_SCTP; 638 /* header length validation */ 639 if (info->hdr_size > ((2 * 1024) - 64)) { 640 ret = -EINVAL; 641 goto error_param; 642 } 643 rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; 644 645 /* set split mode 10b */ 646 rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT; 647 } 648 649 /* databuffer length validation */ 650 if (info->databuffer_size > ((16 * 1024) - 128)) { 651 ret = -EINVAL; 652 goto error_param; 653 } 654 rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT; 655 656 /* max pkt. length validation */ 657 if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) { 658 ret = -EINVAL; 659 goto error_param; 660 } 661 rx_ctx.rxmax = info->max_pkt_size; 662 663 /* enable 32bytes desc always */ 664 rx_ctx.dsize = 1; 665 666 /* default values */ 667 rx_ctx.lrxqthresh = 1; 668 rx_ctx.crcstrip = 1; 669 rx_ctx.prefena = 1; 670 rx_ctx.l2tsel = 1; 671 672 /* clear the context in the HMC */ 673 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id); 674 if (ret) { 675 dev_err(&pf->pdev->dev, 676 "Failed to clear VF LAN Rx queue context %d, error: %d\n", 677 pf_queue_id, ret); 678 ret = -ENOENT; 679 goto error_param; 680 } 681 682 /* set the context in the HMC */ 683 ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx); 684 if (ret) { 685 dev_err(&pf->pdev->dev, 686 "Failed to set VF LAN Rx queue context %d error: %d\n", 687 pf_queue_id, ret); 688 ret = -ENOENT; 689 goto error_param; 690 } 691 692 error_param: 693 return ret; 694 } 695 696 /** 697 * i40e_alloc_vsi_res 698 * @vf: pointer to the VF info 699 * @idx: VSI index, applies only for ADq mode, zero otherwise 700 * 701 * alloc VF vsi context & resources 702 **/ 703 static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx) 704 { 705 struct i40e_mac_filter *f = NULL; 706 struct i40e_pf *pf = vf->pf; 707 struct i40e_vsi *vsi; 708 u64 max_tx_rate = 0; 709 int ret = 0; 710 711 vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid, 712 vf->vf_id); 713 714 if (!vsi) { 715 dev_err(&pf->pdev->dev, 716 "add vsi failed for VF %d, aq_err %d\n", 717 vf->vf_id, pf->hw.aq.asq_last_status); 718 ret = -ENOENT; 719 goto error_alloc_vsi_res; 720 } 721 722 if (!idx) { 723 u64 hena = i40e_pf_get_default_rss_hena(pf); 724 u8 broadcast[ETH_ALEN]; 725 726 vf->lan_vsi_idx = vsi->idx; 727 vf->lan_vsi_id = vsi->id; 728 /* If the port VLAN has been configured and then the 729 * VF driver was removed then the VSI port VLAN 730 * configuration was destroyed. Check if there is 731 * a port VLAN and restore the VSI configuration if 732 * needed. 733 */ 734 if (vf->port_vlan_id) 735 i40e_vsi_add_pvid(vsi, vf->port_vlan_id); 736 737 spin_lock_bh(&vsi->mac_filter_hash_lock); 738 if (is_valid_ether_addr(vf->default_lan_addr.addr)) { 739 f = i40e_add_mac_filter(vsi, 740 vf->default_lan_addr.addr); 741 if (!f) 742 dev_info(&pf->pdev->dev, 743 "Could not add MAC filter %pM for VF %d\n", 744 vf->default_lan_addr.addr, vf->vf_id); 745 } 746 eth_broadcast_addr(broadcast); 747 f = i40e_add_mac_filter(vsi, broadcast); 748 if (!f) 749 dev_info(&pf->pdev->dev, 750 "Could not allocate VF broadcast filter\n"); 751 spin_unlock_bh(&vsi->mac_filter_hash_lock); 752 wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena); 753 wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32)); 754 /* program mac filter only for VF VSI */ 755 ret = i40e_sync_vsi_filters(vsi); 756 if (ret) 757 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 758 } 759 760 /* storing VSI index and id for ADq and don't apply the mac filter */ 761 if (vf->adq_enabled) { 762 vf->ch[idx].vsi_idx = vsi->idx; 763 vf->ch[idx].vsi_id = vsi->id; 764 } 765 766 /* Set VF bandwidth if specified */ 767 if (vf->tx_rate) { 768 max_tx_rate = vf->tx_rate; 769 } else if (vf->ch[idx].max_tx_rate) { 770 max_tx_rate = vf->ch[idx].max_tx_rate; 771 } 772 773 if (max_tx_rate) { 774 max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR); 775 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, 776 max_tx_rate, 0, NULL); 777 if (ret) 778 dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n", 779 vf->vf_id, ret); 780 } 781 782 error_alloc_vsi_res: 783 return ret; 784 } 785 786 /** 787 * i40e_map_pf_queues_to_vsi 788 * @vf: pointer to the VF info 789 * 790 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This 791 * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI. 792 **/ 793 static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf) 794 { 795 struct i40e_pf *pf = vf->pf; 796 struct i40e_hw *hw = &pf->hw; 797 u32 reg, num_tc = 1; /* VF has at least one traffic class */ 798 u16 vsi_id, qps; 799 int i, j; 800 801 if (vf->adq_enabled) 802 num_tc = vf->num_tc; 803 804 for (i = 0; i < num_tc; i++) { 805 if (vf->adq_enabled) { 806 qps = vf->ch[i].num_qps; 807 vsi_id = vf->ch[i].vsi_id; 808 } else { 809 qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; 810 vsi_id = vf->lan_vsi_id; 811 } 812 813 for (j = 0; j < 7; j++) { 814 if (j * 2 >= qps) { 815 /* end of list */ 816 reg = 0x07FF07FF; 817 } else { 818 u16 qid = i40e_vc_get_pf_queue_id(vf, 819 vsi_id, 820 j * 2); 821 reg = qid; 822 qid = i40e_vc_get_pf_queue_id(vf, vsi_id, 823 (j * 2) + 1); 824 reg |= qid << 16; 825 } 826 i40e_write_rx_ctl(hw, 827 I40E_VSILAN_QTABLE(j, vsi_id), 828 reg); 829 } 830 } 831 } 832 833 /** 834 * i40e_map_pf_to_vf_queues 835 * @vf: pointer to the VF info 836 * 837 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This 838 * function takes care of the second part VPLAN_QTABLE & completes VF mappings. 839 **/ 840 static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf) 841 { 842 struct i40e_pf *pf = vf->pf; 843 struct i40e_hw *hw = &pf->hw; 844 u32 reg, total_qps = 0; 845 u32 qps, num_tc = 1; /* VF has at least one traffic class */ 846 u16 vsi_id, qid; 847 int i, j; 848 849 if (vf->adq_enabled) 850 num_tc = vf->num_tc; 851 852 for (i = 0; i < num_tc; i++) { 853 if (vf->adq_enabled) { 854 qps = vf->ch[i].num_qps; 855 vsi_id = vf->ch[i].vsi_id; 856 } else { 857 qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; 858 vsi_id = vf->lan_vsi_id; 859 } 860 861 for (j = 0; j < qps; j++) { 862 qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j); 863 864 reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); 865 wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id), 866 reg); 867 total_qps++; 868 } 869 } 870 } 871 872 /** 873 * i40e_enable_vf_mappings 874 * @vf: pointer to the VF info 875 * 876 * enable VF mappings 877 **/ 878 static void i40e_enable_vf_mappings(struct i40e_vf *vf) 879 { 880 struct i40e_pf *pf = vf->pf; 881 struct i40e_hw *hw = &pf->hw; 882 u32 reg; 883 884 /* Tell the hardware we're using noncontiguous mapping. HW requires 885 * that VF queues be mapped using this method, even when they are 886 * contiguous in real life 887 */ 888 i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id), 889 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); 890 891 /* enable VF vplan_qtable mappings */ 892 reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK; 893 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg); 894 895 i40e_map_pf_to_vf_queues(vf); 896 i40e_map_pf_queues_to_vsi(vf); 897 898 i40e_flush(hw); 899 } 900 901 /** 902 * i40e_disable_vf_mappings 903 * @vf: pointer to the VF info 904 * 905 * disable VF mappings 906 **/ 907 static void i40e_disable_vf_mappings(struct i40e_vf *vf) 908 { 909 struct i40e_pf *pf = vf->pf; 910 struct i40e_hw *hw = &pf->hw; 911 int i; 912 913 /* disable qp mappings */ 914 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0); 915 for (i = 0; i < I40E_MAX_VSI_QP; i++) 916 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id), 917 I40E_QUEUE_END_OF_LIST); 918 i40e_flush(hw); 919 } 920 921 /** 922 * i40e_free_vf_res 923 * @vf: pointer to the VF info 924 * 925 * free VF resources 926 **/ 927 static void i40e_free_vf_res(struct i40e_vf *vf) 928 { 929 struct i40e_pf *pf = vf->pf; 930 struct i40e_hw *hw = &pf->hw; 931 u32 reg_idx, reg; 932 int i, j, msix_vf; 933 934 /* Start by disabling VF's configuration API to prevent the OS from 935 * accessing the VF's VSI after it's freed / invalidated. 936 */ 937 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states); 938 939 /* It's possible the VF had requeuested more queues than the default so 940 * do the accounting here when we're about to free them. 941 */ 942 if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) { 943 pf->queues_left += vf->num_queue_pairs - 944 I40E_DEFAULT_QUEUES_PER_VF; 945 } 946 947 /* free vsi & disconnect it from the parent uplink */ 948 if (vf->lan_vsi_idx) { 949 i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]); 950 vf->lan_vsi_idx = 0; 951 vf->lan_vsi_id = 0; 952 } 953 954 /* do the accounting and remove additional ADq VSI's */ 955 if (vf->adq_enabled && vf->ch[0].vsi_idx) { 956 for (j = 0; j < vf->num_tc; j++) { 957 /* At this point VSI0 is already released so don't 958 * release it again and only clear their values in 959 * structure variables 960 */ 961 if (j) 962 i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]); 963 vf->ch[j].vsi_idx = 0; 964 vf->ch[j].vsi_id = 0; 965 } 966 } 967 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 968 969 /* disable interrupts so the VF starts in a known state */ 970 for (i = 0; i < msix_vf; i++) { 971 /* format is same for both registers */ 972 if (0 == i) 973 reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id); 974 else 975 reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) * 976 (vf->vf_id)) 977 + (i - 1)); 978 wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); 979 i40e_flush(hw); 980 } 981 982 /* clear the irq settings */ 983 for (i = 0; i < msix_vf; i++) { 984 /* format is same for both registers */ 985 if (0 == i) 986 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); 987 else 988 reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) * 989 (vf->vf_id)) 990 + (i - 1)); 991 reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK | 992 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); 993 wr32(hw, reg_idx, reg); 994 i40e_flush(hw); 995 } 996 /* reset some of the state variables keeping track of the resources */ 997 vf->num_queue_pairs = 0; 998 clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); 999 clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); 1000 } 1001 1002 /** 1003 * i40e_alloc_vf_res 1004 * @vf: pointer to the VF info 1005 * 1006 * allocate VF resources 1007 **/ 1008 static int i40e_alloc_vf_res(struct i40e_vf *vf) 1009 { 1010 struct i40e_pf *pf = vf->pf; 1011 int total_queue_pairs = 0; 1012 int ret, idx; 1013 1014 if (vf->num_req_queues && 1015 vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF) 1016 pf->num_vf_qps = vf->num_req_queues; 1017 else 1018 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; 1019 1020 /* allocate hw vsi context & associated resources */ 1021 ret = i40e_alloc_vsi_res(vf, 0); 1022 if (ret) 1023 goto error_alloc; 1024 total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; 1025 1026 /* allocate additional VSIs based on tc information for ADq */ 1027 if (vf->adq_enabled) { 1028 if (pf->queues_left >= 1029 (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) { 1030 /* TC 0 always belongs to VF VSI */ 1031 for (idx = 1; idx < vf->num_tc; idx++) { 1032 ret = i40e_alloc_vsi_res(vf, idx); 1033 if (ret) 1034 goto error_alloc; 1035 } 1036 /* send correct number of queues */ 1037 total_queue_pairs = I40E_MAX_VF_QUEUES; 1038 } else { 1039 dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n", 1040 vf->vf_id); 1041 vf->adq_enabled = false; 1042 } 1043 } 1044 1045 /* We account for each VF to get a default number of queue pairs. If 1046 * the VF has now requested more, we need to account for that to make 1047 * certain we never request more queues than we actually have left in 1048 * HW. 1049 */ 1050 if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) 1051 pf->queues_left -= 1052 total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF; 1053 1054 if (vf->trusted) 1055 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 1056 else 1057 clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 1058 1059 /* store the total qps number for the runtime 1060 * VF req validation 1061 */ 1062 vf->num_queue_pairs = total_queue_pairs; 1063 1064 /* VF is now completely initialized */ 1065 set_bit(I40E_VF_STATE_INIT, &vf->vf_states); 1066 1067 error_alloc: 1068 if (ret) 1069 i40e_free_vf_res(vf); 1070 1071 return ret; 1072 } 1073 1074 #define VF_DEVICE_STATUS 0xAA 1075 #define VF_TRANS_PENDING_MASK 0x20 1076 /** 1077 * i40e_quiesce_vf_pci 1078 * @vf: pointer to the VF structure 1079 * 1080 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO 1081 * if the transactions never clear. 1082 **/ 1083 static int i40e_quiesce_vf_pci(struct i40e_vf *vf) 1084 { 1085 struct i40e_pf *pf = vf->pf; 1086 struct i40e_hw *hw = &pf->hw; 1087 int vf_abs_id, i; 1088 u32 reg; 1089 1090 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; 1091 1092 wr32(hw, I40E_PF_PCI_CIAA, 1093 VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); 1094 for (i = 0; i < 100; i++) { 1095 reg = rd32(hw, I40E_PF_PCI_CIAD); 1096 if ((reg & VF_TRANS_PENDING_MASK) == 0) 1097 return 0; 1098 udelay(1); 1099 } 1100 return -EIO; 1101 } 1102 1103 /** 1104 * i40e_getnum_vf_vsi_vlan_filters 1105 * @vsi: pointer to the vsi 1106 * 1107 * called to get the number of VLANs offloaded on this VF 1108 **/ 1109 static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) 1110 { 1111 struct i40e_mac_filter *f; 1112 u16 num_vlans = 0, bkt; 1113 1114 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { 1115 if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) 1116 num_vlans++; 1117 } 1118 1119 return num_vlans; 1120 } 1121 1122 /** 1123 * i40e_get_vlan_list_sync 1124 * @vsi: pointer to the VSI 1125 * @num_vlans: number of VLANs in mac_filter_hash, returned to caller 1126 * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller. 1127 * This array is allocated here, but has to be freed in caller. 1128 * 1129 * Called to get number of VLANs and VLAN list present in mac_filter_hash. 1130 **/ 1131 static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans, 1132 s16 **vlan_list) 1133 { 1134 struct i40e_mac_filter *f; 1135 int i = 0; 1136 int bkt; 1137 1138 spin_lock_bh(&vsi->mac_filter_hash_lock); 1139 *num_vlans = i40e_getnum_vf_vsi_vlan_filters(vsi); 1140 *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC); 1141 if (!(*vlan_list)) 1142 goto err; 1143 1144 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { 1145 if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) 1146 continue; 1147 (*vlan_list)[i++] = f->vlan; 1148 } 1149 err: 1150 spin_unlock_bh(&vsi->mac_filter_hash_lock); 1151 } 1152 1153 /** 1154 * i40e_set_vsi_promisc 1155 * @vf: pointer to the VF struct 1156 * @seid: VSI number 1157 * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable 1158 * for a given VLAN 1159 * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable 1160 * for a given VLAN 1161 * @vl: List of VLANs - apply filter for given VLANs 1162 * @num_vlans: Number of elements in @vl 1163 **/ 1164 static i40e_status 1165 i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable, 1166 bool unicast_enable, s16 *vl, u16 num_vlans) 1167 { 1168 i40e_status aq_ret, aq_tmp = 0; 1169 struct i40e_pf *pf = vf->pf; 1170 struct i40e_hw *hw = &pf->hw; 1171 int i; 1172 1173 /* No VLAN to set promisc on, set on VSI */ 1174 if (!num_vlans || !vl) { 1175 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid, 1176 multi_enable, 1177 NULL); 1178 if (aq_ret) { 1179 int aq_err = pf->hw.aq.asq_last_status; 1180 1181 dev_err(&pf->pdev->dev, 1182 "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", 1183 vf->vf_id, 1184 i40e_stat_str(&pf->hw, aq_ret), 1185 i40e_aq_str(&pf->hw, aq_err)); 1186 1187 return aq_ret; 1188 } 1189 1190 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid, 1191 unicast_enable, 1192 NULL, true); 1193 1194 if (aq_ret) { 1195 int aq_err = pf->hw.aq.asq_last_status; 1196 1197 dev_err(&pf->pdev->dev, 1198 "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n", 1199 vf->vf_id, 1200 i40e_stat_str(&pf->hw, aq_ret), 1201 i40e_aq_str(&pf->hw, aq_err)); 1202 } 1203 1204 return aq_ret; 1205 } 1206 1207 for (i = 0; i < num_vlans; i++) { 1208 aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid, 1209 multi_enable, 1210 vl[i], NULL); 1211 if (aq_ret) { 1212 int aq_err = pf->hw.aq.asq_last_status; 1213 1214 dev_err(&pf->pdev->dev, 1215 "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", 1216 vf->vf_id, 1217 i40e_stat_str(&pf->hw, aq_ret), 1218 i40e_aq_str(&pf->hw, aq_err)); 1219 1220 if (!aq_tmp) 1221 aq_tmp = aq_ret; 1222 } 1223 1224 aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid, 1225 unicast_enable, 1226 vl[i], NULL); 1227 if (aq_ret) { 1228 int aq_err = pf->hw.aq.asq_last_status; 1229 1230 dev_err(&pf->pdev->dev, 1231 "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n", 1232 vf->vf_id, 1233 i40e_stat_str(&pf->hw, aq_ret), 1234 i40e_aq_str(&pf->hw, aq_err)); 1235 1236 if (!aq_tmp) 1237 aq_tmp = aq_ret; 1238 } 1239 } 1240 1241 if (aq_tmp) 1242 aq_ret = aq_tmp; 1243 1244 return aq_ret; 1245 } 1246 1247 /** 1248 * i40e_config_vf_promiscuous_mode 1249 * @vf: pointer to the VF info 1250 * @vsi_id: VSI id 1251 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable 1252 * @alluni: set MAC L2 layer unicast promiscuous enable/disable 1253 * 1254 * Called from the VF to configure the promiscuous mode of 1255 * VF vsis and from the VF reset path to reset promiscuous mode. 1256 **/ 1257 static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, 1258 u16 vsi_id, 1259 bool allmulti, 1260 bool alluni) 1261 { 1262 i40e_status aq_ret = I40E_SUCCESS; 1263 struct i40e_pf *pf = vf->pf; 1264 struct i40e_vsi *vsi; 1265 u16 num_vlans; 1266 s16 *vl; 1267 1268 vsi = i40e_find_vsi_from_id(pf, vsi_id); 1269 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi) 1270 return I40E_ERR_PARAM; 1271 1272 if (vf->port_vlan_id) { 1273 aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, 1274 alluni, &vf->port_vlan_id, 1); 1275 return aq_ret; 1276 } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { 1277 i40e_get_vlan_list_sync(vsi, &num_vlans, &vl); 1278 1279 if (!vl) 1280 return I40E_ERR_NO_MEMORY; 1281 1282 aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni, 1283 vl, num_vlans); 1284 kfree(vl); 1285 return aq_ret; 1286 } 1287 1288 /* no VLANs to set on, set on VSI */ 1289 aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni, 1290 NULL, 0); 1291 return aq_ret; 1292 } 1293 1294 /** 1295 * i40e_trigger_vf_reset 1296 * @vf: pointer to the VF structure 1297 * @flr: VFLR was issued or not 1298 * 1299 * Trigger hardware to start a reset for a particular VF. Expects the caller 1300 * to wait the proper amount of time to allow hardware to reset the VF before 1301 * it cleans up and restores VF functionality. 1302 **/ 1303 static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr) 1304 { 1305 struct i40e_pf *pf = vf->pf; 1306 struct i40e_hw *hw = &pf->hw; 1307 u32 reg, reg_idx, bit_idx; 1308 1309 /* warn the VF */ 1310 clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 1311 1312 /* Disable VF's configuration API during reset. The flag is re-enabled 1313 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI. 1314 * It's normally disabled in i40e_free_vf_res(), but it's safer 1315 * to do it earlier to give some time to finish to any VF config 1316 * functions that may still be running at this point. 1317 */ 1318 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states); 1319 1320 /* In the case of a VFLR, the HW has already reset the VF and we 1321 * just need to clean up, so don't hit the VFRTRIG register. 1322 */ 1323 if (!flr) { 1324 /* reset VF using VPGEN_VFRTRIG reg */ 1325 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); 1326 reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; 1327 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 1328 i40e_flush(hw); 1329 } 1330 /* clear the VFLR bit in GLGEN_VFLRSTAT */ 1331 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; 1332 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; 1333 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 1334 i40e_flush(hw); 1335 1336 if (i40e_quiesce_vf_pci(vf)) 1337 dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", 1338 vf->vf_id); 1339 } 1340 1341 /** 1342 * i40e_cleanup_reset_vf 1343 * @vf: pointer to the VF structure 1344 * 1345 * Cleanup a VF after the hardware reset is finished. Expects the caller to 1346 * have verified whether the reset is finished properly, and ensure the 1347 * minimum amount of wait time has passed. 1348 **/ 1349 static void i40e_cleanup_reset_vf(struct i40e_vf *vf) 1350 { 1351 struct i40e_pf *pf = vf->pf; 1352 struct i40e_hw *hw = &pf->hw; 1353 u32 reg; 1354 1355 /* disable promisc modes in case they were enabled */ 1356 i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false); 1357 1358 /* free VF resources to begin resetting the VSI state */ 1359 i40e_free_vf_res(vf); 1360 1361 /* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg. 1362 * By doing this we allow HW to access VF memory at any point. If we 1363 * did it any sooner, HW could access memory while it was being freed 1364 * in i40e_free_vf_res(), causing an IOMMU fault. 1365 * 1366 * On the other hand, this needs to be done ASAP, because the VF driver 1367 * is waiting for this to happen and may report a timeout. It's 1368 * harmless, but it gets logged into Guest OS kernel log, so best avoid 1369 * it. 1370 */ 1371 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); 1372 reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; 1373 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 1374 1375 /* reallocate VF resources to finish resetting the VSI state */ 1376 if (!i40e_alloc_vf_res(vf)) { 1377 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 1378 i40e_enable_vf_mappings(vf); 1379 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 1380 clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); 1381 /* Do not notify the client during VF init */ 1382 if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE, 1383 &vf->vf_states)) 1384 i40e_notify_client_of_vf_reset(pf, abs_vf_id); 1385 vf->num_vlan = 0; 1386 } 1387 1388 /* Tell the VF driver the reset is done. This needs to be done only 1389 * after VF has been fully initialized, because the VF driver may 1390 * request resources immediately after setting this flag. 1391 */ 1392 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); 1393 } 1394 1395 /** 1396 * i40e_reset_vf 1397 * @vf: pointer to the VF structure 1398 * @flr: VFLR was issued or not 1399 * 1400 * Returns true if the VF is in reset, resets successfully, or resets 1401 * are disabled and false otherwise. 1402 **/ 1403 bool i40e_reset_vf(struct i40e_vf *vf, bool flr) 1404 { 1405 struct i40e_pf *pf = vf->pf; 1406 struct i40e_hw *hw = &pf->hw; 1407 bool rsd = false; 1408 u32 reg; 1409 int i; 1410 1411 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) 1412 return true; 1413 1414 /* If the VFs have been disabled, this means something else is 1415 * resetting the VF, so we shouldn't continue. 1416 */ 1417 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) 1418 return true; 1419 1420 i40e_trigger_vf_reset(vf, flr); 1421 1422 /* poll VPGEN_VFRSTAT reg to make sure 1423 * that reset is complete 1424 */ 1425 for (i = 0; i < 10; i++) { 1426 /* VF reset requires driver to first reset the VF and then 1427 * poll the status register to make sure that the reset 1428 * completed successfully. Due to internal HW FIFO flushes, 1429 * we must wait 10ms before the register will be valid. 1430 */ 1431 usleep_range(10000, 20000); 1432 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); 1433 if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { 1434 rsd = true; 1435 break; 1436 } 1437 } 1438 1439 if (flr) 1440 usleep_range(10000, 20000); 1441 1442 if (!rsd) 1443 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", 1444 vf->vf_id); 1445 usleep_range(10000, 20000); 1446 1447 /* On initial reset, we don't have any queues to disable */ 1448 if (vf->lan_vsi_idx != 0) 1449 i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]); 1450 1451 i40e_cleanup_reset_vf(vf); 1452 1453 i40e_flush(hw); 1454 clear_bit(__I40E_VF_DISABLE, pf->state); 1455 1456 return true; 1457 } 1458 1459 /** 1460 * i40e_reset_all_vfs 1461 * @pf: pointer to the PF structure 1462 * @flr: VFLR was issued or not 1463 * 1464 * Reset all allocated VFs in one go. First, tell the hardware to reset each 1465 * VF, then do all the waiting in one chunk, and finally finish restoring each 1466 * VF after the wait. This is useful during PF routines which need to reset 1467 * all VFs, as otherwise it must perform these resets in a serialized fashion. 1468 * 1469 * Returns true if any VFs were reset, and false otherwise. 1470 **/ 1471 bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) 1472 { 1473 struct i40e_hw *hw = &pf->hw; 1474 struct i40e_vf *vf; 1475 int i, v; 1476 u32 reg; 1477 1478 /* If we don't have any VFs, then there is nothing to reset */ 1479 if (!pf->num_alloc_vfs) 1480 return false; 1481 1482 /* If VFs have been disabled, there is no need to reset */ 1483 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) 1484 return false; 1485 1486 /* Begin reset on all VFs at once */ 1487 for (v = 0; v < pf->num_alloc_vfs; v++) 1488 i40e_trigger_vf_reset(&pf->vf[v], flr); 1489 1490 /* HW requires some time to make sure it can flush the FIFO for a VF 1491 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in 1492 * sequence to make sure that it has completed. We'll keep track of 1493 * the VFs using a simple iterator that increments once that VF has 1494 * finished resetting. 1495 */ 1496 for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) { 1497 usleep_range(10000, 20000); 1498 1499 /* Check each VF in sequence, beginning with the VF to fail 1500 * the previous check. 1501 */ 1502 while (v < pf->num_alloc_vfs) { 1503 vf = &pf->vf[v]; 1504 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); 1505 if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK)) 1506 break; 1507 1508 /* If the current VF has finished resetting, move on 1509 * to the next VF in sequence. 1510 */ 1511 v++; 1512 } 1513 } 1514 1515 if (flr) 1516 usleep_range(10000, 20000); 1517 1518 /* Display a warning if at least one VF didn't manage to reset in 1519 * time, but continue on with the operation. 1520 */ 1521 if (v < pf->num_alloc_vfs) 1522 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", 1523 pf->vf[v].vf_id); 1524 usleep_range(10000, 20000); 1525 1526 /* Begin disabling all the rings associated with VFs, but do not wait 1527 * between each VF. 1528 */ 1529 for (v = 0; v < pf->num_alloc_vfs; v++) { 1530 /* On initial reset, we don't have any queues to disable */ 1531 if (pf->vf[v].lan_vsi_idx == 0) 1532 continue; 1533 1534 i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]); 1535 } 1536 1537 /* Now that we've notified HW to disable all of the VF rings, wait 1538 * until they finish. 1539 */ 1540 for (v = 0; v < pf->num_alloc_vfs; v++) { 1541 /* On initial reset, we don't have any queues to disable */ 1542 if (pf->vf[v].lan_vsi_idx == 0) 1543 continue; 1544 1545 i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]); 1546 } 1547 1548 /* Hw may need up to 50ms to finish disabling the RX queues. We 1549 * minimize the wait by delaying only once for all VFs. 1550 */ 1551 mdelay(50); 1552 1553 /* Finish the reset on each VF */ 1554 for (v = 0; v < pf->num_alloc_vfs; v++) 1555 i40e_cleanup_reset_vf(&pf->vf[v]); 1556 1557 i40e_flush(hw); 1558 clear_bit(__I40E_VF_DISABLE, pf->state); 1559 1560 return true; 1561 } 1562 1563 /** 1564 * i40e_free_vfs 1565 * @pf: pointer to the PF structure 1566 * 1567 * free VF resources 1568 **/ 1569 void i40e_free_vfs(struct i40e_pf *pf) 1570 { 1571 struct i40e_hw *hw = &pf->hw; 1572 u32 reg_idx, bit_idx; 1573 int i, tmp, vf_id; 1574 1575 if (!pf->vf) 1576 return; 1577 while (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) 1578 usleep_range(1000, 2000); 1579 1580 i40e_notify_client_of_vf_enable(pf, 0); 1581 1582 /* Disable IOV before freeing resources. This lets any VF drivers 1583 * running in the host get themselves cleaned up before we yank 1584 * the carpet out from underneath their feet. 1585 */ 1586 if (!pci_vfs_assigned(pf->pdev)) 1587 pci_disable_sriov(pf->pdev); 1588 else 1589 dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); 1590 1591 /* Amortize wait time by stopping all VFs at the same time */ 1592 for (i = 0; i < pf->num_alloc_vfs; i++) { 1593 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) 1594 continue; 1595 1596 i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]); 1597 } 1598 1599 for (i = 0; i < pf->num_alloc_vfs; i++) { 1600 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) 1601 continue; 1602 1603 i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]); 1604 } 1605 1606 /* free up VF resources */ 1607 tmp = pf->num_alloc_vfs; 1608 pf->num_alloc_vfs = 0; 1609 for (i = 0; i < tmp; i++) { 1610 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) 1611 i40e_free_vf_res(&pf->vf[i]); 1612 /* disable qp mappings */ 1613 i40e_disable_vf_mappings(&pf->vf[i]); 1614 } 1615 1616 kfree(pf->vf); 1617 pf->vf = NULL; 1618 1619 /* This check is for when the driver is unloaded while VFs are 1620 * assigned. Setting the number of VFs to 0 through sysfs is caught 1621 * before this function ever gets called. 1622 */ 1623 if (!pci_vfs_assigned(pf->pdev)) { 1624 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to 1625 * work correctly when SR-IOV gets re-enabled. 1626 */ 1627 for (vf_id = 0; vf_id < tmp; vf_id++) { 1628 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; 1629 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 1630 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 1631 } 1632 } 1633 clear_bit(__I40E_VF_DISABLE, pf->state); 1634 } 1635 1636 #ifdef CONFIG_PCI_IOV 1637 /** 1638 * i40e_alloc_vfs 1639 * @pf: pointer to the PF structure 1640 * @num_alloc_vfs: number of VFs to allocate 1641 * 1642 * allocate VF resources 1643 **/ 1644 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) 1645 { 1646 struct i40e_vf *vfs; 1647 int i, ret = 0; 1648 1649 /* Disable interrupt 0 so we don't try to handle the VFLR. */ 1650 i40e_irq_dynamic_disable_icr0(pf); 1651 1652 /* Check to see if we're just allocating resources for extant VFs */ 1653 if (pci_num_vf(pf->pdev) != num_alloc_vfs) { 1654 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); 1655 if (ret) { 1656 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; 1657 pf->num_alloc_vfs = 0; 1658 goto err_iov; 1659 } 1660 } 1661 /* allocate memory */ 1662 vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL); 1663 if (!vfs) { 1664 ret = -ENOMEM; 1665 goto err_alloc; 1666 } 1667 pf->vf = vfs; 1668 1669 /* apply default profile */ 1670 for (i = 0; i < num_alloc_vfs; i++) { 1671 vfs[i].pf = pf; 1672 vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB; 1673 vfs[i].vf_id = i; 1674 1675 /* assign default capabilities */ 1676 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); 1677 vfs[i].spoofchk = true; 1678 1679 set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states); 1680 1681 } 1682 pf->num_alloc_vfs = num_alloc_vfs; 1683 1684 /* VF resources get allocated during reset */ 1685 i40e_reset_all_vfs(pf, false); 1686 1687 i40e_notify_client_of_vf_enable(pf, num_alloc_vfs); 1688 1689 err_alloc: 1690 if (ret) 1691 i40e_free_vfs(pf); 1692 err_iov: 1693 /* Re-enable interrupt 0. */ 1694 i40e_irq_dynamic_enable_icr0(pf); 1695 return ret; 1696 } 1697 1698 #endif 1699 /** 1700 * i40e_pci_sriov_enable 1701 * @pdev: pointer to a pci_dev structure 1702 * @num_vfs: number of VFs to allocate 1703 * 1704 * Enable or change the number of VFs 1705 **/ 1706 static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) 1707 { 1708 #ifdef CONFIG_PCI_IOV 1709 struct i40e_pf *pf = pci_get_drvdata(pdev); 1710 int pre_existing_vfs = pci_num_vf(pdev); 1711 int err = 0; 1712 1713 if (test_bit(__I40E_TESTING, pf->state)) { 1714 dev_warn(&pdev->dev, 1715 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n"); 1716 err = -EPERM; 1717 goto err_out; 1718 } 1719 1720 if (pre_existing_vfs && pre_existing_vfs != num_vfs) 1721 i40e_free_vfs(pf); 1722 else if (pre_existing_vfs && pre_existing_vfs == num_vfs) 1723 goto out; 1724 1725 if (num_vfs > pf->num_req_vfs) { 1726 dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n", 1727 num_vfs, pf->num_req_vfs); 1728 err = -EPERM; 1729 goto err_out; 1730 } 1731 1732 dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs); 1733 err = i40e_alloc_vfs(pf, num_vfs); 1734 if (err) { 1735 dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err); 1736 goto err_out; 1737 } 1738 1739 out: 1740 return num_vfs; 1741 1742 err_out: 1743 return err; 1744 #endif 1745 return 0; 1746 } 1747 1748 /** 1749 * i40e_pci_sriov_configure 1750 * @pdev: pointer to a pci_dev structure 1751 * @num_vfs: number of VFs to allocate 1752 * 1753 * Enable or change the number of VFs. Called when the user updates the number 1754 * of VFs in sysfs. 1755 **/ 1756 int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 1757 { 1758 struct i40e_pf *pf = pci_get_drvdata(pdev); 1759 int ret = 0; 1760 1761 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 1762 dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 1763 return -EAGAIN; 1764 } 1765 1766 if (num_vfs) { 1767 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { 1768 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; 1769 i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG); 1770 } 1771 ret = i40e_pci_sriov_enable(pdev, num_vfs); 1772 goto sriov_configure_out; 1773 } 1774 1775 if (!pci_vfs_assigned(pf->pdev)) { 1776 i40e_free_vfs(pf); 1777 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; 1778 i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG); 1779 } else { 1780 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); 1781 ret = -EINVAL; 1782 goto sriov_configure_out; 1783 } 1784 sriov_configure_out: 1785 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 1786 return ret; 1787 } 1788 1789 /***********************virtual channel routines******************/ 1790 1791 /** 1792 * i40e_vc_send_msg_to_vf 1793 * @vf: pointer to the VF info 1794 * @v_opcode: virtual channel opcode 1795 * @v_retval: virtual channel return value 1796 * @msg: pointer to the msg buffer 1797 * @msglen: msg length 1798 * 1799 * send msg to VF 1800 **/ 1801 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, 1802 u32 v_retval, u8 *msg, u16 msglen) 1803 { 1804 struct i40e_pf *pf; 1805 struct i40e_hw *hw; 1806 int abs_vf_id; 1807 i40e_status aq_ret; 1808 1809 /* validate the request */ 1810 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) 1811 return -EINVAL; 1812 1813 pf = vf->pf; 1814 hw = &pf->hw; 1815 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 1816 1817 /* single place to detect unsuccessful return values */ 1818 if (v_retval) { 1819 vf->num_invalid_msgs++; 1820 dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n", 1821 vf->vf_id, v_opcode, v_retval); 1822 if (vf->num_invalid_msgs > 1823 I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { 1824 dev_err(&pf->pdev->dev, 1825 "Number of invalid messages exceeded for VF %d\n", 1826 vf->vf_id); 1827 dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); 1828 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); 1829 } 1830 } else { 1831 vf->num_valid_msgs++; 1832 /* reset the invalid counter, if a valid message is received. */ 1833 vf->num_invalid_msgs = 0; 1834 } 1835 1836 aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, 1837 msg, msglen, NULL); 1838 if (aq_ret) { 1839 dev_info(&pf->pdev->dev, 1840 "Unable to send the message to VF %d aq_err %d\n", 1841 vf->vf_id, pf->hw.aq.asq_last_status); 1842 return -EIO; 1843 } 1844 1845 return 0; 1846 } 1847 1848 /** 1849 * i40e_vc_send_resp_to_vf 1850 * @vf: pointer to the VF info 1851 * @opcode: operation code 1852 * @retval: return value 1853 * 1854 * send resp msg to VF 1855 **/ 1856 static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, 1857 enum virtchnl_ops opcode, 1858 i40e_status retval) 1859 { 1860 return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0); 1861 } 1862 1863 /** 1864 * i40e_vc_get_version_msg 1865 * @vf: pointer to the VF info 1866 * @msg: pointer to the msg buffer 1867 * 1868 * called from the VF to request the API version used by the PF 1869 **/ 1870 static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg) 1871 { 1872 struct virtchnl_version_info info = { 1873 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR 1874 }; 1875 1876 vf->vf_ver = *(struct virtchnl_version_info *)msg; 1877 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */ 1878 if (VF_IS_V10(&vf->vf_ver)) 1879 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; 1880 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, 1881 I40E_SUCCESS, (u8 *)&info, 1882 sizeof(struct virtchnl_version_info)); 1883 } 1884 1885 /** 1886 * i40e_del_qch - delete all the additional VSIs created as a part of ADq 1887 * @vf: pointer to VF structure 1888 **/ 1889 static void i40e_del_qch(struct i40e_vf *vf) 1890 { 1891 struct i40e_pf *pf = vf->pf; 1892 int i; 1893 1894 /* first element in the array belongs to primary VF VSI and we shouldn't 1895 * delete it. We should however delete the rest of the VSIs created 1896 */ 1897 for (i = 1; i < vf->num_tc; i++) { 1898 if (vf->ch[i].vsi_idx) { 1899 i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]); 1900 vf->ch[i].vsi_idx = 0; 1901 vf->ch[i].vsi_id = 0; 1902 } 1903 } 1904 } 1905 1906 /** 1907 * i40e_vc_get_vf_resources_msg 1908 * @vf: pointer to the VF info 1909 * @msg: pointer to the msg buffer 1910 * 1911 * called from the VF to request its resources 1912 **/ 1913 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) 1914 { 1915 struct virtchnl_vf_resource *vfres = NULL; 1916 struct i40e_pf *pf = vf->pf; 1917 i40e_status aq_ret = 0; 1918 struct i40e_vsi *vsi; 1919 int num_vsis = 1; 1920 size_t len = 0; 1921 int ret; 1922 1923 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 1924 aq_ret = I40E_ERR_PARAM; 1925 goto err; 1926 } 1927 1928 len = struct_size(vfres, vsi_res, num_vsis); 1929 vfres = kzalloc(len, GFP_KERNEL); 1930 if (!vfres) { 1931 aq_ret = I40E_ERR_NO_MEMORY; 1932 len = 0; 1933 goto err; 1934 } 1935 if (VF_IS_V11(&vf->vf_ver)) 1936 vf->driver_caps = *(u32 *)msg; 1937 else 1938 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 | 1939 VIRTCHNL_VF_OFFLOAD_RSS_REG | 1940 VIRTCHNL_VF_OFFLOAD_VLAN; 1941 1942 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; 1943 vsi = pf->vsi[vf->lan_vsi_idx]; 1944 if (!vsi->info.pvid) 1945 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; 1946 1947 if (i40e_vf_client_capable(pf, vf->vf_id) && 1948 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) { 1949 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP; 1950 set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); 1951 } else { 1952 clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); 1953 } 1954 1955 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 1956 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; 1957 } else { 1958 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && 1959 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)) 1960 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; 1961 else 1962 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; 1963 } 1964 1965 if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { 1966 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) 1967 vfres->vf_cap_flags |= 1968 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; 1969 } 1970 1971 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP) 1972 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; 1973 1974 if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) && 1975 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) 1976 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; 1977 1978 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) { 1979 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 1980 dev_err(&pf->pdev->dev, 1981 "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", 1982 vf->vf_id); 1983 aq_ret = I40E_ERR_PARAM; 1984 goto err; 1985 } 1986 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; 1987 } 1988 1989 if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) { 1990 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 1991 vfres->vf_cap_flags |= 1992 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; 1993 } 1994 1995 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES) 1996 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; 1997 1998 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ) 1999 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ; 2000 2001 vfres->num_vsis = num_vsis; 2002 vfres->num_queue_pairs = vf->num_queue_pairs; 2003 vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; 2004 vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; 2005 vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; 2006 2007 if (vf->lan_vsi_idx) { 2008 vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; 2009 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; 2010 vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs; 2011 /* VFs only use TC 0 */ 2012 vfres->vsi_res[0].qset_handle 2013 = le16_to_cpu(vsi->info.qs_handle[0]); 2014 ether_addr_copy(vfres->vsi_res[0].default_mac_addr, 2015 vf->default_lan_addr.addr); 2016 } 2017 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 2018 2019 err: 2020 /* send the response back to the VF */ 2021 ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, 2022 aq_ret, (u8 *)vfres, len); 2023 2024 kfree(vfres); 2025 return ret; 2026 } 2027 2028 /** 2029 * i40e_vc_reset_vf_msg 2030 * @vf: pointer to the VF info 2031 * 2032 * called from the VF to reset itself, 2033 * unlike other virtchnl messages, PF driver 2034 * doesn't send the response back to the VF 2035 **/ 2036 static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) 2037 { 2038 if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 2039 i40e_reset_vf(vf, false); 2040 } 2041 2042 /** 2043 * i40e_vc_config_promiscuous_mode_msg 2044 * @vf: pointer to the VF info 2045 * @msg: pointer to the msg buffer 2046 * 2047 * called from the VF to configure the promiscuous mode of 2048 * VF vsis 2049 **/ 2050 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) 2051 { 2052 struct virtchnl_promisc_info *info = 2053 (struct virtchnl_promisc_info *)msg; 2054 struct i40e_pf *pf = vf->pf; 2055 i40e_status aq_ret = 0; 2056 bool allmulti = false; 2057 bool alluni = false; 2058 2059 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2060 aq_ret = I40E_ERR_PARAM; 2061 goto err_out; 2062 } 2063 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 2064 dev_err(&pf->pdev->dev, 2065 "Unprivileged VF %d is attempting to configure promiscuous mode\n", 2066 vf->vf_id); 2067 2068 /* Lie to the VF on purpose, because this is an error we can 2069 * ignore. Unprivileged VF is not a virtual channel error. 2070 */ 2071 aq_ret = 0; 2072 goto err_out; 2073 } 2074 2075 if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) { 2076 aq_ret = I40E_ERR_PARAM; 2077 goto err_out; 2078 } 2079 2080 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) { 2081 aq_ret = I40E_ERR_PARAM; 2082 goto err_out; 2083 } 2084 2085 /* Multicast promiscuous handling*/ 2086 if (info->flags & FLAG_VF_MULTICAST_PROMISC) 2087 allmulti = true; 2088 2089 if (info->flags & FLAG_VF_UNICAST_PROMISC) 2090 alluni = true; 2091 aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti, 2092 alluni); 2093 if (aq_ret) 2094 goto err_out; 2095 2096 if (allmulti) { 2097 if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC, 2098 &vf->vf_states)) 2099 dev_info(&pf->pdev->dev, 2100 "VF %d successfully set multicast promiscuous mode\n", 2101 vf->vf_id); 2102 } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC, 2103 &vf->vf_states)) 2104 dev_info(&pf->pdev->dev, 2105 "VF %d successfully unset multicast promiscuous mode\n", 2106 vf->vf_id); 2107 2108 if (alluni) { 2109 if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC, 2110 &vf->vf_states)) 2111 dev_info(&pf->pdev->dev, 2112 "VF %d successfully set unicast promiscuous mode\n", 2113 vf->vf_id); 2114 } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC, 2115 &vf->vf_states)) 2116 dev_info(&pf->pdev->dev, 2117 "VF %d successfully unset unicast promiscuous mode\n", 2118 vf->vf_id); 2119 2120 err_out: 2121 /* send the response to the VF */ 2122 return i40e_vc_send_resp_to_vf(vf, 2123 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 2124 aq_ret); 2125 } 2126 2127 /** 2128 * i40e_vc_config_queues_msg 2129 * @vf: pointer to the VF info 2130 * @msg: pointer to the msg buffer 2131 * 2132 * called from the VF to configure the rx/tx 2133 * queues 2134 **/ 2135 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) 2136 { 2137 struct virtchnl_vsi_queue_config_info *qci = 2138 (struct virtchnl_vsi_queue_config_info *)msg; 2139 struct virtchnl_queue_pair_info *qpi; 2140 struct i40e_pf *pf = vf->pf; 2141 u16 vsi_id, vsi_queue_id = 0; 2142 u16 num_qps_all = 0; 2143 i40e_status aq_ret = 0; 2144 int i, j = 0, idx = 0; 2145 2146 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2147 aq_ret = I40E_ERR_PARAM; 2148 goto error_param; 2149 } 2150 2151 if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) { 2152 aq_ret = I40E_ERR_PARAM; 2153 goto error_param; 2154 } 2155 2156 if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) { 2157 aq_ret = I40E_ERR_PARAM; 2158 goto error_param; 2159 } 2160 2161 if (vf->adq_enabled) { 2162 for (i = 0; i < I40E_MAX_VF_VSI; i++) 2163 num_qps_all += vf->ch[i].num_qps; 2164 if (num_qps_all != qci->num_queue_pairs) { 2165 aq_ret = I40E_ERR_PARAM; 2166 goto error_param; 2167 } 2168 } 2169 2170 vsi_id = qci->vsi_id; 2171 2172 for (i = 0; i < qci->num_queue_pairs; i++) { 2173 qpi = &qci->qpair[i]; 2174 2175 if (!vf->adq_enabled) { 2176 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, 2177 qpi->txq.queue_id)) { 2178 aq_ret = I40E_ERR_PARAM; 2179 goto error_param; 2180 } 2181 2182 vsi_queue_id = qpi->txq.queue_id; 2183 2184 if (qpi->txq.vsi_id != qci->vsi_id || 2185 qpi->rxq.vsi_id != qci->vsi_id || 2186 qpi->rxq.queue_id != vsi_queue_id) { 2187 aq_ret = I40E_ERR_PARAM; 2188 goto error_param; 2189 } 2190 } 2191 2192 if (vf->adq_enabled) { 2193 if (idx >= ARRAY_SIZE(vf->ch)) { 2194 aq_ret = I40E_ERR_NO_AVAILABLE_VSI; 2195 goto error_param; 2196 } 2197 vsi_id = vf->ch[idx].vsi_id; 2198 } 2199 2200 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id, 2201 &qpi->rxq) || 2202 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id, 2203 &qpi->txq)) { 2204 aq_ret = I40E_ERR_PARAM; 2205 goto error_param; 2206 } 2207 2208 /* For ADq there can be up to 4 VSIs with max 4 queues each. 2209 * VF does not know about these additional VSIs and all 2210 * it cares is about its own queues. PF configures these queues 2211 * to its appropriate VSIs based on TC mapping 2212 */ 2213 if (vf->adq_enabled) { 2214 if (idx >= ARRAY_SIZE(vf->ch)) { 2215 aq_ret = I40E_ERR_NO_AVAILABLE_VSI; 2216 goto error_param; 2217 } 2218 if (j == (vf->ch[idx].num_qps - 1)) { 2219 idx++; 2220 j = 0; /* resetting the queue count */ 2221 vsi_queue_id = 0; 2222 } else { 2223 j++; 2224 vsi_queue_id++; 2225 } 2226 } 2227 } 2228 /* set vsi num_queue_pairs in use to num configured by VF */ 2229 if (!vf->adq_enabled) { 2230 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = 2231 qci->num_queue_pairs; 2232 } else { 2233 for (i = 0; i < vf->num_tc; i++) 2234 pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs = 2235 vf->ch[i].num_qps; 2236 } 2237 2238 error_param: 2239 /* send the response to the VF */ 2240 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, 2241 aq_ret); 2242 } 2243 2244 /** 2245 * i40e_validate_queue_map - check queue map is valid 2246 * @vf: the VF structure pointer 2247 * @vsi_id: vsi id 2248 * @queuemap: Tx or Rx queue map 2249 * 2250 * check if Tx or Rx queue map is valid 2251 **/ 2252 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id, 2253 unsigned long queuemap) 2254 { 2255 u16 vsi_queue_id, queue_id; 2256 2257 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) { 2258 if (vf->adq_enabled) { 2259 vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id; 2260 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF); 2261 } else { 2262 queue_id = vsi_queue_id; 2263 } 2264 2265 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) 2266 return -EINVAL; 2267 } 2268 2269 return 0; 2270 } 2271 2272 /** 2273 * i40e_vc_config_irq_map_msg 2274 * @vf: pointer to the VF info 2275 * @msg: pointer to the msg buffer 2276 * 2277 * called from the VF to configure the irq to 2278 * queue map 2279 **/ 2280 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) 2281 { 2282 struct virtchnl_irq_map_info *irqmap_info = 2283 (struct virtchnl_irq_map_info *)msg; 2284 struct virtchnl_vector_map *map; 2285 u16 vsi_id; 2286 i40e_status aq_ret = 0; 2287 int i; 2288 2289 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2290 aq_ret = I40E_ERR_PARAM; 2291 goto error_param; 2292 } 2293 2294 if (irqmap_info->num_vectors > 2295 vf->pf->hw.func_caps.num_msix_vectors_vf) { 2296 aq_ret = I40E_ERR_PARAM; 2297 goto error_param; 2298 } 2299 2300 for (i = 0; i < irqmap_info->num_vectors; i++) { 2301 map = &irqmap_info->vecmap[i]; 2302 /* validate msg params */ 2303 if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) || 2304 !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) { 2305 aq_ret = I40E_ERR_PARAM; 2306 goto error_param; 2307 } 2308 vsi_id = map->vsi_id; 2309 2310 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) { 2311 aq_ret = I40E_ERR_PARAM; 2312 goto error_param; 2313 } 2314 2315 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) { 2316 aq_ret = I40E_ERR_PARAM; 2317 goto error_param; 2318 } 2319 2320 i40e_config_irq_link_list(vf, vsi_id, map); 2321 } 2322 error_param: 2323 /* send the response to the VF */ 2324 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, 2325 aq_ret); 2326 } 2327 2328 /** 2329 * i40e_ctrl_vf_tx_rings 2330 * @vsi: the SRIOV VSI being configured 2331 * @q_map: bit map of the queues to be enabled 2332 * @enable: start or stop the queue 2333 **/ 2334 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map, 2335 bool enable) 2336 { 2337 struct i40e_pf *pf = vsi->back; 2338 int ret = 0; 2339 u16 q_id; 2340 2341 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { 2342 ret = i40e_control_wait_tx_q(vsi->seid, pf, 2343 vsi->base_queue + q_id, 2344 false /*is xdp*/, enable); 2345 if (ret) 2346 break; 2347 } 2348 return ret; 2349 } 2350 2351 /** 2352 * i40e_ctrl_vf_rx_rings 2353 * @vsi: the SRIOV VSI being configured 2354 * @q_map: bit map of the queues to be enabled 2355 * @enable: start or stop the queue 2356 **/ 2357 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map, 2358 bool enable) 2359 { 2360 struct i40e_pf *pf = vsi->back; 2361 int ret = 0; 2362 u16 q_id; 2363 2364 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { 2365 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id, 2366 enable); 2367 if (ret) 2368 break; 2369 } 2370 return ret; 2371 } 2372 2373 /** 2374 * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTHCHNL 2375 * @vqs: virtchnl_queue_select structure containing bitmaps to validate 2376 * 2377 * Returns true if validation was successful, else false. 2378 */ 2379 static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs) 2380 { 2381 if ((!vqs->rx_queues && !vqs->tx_queues) || 2382 vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) || 2383 vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES)) 2384 return false; 2385 2386 return true; 2387 } 2388 2389 /** 2390 * i40e_vc_enable_queues_msg 2391 * @vf: pointer to the VF info 2392 * @msg: pointer to the msg buffer 2393 * 2394 * called from the VF to enable all or specific queue(s) 2395 **/ 2396 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) 2397 { 2398 struct virtchnl_queue_select *vqs = 2399 (struct virtchnl_queue_select *)msg; 2400 struct i40e_pf *pf = vf->pf; 2401 i40e_status aq_ret = 0; 2402 int i; 2403 2404 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2405 aq_ret = I40E_ERR_PARAM; 2406 goto error_param; 2407 } 2408 2409 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2410 aq_ret = I40E_ERR_PARAM; 2411 goto error_param; 2412 } 2413 2414 if (!i40e_vc_validate_vqs_bitmaps(vqs)) { 2415 aq_ret = I40E_ERR_PARAM; 2416 goto error_param; 2417 } 2418 2419 /* Use the queue bit map sent by the VF */ 2420 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2421 true)) { 2422 aq_ret = I40E_ERR_TIMEOUT; 2423 goto error_param; 2424 } 2425 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, 2426 true)) { 2427 aq_ret = I40E_ERR_TIMEOUT; 2428 goto error_param; 2429 } 2430 2431 /* need to start the rings for additional ADq VSI's as well */ 2432 if (vf->adq_enabled) { 2433 /* zero belongs to LAN VSI */ 2434 for (i = 1; i < vf->num_tc; i++) { 2435 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx])) 2436 aq_ret = I40E_ERR_TIMEOUT; 2437 } 2438 } 2439 2440 error_param: 2441 /* send the response to the VF */ 2442 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, 2443 aq_ret); 2444 } 2445 2446 /** 2447 * i40e_vc_disable_queues_msg 2448 * @vf: pointer to the VF info 2449 * @msg: pointer to the msg buffer 2450 * 2451 * called from the VF to disable all or specific 2452 * queue(s) 2453 **/ 2454 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) 2455 { 2456 struct virtchnl_queue_select *vqs = 2457 (struct virtchnl_queue_select *)msg; 2458 struct i40e_pf *pf = vf->pf; 2459 i40e_status aq_ret = 0; 2460 2461 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2462 aq_ret = I40E_ERR_PARAM; 2463 goto error_param; 2464 } 2465 2466 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2467 aq_ret = I40E_ERR_PARAM; 2468 goto error_param; 2469 } 2470 2471 if (!i40e_vc_validate_vqs_bitmaps(vqs)) { 2472 aq_ret = I40E_ERR_PARAM; 2473 goto error_param; 2474 } 2475 2476 /* Use the queue bit map sent by the VF */ 2477 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, 2478 false)) { 2479 aq_ret = I40E_ERR_TIMEOUT; 2480 goto error_param; 2481 } 2482 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2483 false)) { 2484 aq_ret = I40E_ERR_TIMEOUT; 2485 goto error_param; 2486 } 2487 error_param: 2488 /* send the response to the VF */ 2489 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, 2490 aq_ret); 2491 } 2492 2493 /** 2494 * i40e_vc_request_queues_msg 2495 * @vf: pointer to the VF info 2496 * @msg: pointer to the msg buffer 2497 * 2498 * VFs get a default number of queues but can use this message to request a 2499 * different number. If the request is successful, PF will reset the VF and 2500 * return 0. If unsuccessful, PF will send message informing VF of number of 2501 * available queues and return result of sending VF a message. 2502 **/ 2503 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) 2504 { 2505 struct virtchnl_vf_res_request *vfres = 2506 (struct virtchnl_vf_res_request *)msg; 2507 u16 req_pairs = vfres->num_queue_pairs; 2508 u8 cur_pairs = vf->num_queue_pairs; 2509 struct i40e_pf *pf = vf->pf; 2510 2511 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 2512 return -EINVAL; 2513 2514 if (req_pairs > I40E_MAX_VF_QUEUES) { 2515 dev_err(&pf->pdev->dev, 2516 "VF %d tried to request more than %d queues.\n", 2517 vf->vf_id, 2518 I40E_MAX_VF_QUEUES); 2519 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES; 2520 } else if (req_pairs - cur_pairs > pf->queues_left) { 2521 dev_warn(&pf->pdev->dev, 2522 "VF %d requested %d more queues, but only %d left.\n", 2523 vf->vf_id, 2524 req_pairs - cur_pairs, 2525 pf->queues_left); 2526 vfres->num_queue_pairs = pf->queues_left + cur_pairs; 2527 } else { 2528 /* successful request */ 2529 vf->num_req_queues = req_pairs; 2530 i40e_vc_notify_vf_reset(vf); 2531 i40e_reset_vf(vf, false); 2532 return 0; 2533 } 2534 2535 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0, 2536 (u8 *)vfres, sizeof(*vfres)); 2537 } 2538 2539 /** 2540 * i40e_vc_get_stats_msg 2541 * @vf: pointer to the VF info 2542 * @msg: pointer to the msg buffer 2543 * 2544 * called from the VF to get vsi stats 2545 **/ 2546 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) 2547 { 2548 struct virtchnl_queue_select *vqs = 2549 (struct virtchnl_queue_select *)msg; 2550 struct i40e_pf *pf = vf->pf; 2551 struct i40e_eth_stats stats; 2552 i40e_status aq_ret = 0; 2553 struct i40e_vsi *vsi; 2554 2555 memset(&stats, 0, sizeof(struct i40e_eth_stats)); 2556 2557 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2558 aq_ret = I40E_ERR_PARAM; 2559 goto error_param; 2560 } 2561 2562 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2563 aq_ret = I40E_ERR_PARAM; 2564 goto error_param; 2565 } 2566 2567 vsi = pf->vsi[vf->lan_vsi_idx]; 2568 if (!vsi) { 2569 aq_ret = I40E_ERR_PARAM; 2570 goto error_param; 2571 } 2572 i40e_update_eth_stats(vsi); 2573 stats = vsi->eth_stats; 2574 2575 error_param: 2576 /* send the response back to the VF */ 2577 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret, 2578 (u8 *)&stats, sizeof(stats)); 2579 } 2580 2581 /* If the VF is not trusted restrict the number of MAC/VLAN it can program 2582 * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast 2583 */ 2584 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1) 2585 #define I40E_VC_MAX_VLAN_PER_VF 16 2586 2587 /** 2588 * i40e_check_vf_permission 2589 * @vf: pointer to the VF info 2590 * @al: MAC address list from virtchnl 2591 * 2592 * Check that the given list of MAC addresses is allowed. Will return -EPERM 2593 * if any address in the list is not valid. Checks the following conditions: 2594 * 2595 * 1) broadcast and zero addresses are never valid 2596 * 2) unicast addresses are not allowed if the VMM has administratively set 2597 * the VF MAC address, unless the VF is marked as privileged. 2598 * 3) There is enough space to add all the addresses. 2599 * 2600 * Note that to guarantee consistency, it is expected this function be called 2601 * while holding the mac_filter_hash_lock, as otherwise the current number of 2602 * addresses might not be accurate. 2603 **/ 2604 static inline int i40e_check_vf_permission(struct i40e_vf *vf, 2605 struct virtchnl_ether_addr_list *al) 2606 { 2607 struct i40e_pf *pf = vf->pf; 2608 struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; 2609 int mac2add_cnt = 0; 2610 int i; 2611 2612 for (i = 0; i < al->num_elements; i++) { 2613 struct i40e_mac_filter *f; 2614 u8 *addr = al->list[i].addr; 2615 2616 if (is_broadcast_ether_addr(addr) || 2617 is_zero_ether_addr(addr)) { 2618 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", 2619 addr); 2620 return I40E_ERR_INVALID_MAC_ADDR; 2621 } 2622 2623 /* If the host VMM administrator has set the VF MAC address 2624 * administratively via the ndo_set_vf_mac command then deny 2625 * permission to the VF to add or delete unicast MAC addresses. 2626 * Unless the VF is privileged and then it can do whatever. 2627 * The VF may request to set the MAC address filter already 2628 * assigned to it so do not return an error in that case. 2629 */ 2630 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && 2631 !is_multicast_ether_addr(addr) && vf->pf_set_mac && 2632 !ether_addr_equal(addr, vf->default_lan_addr.addr)) { 2633 dev_err(&pf->pdev->dev, 2634 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); 2635 return -EPERM; 2636 } 2637 2638 /*count filters that really will be added*/ 2639 f = i40e_find_mac(vsi, addr); 2640 if (!f) 2641 ++mac2add_cnt; 2642 } 2643 2644 /* If this VF is not privileged, then we can't add more than a limited 2645 * number of addresses. Check to make sure that the additions do not 2646 * push us over the limit. 2647 */ 2648 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && 2649 (i40e_count_filters(vsi) + mac2add_cnt) > 2650 I40E_VC_MAX_MAC_ADDR_PER_VF) { 2651 dev_err(&pf->pdev->dev, 2652 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); 2653 return -EPERM; 2654 } 2655 return 0; 2656 } 2657 2658 /** 2659 * i40e_vc_add_mac_addr_msg 2660 * @vf: pointer to the VF info 2661 * @msg: pointer to the msg buffer 2662 * 2663 * add guest mac address filter 2664 **/ 2665 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) 2666 { 2667 struct virtchnl_ether_addr_list *al = 2668 (struct virtchnl_ether_addr_list *)msg; 2669 struct i40e_pf *pf = vf->pf; 2670 struct i40e_vsi *vsi = NULL; 2671 i40e_status ret = 0; 2672 int i; 2673 2674 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2675 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { 2676 ret = I40E_ERR_PARAM; 2677 goto error_param; 2678 } 2679 2680 vsi = pf->vsi[vf->lan_vsi_idx]; 2681 2682 /* Lock once, because all function inside for loop accesses VSI's 2683 * MAC filter list which needs to be protected using same lock. 2684 */ 2685 spin_lock_bh(&vsi->mac_filter_hash_lock); 2686 2687 ret = i40e_check_vf_permission(vf, al); 2688 if (ret) { 2689 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2690 goto error_param; 2691 } 2692 2693 /* add new addresses to the list */ 2694 for (i = 0; i < al->num_elements; i++) { 2695 struct i40e_mac_filter *f; 2696 2697 f = i40e_find_mac(vsi, al->list[i].addr); 2698 if (!f) { 2699 f = i40e_add_mac_filter(vsi, al->list[i].addr); 2700 2701 if (!f) { 2702 dev_err(&pf->pdev->dev, 2703 "Unable to add MAC filter %pM for VF %d\n", 2704 al->list[i].addr, vf->vf_id); 2705 ret = I40E_ERR_PARAM; 2706 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2707 goto error_param; 2708 } 2709 if (is_valid_ether_addr(al->list[i].addr) && 2710 is_zero_ether_addr(vf->default_lan_addr.addr)) 2711 ether_addr_copy(vf->default_lan_addr.addr, 2712 al->list[i].addr); 2713 } 2714 } 2715 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2716 2717 /* program the updated filter list */ 2718 ret = i40e_sync_vsi_filters(vsi); 2719 if (ret) 2720 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 2721 vf->vf_id, ret); 2722 2723 error_param: 2724 /* send the response to the VF */ 2725 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, 2726 ret); 2727 } 2728 2729 /** 2730 * i40e_vc_del_mac_addr_msg 2731 * @vf: pointer to the VF info 2732 * @msg: pointer to the msg buffer 2733 * 2734 * remove guest mac address filter 2735 **/ 2736 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) 2737 { 2738 struct virtchnl_ether_addr_list *al = 2739 (struct virtchnl_ether_addr_list *)msg; 2740 bool was_unimac_deleted = false; 2741 struct i40e_pf *pf = vf->pf; 2742 struct i40e_vsi *vsi = NULL; 2743 i40e_status ret = 0; 2744 int i; 2745 2746 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2747 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { 2748 ret = I40E_ERR_PARAM; 2749 goto error_param; 2750 } 2751 2752 for (i = 0; i < al->num_elements; i++) { 2753 if (is_broadcast_ether_addr(al->list[i].addr) || 2754 is_zero_ether_addr(al->list[i].addr)) { 2755 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n", 2756 al->list[i].addr, vf->vf_id); 2757 ret = I40E_ERR_INVALID_MAC_ADDR; 2758 goto error_param; 2759 } 2760 if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr)) 2761 was_unimac_deleted = true; 2762 } 2763 vsi = pf->vsi[vf->lan_vsi_idx]; 2764 2765 spin_lock_bh(&vsi->mac_filter_hash_lock); 2766 /* delete addresses from the list */ 2767 for (i = 0; i < al->num_elements; i++) 2768 if (i40e_del_mac_filter(vsi, al->list[i].addr)) { 2769 ret = I40E_ERR_INVALID_MAC_ADDR; 2770 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2771 goto error_param; 2772 } 2773 2774 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2775 2776 /* program the updated filter list */ 2777 ret = i40e_sync_vsi_filters(vsi); 2778 if (ret) 2779 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 2780 vf->vf_id, ret); 2781 2782 if (vf->trusted && was_unimac_deleted) { 2783 struct i40e_mac_filter *f; 2784 struct hlist_node *h; 2785 u8 *macaddr = NULL; 2786 int bkt; 2787 2788 /* set last unicast mac address as default */ 2789 spin_lock_bh(&vsi->mac_filter_hash_lock); 2790 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { 2791 if (is_valid_ether_addr(f->macaddr)) 2792 macaddr = f->macaddr; 2793 } 2794 if (macaddr) 2795 ether_addr_copy(vf->default_lan_addr.addr, macaddr); 2796 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2797 } 2798 error_param: 2799 /* send the response to the VF */ 2800 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret); 2801 } 2802 2803 /** 2804 * i40e_vc_add_vlan_msg 2805 * @vf: pointer to the VF info 2806 * @msg: pointer to the msg buffer 2807 * 2808 * program guest vlan id 2809 **/ 2810 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) 2811 { 2812 struct virtchnl_vlan_filter_list *vfl = 2813 (struct virtchnl_vlan_filter_list *)msg; 2814 struct i40e_pf *pf = vf->pf; 2815 struct i40e_vsi *vsi = NULL; 2816 i40e_status aq_ret = 0; 2817 int i; 2818 2819 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) && 2820 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 2821 dev_err(&pf->pdev->dev, 2822 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n"); 2823 goto error_param; 2824 } 2825 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2826 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { 2827 aq_ret = I40E_ERR_PARAM; 2828 goto error_param; 2829 } 2830 2831 for (i = 0; i < vfl->num_elements; i++) { 2832 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 2833 aq_ret = I40E_ERR_PARAM; 2834 dev_err(&pf->pdev->dev, 2835 "invalid VF VLAN id %d\n", vfl->vlan_id[i]); 2836 goto error_param; 2837 } 2838 } 2839 vsi = pf->vsi[vf->lan_vsi_idx]; 2840 if (vsi->info.pvid) { 2841 aq_ret = I40E_ERR_PARAM; 2842 goto error_param; 2843 } 2844 2845 i40e_vlan_stripping_enable(vsi); 2846 for (i = 0; i < vfl->num_elements; i++) { 2847 /* add new VLAN filter */ 2848 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); 2849 if (!ret) 2850 vf->num_vlan++; 2851 2852 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 2853 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 2854 true, 2855 vfl->vlan_id[i], 2856 NULL); 2857 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 2858 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 2859 true, 2860 vfl->vlan_id[i], 2861 NULL); 2862 2863 if (ret) 2864 dev_err(&pf->pdev->dev, 2865 "Unable to add VLAN filter %d for VF %d, error %d\n", 2866 vfl->vlan_id[i], vf->vf_id, ret); 2867 } 2868 2869 error_param: 2870 /* send the response to the VF */ 2871 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret); 2872 } 2873 2874 /** 2875 * i40e_vc_remove_vlan_msg 2876 * @vf: pointer to the VF info 2877 * @msg: pointer to the msg buffer 2878 * 2879 * remove programmed guest vlan id 2880 **/ 2881 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) 2882 { 2883 struct virtchnl_vlan_filter_list *vfl = 2884 (struct virtchnl_vlan_filter_list *)msg; 2885 struct i40e_pf *pf = vf->pf; 2886 struct i40e_vsi *vsi = NULL; 2887 i40e_status aq_ret = 0; 2888 int i; 2889 2890 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2891 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { 2892 aq_ret = I40E_ERR_PARAM; 2893 goto error_param; 2894 } 2895 2896 for (i = 0; i < vfl->num_elements; i++) { 2897 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 2898 aq_ret = I40E_ERR_PARAM; 2899 goto error_param; 2900 } 2901 } 2902 2903 vsi = pf->vsi[vf->lan_vsi_idx]; 2904 if (vsi->info.pvid) { 2905 if (vfl->num_elements > 1 || vfl->vlan_id[0]) 2906 aq_ret = I40E_ERR_PARAM; 2907 goto error_param; 2908 } 2909 2910 for (i = 0; i < vfl->num_elements; i++) { 2911 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); 2912 vf->num_vlan--; 2913 2914 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 2915 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 2916 false, 2917 vfl->vlan_id[i], 2918 NULL); 2919 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 2920 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 2921 false, 2922 vfl->vlan_id[i], 2923 NULL); 2924 } 2925 2926 error_param: 2927 /* send the response to the VF */ 2928 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret); 2929 } 2930 2931 /** 2932 * i40e_vc_iwarp_msg 2933 * @vf: pointer to the VF info 2934 * @msg: pointer to the msg buffer 2935 * @msglen: msg length 2936 * 2937 * called from the VF for the iwarp msgs 2938 **/ 2939 static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 2940 { 2941 struct i40e_pf *pf = vf->pf; 2942 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; 2943 i40e_status aq_ret = 0; 2944 2945 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2946 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 2947 aq_ret = I40E_ERR_PARAM; 2948 goto error_param; 2949 } 2950 2951 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id, 2952 msg, msglen); 2953 2954 error_param: 2955 /* send the response to the VF */ 2956 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP, 2957 aq_ret); 2958 } 2959 2960 /** 2961 * i40e_vc_iwarp_qvmap_msg 2962 * @vf: pointer to the VF info 2963 * @msg: pointer to the msg buffer 2964 * @config: config qvmap or release it 2965 * 2966 * called from the VF for the iwarp msgs 2967 **/ 2968 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) 2969 { 2970 struct virtchnl_iwarp_qvlist_info *qvlist_info = 2971 (struct virtchnl_iwarp_qvlist_info *)msg; 2972 i40e_status aq_ret = 0; 2973 2974 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2975 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 2976 aq_ret = I40E_ERR_PARAM; 2977 goto error_param; 2978 } 2979 2980 if (config) { 2981 if (i40e_config_iwarp_qvlist(vf, qvlist_info)) 2982 aq_ret = I40E_ERR_PARAM; 2983 } else { 2984 i40e_release_iwarp_qvlist(vf); 2985 } 2986 2987 error_param: 2988 /* send the response to the VF */ 2989 return i40e_vc_send_resp_to_vf(vf, 2990 config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP : 2991 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, 2992 aq_ret); 2993 } 2994 2995 /** 2996 * i40e_vc_config_rss_key 2997 * @vf: pointer to the VF info 2998 * @msg: pointer to the msg buffer 2999 * 3000 * Configure the VF's RSS key 3001 **/ 3002 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) 3003 { 3004 struct virtchnl_rss_key *vrk = 3005 (struct virtchnl_rss_key *)msg; 3006 struct i40e_pf *pf = vf->pf; 3007 struct i40e_vsi *vsi = NULL; 3008 i40e_status aq_ret = 0; 3009 3010 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 3011 !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) || 3012 (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { 3013 aq_ret = I40E_ERR_PARAM; 3014 goto err; 3015 } 3016 3017 vsi = pf->vsi[vf->lan_vsi_idx]; 3018 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0); 3019 err: 3020 /* send the response to the VF */ 3021 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, 3022 aq_ret); 3023 } 3024 3025 /** 3026 * i40e_vc_config_rss_lut 3027 * @vf: pointer to the VF info 3028 * @msg: pointer to the msg buffer 3029 * 3030 * Configure the VF's RSS LUT 3031 **/ 3032 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) 3033 { 3034 struct virtchnl_rss_lut *vrl = 3035 (struct virtchnl_rss_lut *)msg; 3036 struct i40e_pf *pf = vf->pf; 3037 struct i40e_vsi *vsi = NULL; 3038 i40e_status aq_ret = 0; 3039 u16 i; 3040 3041 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 3042 !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) || 3043 (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { 3044 aq_ret = I40E_ERR_PARAM; 3045 goto err; 3046 } 3047 3048 for (i = 0; i < vrl->lut_entries; i++) 3049 if (vrl->lut[i] >= vf->num_queue_pairs) { 3050 aq_ret = I40E_ERR_PARAM; 3051 goto err; 3052 } 3053 3054 vsi = pf->vsi[vf->lan_vsi_idx]; 3055 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); 3056 /* send the response to the VF */ 3057 err: 3058 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, 3059 aq_ret); 3060 } 3061 3062 /** 3063 * i40e_vc_get_rss_hena 3064 * @vf: pointer to the VF info 3065 * @msg: pointer to the msg buffer 3066 * 3067 * Return the RSS HENA bits allowed by the hardware 3068 **/ 3069 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg) 3070 { 3071 struct virtchnl_rss_hena *vrh = NULL; 3072 struct i40e_pf *pf = vf->pf; 3073 i40e_status aq_ret = 0; 3074 int len = 0; 3075 3076 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3077 aq_ret = I40E_ERR_PARAM; 3078 goto err; 3079 } 3080 len = sizeof(struct virtchnl_rss_hena); 3081 3082 vrh = kzalloc(len, GFP_KERNEL); 3083 if (!vrh) { 3084 aq_ret = I40E_ERR_NO_MEMORY; 3085 len = 0; 3086 goto err; 3087 } 3088 vrh->hena = i40e_pf_get_default_rss_hena(pf); 3089 err: 3090 /* send the response back to the VF */ 3091 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, 3092 aq_ret, (u8 *)vrh, len); 3093 kfree(vrh); 3094 return aq_ret; 3095 } 3096 3097 /** 3098 * i40e_vc_set_rss_hena 3099 * @vf: pointer to the VF info 3100 * @msg: pointer to the msg buffer 3101 * 3102 * Set the RSS HENA bits for the VF 3103 **/ 3104 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg) 3105 { 3106 struct virtchnl_rss_hena *vrh = 3107 (struct virtchnl_rss_hena *)msg; 3108 struct i40e_pf *pf = vf->pf; 3109 struct i40e_hw *hw = &pf->hw; 3110 i40e_status aq_ret = 0; 3111 3112 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3113 aq_ret = I40E_ERR_PARAM; 3114 goto err; 3115 } 3116 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena); 3117 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id), 3118 (u32)(vrh->hena >> 32)); 3119 3120 /* send the response to the VF */ 3121 err: 3122 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret); 3123 } 3124 3125 /** 3126 * i40e_vc_enable_vlan_stripping 3127 * @vf: pointer to the VF info 3128 * @msg: pointer to the msg buffer 3129 * 3130 * Enable vlan header stripping for the VF 3131 **/ 3132 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) 3133 { 3134 i40e_status aq_ret = 0; 3135 struct i40e_vsi *vsi; 3136 3137 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3138 aq_ret = I40E_ERR_PARAM; 3139 goto err; 3140 } 3141 3142 vsi = vf->pf->vsi[vf->lan_vsi_idx]; 3143 i40e_vlan_stripping_enable(vsi); 3144 3145 /* send the response to the VF */ 3146 err: 3147 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, 3148 aq_ret); 3149 } 3150 3151 /** 3152 * i40e_vc_disable_vlan_stripping 3153 * @vf: pointer to the VF info 3154 * @msg: pointer to the msg buffer 3155 * 3156 * Disable vlan header stripping for the VF 3157 **/ 3158 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) 3159 { 3160 i40e_status aq_ret = 0; 3161 struct i40e_vsi *vsi; 3162 3163 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3164 aq_ret = I40E_ERR_PARAM; 3165 goto err; 3166 } 3167 3168 vsi = vf->pf->vsi[vf->lan_vsi_idx]; 3169 i40e_vlan_stripping_disable(vsi); 3170 3171 /* send the response to the VF */ 3172 err: 3173 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 3174 aq_ret); 3175 } 3176 3177 /** 3178 * i40e_validate_cloud_filter 3179 * @vf: pointer to VF structure 3180 * @tc_filter: pointer to filter requested 3181 * 3182 * This function validates cloud filter programmed as TC filter for ADq 3183 **/ 3184 static int i40e_validate_cloud_filter(struct i40e_vf *vf, 3185 struct virtchnl_filter *tc_filter) 3186 { 3187 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec; 3188 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec; 3189 struct i40e_pf *pf = vf->pf; 3190 struct i40e_vsi *vsi = NULL; 3191 struct i40e_mac_filter *f; 3192 struct hlist_node *h; 3193 bool found = false; 3194 int bkt; 3195 3196 if (!tc_filter->action) { 3197 dev_info(&pf->pdev->dev, 3198 "VF %d: Currently ADq doesn't support Drop Action\n", 3199 vf->vf_id); 3200 goto err; 3201 } 3202 3203 /* action_meta is TC number here to which the filter is applied */ 3204 if (!tc_filter->action_meta || 3205 tc_filter->action_meta > I40E_MAX_VF_VSI) { 3206 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", 3207 vf->vf_id, tc_filter->action_meta); 3208 goto err; 3209 } 3210 3211 /* Check filter if it's programmed for advanced mode or basic mode. 3212 * There are two ADq modes (for VF only), 3213 * 1. Basic mode: intended to allow as many filter options as possible 3214 * to be added to a VF in Non-trusted mode. Main goal is 3215 * to add filters to its own MAC and VLAN id. 3216 * 2. Advanced mode: is for allowing filters to be applied other than 3217 * its own MAC or VLAN. This mode requires the VF to be 3218 * Trusted. 3219 */ 3220 if (mask.dst_mac[0] && !mask.dst_ip[0]) { 3221 vsi = pf->vsi[vf->lan_vsi_idx]; 3222 f = i40e_find_mac(vsi, data.dst_mac); 3223 3224 if (!f) { 3225 dev_info(&pf->pdev->dev, 3226 "Destination MAC %pM doesn't belong to VF %d\n", 3227 data.dst_mac, vf->vf_id); 3228 goto err; 3229 } 3230 3231 if (mask.vlan_id) { 3232 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, 3233 hlist) { 3234 if (f->vlan == ntohs(data.vlan_id)) { 3235 found = true; 3236 break; 3237 } 3238 } 3239 if (!found) { 3240 dev_info(&pf->pdev->dev, 3241 "VF %d doesn't have any VLAN id %u\n", 3242 vf->vf_id, ntohs(data.vlan_id)); 3243 goto err; 3244 } 3245 } 3246 } else { 3247 /* Check if VF is trusted */ 3248 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 3249 dev_err(&pf->pdev->dev, 3250 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n", 3251 vf->vf_id); 3252 return I40E_ERR_CONFIG; 3253 } 3254 } 3255 3256 if (mask.dst_mac[0] & data.dst_mac[0]) { 3257 if (is_broadcast_ether_addr(data.dst_mac) || 3258 is_zero_ether_addr(data.dst_mac)) { 3259 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n", 3260 vf->vf_id, data.dst_mac); 3261 goto err; 3262 } 3263 } 3264 3265 if (mask.src_mac[0] & data.src_mac[0]) { 3266 if (is_broadcast_ether_addr(data.src_mac) || 3267 is_zero_ether_addr(data.src_mac)) { 3268 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n", 3269 vf->vf_id, data.src_mac); 3270 goto err; 3271 } 3272 } 3273 3274 if (mask.dst_port & data.dst_port) { 3275 if (!data.dst_port) { 3276 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n", 3277 vf->vf_id); 3278 goto err; 3279 } 3280 } 3281 3282 if (mask.src_port & data.src_port) { 3283 if (!data.src_port) { 3284 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n", 3285 vf->vf_id); 3286 goto err; 3287 } 3288 } 3289 3290 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW && 3291 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) { 3292 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n", 3293 vf->vf_id); 3294 goto err; 3295 } 3296 3297 if (mask.vlan_id & data.vlan_id) { 3298 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) { 3299 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n", 3300 vf->vf_id); 3301 goto err; 3302 } 3303 } 3304 3305 return I40E_SUCCESS; 3306 err: 3307 return I40E_ERR_CONFIG; 3308 } 3309 3310 /** 3311 * i40e_find_vsi_from_seid - searches for the vsi with the given seid 3312 * @vf: pointer to the VF info 3313 * @seid: seid of the vsi it is searching for 3314 **/ 3315 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid) 3316 { 3317 struct i40e_pf *pf = vf->pf; 3318 struct i40e_vsi *vsi = NULL; 3319 int i; 3320 3321 for (i = 0; i < vf->num_tc ; i++) { 3322 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id); 3323 if (vsi && vsi->seid == seid) 3324 return vsi; 3325 } 3326 return NULL; 3327 } 3328 3329 /** 3330 * i40e_del_all_cloud_filters 3331 * @vf: pointer to the VF info 3332 * 3333 * This function deletes all cloud filters 3334 **/ 3335 static void i40e_del_all_cloud_filters(struct i40e_vf *vf) 3336 { 3337 struct i40e_cloud_filter *cfilter = NULL; 3338 struct i40e_pf *pf = vf->pf; 3339 struct i40e_vsi *vsi = NULL; 3340 struct hlist_node *node; 3341 int ret; 3342 3343 hlist_for_each_entry_safe(cfilter, node, 3344 &vf->cloud_filter_list, cloud_node) { 3345 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid); 3346 3347 if (!vsi) { 3348 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n", 3349 vf->vf_id, cfilter->seid); 3350 continue; 3351 } 3352 3353 if (cfilter->dst_port) 3354 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, 3355 false); 3356 else 3357 ret = i40e_add_del_cloud_filter(vsi, cfilter, false); 3358 if (ret) 3359 dev_err(&pf->pdev->dev, 3360 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", 3361 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3362 i40e_aq_str(&pf->hw, 3363 pf->hw.aq.asq_last_status)); 3364 3365 hlist_del(&cfilter->cloud_node); 3366 kfree(cfilter); 3367 vf->num_cloud_filters--; 3368 } 3369 } 3370 3371 /** 3372 * i40e_vc_del_cloud_filter 3373 * @vf: pointer to the VF info 3374 * @msg: pointer to the msg buffer 3375 * 3376 * This function deletes a cloud filter programmed as TC filter for ADq 3377 **/ 3378 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) 3379 { 3380 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 3381 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3382 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3383 struct i40e_cloud_filter cfilter, *cf = NULL; 3384 struct i40e_pf *pf = vf->pf; 3385 struct i40e_vsi *vsi = NULL; 3386 struct hlist_node *node; 3387 i40e_status aq_ret = 0; 3388 int i, ret; 3389 3390 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3391 aq_ret = I40E_ERR_PARAM; 3392 goto err; 3393 } 3394 3395 if (!vf->adq_enabled) { 3396 dev_info(&pf->pdev->dev, 3397 "VF %d: ADq not enabled, can't apply cloud filter\n", 3398 vf->vf_id); 3399 aq_ret = I40E_ERR_PARAM; 3400 goto err; 3401 } 3402 3403 if (i40e_validate_cloud_filter(vf, vcf)) { 3404 dev_info(&pf->pdev->dev, 3405 "VF %d: Invalid input, can't apply cloud filter\n", 3406 vf->vf_id); 3407 aq_ret = I40E_ERR_PARAM; 3408 goto err; 3409 } 3410 3411 memset(&cfilter, 0, sizeof(cfilter)); 3412 /* parse destination mac address */ 3413 for (i = 0; i < ETH_ALEN; i++) 3414 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; 3415 3416 /* parse source mac address */ 3417 for (i = 0; i < ETH_ALEN; i++) 3418 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 3419 3420 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id; 3421 cfilter.dst_port = mask.dst_port & tcf.dst_port; 3422 cfilter.src_port = mask.src_port & tcf.src_port; 3423 3424 switch (vcf->flow_type) { 3425 case VIRTCHNL_TCP_V4_FLOW: 3426 cfilter.n_proto = ETH_P_IP; 3427 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3428 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip, 3429 ARRAY_SIZE(tcf.dst_ip)); 3430 else if (mask.src_ip[0] & tcf.dst_ip[0]) 3431 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip, 3432 ARRAY_SIZE(tcf.dst_ip)); 3433 break; 3434 case VIRTCHNL_TCP_V6_FLOW: 3435 cfilter.n_proto = ETH_P_IPV6; 3436 if (mask.dst_ip[3] & tcf.dst_ip[3]) 3437 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip, 3438 sizeof(cfilter.ip.v6.dst_ip6)); 3439 if (mask.src_ip[3] & tcf.src_ip[3]) 3440 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip, 3441 sizeof(cfilter.ip.v6.src_ip6)); 3442 break; 3443 default: 3444 /* TC filter can be configured based on different combinations 3445 * and in this case IP is not a part of filter config 3446 */ 3447 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 3448 vf->vf_id); 3449 } 3450 3451 /* get the vsi to which the tc belongs to */ 3452 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 3453 cfilter.seid = vsi->seid; 3454 cfilter.flags = vcf->field_flags; 3455 3456 /* Deleting TC filter */ 3457 if (tcf.dst_port) 3458 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false); 3459 else 3460 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false); 3461 if (ret) { 3462 dev_err(&pf->pdev->dev, 3463 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", 3464 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3465 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 3466 goto err; 3467 } 3468 3469 hlist_for_each_entry_safe(cf, node, 3470 &vf->cloud_filter_list, cloud_node) { 3471 if (cf->seid != cfilter.seid) 3472 continue; 3473 if (mask.dst_port) 3474 if (cfilter.dst_port != cf->dst_port) 3475 continue; 3476 if (mask.dst_mac[0]) 3477 if (!ether_addr_equal(cf->src_mac, cfilter.src_mac)) 3478 continue; 3479 /* for ipv4 data to be valid, only first byte of mask is set */ 3480 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0]) 3481 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip, 3482 ARRAY_SIZE(tcf.dst_ip))) 3483 continue; 3484 /* for ipv6, mask is set for all sixteen bytes (4 words) */ 3485 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3]) 3486 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6, 3487 sizeof(cfilter.ip.v6.src_ip6))) 3488 continue; 3489 if (mask.vlan_id) 3490 if (cfilter.vlan_id != cf->vlan_id) 3491 continue; 3492 3493 hlist_del(&cf->cloud_node); 3494 kfree(cf); 3495 vf->num_cloud_filters--; 3496 } 3497 3498 err: 3499 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER, 3500 aq_ret); 3501 } 3502 3503 /** 3504 * i40e_vc_add_cloud_filter 3505 * @vf: pointer to the VF info 3506 * @msg: pointer to the msg buffer 3507 * 3508 * This function adds a cloud filter programmed as TC filter for ADq 3509 **/ 3510 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) 3511 { 3512 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 3513 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3514 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3515 struct i40e_cloud_filter *cfilter = NULL; 3516 struct i40e_pf *pf = vf->pf; 3517 struct i40e_vsi *vsi = NULL; 3518 i40e_status aq_ret = 0; 3519 int i, ret; 3520 3521 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3522 aq_ret = I40E_ERR_PARAM; 3523 goto err_out; 3524 } 3525 3526 if (!vf->adq_enabled) { 3527 dev_info(&pf->pdev->dev, 3528 "VF %d: ADq is not enabled, can't apply cloud filter\n", 3529 vf->vf_id); 3530 aq_ret = I40E_ERR_PARAM; 3531 goto err_out; 3532 } 3533 3534 if (i40e_validate_cloud_filter(vf, vcf)) { 3535 dev_info(&pf->pdev->dev, 3536 "VF %d: Invalid input/s, can't apply cloud filter\n", 3537 vf->vf_id); 3538 aq_ret = I40E_ERR_PARAM; 3539 goto err_out; 3540 } 3541 3542 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); 3543 if (!cfilter) 3544 return -ENOMEM; 3545 3546 /* parse destination mac address */ 3547 for (i = 0; i < ETH_ALEN; i++) 3548 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; 3549 3550 /* parse source mac address */ 3551 for (i = 0; i < ETH_ALEN; i++) 3552 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 3553 3554 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id; 3555 cfilter->dst_port = mask.dst_port & tcf.dst_port; 3556 cfilter->src_port = mask.src_port & tcf.src_port; 3557 3558 switch (vcf->flow_type) { 3559 case VIRTCHNL_TCP_V4_FLOW: 3560 cfilter->n_proto = ETH_P_IP; 3561 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3562 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip, 3563 ARRAY_SIZE(tcf.dst_ip)); 3564 else if (mask.src_ip[0] & tcf.dst_ip[0]) 3565 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip, 3566 ARRAY_SIZE(tcf.dst_ip)); 3567 break; 3568 case VIRTCHNL_TCP_V6_FLOW: 3569 cfilter->n_proto = ETH_P_IPV6; 3570 if (mask.dst_ip[3] & tcf.dst_ip[3]) 3571 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip, 3572 sizeof(cfilter->ip.v6.dst_ip6)); 3573 if (mask.src_ip[3] & tcf.src_ip[3]) 3574 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip, 3575 sizeof(cfilter->ip.v6.src_ip6)); 3576 break; 3577 default: 3578 /* TC filter can be configured based on different combinations 3579 * and in this case IP is not a part of filter config 3580 */ 3581 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 3582 vf->vf_id); 3583 } 3584 3585 /* get the VSI to which the TC belongs to */ 3586 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 3587 cfilter->seid = vsi->seid; 3588 cfilter->flags = vcf->field_flags; 3589 3590 /* Adding cloud filter programmed as TC filter */ 3591 if (tcf.dst_port) 3592 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); 3593 else 3594 ret = i40e_add_del_cloud_filter(vsi, cfilter, true); 3595 if (ret) { 3596 dev_err(&pf->pdev->dev, 3597 "VF %d: Failed to add cloud filter, err %s aq_err %s\n", 3598 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3599 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 3600 goto err_free; 3601 } 3602 3603 INIT_HLIST_NODE(&cfilter->cloud_node); 3604 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list); 3605 /* release the pointer passing it to the collection */ 3606 cfilter = NULL; 3607 vf->num_cloud_filters++; 3608 err_free: 3609 kfree(cfilter); 3610 err_out: 3611 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER, 3612 aq_ret); 3613 } 3614 3615 /** 3616 * i40e_vc_add_qch_msg: Add queue channel and enable ADq 3617 * @vf: pointer to the VF info 3618 * @msg: pointer to the msg buffer 3619 **/ 3620 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) 3621 { 3622 struct virtchnl_tc_info *tci = 3623 (struct virtchnl_tc_info *)msg; 3624 struct i40e_pf *pf = vf->pf; 3625 struct i40e_link_status *ls = &pf->hw.phy.link_info; 3626 int i, adq_request_qps = 0; 3627 i40e_status aq_ret = 0; 3628 u64 speed = 0; 3629 3630 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3631 aq_ret = I40E_ERR_PARAM; 3632 goto err; 3633 } 3634 3635 /* ADq cannot be applied if spoof check is ON */ 3636 if (vf->spoofchk) { 3637 dev_err(&pf->pdev->dev, 3638 "Spoof check is ON, turn it OFF to enable ADq\n"); 3639 aq_ret = I40E_ERR_PARAM; 3640 goto err; 3641 } 3642 3643 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) { 3644 dev_err(&pf->pdev->dev, 3645 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n", 3646 vf->vf_id); 3647 aq_ret = I40E_ERR_PARAM; 3648 goto err; 3649 } 3650 3651 /* max number of traffic classes for VF currently capped at 4 */ 3652 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) { 3653 dev_err(&pf->pdev->dev, 3654 "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n", 3655 vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI); 3656 aq_ret = I40E_ERR_PARAM; 3657 goto err; 3658 } 3659 3660 /* validate queues for each TC */ 3661 for (i = 0; i < tci->num_tc; i++) 3662 if (!tci->list[i].count || 3663 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) { 3664 dev_err(&pf->pdev->dev, 3665 "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n", 3666 vf->vf_id, i, tci->list[i].count, 3667 I40E_DEFAULT_QUEUES_PER_VF); 3668 aq_ret = I40E_ERR_PARAM; 3669 goto err; 3670 } 3671 3672 /* need Max VF queues but already have default number of queues */ 3673 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF; 3674 3675 if (pf->queues_left < adq_request_qps) { 3676 dev_err(&pf->pdev->dev, 3677 "No queues left to allocate to VF %d\n", 3678 vf->vf_id); 3679 aq_ret = I40E_ERR_PARAM; 3680 goto err; 3681 } else { 3682 /* we need to allocate max VF queues to enable ADq so as to 3683 * make sure ADq enabled VF always gets back queues when it 3684 * goes through a reset. 3685 */ 3686 vf->num_queue_pairs = I40E_MAX_VF_QUEUES; 3687 } 3688 3689 /* get link speed in MB to validate rate limit */ 3690 switch (ls->link_speed) { 3691 case VIRTCHNL_LINK_SPEED_100MB: 3692 speed = SPEED_100; 3693 break; 3694 case VIRTCHNL_LINK_SPEED_1GB: 3695 speed = SPEED_1000; 3696 break; 3697 case VIRTCHNL_LINK_SPEED_10GB: 3698 speed = SPEED_10000; 3699 break; 3700 case VIRTCHNL_LINK_SPEED_20GB: 3701 speed = SPEED_20000; 3702 break; 3703 case VIRTCHNL_LINK_SPEED_25GB: 3704 speed = SPEED_25000; 3705 break; 3706 case VIRTCHNL_LINK_SPEED_40GB: 3707 speed = SPEED_40000; 3708 break; 3709 default: 3710 dev_err(&pf->pdev->dev, 3711 "Cannot detect link speed\n"); 3712 aq_ret = I40E_ERR_PARAM; 3713 goto err; 3714 } 3715 3716 /* parse data from the queue channel info */ 3717 vf->num_tc = tci->num_tc; 3718 for (i = 0; i < vf->num_tc; i++) { 3719 if (tci->list[i].max_tx_rate) { 3720 if (tci->list[i].max_tx_rate > speed) { 3721 dev_err(&pf->pdev->dev, 3722 "Invalid max tx rate %llu specified for VF %d.", 3723 tci->list[i].max_tx_rate, 3724 vf->vf_id); 3725 aq_ret = I40E_ERR_PARAM; 3726 goto err; 3727 } else { 3728 vf->ch[i].max_tx_rate = 3729 tci->list[i].max_tx_rate; 3730 } 3731 } 3732 vf->ch[i].num_qps = tci->list[i].count; 3733 } 3734 3735 /* set this flag only after making sure all inputs are sane */ 3736 vf->adq_enabled = true; 3737 /* num_req_queues is set when user changes number of queues via ethtool 3738 * and this causes issue for default VSI(which depends on this variable) 3739 * when ADq is enabled, hence reset it. 3740 */ 3741 vf->num_req_queues = 0; 3742 3743 /* reset the VF in order to allocate resources */ 3744 i40e_vc_notify_vf_reset(vf); 3745 i40e_reset_vf(vf, false); 3746 3747 return I40E_SUCCESS; 3748 3749 /* send the response to the VF */ 3750 err: 3751 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS, 3752 aq_ret); 3753 } 3754 3755 /** 3756 * i40e_vc_del_qch_msg 3757 * @vf: pointer to the VF info 3758 * @msg: pointer to the msg buffer 3759 **/ 3760 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) 3761 { 3762 struct i40e_pf *pf = vf->pf; 3763 i40e_status aq_ret = 0; 3764 3765 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3766 aq_ret = I40E_ERR_PARAM; 3767 goto err; 3768 } 3769 3770 if (vf->adq_enabled) { 3771 i40e_del_all_cloud_filters(vf); 3772 i40e_del_qch(vf); 3773 vf->adq_enabled = false; 3774 vf->num_tc = 0; 3775 dev_info(&pf->pdev->dev, 3776 "Deleting Queue Channels and cloud filters for ADq on VF %d\n", 3777 vf->vf_id); 3778 } else { 3779 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n", 3780 vf->vf_id); 3781 aq_ret = I40E_ERR_PARAM; 3782 } 3783 3784 /* reset the VF in order to allocate resources */ 3785 i40e_vc_notify_vf_reset(vf); 3786 i40e_reset_vf(vf, false); 3787 3788 return I40E_SUCCESS; 3789 3790 err: 3791 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS, 3792 aq_ret); 3793 } 3794 3795 /** 3796 * i40e_vc_process_vf_msg 3797 * @pf: pointer to the PF structure 3798 * @vf_id: source VF id 3799 * @v_opcode: operation code 3800 * @v_retval: unused return value code 3801 * @msg: pointer to the msg buffer 3802 * @msglen: msg length 3803 * 3804 * called from the common aeq/arq handler to 3805 * process request from VF 3806 **/ 3807 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, 3808 u32 __always_unused v_retval, u8 *msg, u16 msglen) 3809 { 3810 struct i40e_hw *hw = &pf->hw; 3811 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id; 3812 struct i40e_vf *vf; 3813 int ret; 3814 3815 pf->vf_aq_requests++; 3816 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs) 3817 return -EINVAL; 3818 vf = &(pf->vf[local_vf_id]); 3819 3820 /* Check if VF is disabled. */ 3821 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states)) 3822 return I40E_ERR_PARAM; 3823 3824 /* perform basic checks on the msg */ 3825 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); 3826 3827 if (ret) { 3828 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); 3829 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", 3830 local_vf_id, v_opcode, msglen); 3831 switch (ret) { 3832 case VIRTCHNL_STATUS_ERR_PARAM: 3833 return -EPERM; 3834 default: 3835 return -EINVAL; 3836 } 3837 } 3838 3839 switch (v_opcode) { 3840 case VIRTCHNL_OP_VERSION: 3841 ret = i40e_vc_get_version_msg(vf, msg); 3842 break; 3843 case VIRTCHNL_OP_GET_VF_RESOURCES: 3844 ret = i40e_vc_get_vf_resources_msg(vf, msg); 3845 i40e_vc_notify_vf_link_state(vf); 3846 break; 3847 case VIRTCHNL_OP_RESET_VF: 3848 i40e_vc_reset_vf_msg(vf); 3849 ret = 0; 3850 break; 3851 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 3852 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg); 3853 break; 3854 case VIRTCHNL_OP_CONFIG_VSI_QUEUES: 3855 ret = i40e_vc_config_queues_msg(vf, msg); 3856 break; 3857 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 3858 ret = i40e_vc_config_irq_map_msg(vf, msg); 3859 break; 3860 case VIRTCHNL_OP_ENABLE_QUEUES: 3861 ret = i40e_vc_enable_queues_msg(vf, msg); 3862 i40e_vc_notify_vf_link_state(vf); 3863 break; 3864 case VIRTCHNL_OP_DISABLE_QUEUES: 3865 ret = i40e_vc_disable_queues_msg(vf, msg); 3866 break; 3867 case VIRTCHNL_OP_ADD_ETH_ADDR: 3868 ret = i40e_vc_add_mac_addr_msg(vf, msg); 3869 break; 3870 case VIRTCHNL_OP_DEL_ETH_ADDR: 3871 ret = i40e_vc_del_mac_addr_msg(vf, msg); 3872 break; 3873 case VIRTCHNL_OP_ADD_VLAN: 3874 ret = i40e_vc_add_vlan_msg(vf, msg); 3875 break; 3876 case VIRTCHNL_OP_DEL_VLAN: 3877 ret = i40e_vc_remove_vlan_msg(vf, msg); 3878 break; 3879 case VIRTCHNL_OP_GET_STATS: 3880 ret = i40e_vc_get_stats_msg(vf, msg); 3881 break; 3882 case VIRTCHNL_OP_IWARP: 3883 ret = i40e_vc_iwarp_msg(vf, msg, msglen); 3884 break; 3885 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 3886 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true); 3887 break; 3888 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: 3889 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false); 3890 break; 3891 case VIRTCHNL_OP_CONFIG_RSS_KEY: 3892 ret = i40e_vc_config_rss_key(vf, msg); 3893 break; 3894 case VIRTCHNL_OP_CONFIG_RSS_LUT: 3895 ret = i40e_vc_config_rss_lut(vf, msg); 3896 break; 3897 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: 3898 ret = i40e_vc_get_rss_hena(vf, msg); 3899 break; 3900 case VIRTCHNL_OP_SET_RSS_HENA: 3901 ret = i40e_vc_set_rss_hena(vf, msg); 3902 break; 3903 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 3904 ret = i40e_vc_enable_vlan_stripping(vf, msg); 3905 break; 3906 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 3907 ret = i40e_vc_disable_vlan_stripping(vf, msg); 3908 break; 3909 case VIRTCHNL_OP_REQUEST_QUEUES: 3910 ret = i40e_vc_request_queues_msg(vf, msg); 3911 break; 3912 case VIRTCHNL_OP_ENABLE_CHANNELS: 3913 ret = i40e_vc_add_qch_msg(vf, msg); 3914 break; 3915 case VIRTCHNL_OP_DISABLE_CHANNELS: 3916 ret = i40e_vc_del_qch_msg(vf, msg); 3917 break; 3918 case VIRTCHNL_OP_ADD_CLOUD_FILTER: 3919 ret = i40e_vc_add_cloud_filter(vf, msg); 3920 break; 3921 case VIRTCHNL_OP_DEL_CLOUD_FILTER: 3922 ret = i40e_vc_del_cloud_filter(vf, msg); 3923 break; 3924 case VIRTCHNL_OP_UNKNOWN: 3925 default: 3926 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", 3927 v_opcode, local_vf_id); 3928 ret = i40e_vc_send_resp_to_vf(vf, v_opcode, 3929 I40E_ERR_NOT_IMPLEMENTED); 3930 break; 3931 } 3932 3933 return ret; 3934 } 3935 3936 /** 3937 * i40e_vc_process_vflr_event 3938 * @pf: pointer to the PF structure 3939 * 3940 * called from the vlfr irq handler to 3941 * free up VF resources and state variables 3942 **/ 3943 int i40e_vc_process_vflr_event(struct i40e_pf *pf) 3944 { 3945 struct i40e_hw *hw = &pf->hw; 3946 u32 reg, reg_idx, bit_idx; 3947 struct i40e_vf *vf; 3948 int vf_id; 3949 3950 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) 3951 return 0; 3952 3953 /* Re-enable the VFLR interrupt cause here, before looking for which 3954 * VF got reset. Otherwise, if another VF gets a reset while the 3955 * first one is being processed, that interrupt will be lost, and 3956 * that VF will be stuck in reset forever. 3957 */ 3958 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 3959 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; 3960 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 3961 i40e_flush(hw); 3962 3963 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state); 3964 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { 3965 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; 3966 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 3967 /* read GLGEN_VFLRSTAT register to find out the flr VFs */ 3968 vf = &pf->vf[vf_id]; 3969 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); 3970 if (reg & BIT(bit_idx)) 3971 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */ 3972 i40e_reset_vf(vf, true); 3973 } 3974 3975 return 0; 3976 } 3977 3978 /** 3979 * i40e_validate_vf 3980 * @pf: the physical function 3981 * @vf_id: VF identifier 3982 * 3983 * Check that the VF is enabled and the VSI exists. 3984 * 3985 * Returns 0 on success, negative on failure 3986 **/ 3987 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id) 3988 { 3989 struct i40e_vsi *vsi; 3990 struct i40e_vf *vf; 3991 int ret = 0; 3992 3993 if (vf_id >= pf->num_alloc_vfs) { 3994 dev_err(&pf->pdev->dev, 3995 "Invalid VF Identifier %d\n", vf_id); 3996 ret = -EINVAL; 3997 goto err_out; 3998 } 3999 vf = &pf->vf[vf_id]; 4000 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id); 4001 if (!vsi) 4002 ret = -EINVAL; 4003 err_out: 4004 return ret; 4005 } 4006 4007 /** 4008 * i40e_ndo_set_vf_mac 4009 * @netdev: network interface device structure 4010 * @vf_id: VF identifier 4011 * @mac: mac address 4012 * 4013 * program VF mac address 4014 **/ 4015 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 4016 { 4017 struct i40e_netdev_priv *np = netdev_priv(netdev); 4018 struct i40e_vsi *vsi = np->vsi; 4019 struct i40e_pf *pf = vsi->back; 4020 struct i40e_mac_filter *f; 4021 struct i40e_vf *vf; 4022 int ret = 0; 4023 struct hlist_node *h; 4024 int bkt; 4025 u8 i; 4026 4027 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4028 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4029 return -EAGAIN; 4030 } 4031 4032 /* validate the request */ 4033 ret = i40e_validate_vf(pf, vf_id); 4034 if (ret) 4035 goto error_param; 4036 4037 vf = &pf->vf[vf_id]; 4038 4039 /* When the VF is resetting wait until it is done. 4040 * It can take up to 200 milliseconds, 4041 * but wait for up to 300 milliseconds to be safe. 4042 * Acquire the VSI pointer only after the VF has been 4043 * properly initialized. 4044 */ 4045 for (i = 0; i < 15; i++) { 4046 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) 4047 break; 4048 msleep(20); 4049 } 4050 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 4051 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", 4052 vf_id); 4053 ret = -EAGAIN; 4054 goto error_param; 4055 } 4056 vsi = pf->vsi[vf->lan_vsi_idx]; 4057 4058 if (is_multicast_ether_addr(mac)) { 4059 dev_err(&pf->pdev->dev, 4060 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); 4061 ret = -EINVAL; 4062 goto error_param; 4063 } 4064 4065 /* Lock once because below invoked function add/del_filter requires 4066 * mac_filter_hash_lock to be held 4067 */ 4068 spin_lock_bh(&vsi->mac_filter_hash_lock); 4069 4070 /* delete the temporary mac address */ 4071 if (!is_zero_ether_addr(vf->default_lan_addr.addr)) 4072 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr); 4073 4074 /* Delete all the filters for this VSI - we're going to kill it 4075 * anyway. 4076 */ 4077 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) 4078 __i40e_del_filter(vsi, f); 4079 4080 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4081 4082 /* program mac filter */ 4083 if (i40e_sync_vsi_filters(vsi)) { 4084 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 4085 ret = -EIO; 4086 goto error_param; 4087 } 4088 ether_addr_copy(vf->default_lan_addr.addr, mac); 4089 4090 if (is_zero_ether_addr(mac)) { 4091 vf->pf_set_mac = false; 4092 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id); 4093 } else { 4094 vf->pf_set_mac = true; 4095 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", 4096 mac, vf_id); 4097 } 4098 4099 /* Force the VF interface down so it has to bring up with new MAC 4100 * address 4101 */ 4102 i40e_vc_disable_vf(vf); 4103 dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n"); 4104 4105 error_param: 4106 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4107 return ret; 4108 } 4109 4110 /** 4111 * i40e_vsi_has_vlans - True if VSI has configured VLANs 4112 * @vsi: pointer to the vsi 4113 * 4114 * Check if a VSI has configured any VLANs. False if we have a port VLAN or if 4115 * we have no configured VLANs. Do not call while holding the 4116 * mac_filter_hash_lock. 4117 */ 4118 static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi) 4119 { 4120 bool have_vlans; 4121 4122 /* If we have a port VLAN, then the VSI cannot have any VLANs 4123 * configured, as all MAC/VLAN filters will be assigned to the PVID. 4124 */ 4125 if (vsi->info.pvid) 4126 return false; 4127 4128 /* Since we don't have a PVID, we know that if the device is in VLAN 4129 * mode it must be because of a VLAN filter configured on this VSI. 4130 */ 4131 spin_lock_bh(&vsi->mac_filter_hash_lock); 4132 have_vlans = i40e_is_vsi_in_vlan(vsi); 4133 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4134 4135 return have_vlans; 4136 } 4137 4138 /** 4139 * i40e_ndo_set_vf_port_vlan 4140 * @netdev: network interface device structure 4141 * @vf_id: VF identifier 4142 * @vlan_id: mac address 4143 * @qos: priority setting 4144 * @vlan_proto: vlan protocol 4145 * 4146 * program VF vlan id and/or qos 4147 **/ 4148 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, 4149 u16 vlan_id, u8 qos, __be16 vlan_proto) 4150 { 4151 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT); 4152 struct i40e_netdev_priv *np = netdev_priv(netdev); 4153 bool allmulti = false, alluni = false; 4154 struct i40e_pf *pf = np->vsi->back; 4155 struct i40e_vsi *vsi; 4156 struct i40e_vf *vf; 4157 int ret = 0; 4158 4159 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4160 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4161 return -EAGAIN; 4162 } 4163 4164 /* validate the request */ 4165 ret = i40e_validate_vf(pf, vf_id); 4166 if (ret) 4167 goto error_pvid; 4168 4169 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) { 4170 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); 4171 ret = -EINVAL; 4172 goto error_pvid; 4173 } 4174 4175 if (vlan_proto != htons(ETH_P_8021Q)) { 4176 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n"); 4177 ret = -EPROTONOSUPPORT; 4178 goto error_pvid; 4179 } 4180 4181 vf = &pf->vf[vf_id]; 4182 vsi = pf->vsi[vf->lan_vsi_idx]; 4183 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 4184 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", 4185 vf_id); 4186 ret = -EAGAIN; 4187 goto error_pvid; 4188 } 4189 4190 if (le16_to_cpu(vsi->info.pvid) == vlanprio) 4191 /* duplicate request, so just return success */ 4192 goto error_pvid; 4193 4194 if (i40e_vsi_has_vlans(vsi)) { 4195 dev_err(&pf->pdev->dev, 4196 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", 4197 vf_id); 4198 /* Administrator Error - knock the VF offline until he does 4199 * the right thing by reconfiguring his network correctly 4200 * and then reloading the VF driver. 4201 */ 4202 i40e_vc_disable_vf(vf); 4203 /* During reset the VF got a new VSI, so refresh the pointer. */ 4204 vsi = pf->vsi[vf->lan_vsi_idx]; 4205 } 4206 4207 /* Locked once because multiple functions below iterate list */ 4208 spin_lock_bh(&vsi->mac_filter_hash_lock); 4209 4210 /* Check for condition where there was already a port VLAN ID 4211 * filter set and now it is being deleted by setting it to zero. 4212 * Additionally check for the condition where there was a port 4213 * VLAN but now there is a new and different port VLAN being set. 4214 * Before deleting all the old VLAN filters we must add new ones 4215 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our 4216 * MAC addresses deleted. 4217 */ 4218 if ((!(vlan_id || qos) || 4219 vlanprio != le16_to_cpu(vsi->info.pvid)) && 4220 vsi->info.pvid) { 4221 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY); 4222 if (ret) { 4223 dev_info(&vsi->back->pdev->dev, 4224 "add VF VLAN failed, ret=%d aq_err=%d\n", ret, 4225 vsi->back->hw.aq.asq_last_status); 4226 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4227 goto error_pvid; 4228 } 4229 } 4230 4231 if (vsi->info.pvid) { 4232 /* remove all filters on the old VLAN */ 4233 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) & 4234 VLAN_VID_MASK)); 4235 } 4236 4237 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4238 4239 /* disable promisc modes in case they were enabled */ 4240 ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, 4241 allmulti, alluni); 4242 if (ret) { 4243 dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n"); 4244 goto error_pvid; 4245 } 4246 4247 if (vlan_id || qos) 4248 ret = i40e_vsi_add_pvid(vsi, vlanprio); 4249 else 4250 i40e_vsi_remove_pvid(vsi); 4251 spin_lock_bh(&vsi->mac_filter_hash_lock); 4252 4253 if (vlan_id) { 4254 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", 4255 vlan_id, qos, vf_id); 4256 4257 /* add new VLAN filter for each MAC */ 4258 ret = i40e_add_vlan_all_mac(vsi, vlan_id); 4259 if (ret) { 4260 dev_info(&vsi->back->pdev->dev, 4261 "add VF VLAN failed, ret=%d aq_err=%d\n", ret, 4262 vsi->back->hw.aq.asq_last_status); 4263 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4264 goto error_pvid; 4265 } 4266 4267 /* remove the previously added non-VLAN MAC filters */ 4268 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY); 4269 } 4270 4271 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4272 4273 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 4274 alluni = true; 4275 4276 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 4277 allmulti = true; 4278 4279 /* Schedule the worker thread to take care of applying changes */ 4280 i40e_service_event_schedule(vsi->back); 4281 4282 if (ret) { 4283 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n"); 4284 goto error_pvid; 4285 } 4286 4287 /* The Port VLAN needs to be saved across resets the same as the 4288 * default LAN MAC address. 4289 */ 4290 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); 4291 4292 ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni); 4293 if (ret) { 4294 dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n"); 4295 goto error_pvid; 4296 } 4297 4298 ret = 0; 4299 4300 error_pvid: 4301 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4302 return ret; 4303 } 4304 4305 /** 4306 * i40e_ndo_set_vf_bw 4307 * @netdev: network interface device structure 4308 * @vf_id: VF identifier 4309 * @min_tx_rate: Minimum Tx rate 4310 * @max_tx_rate: Maximum Tx rate 4311 * 4312 * configure VF Tx rate 4313 **/ 4314 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, 4315 int max_tx_rate) 4316 { 4317 struct i40e_netdev_priv *np = netdev_priv(netdev); 4318 struct i40e_pf *pf = np->vsi->back; 4319 struct i40e_vsi *vsi; 4320 struct i40e_vf *vf; 4321 int ret = 0; 4322 4323 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4324 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4325 return -EAGAIN; 4326 } 4327 4328 /* validate the request */ 4329 ret = i40e_validate_vf(pf, vf_id); 4330 if (ret) 4331 goto error; 4332 4333 if (min_tx_rate) { 4334 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", 4335 min_tx_rate, vf_id); 4336 ret = -EINVAL; 4337 goto error; 4338 } 4339 4340 vf = &pf->vf[vf_id]; 4341 vsi = pf->vsi[vf->lan_vsi_idx]; 4342 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 4343 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", 4344 vf_id); 4345 ret = -EAGAIN; 4346 goto error; 4347 } 4348 4349 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); 4350 if (ret) 4351 goto error; 4352 4353 vf->tx_rate = max_tx_rate; 4354 error: 4355 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4356 return ret; 4357 } 4358 4359 /** 4360 * i40e_ndo_get_vf_config 4361 * @netdev: network interface device structure 4362 * @vf_id: VF identifier 4363 * @ivi: VF configuration structure 4364 * 4365 * return VF configuration 4366 **/ 4367 int i40e_ndo_get_vf_config(struct net_device *netdev, 4368 int vf_id, struct ifla_vf_info *ivi) 4369 { 4370 struct i40e_netdev_priv *np = netdev_priv(netdev); 4371 struct i40e_vsi *vsi = np->vsi; 4372 struct i40e_pf *pf = vsi->back; 4373 struct i40e_vf *vf; 4374 int ret = 0; 4375 4376 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4377 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4378 return -EAGAIN; 4379 } 4380 4381 /* validate the request */ 4382 ret = i40e_validate_vf(pf, vf_id); 4383 if (ret) 4384 goto error_param; 4385 4386 vf = &pf->vf[vf_id]; 4387 /* first vsi is always the LAN vsi */ 4388 vsi = pf->vsi[vf->lan_vsi_idx]; 4389 if (!vsi) { 4390 ret = -ENOENT; 4391 goto error_param; 4392 } 4393 4394 ivi->vf = vf_id; 4395 4396 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr); 4397 4398 ivi->max_tx_rate = vf->tx_rate; 4399 ivi->min_tx_rate = 0; 4400 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK; 4401 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >> 4402 I40E_VLAN_PRIORITY_SHIFT; 4403 if (vf->link_forced == false) 4404 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; 4405 else if (vf->link_up == true) 4406 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 4407 else 4408 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 4409 ivi->spoofchk = vf->spoofchk; 4410 ivi->trusted = vf->trusted; 4411 ret = 0; 4412 4413 error_param: 4414 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4415 return ret; 4416 } 4417 4418 /** 4419 * i40e_ndo_set_vf_link_state 4420 * @netdev: network interface device structure 4421 * @vf_id: VF identifier 4422 * @link: required link state 4423 * 4424 * Set the link state of a specified VF, regardless of physical link state 4425 **/ 4426 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) 4427 { 4428 struct i40e_netdev_priv *np = netdev_priv(netdev); 4429 struct i40e_pf *pf = np->vsi->back; 4430 struct i40e_link_status *ls = &pf->hw.phy.link_info; 4431 struct virtchnl_pf_event pfe; 4432 struct i40e_hw *hw = &pf->hw; 4433 struct i40e_vf *vf; 4434 int abs_vf_id; 4435 int ret = 0; 4436 4437 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4438 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4439 return -EAGAIN; 4440 } 4441 4442 /* validate the request */ 4443 if (vf_id >= pf->num_alloc_vfs) { 4444 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 4445 ret = -EINVAL; 4446 goto error_out; 4447 } 4448 4449 vf = &pf->vf[vf_id]; 4450 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 4451 4452 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; 4453 pfe.severity = PF_EVENT_SEVERITY_INFO; 4454 4455 switch (link) { 4456 case IFLA_VF_LINK_STATE_AUTO: 4457 vf->link_forced = false; 4458 pfe.event_data.link_event.link_status = 4459 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; 4460 pfe.event_data.link_event.link_speed = 4461 (enum virtchnl_link_speed) 4462 pf->hw.phy.link_info.link_speed; 4463 break; 4464 case IFLA_VF_LINK_STATE_ENABLE: 4465 vf->link_forced = true; 4466 vf->link_up = true; 4467 pfe.event_data.link_event.link_status = true; 4468 pfe.event_data.link_event.link_speed = i40e_virtchnl_link_speed(ls->link_speed); 4469 break; 4470 case IFLA_VF_LINK_STATE_DISABLE: 4471 vf->link_forced = true; 4472 vf->link_up = false; 4473 pfe.event_data.link_event.link_status = false; 4474 pfe.event_data.link_event.link_speed = 0; 4475 break; 4476 default: 4477 ret = -EINVAL; 4478 goto error_out; 4479 } 4480 /* Notify the VF of its new link state */ 4481 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 4482 0, (u8 *)&pfe, sizeof(pfe), NULL); 4483 4484 error_out: 4485 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4486 return ret; 4487 } 4488 4489 /** 4490 * i40e_ndo_set_vf_spoofchk 4491 * @netdev: network interface device structure 4492 * @vf_id: VF identifier 4493 * @enable: flag to enable or disable feature 4494 * 4495 * Enable or disable VF spoof checking 4496 **/ 4497 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) 4498 { 4499 struct i40e_netdev_priv *np = netdev_priv(netdev); 4500 struct i40e_vsi *vsi = np->vsi; 4501 struct i40e_pf *pf = vsi->back; 4502 struct i40e_vsi_context ctxt; 4503 struct i40e_hw *hw = &pf->hw; 4504 struct i40e_vf *vf; 4505 int ret = 0; 4506 4507 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4508 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4509 return -EAGAIN; 4510 } 4511 4512 /* validate the request */ 4513 if (vf_id >= pf->num_alloc_vfs) { 4514 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 4515 ret = -EINVAL; 4516 goto out; 4517 } 4518 4519 vf = &(pf->vf[vf_id]); 4520 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 4521 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", 4522 vf_id); 4523 ret = -EAGAIN; 4524 goto out; 4525 } 4526 4527 if (enable == vf->spoofchk) 4528 goto out; 4529 4530 vf->spoofchk = enable; 4531 memset(&ctxt, 0, sizeof(ctxt)); 4532 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid; 4533 ctxt.pf_num = pf->hw.pf_id; 4534 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 4535 if (enable) 4536 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | 4537 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); 4538 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 4539 if (ret) { 4540 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", 4541 ret); 4542 ret = -EIO; 4543 } 4544 out: 4545 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4546 return ret; 4547 } 4548 4549 /** 4550 * i40e_ndo_set_vf_trust 4551 * @netdev: network interface device structure of the pf 4552 * @vf_id: VF identifier 4553 * @setting: trust setting 4554 * 4555 * Enable or disable VF trust setting 4556 **/ 4557 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) 4558 { 4559 struct i40e_netdev_priv *np = netdev_priv(netdev); 4560 struct i40e_pf *pf = np->vsi->back; 4561 struct i40e_vf *vf; 4562 int ret = 0; 4563 4564 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4565 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4566 return -EAGAIN; 4567 } 4568 4569 /* validate the request */ 4570 if (vf_id >= pf->num_alloc_vfs) { 4571 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 4572 ret = -EINVAL; 4573 goto out; 4574 } 4575 4576 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 4577 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n"); 4578 ret = -EINVAL; 4579 goto out; 4580 } 4581 4582 vf = &pf->vf[vf_id]; 4583 4584 if (setting == vf->trusted) 4585 goto out; 4586 4587 vf->trusted = setting; 4588 i40e_vc_disable_vf(vf); 4589 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", 4590 vf_id, setting ? "" : "un"); 4591 4592 if (vf->adq_enabled) { 4593 if (!vf->trusted) { 4594 dev_info(&pf->pdev->dev, 4595 "VF %u no longer Trusted, deleting all cloud filters\n", 4596 vf_id); 4597 i40e_del_all_cloud_filters(vf); 4598 } 4599 } 4600 4601 out: 4602 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4603 return ret; 4604 } 4605 4606 /** 4607 * i40e_get_vf_stats - populate some stats for the VF 4608 * @netdev: the netdev of the PF 4609 * @vf_id: the host OS identifier (0-127) 4610 * @vf_stats: pointer to the OS memory to be initialized 4611 */ 4612 int i40e_get_vf_stats(struct net_device *netdev, int vf_id, 4613 struct ifla_vf_stats *vf_stats) 4614 { 4615 struct i40e_netdev_priv *np = netdev_priv(netdev); 4616 struct i40e_pf *pf = np->vsi->back; 4617 struct i40e_eth_stats *stats; 4618 struct i40e_vsi *vsi; 4619 struct i40e_vf *vf; 4620 4621 /* validate the request */ 4622 if (i40e_validate_vf(pf, vf_id)) 4623 return -EINVAL; 4624 4625 vf = &pf->vf[vf_id]; 4626 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 4627 dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id); 4628 return -EBUSY; 4629 } 4630 4631 vsi = pf->vsi[vf->lan_vsi_idx]; 4632 if (!vsi) 4633 return -EINVAL; 4634 4635 i40e_update_eth_stats(vsi); 4636 stats = &vsi->eth_stats; 4637 4638 memset(vf_stats, 0, sizeof(*vf_stats)); 4639 4640 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast + 4641 stats->rx_multicast; 4642 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast + 4643 stats->tx_multicast; 4644 vf_stats->rx_bytes = stats->rx_bytes; 4645 vf_stats->tx_bytes = stats->tx_bytes; 4646 vf_stats->broadcast = stats->rx_broadcast; 4647 vf_stats->multicast = stats->rx_multicast; 4648 vf_stats->rx_dropped = stats->rx_discards; 4649 vf_stats->tx_dropped = stats->tx_discards; 4650 4651 return 0; 4652 } 4653