// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;
	if (vf->link_forced) {
		pfe.event_data.link_event.link_status = vf->link_up;
		pfe.event_data.link_event.link_speed =
			(vf->link_up ?
			 VIRTCHNL_LINK_SPEED_40GB : 0);
	} else {
		pfe.event_data.link_event.link_status =
			ls->link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			i40e_virtchnl_link_speed(ls->link_speed);
	}
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}
/***********************misc routines*****************************/

/**
 * i40e_vc_disable_vf
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset.
 **/
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
	int i;

	i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset is initiated after this
	 * function is called. However, we do not want to wait forever, so
	 * we'll give a reasonable time and print a message if we failed to
	 * ensure a reset.
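	 * Each retry sleeps 10-20 ms, so 20 attempts bound the wait to
	 * roughly 200-400 ms before we give up and warn.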
	 */
	for (i = 0; i < 20; i++) {
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	dev_warn(&vf->pf->pdev->dev,
		 "Failed to initiate reset for VF %d after 200 milliseconds\n",
		 vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all of its queues (there can be
		 * 1 to 16) as its own, they may actually belong to different
		 * VSIs (up to 4). We need to find which queues belong to
		 * which VSI.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find the right queue id which is relative to a
			 * given VSI.
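			 * e.g. with 4 queue pairs in TC 0, VF queue 6 becomes
			 * queue 2 of the next TC's VSI.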
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the vf is running in polling mode and using interrupt zero,
	 * need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}

/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
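 *
 * Frees the cached qvlist_info and, for each vector that had a CEQ
 * attached, restores the vector's linked-list head (LNKLSTN) to the
 * queue that followed the CEQ.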
 *
 **/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}

/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf, size;

	size = sizeof(struct virtchnl_iwarp_qvlist_info) +
	       (sizeof(struct virtchnl_iwarp_qv_info) *
	       (qvlist_info->num_vectors - 1));
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info)
		return -ENOMEM;

	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, v_idx))
			goto err;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
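		 * The result is a singly linked list threaded through the
		 * FIRSTQ/NEXTQ register fields, with the new CEQ at the head.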
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
			      I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
	return -EINVAL;
}

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
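	/* read back to flush the posted register writes */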
	i40e_flush(hw);

error_context:
	return ret;
}

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.
		 * Check if there is a port VLAN and restore the VSI
		 * configuration if needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* store the VSI index and id for ADq; the MAC filter is not applied
	 * for these additional VSIs
	 */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the first part, VSILAN_QTABLE, mapping PF queues
 * to the VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}

/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part, VPLAN_QTABLE, and completes
 * the VF mappings.
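 * Each VF-relative queue is translated to its backing PF queue and
 * written to one VPLAN_QTABLE entry.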
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
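	 * Only the surplus above I40E_DEFAULT_QUEUES_PER_VF is returned to
	 * pf->queues_left.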
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
		vf->num_mac = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again and only clear their values in
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						       (vf->vf_id))
						      + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs. If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * HW.
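	 * Mirrors the accounting in i40e_free_vf_res(): only the surplus
	 * above I40E_DEFAULT_QUEUES_PER_VF is charged to pf->queues_left.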
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi);

/**
 * i40e_config_vf_promiscuous_mode
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
 *
 * Called from the VF to configure the promiscuous mode of
 * VF vsis and from the VF reset path to reset promiscuous mode.
 **/
static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
						   u16 vsi_id,
						   bool allmulti,
						   bool alluni)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_mac_filter *f;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int bkt;

	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
		return I40E_ERR_PARAM;

	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
	    (allmulti || alluni)) {
		dev_err(&pf->pdev->dev,
			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);
		/* Lie to the VF on purpose. */
		return 0;
	}

	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
							    allmulti,
							    vf->port_vlan_id,
							    NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			return aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
							    alluni,
							    vf->port_vlan_id,
							    NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
		}
		return aq_ret;
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
								    vsi->seid,
								    allmulti,
								    f->vlan,
								    NULL);
			if (aq_ret) {
				int aq_err = pf->hw.aq.asq_last_status;

				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
			}

			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
								    vsi->seid,
								    alluni,
								    f->vlan,
								    NULL);
			if (aq_ret) {
				int aq_err = pf->hw.aq.asq_last_status;

				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
			}
		}
		return aq_ret;
	}
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti,
						       NULL);
	if (aq_ret) {
		int aq_err = pf->hw.aq.asq_last_status;

		dev_err(&pf->pdev->dev,
			"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
			vf->vf_id,
			i40e_stat_str(&pf->hw, aq_ret),
			i40e_aq_str(&pf->hw, aq_err));
		return aq_ret;
	}

	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, alluni,
						     NULL, true);
	if (aq_ret) {
		int aq_err = pf->hw.aq.asq_last_status;

		dev_err(&pf->pdev->dev,
			"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
			vf->vf_id,
			i40e_stat_str(&pf->hw, aq_ret),
			i40e_aq_str(&pf->hw, aq_err));
	}

	return aq_ret;
}

/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;

	/* warn the VF */
	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give some time to finish to any VF config
	 * functions that may still be running at this point.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}

/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* disable promisc modes in case they were enabled */
	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is reset, false otherwise.
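 * Returns false without doing anything if another reset is already in
 * progress (__I40E_VF_DISABLE is set).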
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	/* If the VFs have been disabled, this means something else is
	 * resetting the VF, so we shouldn't continue.
	 */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF that
		 * failed the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
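			 * v persists across outer-loop iterations, so the
			 * total wait is bounded by ten sleep intervals
			 * regardless of the VF count.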
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* HW may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
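		 * The GLGEN_VFLRSTAT bits are write-1-to-clear.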
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs.
 * Called when the user updates the number of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
		}
		ret = i40e_pci_sriov_enable(pdev, num_vfs);
		goto sriov_configure_out;
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		ret = -EINVAL;
		goto sriov_configure_out;
	}
sriov_configure_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received. */
		vf->num_invalid_msgs = 0;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}

/**
 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
 * @vf: pointer to VF structure
 **/
static void i40e_del_qch(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* first element in the array belongs to primary VF VSI and we shouldn't
	 * delete it.
	 * We should, however, delete the rest of the VSIs created.
	 */
	for (i = 1; i < vf->num_tc; i++) {
		if (vf->ch[i].vsi_idx) {
			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
			vf->ch[i].vsi_idx = 0;
			vf->ch[i].vsi_id = 0;
		}
	}
}

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	int len = 0;
	int ret;

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct virtchnl_vf_resource) +
	       sizeof(struct virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	} else {
		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				vf->vf_id);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; 1926 vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; 1927 vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; 1928 1929 if (vf->lan_vsi_idx) { 1930 vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; 1931 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; 1932 vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs; 1933 /* VFs only use TC 0 */ 1934 vfres->vsi_res[0].qset_handle 1935 = le16_to_cpu(vsi->info.qs_handle[0]); 1936 ether_addr_copy(vfres->vsi_res[0].default_mac_addr, 1937 vf->default_lan_addr.addr); 1938 } 1939 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 1940 1941 err: 1942 /* send the response back to the VF */ 1943 ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, 1944 aq_ret, (u8 *)vfres, len); 1945 1946 kfree(vfres); 1947 return ret; 1948 } 1949 1950 /** 1951 * i40e_vc_reset_vf_msg 1952 * @vf: pointer to the VF info 1953 * 1954 * called from the VF to reset itself, 1955 * unlike other virtchnl messages, PF driver 1956 * doesn't send the response back to the VF 1957 **/ 1958 static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) 1959 { 1960 if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 1961 i40e_reset_vf(vf, false); 1962 } 1963 1964 /** 1965 * i40e_getnum_vf_vsi_vlan_filters 1966 * @vsi: pointer to the vsi 1967 * 1968 * called to get the number of VLANs offloaded on this VF 1969 **/ 1970 static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) 1971 { 1972 struct i40e_mac_filter *f; 1973 int num_vlans = 0, bkt; 1974 1975 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { 1976 if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) 1977 num_vlans++; 1978 } 1979 1980 return num_vlans; 1981 } 1982 1983 /** 1984 * i40e_vc_config_promiscuous_mode_msg 1985 * @vf: pointer to the VF info 1986 * @msg: pointer to the msg buffer 1987 * 1988 * called from the VF to configure the promiscuous mode of 1989 * VF vsis 1990 **/ 1991 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) 1992 { 1993 struct virtchnl_promisc_info *info = 1994 (struct virtchnl_promisc_info *)msg; 1995 struct i40e_pf *pf = vf->pf; 1996 i40e_status aq_ret = 0; 1997 bool allmulti = false; 1998 bool alluni = false; 1999 2000 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 2001 return I40E_ERR_PARAM; 2002 2003 /* Multicast promiscuous handling*/ 2004 if (info->flags & FLAG_VF_MULTICAST_PROMISC) 2005 allmulti = true; 2006 2007 if (info->flags & FLAG_VF_UNICAST_PROMISC) 2008 alluni = true; 2009 aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti, 2010 alluni); 2011 if (!aq_ret) { 2012 if (allmulti) { 2013 dev_info(&pf->pdev->dev, 2014 "VF %d successfully set multicast promiscuous mode\n", 2015 vf->vf_id); 2016 set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); 2017 } else { 2018 dev_info(&pf->pdev->dev, 2019 "VF %d successfully unset multicast promiscuous mode\n", 2020 vf->vf_id); 2021 clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); 2022 } 2023 if (alluni) { 2024 dev_info(&pf->pdev->dev, 2025 "VF %d successfully set unicast promiscuous mode\n", 2026 vf->vf_id); 2027 set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); 2028 } else { 2029 dev_info(&pf->pdev->dev, 2030 "VF %d successfully unset unicast promiscuous mode\n", 2031 vf->vf_id); 2032 clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); 2033 } 2034 } 2035 2036 /* send the response to the VF */ 2037 return i40e_vc_send_resp_to_vf(vf, 2038 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 2039 aq_ret); 2040 } 2041 2042 /** 2043 * i40e_vc_config_queues_msg 2044 * 
@vf: pointer to the VF info 2045 * @msg: pointer to the msg buffer 2046 * 2047 * called from the VF to configure the rx/tx 2048 * queues 2049 **/ 2050 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) 2051 { 2052 struct virtchnl_vsi_queue_config_info *qci = 2053 (struct virtchnl_vsi_queue_config_info *)msg; 2054 struct virtchnl_queue_pair_info *qpi; 2055 struct i40e_pf *pf = vf->pf; 2056 u16 vsi_id, vsi_queue_id = 0; 2057 i40e_status aq_ret = 0; 2058 int i, j = 0, idx = 0; 2059 2060 vsi_id = qci->vsi_id; 2061 2062 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2063 aq_ret = I40E_ERR_PARAM; 2064 goto error_param; 2065 } 2066 2067 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2068 aq_ret = I40E_ERR_PARAM; 2069 goto error_param; 2070 } 2071 2072 if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) { 2073 aq_ret = I40E_ERR_PARAM; 2074 goto error_param; 2075 } 2076 2077 for (i = 0; i < qci->num_queue_pairs; i++) { 2078 qpi = &qci->qpair[i]; 2079 2080 if (!vf->adq_enabled) { 2081 vsi_queue_id = qpi->txq.queue_id; 2082 2083 if (qpi->txq.vsi_id != qci->vsi_id || 2084 qpi->rxq.vsi_id != qci->vsi_id || 2085 qpi->rxq.queue_id != vsi_queue_id) { 2086 aq_ret = I40E_ERR_PARAM; 2087 goto error_param; 2088 } 2089 } 2090 2091 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) { 2092 aq_ret = I40E_ERR_PARAM; 2093 goto error_param; 2094 } 2095 2096 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id, 2097 &qpi->rxq) || 2098 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id, 2099 &qpi->txq)) { 2100 aq_ret = I40E_ERR_PARAM; 2101 goto error_param; 2102 } 2103 2104 /* For ADq there can be up to 4 VSIs with max 4 queues each. 2105 * VF does not know about these additional VSIs and all 2106 * it cares is about its own queues. PF configures these queues 2107 * to its appropriate VSIs based on TC mapping 2108 **/ 2109 if (vf->adq_enabled) { 2110 if (j == (vf->ch[idx].num_qps - 1)) { 2111 idx++; 2112 j = 0; /* resetting the queue count */ 2113 vsi_queue_id = 0; 2114 } else { 2115 j++; 2116 vsi_queue_id++; 2117 } 2118 vsi_id = vf->ch[idx].vsi_id; 2119 } 2120 } 2121 /* set vsi num_queue_pairs in use to num configured by VF */ 2122 if (!vf->adq_enabled) { 2123 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = 2124 qci->num_queue_pairs; 2125 } else { 2126 for (i = 0; i < vf->num_tc; i++) 2127 pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs = 2128 vf->ch[i].num_qps; 2129 } 2130 2131 error_param: 2132 /* send the response to the VF */ 2133 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, 2134 aq_ret); 2135 } 2136 2137 /** 2138 * i40e_validate_queue_map 2139 * @vsi_id: vsi id 2140 * @queuemap: Tx or Rx queue map 2141 * 2142 * check if Tx or Rx queue map is valid 2143 **/ 2144 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id, 2145 unsigned long queuemap) 2146 { 2147 u16 vsi_queue_id, queue_id; 2148 2149 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) { 2150 if (vf->adq_enabled) { 2151 vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id; 2152 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF); 2153 } else { 2154 queue_id = vsi_queue_id; 2155 } 2156 2157 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) 2158 return -EINVAL; 2159 } 2160 2161 return 0; 2162 } 2163 2164 /** 2165 * i40e_vc_config_irq_map_msg 2166 * @vf: pointer to the VF info 2167 * @msg: pointer to the msg buffer 2168 * 2169 * called from the VF to configure the irq to 2170 * queue map 2171 **/ 2172 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) 2173 { 2174 struct 
virtchnl_irq_map_info *irqmap_info = 2175 (struct virtchnl_irq_map_info *)msg; 2176 struct virtchnl_vector_map *map; 2177 u16 vsi_id, vector_id; 2178 i40e_status aq_ret = 0; 2179 int i; 2180 2181 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2182 aq_ret = I40E_ERR_PARAM; 2183 goto error_param; 2184 } 2185 2186 for (i = 0; i < irqmap_info->num_vectors; i++) { 2187 map = &irqmap_info->vecmap[i]; 2188 vector_id = map->vector_id; 2189 vsi_id = map->vsi_id; 2190 /* validate msg params */ 2191 if (!i40e_vc_isvalid_vector_id(vf, vector_id) || 2192 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2193 aq_ret = I40E_ERR_PARAM; 2194 goto error_param; 2195 } 2196 2197 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) { 2198 aq_ret = I40E_ERR_PARAM; 2199 goto error_param; 2200 } 2201 2202 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) { 2203 aq_ret = I40E_ERR_PARAM; 2204 goto error_param; 2205 } 2206 2207 i40e_config_irq_link_list(vf, vsi_id, map); 2208 } 2209 error_param: 2210 /* send the response to the VF */ 2211 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, 2212 aq_ret); 2213 } 2214 2215 /** 2216 * i40e_ctrl_vf_tx_rings 2217 * @vsi: the SRIOV VSI being configured 2218 * @q_map: bit map of the queues to be enabled 2219 * @enable: start or stop the queue 2220 **/ 2221 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map, 2222 bool enable) 2223 { 2224 struct i40e_pf *pf = vsi->back; 2225 int ret = 0; 2226 u16 q_id; 2227 2228 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { 2229 ret = i40e_control_wait_tx_q(vsi->seid, pf, 2230 vsi->base_queue + q_id, 2231 false /*is xdp*/, enable); 2232 if (ret) 2233 break; 2234 } 2235 return ret; 2236 } 2237 2238 /** 2239 * i40e_ctrl_vf_rx_rings 2240 * @vsi: the SRIOV VSI being configured 2241 * @q_map: bit map of the queues to be enabled 2242 * @enable: start or stop the queue 2243 **/ 2244 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map, 2245 bool enable) 2246 { 2247 struct i40e_pf *pf = vsi->back; 2248 int ret = 0; 2249 u16 q_id; 2250 2251 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { 2252 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id, 2253 enable); 2254 if (ret) 2255 break; 2256 } 2257 return ret; 2258 } 2259 2260 /** 2261 * i40e_vc_enable_queues_msg 2262 * @vf: pointer to the VF info 2263 * @msg: pointer to the msg buffer 2264 * 2265 * called from the VF to enable all or specific queue(s) 2266 **/ 2267 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) 2268 { 2269 struct virtchnl_queue_select *vqs = 2270 (struct virtchnl_queue_select *)msg; 2271 struct i40e_pf *pf = vf->pf; 2272 u16 vsi_id = vqs->vsi_id; 2273 i40e_status aq_ret = 0; 2274 int i; 2275 2276 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2277 aq_ret = I40E_ERR_PARAM; 2278 goto error_param; 2279 } 2280 2281 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2282 aq_ret = I40E_ERR_PARAM; 2283 goto error_param; 2284 } 2285 2286 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { 2287 aq_ret = I40E_ERR_PARAM; 2288 goto error_param; 2289 } 2290 2291 /* Use the queue bit map sent by the VF */ 2292 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2293 true)) { 2294 aq_ret = I40E_ERR_TIMEOUT; 2295 goto error_param; 2296 } 2297 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, 2298 true)) { 2299 aq_ret = I40E_ERR_TIMEOUT; 2300 goto error_param; 2301 } 2302 2303 /* need to start the rings for additional ADq VSI's as well */ 2304 if (vf->adq_enabled) { 2305 /* 
zero belongs to LAN VSI */ 2306 for (i = 1; i < vf->num_tc; i++) { 2307 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx])) 2308 aq_ret = I40E_ERR_TIMEOUT; 2309 } 2310 } 2311 2312 error_param: 2313 /* send the response to the VF */ 2314 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, 2315 aq_ret); 2316 } 2317 2318 /** 2319 * i40e_vc_disable_queues_msg 2320 * @vf: pointer to the VF info 2321 * @msg: pointer to the msg buffer 2322 * 2323 * called from the VF to disable all or specific 2324 * queue(s) 2325 **/ 2326 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) 2327 { 2328 struct virtchnl_queue_select *vqs = 2329 (struct virtchnl_queue_select *)msg; 2330 struct i40e_pf *pf = vf->pf; 2331 i40e_status aq_ret = 0; 2332 2333 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2334 aq_ret = I40E_ERR_PARAM; 2335 goto error_param; 2336 } 2337 2338 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2339 aq_ret = I40E_ERR_PARAM; 2340 goto error_param; 2341 } 2342 2343 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { 2344 aq_ret = I40E_ERR_PARAM; 2345 goto error_param; 2346 } 2347 2348 /* Use the queue bit map sent by the VF */ 2349 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, 2350 false)) { 2351 aq_ret = I40E_ERR_TIMEOUT; 2352 goto error_param; 2353 } 2354 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2355 false)) { 2356 aq_ret = I40E_ERR_TIMEOUT; 2357 goto error_param; 2358 } 2359 error_param: 2360 /* send the response to the VF */ 2361 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, 2362 aq_ret); 2363 } 2364 2365 /** 2366 * i40e_vc_request_queues_msg 2367 * @vf: pointer to the VF info 2368 * @msg: pointer to the msg buffer 2369 * 2370 * VFs get a default number of queues but can use this message to request a 2371 * different number. If the request is successful, PF will reset the VF and 2372 * return 0. If unsuccessful, PF will send message informing VF of number of 2373 * available queues and return result of sending VF a message. 2374 **/ 2375 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) 2376 { 2377 struct virtchnl_vf_res_request *vfres = 2378 (struct virtchnl_vf_res_request *)msg; 2379 int req_pairs = vfres->num_queue_pairs; 2380 int cur_pairs = vf->num_queue_pairs; 2381 struct i40e_pf *pf = vf->pf; 2382 2383 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 2384 return -EINVAL; 2385 2386 if (req_pairs <= 0) { 2387 dev_err(&pf->pdev->dev, 2388 "VF %d tried to request %d queues. 
Ignoring.\n", 2389 vf->vf_id, req_pairs); 2390 } else if (req_pairs > I40E_MAX_VF_QUEUES) { 2391 dev_err(&pf->pdev->dev, 2392 "VF %d tried to request more than %d queues.\n", 2393 vf->vf_id, 2394 I40E_MAX_VF_QUEUES); 2395 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES; 2396 } else if (req_pairs - cur_pairs > pf->queues_left) { 2397 dev_warn(&pf->pdev->dev, 2398 "VF %d requested %d more queues, but only %d left.\n", 2399 vf->vf_id, 2400 req_pairs - cur_pairs, 2401 pf->queues_left); 2402 vfres->num_queue_pairs = pf->queues_left + cur_pairs; 2403 } else { 2404 /* successful request */ 2405 vf->num_req_queues = req_pairs; 2406 i40e_vc_notify_vf_reset(vf); 2407 i40e_reset_vf(vf, false); 2408 return 0; 2409 } 2410 2411 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0, 2412 (u8 *)vfres, sizeof(*vfres)); 2413 } 2414 2415 /** 2416 * i40e_vc_get_stats_msg 2417 * @vf: pointer to the VF info 2418 * @msg: pointer to the msg buffer 2419 * 2420 * called from the VF to get vsi stats 2421 **/ 2422 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) 2423 { 2424 struct virtchnl_queue_select *vqs = 2425 (struct virtchnl_queue_select *)msg; 2426 struct i40e_pf *pf = vf->pf; 2427 struct i40e_eth_stats stats; 2428 i40e_status aq_ret = 0; 2429 struct i40e_vsi *vsi; 2430 2431 memset(&stats, 0, sizeof(struct i40e_eth_stats)); 2432 2433 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2434 aq_ret = I40E_ERR_PARAM; 2435 goto error_param; 2436 } 2437 2438 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2439 aq_ret = I40E_ERR_PARAM; 2440 goto error_param; 2441 } 2442 2443 vsi = pf->vsi[vf->lan_vsi_idx]; 2444 if (!vsi) { 2445 aq_ret = I40E_ERR_PARAM; 2446 goto error_param; 2447 } 2448 i40e_update_eth_stats(vsi); 2449 stats = vsi->eth_stats; 2450 2451 error_param: 2452 /* send the response back to the VF */ 2453 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret, 2454 (u8 *)&stats, sizeof(stats)); 2455 } 2456 2457 /* If the VF is not trusted restrict the number of MAC/VLAN it can program 2458 * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast 2459 */ 2460 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1) 2461 #define I40E_VC_MAX_VLAN_PER_VF 8 2462 2463 /** 2464 * i40e_check_vf_permission 2465 * @vf: pointer to the VF info 2466 * @al: MAC address list from virtchnl 2467 * 2468 * Check that the given list of MAC addresses is allowed. Will return -EPERM 2469 * if any address in the list is not valid. Checks the following conditions: 2470 * 2471 * 1) broadcast and zero addresses are never valid 2472 * 2) unicast addresses are not allowed if the VMM has administratively set 2473 * the VF MAC address, unless the VF is marked as privileged. 2474 * 3) There is enough space to add all the addresses. 2475 * 2476 * Note that to guarantee consistency, it is expected this function be called 2477 * while holding the mac_filter_hash_lock, as otherwise the current number of 2478 * addresses might not be accurate. 2479 **/ 2480 static inline int i40e_check_vf_permission(struct i40e_vf *vf, 2481 struct virtchnl_ether_addr_list *al) 2482 { 2483 struct i40e_pf *pf = vf->pf; 2484 int i; 2485 2486 /* If this VF is not privileged, then we can't add more than a limited 2487 * number of addresses. Check to make sure that the additions do not 2488 * push us over the limit. 
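	 * The limit, I40E_VC_MAX_MAC_ADDR_PER_VF, accounts for 16 multicast
	 * addresses plus one unicast MAC and one broadcast entry.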
	 */
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
	    (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) {
		dev_err(&pf->pdev->dev,
			"Cannot add more MAC addresses: VF is not trusted; mark the VF as trusted to raise the limit\n");
		return -EPERM;
	}

	for (i = 0; i < al->num_elements; i++) {
		u8 *addr = al->list[i].addr;

		if (is_broadcast_ether_addr(addr) ||
		    is_zero_ether_addr(addr)) {
			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
				addr);
			return I40E_ERR_INVALID_MAC_ADDR;
		}

		/* If the host VMM administrator has set the VF MAC address
		 * administratively via the ndo_set_vf_mac command, then deny
		 * the VF permission to add or delete unicast MAC addresses,
		 * unless the VF is privileged, in which case it may change
		 * them freely. The VF may request to set the MAC address
		 * filter already assigned to it, so do not return an error
		 * in that case.
		 */
		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
		    !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
			dev_err(&pf->pdev->dev,
				"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
			return -EPERM;
		}
	}

	return 0;
}

/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * add guest mac address filter
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];

	/* Lock once: every function called inside the loop below accesses
	 * the VSI's MAC filter list, which must be protected by this lock.
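	 * Holding it across both the permission check and the additions also
	 * keeps vf->num_mac consistent with the actual list contents.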
2553 */ 2554 spin_lock_bh(&vsi->mac_filter_hash_lock); 2555 2556 ret = i40e_check_vf_permission(vf, al); 2557 if (ret) { 2558 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2559 goto error_param; 2560 } 2561 2562 /* add new addresses to the list */ 2563 for (i = 0; i < al->num_elements; i++) { 2564 struct i40e_mac_filter *f; 2565 2566 f = i40e_find_mac(vsi, al->list[i].addr); 2567 if (!f) { 2568 f = i40e_add_mac_filter(vsi, al->list[i].addr); 2569 2570 if (!f) { 2571 dev_err(&pf->pdev->dev, 2572 "Unable to add MAC filter %pM for VF %d\n", 2573 al->list[i].addr, vf->vf_id); 2574 ret = I40E_ERR_PARAM; 2575 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2576 goto error_param; 2577 } else { 2578 vf->num_mac++; 2579 } 2580 } 2581 } 2582 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2583 2584 /* program the updated filter list */ 2585 ret = i40e_sync_vsi_filters(vsi); 2586 if (ret) 2587 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 2588 vf->vf_id, ret); 2589 2590 error_param: 2591 /* send the response to the VF */ 2592 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, 2593 ret); 2594 } 2595 2596 /** 2597 * i40e_vc_del_mac_addr_msg 2598 * @vf: pointer to the VF info 2599 * @msg: pointer to the msg buffer 2600 * 2601 * remove guest mac address filter 2602 **/ 2603 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) 2604 { 2605 struct virtchnl_ether_addr_list *al = 2606 (struct virtchnl_ether_addr_list *)msg; 2607 struct i40e_pf *pf = vf->pf; 2608 struct i40e_vsi *vsi = NULL; 2609 u16 vsi_id = al->vsi_id; 2610 i40e_status ret = 0; 2611 int i; 2612 2613 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2614 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2615 ret = I40E_ERR_PARAM; 2616 goto error_param; 2617 } 2618 2619 for (i = 0; i < al->num_elements; i++) { 2620 if (is_broadcast_ether_addr(al->list[i].addr) || 2621 is_zero_ether_addr(al->list[i].addr)) { 2622 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n", 2623 al->list[i].addr, vf->vf_id); 2624 ret = I40E_ERR_INVALID_MAC_ADDR; 2625 goto error_param; 2626 } 2627 2628 if (vf->pf_set_mac && 2629 ether_addr_equal(al->list[i].addr, 2630 vf->default_lan_addr.addr)) { 2631 dev_err(&pf->pdev->dev, 2632 "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n", 2633 vf->default_lan_addr.addr, vf->vf_id); 2634 ret = I40E_ERR_PARAM; 2635 goto error_param; 2636 } 2637 } 2638 vsi = pf->vsi[vf->lan_vsi_idx]; 2639 2640 spin_lock_bh(&vsi->mac_filter_hash_lock); 2641 /* delete addresses from the list */ 2642 for (i = 0; i < al->num_elements; i++) 2643 if (i40e_del_mac_filter(vsi, al->list[i].addr)) { 2644 ret = I40E_ERR_INVALID_MAC_ADDR; 2645 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2646 goto error_param; 2647 } else { 2648 vf->num_mac--; 2649 } 2650 2651 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2652 2653 /* program the updated filter list */ 2654 ret = i40e_sync_vsi_filters(vsi); 2655 if (ret) 2656 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 2657 vf->vf_id, ret); 2658 2659 error_param: 2660 /* send the response to the VF */ 2661 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, 2662 ret); 2663 } 2664 2665 /** 2666 * i40e_vc_add_vlan_msg 2667 * @vf: pointer to the VF info 2668 * @msg: pointer to the msg buffer 2669 * 2670 * program guest vlan id 2671 **/ 2672 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) 2673 { 2674 struct virtchnl_vlan_filter_list *vfl = 2675 (struct virtchnl_vlan_filter_list *)msg; 2676 struct 
i40e_pf *pf = vf->pf; 2677 struct i40e_vsi *vsi = NULL; 2678 u16 vsi_id = vfl->vsi_id; 2679 i40e_status aq_ret = 0; 2680 int i; 2681 2682 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) && 2683 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 2684 dev_err(&pf->pdev->dev, 2685 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n"); 2686 goto error_param; 2687 } 2688 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2689 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2690 aq_ret = I40E_ERR_PARAM; 2691 goto error_param; 2692 } 2693 2694 for (i = 0; i < vfl->num_elements; i++) { 2695 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 2696 aq_ret = I40E_ERR_PARAM; 2697 dev_err(&pf->pdev->dev, 2698 "invalid VF VLAN id %d\n", vfl->vlan_id[i]); 2699 goto error_param; 2700 } 2701 } 2702 vsi = pf->vsi[vf->lan_vsi_idx]; 2703 if (vsi->info.pvid) { 2704 aq_ret = I40E_ERR_PARAM; 2705 goto error_param; 2706 } 2707 2708 i40e_vlan_stripping_enable(vsi); 2709 for (i = 0; i < vfl->num_elements; i++) { 2710 /* add new VLAN filter */ 2711 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); 2712 if (!ret) 2713 vf->num_vlan++; 2714 2715 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 2716 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 2717 true, 2718 vfl->vlan_id[i], 2719 NULL); 2720 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 2721 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 2722 true, 2723 vfl->vlan_id[i], 2724 NULL); 2725 2726 if (ret) 2727 dev_err(&pf->pdev->dev, 2728 "Unable to add VLAN filter %d for VF %d, error %d\n", 2729 vfl->vlan_id[i], vf->vf_id, ret); 2730 } 2731 2732 error_param: 2733 /* send the response to the VF */ 2734 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret); 2735 } 2736 2737 /** 2738 * i40e_vc_remove_vlan_msg 2739 * @vf: pointer to the VF info 2740 * @msg: pointer to the msg buffer 2741 * 2742 * remove programmed guest vlan id 2743 **/ 2744 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) 2745 { 2746 struct virtchnl_vlan_filter_list *vfl = 2747 (struct virtchnl_vlan_filter_list *)msg; 2748 struct i40e_pf *pf = vf->pf; 2749 struct i40e_vsi *vsi = NULL; 2750 u16 vsi_id = vfl->vsi_id; 2751 i40e_status aq_ret = 0; 2752 int i; 2753 2754 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2755 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2756 aq_ret = I40E_ERR_PARAM; 2757 goto error_param; 2758 } 2759 2760 for (i = 0; i < vfl->num_elements; i++) { 2761 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 2762 aq_ret = I40E_ERR_PARAM; 2763 goto error_param; 2764 } 2765 } 2766 2767 vsi = pf->vsi[vf->lan_vsi_idx]; 2768 if (vsi->info.pvid) { 2769 aq_ret = I40E_ERR_PARAM; 2770 goto error_param; 2771 } 2772 2773 for (i = 0; i < vfl->num_elements; i++) { 2774 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); 2775 vf->num_vlan--; 2776 2777 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 2778 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 2779 false, 2780 vfl->vlan_id[i], 2781 NULL); 2782 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 2783 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 2784 false, 2785 vfl->vlan_id[i], 2786 NULL); 2787 } 2788 2789 error_param: 2790 /* send the response to the VF */ 2791 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret); 2792 } 2793 2794 /** 2795 * i40e_vc_iwarp_msg 2796 * @vf: pointer to the VF info 2797 * @msg: pointer to the msg buffer 2798 * @msglen: msg length 2799 * 2800 * called from the VF for the iwarp msgs 2801 **/ 2802 static int 
i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 2803 { 2804 struct i40e_pf *pf = vf->pf; 2805 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; 2806 i40e_status aq_ret = 0; 2807 2808 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2809 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 2810 aq_ret = I40E_ERR_PARAM; 2811 goto error_param; 2812 } 2813 2814 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id, 2815 msg, msglen); 2816 2817 error_param: 2818 /* send the response to the VF */ 2819 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP, 2820 aq_ret); 2821 } 2822 2823 /** 2824 * i40e_vc_iwarp_qvmap_msg 2825 * @vf: pointer to the VF info 2826 * @msg: pointer to the msg buffer 2827 * @config: config qvmap or release it 2828 * 2829 * called from the VF for the iwarp msgs 2830 **/ 2831 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) 2832 { 2833 struct virtchnl_iwarp_qvlist_info *qvlist_info = 2834 (struct virtchnl_iwarp_qvlist_info *)msg; 2835 i40e_status aq_ret = 0; 2836 2837 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2838 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 2839 aq_ret = I40E_ERR_PARAM; 2840 goto error_param; 2841 } 2842 2843 if (config) { 2844 if (i40e_config_iwarp_qvlist(vf, qvlist_info)) 2845 aq_ret = I40E_ERR_PARAM; 2846 } else { 2847 i40e_release_iwarp_qvlist(vf); 2848 } 2849 2850 error_param: 2851 /* send the response to the VF */ 2852 return i40e_vc_send_resp_to_vf(vf, 2853 config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP : 2854 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, 2855 aq_ret); 2856 } 2857 2858 /** 2859 * i40e_vc_config_rss_key 2860 * @vf: pointer to the VF info 2861 * @msg: pointer to the msg buffer 2862 * 2863 * Configure the VF's RSS key 2864 **/ 2865 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) 2866 { 2867 struct virtchnl_rss_key *vrk = 2868 (struct virtchnl_rss_key *)msg; 2869 struct i40e_pf *pf = vf->pf; 2870 struct i40e_vsi *vsi = NULL; 2871 u16 vsi_id = vrk->vsi_id; 2872 i40e_status aq_ret = 0; 2873 2874 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2875 !i40e_vc_isvalid_vsi_id(vf, vsi_id) || 2876 (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { 2877 aq_ret = I40E_ERR_PARAM; 2878 goto err; 2879 } 2880 2881 vsi = pf->vsi[vf->lan_vsi_idx]; 2882 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0); 2883 err: 2884 /* send the response to the VF */ 2885 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, 2886 aq_ret); 2887 } 2888 2889 /** 2890 * i40e_vc_config_rss_lut 2891 * @vf: pointer to the VF info 2892 * @msg: pointer to the msg buffer 2893 * 2894 * Configure the VF's RSS LUT 2895 **/ 2896 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) 2897 { 2898 struct virtchnl_rss_lut *vrl = 2899 (struct virtchnl_rss_lut *)msg; 2900 struct i40e_pf *pf = vf->pf; 2901 struct i40e_vsi *vsi = NULL; 2902 u16 vsi_id = vrl->vsi_id; 2903 i40e_status aq_ret = 0; 2904 2905 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2906 !i40e_vc_isvalid_vsi_id(vf, vsi_id) || 2907 (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { 2908 aq_ret = I40E_ERR_PARAM; 2909 goto err; 2910 } 2911 2912 vsi = pf->vsi[vf->lan_vsi_idx]; 2913 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); 2914 /* send the response to the VF */ 2915 err: 2916 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, 2917 aq_ret); 2918 } 2919 2920 /** 2921 * i40e_vc_get_rss_hena 2922 * @vf: pointer to the VF info 2923 * @msg: pointer to the msg buffer 2924 * 2925 * Return the RSS 
HENA bits allowed by the hardware 2926 **/ 2927 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg) 2928 { 2929 struct virtchnl_rss_hena *vrh = NULL; 2930 struct i40e_pf *pf = vf->pf; 2931 i40e_status aq_ret = 0; 2932 int len = 0; 2933 2934 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2935 aq_ret = I40E_ERR_PARAM; 2936 goto err; 2937 } 2938 len = sizeof(struct virtchnl_rss_hena); 2939 2940 vrh = kzalloc(len, GFP_KERNEL); 2941 if (!vrh) { 2942 aq_ret = I40E_ERR_NO_MEMORY; 2943 len = 0; 2944 goto err; 2945 } 2946 vrh->hena = i40e_pf_get_default_rss_hena(pf); 2947 err: 2948 /* send the response back to the VF */ 2949 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, 2950 aq_ret, (u8 *)vrh, len); 2951 kfree(vrh); 2952 return aq_ret; 2953 } 2954 2955 /** 2956 * i40e_vc_set_rss_hena 2957 * @vf: pointer to the VF info 2958 * @msg: pointer to the msg buffer 2959 * 2960 * Set the RSS HENA bits for the VF 2961 **/ 2962 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg) 2963 { 2964 struct virtchnl_rss_hena *vrh = 2965 (struct virtchnl_rss_hena *)msg; 2966 struct i40e_pf *pf = vf->pf; 2967 struct i40e_hw *hw = &pf->hw; 2968 i40e_status aq_ret = 0; 2969 2970 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2971 aq_ret = I40E_ERR_PARAM; 2972 goto err; 2973 } 2974 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena); 2975 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id), 2976 (u32)(vrh->hena >> 32)); 2977 2978 /* send the response to the VF */ 2979 err: 2980 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret); 2981 } 2982 2983 /** 2984 * i40e_vc_enable_vlan_stripping 2985 * @vf: pointer to the VF info 2986 * @msg: pointer to the msg buffer 2987 * 2988 * Enable vlan header stripping for the VF 2989 **/ 2990 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) 2991 { 2992 struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; 2993 i40e_status aq_ret = 0; 2994 2995 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2996 aq_ret = I40E_ERR_PARAM; 2997 goto err; 2998 } 2999 3000 i40e_vlan_stripping_enable(vsi); 3001 3002 /* send the response to the VF */ 3003 err: 3004 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, 3005 aq_ret); 3006 } 3007 3008 /** 3009 * i40e_vc_disable_vlan_stripping 3010 * @vf: pointer to the VF info 3011 * @msg: pointer to the msg buffer 3012 * 3013 * Disable vlan header stripping for the VF 3014 **/ 3015 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) 3016 { 3017 struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; 3018 i40e_status aq_ret = 0; 3019 3020 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3021 aq_ret = I40E_ERR_PARAM; 3022 goto err; 3023 } 3024 3025 i40e_vlan_stripping_disable(vsi); 3026 3027 /* send the response to the VF */ 3028 err: 3029 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 3030 aq_ret); 3031 } 3032 3033 /** 3034 * i40e_validate_cloud_filter 3035 * @mask: mask for TC filter 3036 * @data: data for TC filter 3037 * 3038 * This function validates cloud filter programmed as TC filter for ADq 3039 **/ 3040 static int i40e_validate_cloud_filter(struct i40e_vf *vf, 3041 struct virtchnl_filter *tc_filter) 3042 { 3043 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec; 3044 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec; 3045 struct i40e_pf *pf = vf->pf; 3046 struct i40e_vsi *vsi = NULL; 3047 struct i40e_mac_filter *f; 3048 struct hlist_node *h; 3049 bool found = false; 3050 
int bkt; 3051 3052 if (!tc_filter->action) { 3053 dev_info(&pf->pdev->dev, 3054 "VF %d: Currently ADq doesn't support Drop Action\n", 3055 vf->vf_id); 3056 goto err; 3057 } 3058 3059 /* action_meta is TC number here to which the filter is applied */ 3060 if (!tc_filter->action_meta || 3061 tc_filter->action_meta > I40E_MAX_VF_VSI) { 3062 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", 3063 vf->vf_id, tc_filter->action_meta); 3064 goto err; 3065 } 3066 3067 /* Check filter if it's programmed for advanced mode or basic mode. 3068 * There are two ADq modes (for VF only), 3069 * 1. Basic mode: intended to allow as many filter options as possible 3070 * to be added to a VF in Non-trusted mode. Main goal is 3071 * to add filters to its own MAC and VLAN id. 3072 * 2. Advanced mode: is for allowing filters to be applied other than 3073 * its own MAC or VLAN. This mode requires the VF to be 3074 * Trusted. 3075 */ 3076 if (mask.dst_mac[0] && !mask.dst_ip[0]) { 3077 vsi = pf->vsi[vf->lan_vsi_idx]; 3078 f = i40e_find_mac(vsi, data.dst_mac); 3079 3080 if (!f) { 3081 dev_info(&pf->pdev->dev, 3082 "Destination MAC %pM doesn't belong to VF %d\n", 3083 data.dst_mac, vf->vf_id); 3084 goto err; 3085 } 3086 3087 if (mask.vlan_id) { 3088 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, 3089 hlist) { 3090 if (f->vlan == ntohs(data.vlan_id)) { 3091 found = true; 3092 break; 3093 } 3094 } 3095 if (!found) { 3096 dev_info(&pf->pdev->dev, 3097 "VF %d doesn't have any VLAN id %u\n", 3098 vf->vf_id, ntohs(data.vlan_id)); 3099 goto err; 3100 } 3101 } 3102 } else { 3103 /* Check if VF is trusted */ 3104 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 3105 dev_err(&pf->pdev->dev, 3106 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n", 3107 vf->vf_id); 3108 return I40E_ERR_CONFIG; 3109 } 3110 } 3111 3112 if (mask.dst_mac[0] & data.dst_mac[0]) { 3113 if (is_broadcast_ether_addr(data.dst_mac) || 3114 is_zero_ether_addr(data.dst_mac)) { 3115 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n", 3116 vf->vf_id, data.dst_mac); 3117 goto err; 3118 } 3119 } 3120 3121 if (mask.src_mac[0] & data.src_mac[0]) { 3122 if (is_broadcast_ether_addr(data.src_mac) || 3123 is_zero_ether_addr(data.src_mac)) { 3124 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n", 3125 vf->vf_id, data.src_mac); 3126 goto err; 3127 } 3128 } 3129 3130 if (mask.dst_port & data.dst_port) { 3131 if (!data.dst_port || be16_to_cpu(data.dst_port) > 0xFFFF) { 3132 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n", 3133 vf->vf_id); 3134 goto err; 3135 } 3136 } 3137 3138 if (mask.src_port & data.src_port) { 3139 if (!data.src_port || be16_to_cpu(data.src_port) > 0xFFFF) { 3140 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n", 3141 vf->vf_id); 3142 goto err; 3143 } 3144 } 3145 3146 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW && 3147 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) { 3148 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n", 3149 vf->vf_id); 3150 goto err; 3151 } 3152 3153 if (mask.vlan_id & data.vlan_id) { 3154 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) { 3155 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n", 3156 vf->vf_id); 3157 goto err; 3158 } 3159 } 3160 3161 return I40E_SUCCESS; 3162 err: 3163 return I40E_ERR_CONFIG; 3164 } 3165 3166 /** 3167 * i40e_find_vsi_from_seid - searches for the vsi with the given seid 3168 * @vf: pointer to the VF info 3169 * @seid - seid of the vsi it is searching for 3170 **/ 3171 static struct i40e_vsi 
*i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid) 3172 { 3173 struct i40e_pf *pf = vf->pf; 3174 struct i40e_vsi *vsi = NULL; 3175 int i; 3176 3177 for (i = 0; i < vf->num_tc ; i++) { 3178 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id); 3179 if (vsi && vsi->seid == seid) 3180 return vsi; 3181 } 3182 return NULL; 3183 } 3184 3185 /** 3186 * i40e_del_all_cloud_filters 3187 * @vf: pointer to the VF info 3188 * 3189 * This function deletes all cloud filters 3190 **/ 3191 static void i40e_del_all_cloud_filters(struct i40e_vf *vf) 3192 { 3193 struct i40e_cloud_filter *cfilter = NULL; 3194 struct i40e_pf *pf = vf->pf; 3195 struct i40e_vsi *vsi = NULL; 3196 struct hlist_node *node; 3197 int ret; 3198 3199 hlist_for_each_entry_safe(cfilter, node, 3200 &vf->cloud_filter_list, cloud_node) { 3201 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid); 3202 3203 if (!vsi) { 3204 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n", 3205 vf->vf_id, cfilter->seid); 3206 continue; 3207 } 3208 3209 if (cfilter->dst_port) 3210 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, 3211 false); 3212 else 3213 ret = i40e_add_del_cloud_filter(vsi, cfilter, false); 3214 if (ret) 3215 dev_err(&pf->pdev->dev, 3216 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", 3217 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3218 i40e_aq_str(&pf->hw, 3219 pf->hw.aq.asq_last_status)); 3220 3221 hlist_del(&cfilter->cloud_node); 3222 kfree(cfilter); 3223 vf->num_cloud_filters--; 3224 } 3225 } 3226 3227 /** 3228 * i40e_vc_del_cloud_filter 3229 * @vf: pointer to the VF info 3230 * @msg: pointer to the msg buffer 3231 * 3232 * This function deletes a cloud filter programmed as TC filter for ADq 3233 **/ 3234 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) 3235 { 3236 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 3237 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3238 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3239 struct i40e_cloud_filter cfilter, *cf = NULL; 3240 struct i40e_pf *pf = vf->pf; 3241 struct i40e_vsi *vsi = NULL; 3242 struct hlist_node *node; 3243 i40e_status aq_ret = 0; 3244 int i, ret; 3245 3246 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3247 aq_ret = I40E_ERR_PARAM; 3248 goto err; 3249 } 3250 3251 if (!vf->adq_enabled) { 3252 dev_info(&pf->pdev->dev, 3253 "VF %d: ADq not enabled, can't apply cloud filter\n", 3254 vf->vf_id); 3255 aq_ret = I40E_ERR_PARAM; 3256 goto err; 3257 } 3258 3259 if (i40e_validate_cloud_filter(vf, vcf)) { 3260 dev_info(&pf->pdev->dev, 3261 "VF %d: Invalid input, can't apply cloud filter\n", 3262 vf->vf_id); 3263 aq_ret = I40E_ERR_PARAM; 3264 goto err; 3265 } 3266 3267 memset(&cfilter, 0, sizeof(cfilter)); 3268 /* parse destination mac address */ 3269 for (i = 0; i < ETH_ALEN; i++) 3270 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; 3271 3272 /* parse source mac address */ 3273 for (i = 0; i < ETH_ALEN; i++) 3274 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 3275 3276 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id; 3277 cfilter.dst_port = mask.dst_port & tcf.dst_port; 3278 cfilter.src_port = mask.src_port & tcf.src_port; 3279 3280 switch (vcf->flow_type) { 3281 case VIRTCHNL_TCP_V4_FLOW: 3282 cfilter.n_proto = ETH_P_IP; 3283 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3284 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip, 3285 ARRAY_SIZE(tcf.dst_ip)); 3286 else if (mask.src_ip[0] & tcf.dst_ip[0]) 3287 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip, 3288 ARRAY_SIZE(tcf.dst_ip)); 3289 break; 3290 case 
VIRTCHNL_TCP_V6_FLOW: 3291 cfilter.n_proto = ETH_P_IPV6; 3292 if (mask.dst_ip[3] & tcf.dst_ip[3]) 3293 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip, 3294 sizeof(cfilter.ip.v6.dst_ip6)); 3295 if (mask.src_ip[3] & tcf.src_ip[3]) 3296 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip, 3297 sizeof(cfilter.ip.v6.src_ip6)); 3298 break; 3299 default: 3300 /* TC filter can be configured based on different combinations 3301 * and in this case IP is not a part of filter config 3302 */ 3303 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 3304 vf->vf_id); 3305 } 3306 3307 /* get the vsi to which the tc belongs to */ 3308 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 3309 cfilter.seid = vsi->seid; 3310 cfilter.flags = vcf->field_flags; 3311 3312 /* Deleting TC filter */ 3313 if (tcf.dst_port) 3314 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false); 3315 else 3316 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false); 3317 if (ret) { 3318 dev_err(&pf->pdev->dev, 3319 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", 3320 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3321 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 3322 goto err; 3323 } 3324 3325 hlist_for_each_entry_safe(cf, node, 3326 &vf->cloud_filter_list, cloud_node) { 3327 if (cf->seid != cfilter.seid) 3328 continue; 3329 if (mask.dst_port) 3330 if (cfilter.dst_port != cf->dst_port) 3331 continue; 3332 if (mask.dst_mac[0]) 3333 if (!ether_addr_equal(cf->src_mac, cfilter.src_mac)) 3334 continue; 3335 /* for ipv4 data to be valid, only first byte of mask is set */ 3336 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0]) 3337 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip, 3338 ARRAY_SIZE(tcf.dst_ip))) 3339 continue; 3340 /* for ipv6, mask is set for all sixteen bytes (4 words) */ 3341 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3]) 3342 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6, 3343 sizeof(cfilter.ip.v6.src_ip6))) 3344 continue; 3345 if (mask.vlan_id) 3346 if (cfilter.vlan_id != cf->vlan_id) 3347 continue; 3348 3349 hlist_del(&cf->cloud_node); 3350 kfree(cf); 3351 vf->num_cloud_filters--; 3352 } 3353 3354 err: 3355 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER, 3356 aq_ret); 3357 } 3358 3359 /** 3360 * i40e_vc_add_cloud_filter 3361 * @vf: pointer to the VF info 3362 * @msg: pointer to the msg buffer 3363 * 3364 * This function adds a cloud filter programmed as TC filter for ADq 3365 **/ 3366 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) 3367 { 3368 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 3369 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3370 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3371 struct i40e_cloud_filter *cfilter = NULL; 3372 struct i40e_pf *pf = vf->pf; 3373 struct i40e_vsi *vsi = NULL; 3374 i40e_status aq_ret = 0; 3375 int i, ret; 3376 3377 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3378 aq_ret = I40E_ERR_PARAM; 3379 goto err; 3380 } 3381 3382 if (!vf->adq_enabled) { 3383 dev_info(&pf->pdev->dev, 3384 "VF %d: ADq is not enabled, can't apply cloud filter\n", 3385 vf->vf_id); 3386 aq_ret = I40E_ERR_PARAM; 3387 goto err; 3388 } 3389 3390 if (i40e_validate_cloud_filter(vf, vcf)) { 3391 dev_info(&pf->pdev->dev, 3392 "VF %d: Invalid input/s, can't apply cloud filter\n", 3393 vf->vf_id); 3394 aq_ret = I40E_ERR_PARAM; 3395 goto err; 3396 } 3397 3398 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); 3399 if (!cfilter) 3400 return -ENOMEM; 3401 3402 /* parse destination mac address */ 3403 for (i = 0; i < ETH_ALEN; i++) 
3404 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; 3405 3406 /* parse source mac address */ 3407 for (i = 0; i < ETH_ALEN; i++) 3408 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 3409 3410 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id; 3411 cfilter->dst_port = mask.dst_port & tcf.dst_port; 3412 cfilter->src_port = mask.src_port & tcf.src_port; 3413 3414 switch (vcf->flow_type) { 3415 case VIRTCHNL_TCP_V4_FLOW: 3416 cfilter->n_proto = ETH_P_IP; 3417 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3418 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip, 3419 ARRAY_SIZE(tcf.dst_ip)); 3420 else if (mask.src_ip[0] & tcf.dst_ip[0]) 3421 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip, 3422 ARRAY_SIZE(tcf.dst_ip)); 3423 break; 3424 case VIRTCHNL_TCP_V6_FLOW: 3425 cfilter->n_proto = ETH_P_IPV6; 3426 if (mask.dst_ip[3] & tcf.dst_ip[3]) 3427 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip, 3428 sizeof(cfilter->ip.v6.dst_ip6)); 3429 if (mask.src_ip[3] & tcf.src_ip[3]) 3430 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip, 3431 sizeof(cfilter->ip.v6.src_ip6)); 3432 break; 3433 default: 3434 /* TC filter can be configured based on different combinations 3435 * and in this case IP is not a part of filter config 3436 */ 3437 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 3438 vf->vf_id); 3439 } 3440 3441 /* get the VSI to which the TC belongs to */ 3442 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 3443 cfilter->seid = vsi->seid; 3444 cfilter->flags = vcf->field_flags; 3445 3446 /* Adding cloud filter programmed as TC filter */ 3447 if (tcf.dst_port) 3448 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); 3449 else 3450 ret = i40e_add_del_cloud_filter(vsi, cfilter, true); 3451 if (ret) { 3452 dev_err(&pf->pdev->dev, 3453 "VF %d: Failed to add cloud filter, err %s aq_err %s\n", 3454 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3455 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 3456 goto err; 3457 } 3458 3459 INIT_HLIST_NODE(&cfilter->cloud_node); 3460 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list); 3461 vf->num_cloud_filters++; 3462 err: 3463 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER, 3464 aq_ret); 3465 } 3466 3467 /** 3468 * i40e_vc_add_qch_msg: Add queue channel and enable ADq 3469 * @vf: pointer to the VF info 3470 * @msg: pointer to the msg buffer 3471 **/ 3472 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) 3473 { 3474 struct virtchnl_tc_info *tci = 3475 (struct virtchnl_tc_info *)msg; 3476 struct i40e_pf *pf = vf->pf; 3477 struct i40e_link_status *ls = &pf->hw.phy.link_info; 3478 int i, adq_request_qps = 0, speed = 0; 3479 i40e_status aq_ret = 0; 3480 3481 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3482 aq_ret = I40E_ERR_PARAM; 3483 goto err; 3484 } 3485 3486 /* ADq cannot be applied if spoof check is ON */ 3487 if (vf->spoofchk) { 3488 dev_err(&pf->pdev->dev, 3489 "Spoof check is ON, turn it OFF to enable ADq\n"); 3490 aq_ret = I40E_ERR_PARAM; 3491 goto err; 3492 } 3493 3494 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) { 3495 dev_err(&pf->pdev->dev, 3496 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n", 3497 vf->vf_id); 3498 aq_ret = I40E_ERR_PARAM; 3499 goto err; 3500 } 3501 3502 /* max number of traffic classes for VF currently capped at 4 */ 3503 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) { 3504 dev_err(&pf->pdev->dev, 3505 "VF %d trying to set %u TCs, valid range 1-4 TCs per VF\n", 3506 vf->vf_id, tci->num_tc); 3507 aq_ret = I40E_ERR_PARAM; 3508 goto err; 3509 } 3510 3511 /* 
validate queues for each TC */
	for (i = 0; i < tci->num_tc; i++)
		if (!tci->list[i].count ||
		    tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
			dev_err(&pf->pdev->dev,
				"VF %d: TC %d trying to set %u queues, valid range 1-4 queues per TC\n",
				vf->vf_id, i, tci->list[i].count);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}

	/* need Max VF queues but already have default number of queues */
	adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;

	if (pf->queues_left < adq_request_qps) {
		dev_err(&pf->pdev->dev,
			"No queues left to allocate to VF %d\n",
			vf->vf_id);
		aq_ret = I40E_ERR_PARAM;
		goto err;
	} else {
		/* we need to allocate max VF queues to enable ADq so as to
		 * make sure an ADq-enabled VF always gets its queues back
		 * when it goes through a reset.
		 */
		vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
	}

	/* get link speed in Mbps to validate the requested rate limit */
	switch (ls->link_speed) {
	case VIRTCHNL_LINK_SPEED_100MB:
		speed = SPEED_100;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		speed = SPEED_1000;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		speed = SPEED_10000;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
		speed = SPEED_20000;
		break;
	case VIRTCHNL_LINK_SPEED_25GB:
		speed = SPEED_25000;
		break;
	case VIRTCHNL_LINK_SPEED_40GB:
		speed = SPEED_40000;
		break;
	default:
		dev_err(&pf->pdev->dev,
			"Cannot detect link speed\n");
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	/* parse data from the queue channel info */
	vf->num_tc = tci->num_tc;
	for (i = 0; i < vf->num_tc; i++) {
		if (tci->list[i].max_tx_rate) {
			if (tci->list[i].max_tx_rate > speed) {
				dev_err(&pf->pdev->dev,
					"Invalid max tx rate %llu specified for VF %d\n",
					tci->list[i].max_tx_rate,
					vf->vf_id);
				aq_ret = I40E_ERR_PARAM;
				goto err;
			} else {
				vf->ch[i].max_tx_rate =
					tci->list[i].max_tx_rate;
			}
		}
		vf->ch[i].num_qps = tci->list[i].count;
	}

	/* set this flag only after making sure all inputs are sane */
	vf->adq_enabled = true;
	/* num_req_queues is set when the user changes the number of queues
	 * via ethtool. The default VSI sizing depends on that variable,
	 * which causes problems once ADq is enabled, so reset it here.
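	 * After the reset below, the VF's queue counts are derived from the
	 * per-TC channel configuration parsed above instead.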
3590 */ 3591 vf->num_req_queues = 0; 3592 3593 /* reset the VF in order to allocate resources */ 3594 i40e_vc_notify_vf_reset(vf); 3595 i40e_reset_vf(vf, false); 3596 3597 return I40E_SUCCESS; 3598 3599 /* send the response to the VF */ 3600 err: 3601 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS, 3602 aq_ret); 3603 } 3604 3605 /** 3606 * i40e_vc_del_qch_msg 3607 * @vf: pointer to the VF info 3608 * @msg: pointer to the msg buffer 3609 **/ 3610 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) 3611 { 3612 struct i40e_pf *pf = vf->pf; 3613 i40e_status aq_ret = 0; 3614 3615 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3616 aq_ret = I40E_ERR_PARAM; 3617 goto err; 3618 } 3619 3620 if (vf->adq_enabled) { 3621 i40e_del_all_cloud_filters(vf); 3622 i40e_del_qch(vf); 3623 vf->adq_enabled = false; 3624 vf->num_tc = 0; 3625 dev_info(&pf->pdev->dev, 3626 "Deleting Queue Channels and cloud filters for ADq on VF %d\n", 3627 vf->vf_id); 3628 } else { 3629 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n", 3630 vf->vf_id); 3631 aq_ret = I40E_ERR_PARAM; 3632 } 3633 3634 /* reset the VF in order to allocate resources */ 3635 i40e_vc_notify_vf_reset(vf); 3636 i40e_reset_vf(vf, false); 3637 3638 return I40E_SUCCESS; 3639 3640 err: 3641 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS, 3642 aq_ret); 3643 } 3644 3645 /** 3646 * i40e_vc_process_vf_msg 3647 * @pf: pointer to the PF structure 3648 * @vf_id: source VF id 3649 * @v_opcode: operation code 3650 * @v_retval: unused return value code 3651 * @msg: pointer to the msg buffer 3652 * @msglen: msg length 3653 * 3654 * called from the common aeq/arq handler to 3655 * process request from VF 3656 **/ 3657 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, 3658 u32 __always_unused v_retval, u8 *msg, u16 msglen) 3659 { 3660 struct i40e_hw *hw = &pf->hw; 3661 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id; 3662 struct i40e_vf *vf; 3663 int ret; 3664 3665 pf->vf_aq_requests++; 3666 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs) 3667 return -EINVAL; 3668 vf = &(pf->vf[local_vf_id]); 3669 3670 /* Check if VF is disabled. 
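	 * The DISABLED bit is set e.g. while the VF is undergoing a reset;
	 * its messages are rejected rather than serviced against stale state.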
*/ 3671 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states)) 3672 return I40E_ERR_PARAM; 3673 3674 /* perform basic checks on the msg */ 3675 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); 3676 3677 /* perform additional checks specific to this driver */ 3678 if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) { 3679 struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; 3680 3681 if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) 3682 ret = -EINVAL; 3683 } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) { 3684 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; 3685 3686 if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) 3687 ret = -EINVAL; 3688 } 3689 3690 if (ret) { 3691 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); 3692 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", 3693 local_vf_id, v_opcode, msglen); 3694 switch (ret) { 3695 case VIRTCHNL_STATUS_ERR_PARAM: 3696 return -EPERM; 3697 default: 3698 return -EINVAL; 3699 } 3700 } 3701 3702 switch (v_opcode) { 3703 case VIRTCHNL_OP_VERSION: 3704 ret = i40e_vc_get_version_msg(vf, msg); 3705 break; 3706 case VIRTCHNL_OP_GET_VF_RESOURCES: 3707 ret = i40e_vc_get_vf_resources_msg(vf, msg); 3708 i40e_vc_notify_vf_link_state(vf); 3709 break; 3710 case VIRTCHNL_OP_RESET_VF: 3711 i40e_vc_reset_vf_msg(vf); 3712 ret = 0; 3713 break; 3714 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 3715 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg); 3716 break; 3717 case VIRTCHNL_OP_CONFIG_VSI_QUEUES: 3718 ret = i40e_vc_config_queues_msg(vf, msg); 3719 break; 3720 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 3721 ret = i40e_vc_config_irq_map_msg(vf, msg); 3722 break; 3723 case VIRTCHNL_OP_ENABLE_QUEUES: 3724 ret = i40e_vc_enable_queues_msg(vf, msg); 3725 i40e_vc_notify_vf_link_state(vf); 3726 break; 3727 case VIRTCHNL_OP_DISABLE_QUEUES: 3728 ret = i40e_vc_disable_queues_msg(vf, msg); 3729 break; 3730 case VIRTCHNL_OP_ADD_ETH_ADDR: 3731 ret = i40e_vc_add_mac_addr_msg(vf, msg); 3732 break; 3733 case VIRTCHNL_OP_DEL_ETH_ADDR: 3734 ret = i40e_vc_del_mac_addr_msg(vf, msg); 3735 break; 3736 case VIRTCHNL_OP_ADD_VLAN: 3737 ret = i40e_vc_add_vlan_msg(vf, msg); 3738 break; 3739 case VIRTCHNL_OP_DEL_VLAN: 3740 ret = i40e_vc_remove_vlan_msg(vf, msg); 3741 break; 3742 case VIRTCHNL_OP_GET_STATS: 3743 ret = i40e_vc_get_stats_msg(vf, msg); 3744 break; 3745 case VIRTCHNL_OP_IWARP: 3746 ret = i40e_vc_iwarp_msg(vf, msg, msglen); 3747 break; 3748 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 3749 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true); 3750 break; 3751 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: 3752 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false); 3753 break; 3754 case VIRTCHNL_OP_CONFIG_RSS_KEY: 3755 ret = i40e_vc_config_rss_key(vf, msg); 3756 break; 3757 case VIRTCHNL_OP_CONFIG_RSS_LUT: 3758 ret = i40e_vc_config_rss_lut(vf, msg); 3759 break; 3760 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: 3761 ret = i40e_vc_get_rss_hena(vf, msg); 3762 break; 3763 case VIRTCHNL_OP_SET_RSS_HENA: 3764 ret = i40e_vc_set_rss_hena(vf, msg); 3765 break; 3766 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 3767 ret = i40e_vc_enable_vlan_stripping(vf, msg); 3768 break; 3769 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 3770 ret = i40e_vc_disable_vlan_stripping(vf, msg); 3771 break; 3772 case VIRTCHNL_OP_REQUEST_QUEUES: 3773 ret = i40e_vc_request_queues_msg(vf, msg); 3774 break; 3775 case VIRTCHNL_OP_ENABLE_CHANNELS: 3776 ret = i40e_vc_add_qch_msg(vf, msg); 3777 break; 3778 case VIRTCHNL_OP_DISABLE_CHANNELS: 3779 ret = i40e_vc_del_qch_msg(vf, msg); 3780 break; 3781 case 
VIRTCHNL_OP_ADD_CLOUD_FILTER: 3782 ret = i40e_vc_add_cloud_filter(vf, msg); 3783 break; 3784 case VIRTCHNL_OP_DEL_CLOUD_FILTER: 3785 ret = i40e_vc_del_cloud_filter(vf, msg); 3786 break; 3787 case VIRTCHNL_OP_UNKNOWN: 3788 default: 3789 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", 3790 v_opcode, local_vf_id); 3791 ret = i40e_vc_send_resp_to_vf(vf, v_opcode, 3792 I40E_ERR_NOT_IMPLEMENTED); 3793 break; 3794 } 3795 3796 return ret; 3797 } 3798 3799 /** 3800 * i40e_vc_process_vflr_event 3801 * @pf: pointer to the PF structure 3802 * 3803 * called from the vlfr irq handler to 3804 * free up VF resources and state variables 3805 **/ 3806 int i40e_vc_process_vflr_event(struct i40e_pf *pf) 3807 { 3808 struct i40e_hw *hw = &pf->hw; 3809 u32 reg, reg_idx, bit_idx; 3810 struct i40e_vf *vf; 3811 int vf_id; 3812 3813 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) 3814 return 0; 3815 3816 /* Re-enable the VFLR interrupt cause here, before looking for which 3817 * VF got reset. Otherwise, if another VF gets a reset while the 3818 * first one is being processed, that interrupt will be lost, and 3819 * that VF will be stuck in reset forever. 3820 */ 3821 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 3822 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; 3823 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 3824 i40e_flush(hw); 3825 3826 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state); 3827 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { 3828 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; 3829 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 3830 /* read GLGEN_VFLRSTAT register to find out the flr VFs */ 3831 vf = &pf->vf[vf_id]; 3832 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); 3833 if (reg & BIT(bit_idx)) 3834 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */ 3835 i40e_reset_vf(vf, true); 3836 } 3837 3838 return 0; 3839 } 3840 3841 /** 3842 * i40e_validate_vf 3843 * @pf: the physical function 3844 * @vf_id: VF identifier 3845 * 3846 * Check that the VF is enabled and the VSI exists. 3847 * 3848 * Returns 0 on success, negative on failure 3849 **/ 3850 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id) 3851 { 3852 struct i40e_vsi *vsi; 3853 struct i40e_vf *vf; 3854 int ret = 0; 3855 3856 if (vf_id >= pf->num_alloc_vfs) { 3857 dev_err(&pf->pdev->dev, 3858 "Invalid VF Identifier %d\n", vf_id); 3859 ret = -EINVAL; 3860 goto err_out; 3861 } 3862 vf = &pf->vf[vf_id]; 3863 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id); 3864 if (!vsi) 3865 ret = -EINVAL; 3866 err_out: 3867 return ret; 3868 } 3869 3870 /** 3871 * i40e_ndo_set_vf_mac 3872 * @netdev: network interface device structure 3873 * @vf_id: VF identifier 3874 * @mac: mac address 3875 * 3876 * program VF mac address 3877 **/ 3878 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 3879 { 3880 struct i40e_netdev_priv *np = netdev_priv(netdev); 3881 struct i40e_vsi *vsi = np->vsi; 3882 struct i40e_pf *pf = vsi->back; 3883 struct i40e_mac_filter *f; 3884 struct i40e_vf *vf; 3885 int ret = 0; 3886 struct hlist_node *h; 3887 int bkt; 3888 u8 i; 3889 3890 /* validate the request */ 3891 ret = i40e_validate_vf(pf, vf_id); 3892 if (ret) 3893 goto error_param; 3894 3895 vf = &pf->vf[vf_id]; 3896 vsi = pf->vsi[vf->lan_vsi_idx]; 3897 3898 /* When the VF is resetting wait until it is done. 3899 * It can take up to 200 milliseconds, 3900 * but wait for up to 300 milliseconds to be safe. 
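	 * (15 iterations of the 20 msec sleep in the loop below)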
/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: mac address
 *
 * program VF mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;
	struct hlist_node *h;
	int bkt;
	u8 i;

	/* Take the pending-op bit before any validation so that every
	 * error path below may safely clear it; otherwise an early failure
	 * would clear a bit owned by another in-flight operation.
	 */
	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_param;

	vf = &pf->vf[vf_id];

	/* When the VF is resetting wait until it is done.
	 * It can take up to 200 milliseconds,
	 * but wait for up to 300 milliseconds to be safe.
	 */
	for (i = 0; i < 15; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
			break;
		msleep(20);
	}
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_param;
	}
	/* Fetch the VSI only after the reset wait; the VF may have been
	 * given a new VSI during reset.
	 */
	vsi = pf->vsi[vf->lan_vsi_idx];

	if (is_multicast_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	/* Lock once because below invoked function add/del_filter requires
	 * mac_filter_hash_lock to be held
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* delete the temporary mac address */
	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);

	/* Delete all the filters for this VSI - we're going to kill it
	 * anyway.
	 */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	ether_addr_copy(vf->default_lan_addr.addr, mac);

	if (is_zero_ether_addr(mac)) {
		vf->pf_set_mac = false;
		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
	} else {
		vf->pf_set_mac = true;
		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
			 mac, vf_id);
	}

	/* Force the VF interface down so it has to bring up with new MAC
	 * address
	 */
	i40e_vc_disable_vf(vf);
	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");

error_param:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
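/* The reset wait above is a bounded poll: 15 iterations of msleep(20)
 * gives the 300 ms budget quoted in the comment, with an early exit as
 * soon as the VF reports I40E_VF_STATE_INIT. A generic sketch of the
 * pattern, where poll_ready() is a hypothetical stand-in for the state
 * bit test:
 *
 *	static bool wait_for_ready(bool (*poll_ready)(void))
 *	{
 *		int i;
 *
 *		for (i = 0; i < 15; i++) {
 *			if (poll_ready())
 *				return true;
 *			msleep(20);
 *		}
 *		return poll_ready();
 *	}
 *
 * From userspace this hook is typically reached via
 * "ip link set <pf-netdev> vf <vf_id> mac <addr>".
 */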
/**
 * i40e_vsi_has_vlans - True if VSI has configured VLANs
 * @vsi: pointer to the vsi
 *
 * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
 * we have no configured VLANs. Do not call while holding the
 * mac_filter_hash_lock.
 */
static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
{
	bool have_vlans;

	/* If we have a port VLAN, then the VSI cannot have any VLANs
	 * configured, as all MAC/VLAN filters will be assigned to the PVID.
	 */
	if (vsi->info.pvid)
		return false;

	/* Since we don't have a PVID, we know that if the device is in VLAN
	 * mode it must be because of a VLAN filter configured on this VSI.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	have_vlans = i40e_is_vsi_in_vlan(vsi);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	return have_vlans;
}
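/* The predicate above encodes a precedence rule: a port VLAN (PVID) hides
 * any VF-configured VLANs, because every MAC/VLAN filter gets rewritten
 * against the PVID. Summarized as a truth table (derived from the logic
 * above):
 *
 *	pvid set?	VLAN filters present?	i40e_vsi_has_vlans()
 *	  yes		      any		      false
 *	  no		      yes		      true
 *	  no		      no		      false
 *
 * Only the no-PVID rows consult the filter hash, which is why the caller
 * must not already hold mac_filter_hash_lock.
 */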
/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID
 * @qos: priority setting
 * @vlan_proto: vlan protocol
 *
 * program VF vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
			      u16 vlan_id, u8 qos, __be16 vlan_proto)
{
	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_pvid;

	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vlan_proto != htons(ETH_P_8021Q)) {
		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
		ret = -EPROTONOSUPPORT;
		goto error_pvid;
	}

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_pvid;
	}

	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
		/* duplicate request, so just return success */
		goto error_pvid;

	if (i40e_vsi_has_vlans(vsi)) {
		dev_err(&pf->pdev->dev,
			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
			vf_id);
		/* Administrator Error - knock the VF offline until it does
		 * the right thing by reconfiguring its network correctly
		 * and then reloading the VF driver.
		 */
		i40e_vc_disable_vf(vf);
		/* During reset the VF got a new VSI, so refresh the pointer. */
		vsi = pf->vsi[vf->lan_vsi_idx];
	}

	/* Locked once because multiple functions below iterate list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* Check for condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Additionally check for the condition where there was a port
	 * VLAN but now there is a new and different port VLAN being set.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if ((!(vlan_id || qos) ||
	     vlanprio != le16_to_cpu(vsi->info.pvid)) &&
	    vsi->info.pvid) {
		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}
	}

	if (vsi->info.pvid) {
		/* remove all filters on the old VLAN */
		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
					   VLAN_VID_MASK));
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi, vlanprio);
	else
		i40e_vsi_remove_pvid(vsi);
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter for each MAC */
		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}

		/* remove the previously added non-VLAN MAC filters */
		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* Schedule the worker thread to take care of applying changes */
	i40e_service_event_schedule(vsi->back);

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}

	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
	ret = 0;

error_pvid:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
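/* vlanprio above packs the 12-bit VLAN ID and the 3-bit QoS (802.1p PCP)
 * into one 16-bit port VLAN tag. A self-contained sketch of the packing,
 * assuming I40E_VLAN_PRIORITY_SHIFT follows the 802.1Q layout (PCP in
 * bits 15:13, VID in bits 11:0); PCP_SHIFT and pack_pvid() below are
 * illustrative names, not driver symbols:
 *
 *	#define PCP_SHIFT	13
 *	#define VID_MASK	0x0FFF
 *
 *	static u16 pack_pvid(u16 vid, u8 qos)
 *	{
 *		return (vid & VID_MASK) | ((u16)(qos & 0x7) << PCP_SHIFT);
 *	}
 *
 * For example, vid = 100 with qos = 3 packs to 0x6064, the value compared
 * against le16_to_cpu(vsi->info.pvid) to detect a duplicate request.
 */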
/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate
 * @max_tx_rate: Maximum Tx rate
 *
 * configure VF Tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
		       int max_tx_rate)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error;

	if (min_tx_rate) {
		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
			min_tx_rate, vf_id);
		/* take the error path so the pending bit set above is cleared */
		ret = -EINVAL;
		goto error;
	}

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error;
	}

	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
	if (ret)
		goto error;

	vf->tx_rate = max_tx_rate;
error:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}

/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_param;

	vf = &pf->vf[vf_id];
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_param;
	}

	ivi->vf = vf_id;

	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);

	ivi->max_tx_rate = vf->tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->spoofchk = vf->spoofchk;
	ivi->trusted = vf->trusted;
	ret = 0;

error_param:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct virtchnl_pf_event pfe;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int abs_vf_id;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_out;
	}

	vf = &pf->vf[vf_id];
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		pfe.event_data.link_event.link_status =
			pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			pf->hw.phy.link_info.link_speed;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		pfe.event_data.link_event.link_status = true;
		pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		pfe.event_data.link_event.link_status = false;
		pfe.event_data.link_event.link_speed = 0;
		break;
	default:
		ret = -EINVAL;
		goto error_out;
	}
	/* Notify the VF of its new link state */
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);

error_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
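/* The switch above drives a small tri-state: when link_forced is false the
 * VF sees the PF's physical link, otherwise it sees whatever the
 * administrator pinned. Note that a forced-up link reports a fixed
 * VIRTCHNL_LINK_SPEED_40GB, since there is no physical speed to mirror.
 * A sketch of the status mapping, where the default arm handles
 * IFLA_VF_LINK_STATE_AUTO and phys_up is a hypothetical stand-in for the
 * I40E_AQ_LINK_UP test:
 *
 *	static bool vf_link_status(int state, bool phys_up)
 *	{
 *		switch (state) {
 *		case IFLA_VF_LINK_STATE_ENABLE:
 *			return true;
 *		case IFLA_VF_LINK_STATE_DISABLE:
 *			return false;
 *		default:
 *			return phys_up;
 *		}
 *	}
 */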
/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	vf = &pf->vf[vf_id];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto out;
	}

	if (enable == vf->spoofchk)
		goto out;

	vf->spoofchk = enable;
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
			ret);
		ret = -EIO;
	}
out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}

/**
 * i40e_ndo_set_vf_trust
 * @netdev: network interface device structure of the pf
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Enable or disable VF trust setting
 **/
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
		ret = -EINVAL;
		goto out;
	}

	vf = &pf->vf[vf_id];

	if (setting == vf->trusted)
		goto out;

	vf->trusted = setting;
	i40e_vc_disable_vf(vf);
	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
		 vf_id, setting ? "" : "un");

	/* A VF that loses trust must not keep the ADq cloud filters it
	 * installed while it was trusted.
	 */
	if (vf->adq_enabled && !vf->trusted) {
		dev_info(&pf->pdev->dev,
			 "VF %u no longer Trusted, deleting all cloud filters\n",
			 vf_id);
		i40e_del_all_cloud_filters(vf);
	}

out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
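/* Both hooks above are administrator-side toggles, typically driven from
 * userspace via "ip link set <pf-netdev> vf <vf_id> spoofchk on" and
 * "ip link set <pf-netdev> vf <vf_id> trust on". A sketch of the security
 * flag selection used by the spoofchk path, where the flag names are the
 * driver's own but spoofchk_sec_flags() is a hypothetical helper:
 *
 *	static u8 spoofchk_sec_flags(bool enable)
 *	{
 *		return enable ? (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
 *				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) : 0;
 *	}
 *
 * Leaving sec_flags at zero while I40E_AQ_VSI_PROP_SECURITY_VALID is set in
 * valid_sections is what clears both checks on disable.
 */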