// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

/**
 * i40e_vc_link_speed2mbps
 * converts i40e_aq_link_speed to integer value of Mbps
 * @link_speed: the speed to convert
 *
 * return the speed as direct value of Mbps.
 **/
static u32
i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
{
	switch (link_speed) {
	case I40E_LINK_SPEED_100MB:
		return SPEED_100;
	case I40E_LINK_SPEED_1GB:
		return SPEED_1000;
	case I40E_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case I40E_LINK_SPEED_5GB:
		return SPEED_5000;
	case I40E_LINK_SPEED_10GB:
		return SPEED_10000;
	case I40E_LINK_SPEED_20GB:
		return SPEED_20000;
	case I40E_LINK_SPEED_25GB:
		return SPEED_25000;
	case I40E_LINK_SPEED_40GB:
		return SPEED_40000;
	case I40E_LINK_SPEED_UNKNOWN:
		return SPEED_UNKNOWN;
	}
	return SPEED_UNKNOWN;
}

/**
 * i40e_set_vf_link_state
 * @vf: pointer to the VF structure
 * @pfe: pointer to PF event structure
 * @ls: pointer to link status structure
 *
 * set a link state on a single vf
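 *
 * VFs that negotiated VIRTCHNL_VF_CAP_ADV_LINK_SPEED get the speed as a plain
 * Mbps value in link_event_adv, while legacy VFs get the virtchnl link-speed
 * enum in link_event. A forced link state (vf->link_forced) overrides the
 * physical link status reported in @ls.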
 **/
static void i40e_set_vf_link_state(struct i40e_vf *vf,
				   struct virtchnl_pf_event *pfe,
				   struct i40e_link_status *ls)
{
	u8 link_status = ls->link_info & I40E_AQ_LINK_UP;

	if (vf->link_forced)
		link_status = vf->link_up;

	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_speed = link_status ?
			i40e_vc_link_speed2mbps(ls->link_speed) : 0;
		pfe->event_data.link_event_adv.link_status = link_status;
	} else {
		pfe->event_data.link_event.link_speed = link_status ?
			i40e_virtchnl_link_speed(ls->link_speed) : 0;
		pfe->event_data.link_event.link_status = link_status;
	}
}

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	i40e_set_vf_link_state(vf, &pfe, ls);

	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}
/***********************misc routines*****************************/

/**
 * i40e_vc_disable_vf
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset.
 **/
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset is initiated after this
	 * function is called. However, we do not want to wait forever, so
	 * we'll give a reasonable time and print a message if we failed to
	 * ensure a reset.
	 */
	for (i = 0; i < 20; i++) {
		/* If the PF is releasing its VFs, resetting the VF is
		 * impossible, so leave it.
		 */
		if (test_bit(__I40E_VFS_RELEASING, pf->state))
			return;
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	dev_warn(&vf->pf->pdev->dev,
		 "Failed to initiate reset for VF %d after 200 milliseconds\n",
		 vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u16 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all the queues (1 to 16) as its
		 * own, they may actually belong to different VSIs (up to 4).
		 * We need to find which VSI each queue belongs to.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find the queue id relative to the given VSI */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
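 *
 * The rx/tx queue maps in @vecmap are folded into a single bitmap with one
 * bit per (queue, queue type) pair and then walked in order: the first queue
 * is written to VPINT_LNKLST0/LNKLSTN and every following queue is chained
 * in via the NEXTQ fields of its QINT_RQCTL/QINT_TQCTL register.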
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the vf is running in polling mode and using interrupt zero,
	 * need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}

/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
 *
 **/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}

/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf;
	int ret = 0;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	if (qvlist_info->num_vectors > msix_vf) {
		dev_warn(&pf->pdev->dev,
			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
			 qvlist_info->num_vectors,
			 msix_vf);
		ret = -EINVAL;
		goto err_out;
	}

	kfree(vf->qvlist_info);
	vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
					      qvlist_info->num_vectors - 1),
				  GFP_KERNEL);
	if (!vf->qvlist_info) {
		ret = -ENOMEM;
		goto err_out;
	}
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
			ret = -EINVAL;
			goto err_free;
		}

		v_idx = qv_info->v_idx;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			      (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			      (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			      (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			      (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			      (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			      (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err_free:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
err_out:
	return ret;
}

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
				  I40E_RX_SPLIT_IP |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* store the VSI index and id for ADq; don't apply the MAC filter here */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}

/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part VPLAN_QTABLE & completes VF mappings.
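 *
 * Each VF relative queue index is written to VPLAN_QTABLE(index, vf_id)
 * together with the absolute PF queue id it maps to, walking every traffic
 * class when ADq is enabled.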
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again and only clear their values in
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						       (vf->vf_id)) +
						      (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id)) +
						     (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs. If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * HW.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * called to get the number of VLANs offloaded on this VF
 **/
static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	u16 num_vlans = 0, bkt;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
			num_vlans++;
	}

	return num_vlans;
}

/**
 * i40e_get_vlan_list_sync
 * @vsi: pointer to the VSI
 * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
 * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
 *             This array is allocated here, but has to be freed in caller.
 *
 * Called to get number of VLANs and VLAN list present in mac_filter_hash.
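 *
 * The list is gathered under mac_filter_hash_lock; on allocation failure
 * *vlan_list is left NULL, which the caller must check before use.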
 **/
static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
				    s16 **vlan_list)
{
	struct i40e_mac_filter *f;
	int i = 0;
	int bkt;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	*num_vlans = i40e_getnum_vf_vsi_vlan_filters(vsi);
	*vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
	if (!(*vlan_list))
		goto err;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
			continue;
		(*vlan_list)[i++] = f->vlan;
	}
err:
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
}

/**
 * i40e_set_vsi_promisc
 * @vf: pointer to the VF struct
 * @seid: VSI number
 * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
 *		  for a given VLAN
 * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
 *		    for a given VLAN
 * @vl: List of VLANs - apply filter for given VLANs
 * @num_vlans: Number of elements in @vl
 **/
static i40e_status
i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
		     bool unicast_enable, s16 *vl, u16 num_vlans)
{
	i40e_status aq_ret, aq_tmp = 0;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* No VLAN to set promisc on, set on VSI */
	if (!num_vlans || !vl) {
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
							       multi_enable,
							       NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			return aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
							     unicast_enable,
							     NULL, true);

		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
		}

		return aq_ret;
	}

	for (i = 0; i < num_vlans; i++) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
							    multi_enable,
							    vl[i], NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			if (!aq_tmp)
				aq_tmp = aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
							    unicast_enable,
							    vl[i], NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			if (!aq_tmp)
				aq_tmp = aq_ret;
		}
	}

	if (aq_tmp)
		aq_ret = aq_tmp;

	return aq_ret;
}

/**
 * i40e_config_vf_promiscuous_mode
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
 *
 * Called from the VF to configure the promiscuous mode of
 * VF VSIs and from the VF reset path to reset promiscuous mode.
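 *
 * When a port VLAN is configured, the promiscuous setting is applied only to
 * that VLAN; otherwise it is applied per VLAN filter present on the VSI, or
 * to the VSI itself when no VLAN filters exist.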
 **/
static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
						   u16 vsi_id,
						   bool allmulti,
						   bool alluni)
{
	i40e_status aq_ret = I40E_SUCCESS;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u16 num_vlans;
	s16 *vl;

	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
		return I40E_ERR_PARAM;

	if (vf->port_vlan_id) {
		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
					      alluni, &vf->port_vlan_id, 1);
		return aq_ret;
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);

		if (!vl)
			return I40E_ERR_NO_MEMORY;

		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
					      vl, num_vlans);
		kfree(vl);
		return aq_ret;
	}

	/* no VLANs to set on, set on VSI */
	aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
				      NULL, 0);
	return aq_ret;
}

/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;

	/* warn the VF */
	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer to do
	 * it earlier to give any VF config functions that may still be
	 * running at this point some time to finish.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}

/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* disable promisc modes in case they were enabled */
	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is in reset, resets successfully, or resets
 * are disabled and false otherwise.
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
		return true;

	/* If the VFs have been disabled, this means something else is
	 * resetting the VF, so we shouldn't continue.
	 */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return true;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait.  This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF that
		 * failed the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Hw may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;

	set_bit(__I40E_VFS_RELEASING, pf->state);
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
	clear_bit(__I40E_VFS_RELEASING, pf->state);
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR.
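	 * Interrupt 0 is re-enabled at the end of this function once
	 * allocation has finished.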
	 */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
		}
		ret = i40e_pci_sriov_enable(pdev, num_vfs);
		goto sriov_configure_out;
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		ret = -EINVAL;
		goto sriov_configure_out;
	}
sriov_configure_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received.
		 */
		vf->num_invalid_msgs = 0;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}

/**
 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
 * @vf: pointer to VF structure
 **/
static void i40e_del_qch(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* first element in the array belongs to primary VF VSI and we shouldn't
	 * delete it. We should however delete the rest of the VSIs created
We should however delete the rest of the VSIs created 1958 */ 1959 for (i = 1; i < vf->num_tc; i++) { 1960 if (vf->ch[i].vsi_idx) { 1961 i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]); 1962 vf->ch[i].vsi_idx = 0; 1963 vf->ch[i].vsi_id = 0; 1964 } 1965 } 1966 } 1967 1968 /** 1969 * i40e_vc_get_vf_resources_msg 1970 * @vf: pointer to the VF info 1971 * @msg: pointer to the msg buffer 1972 * 1973 * called from the VF to request its resources 1974 **/ 1975 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) 1976 { 1977 struct virtchnl_vf_resource *vfres = NULL; 1978 struct i40e_pf *pf = vf->pf; 1979 i40e_status aq_ret = 0; 1980 struct i40e_vsi *vsi; 1981 int num_vsis = 1; 1982 size_t len = 0; 1983 int ret; 1984 1985 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 1986 aq_ret = I40E_ERR_PARAM; 1987 goto err; 1988 } 1989 1990 len = struct_size(vfres, vsi_res, num_vsis); 1991 vfres = kzalloc(len, GFP_KERNEL); 1992 if (!vfres) { 1993 aq_ret = I40E_ERR_NO_MEMORY; 1994 len = 0; 1995 goto err; 1996 } 1997 if (VF_IS_V11(&vf->vf_ver)) 1998 vf->driver_caps = *(u32 *)msg; 1999 else 2000 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 | 2001 VIRTCHNL_VF_OFFLOAD_RSS_REG | 2002 VIRTCHNL_VF_OFFLOAD_VLAN; 2003 2004 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; 2005 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED; 2006 vsi = pf->vsi[vf->lan_vsi_idx]; 2007 if (!vsi->info.pvid) 2008 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; 2009 2010 if (i40e_vf_client_capable(pf, vf->vf_id) && 2011 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) { 2012 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP; 2013 set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); 2014 } else { 2015 clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); 2016 } 2017 2018 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 2019 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; 2020 } else { 2021 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && 2022 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)) 2023 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; 2024 else 2025 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; 2026 } 2027 2028 if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { 2029 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) 2030 vfres->vf_cap_flags |= 2031 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; 2032 } 2033 2034 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP) 2035 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; 2036 2037 if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) && 2038 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) 2039 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; 2040 2041 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) { 2042 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 2043 dev_err(&pf->pdev->dev, 2044 "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", 2045 vf->vf_id); 2046 aq_ret = I40E_ERR_PARAM; 2047 goto err; 2048 } 2049 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; 2050 } 2051 2052 if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) { 2053 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 2054 vfres->vf_cap_flags |= 2055 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; 2056 } 2057 2058 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES) 2059 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; 2060 2061 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ) 2062 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ; 2063 2064 vfres->num_vsis = num_vsis; 2065 vfres->num_queue_pairs = vf->num_queue_pairs; 2066 
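/* Advertise the per-VF MSI-X vector limit from the HW function
 * capabilities along with the fixed RSS key and LUT sizes.
 */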
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; 2067 vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; 2068 vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; 2069 2070 if (vf->lan_vsi_idx) { 2071 vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; 2072 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; 2073 vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs; 2074 /* VFs only use TC 0 */ 2075 vfres->vsi_res[0].qset_handle 2076 = le16_to_cpu(vsi->info.qs_handle[0]); 2077 ether_addr_copy(vfres->vsi_res[0].default_mac_addr, 2078 vf->default_lan_addr.addr); 2079 } 2080 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); 2081 2082 err: 2083 /* send the response back to the VF */ 2084 ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, 2085 aq_ret, (u8 *)vfres, len); 2086 2087 kfree(vfres); 2088 return ret; 2089 } 2090 2091 /** 2092 * i40e_vc_reset_vf_msg 2093 * @vf: pointer to the VF info 2094 * 2095 * called from the VF to reset itself, 2096 * unlike other virtchnl messages, PF driver 2097 * doesn't send the response back to the VF 2098 **/ 2099 static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) 2100 { 2101 if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 2102 i40e_reset_vf(vf, false); 2103 } 2104 2105 /** 2106 * i40e_vc_config_promiscuous_mode_msg 2107 * @vf: pointer to the VF info 2108 * @msg: pointer to the msg buffer 2109 * 2110 * called from the VF to configure the promiscuous mode of 2111 * VF vsis 2112 **/ 2113 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) 2114 { 2115 struct virtchnl_promisc_info *info = 2116 (struct virtchnl_promisc_info *)msg; 2117 struct i40e_pf *pf = vf->pf; 2118 i40e_status aq_ret = 0; 2119 bool allmulti = false; 2120 bool alluni = false; 2121 2122 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2123 aq_ret = I40E_ERR_PARAM; 2124 goto err_out; 2125 } 2126 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 2127 dev_err(&pf->pdev->dev, 2128 "Unprivileged VF %d is attempting to configure promiscuous mode\n", 2129 vf->vf_id); 2130 2131 /* Lie to the VF on purpose, because this is an error we can 2132 * ignore. Unprivileged VF is not a virtual channel error. 
2133 */ 2134 aq_ret = 0; 2135 goto err_out; 2136 } 2137 2138 if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) { 2139 aq_ret = I40E_ERR_PARAM; 2140 goto err_out; 2141 } 2142 2143 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) { 2144 aq_ret = I40E_ERR_PARAM; 2145 goto err_out; 2146 } 2147 2148 /* Multicast promiscuous handling*/ 2149 if (info->flags & FLAG_VF_MULTICAST_PROMISC) 2150 allmulti = true; 2151 2152 if (info->flags & FLAG_VF_UNICAST_PROMISC) 2153 alluni = true; 2154 aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti, 2155 alluni); 2156 if (aq_ret) 2157 goto err_out; 2158 2159 if (allmulti) { 2160 if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC, 2161 &vf->vf_states)) 2162 dev_info(&pf->pdev->dev, 2163 "VF %d successfully set multicast promiscuous mode\n", 2164 vf->vf_id); 2165 } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC, 2166 &vf->vf_states)) 2167 dev_info(&pf->pdev->dev, 2168 "VF %d successfully unset multicast promiscuous mode\n", 2169 vf->vf_id); 2170 2171 if (alluni) { 2172 if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC, 2173 &vf->vf_states)) 2174 dev_info(&pf->pdev->dev, 2175 "VF %d successfully set unicast promiscuous mode\n", 2176 vf->vf_id); 2177 } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC, 2178 &vf->vf_states)) 2179 dev_info(&pf->pdev->dev, 2180 "VF %d successfully unset unicast promiscuous mode\n", 2181 vf->vf_id); 2182 2183 err_out: 2184 /* send the response to the VF */ 2185 return i40e_vc_send_resp_to_vf(vf, 2186 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 2187 aq_ret); 2188 } 2189 2190 /** 2191 * i40e_vc_config_queues_msg 2192 * @vf: pointer to the VF info 2193 * @msg: pointer to the msg buffer 2194 * 2195 * called from the VF to configure the rx/tx 2196 * queues 2197 **/ 2198 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) 2199 { 2200 struct virtchnl_vsi_queue_config_info *qci = 2201 (struct virtchnl_vsi_queue_config_info *)msg; 2202 struct virtchnl_queue_pair_info *qpi; 2203 struct i40e_pf *pf = vf->pf; 2204 u16 vsi_id, vsi_queue_id = 0; 2205 u16 num_qps_all = 0; 2206 i40e_status aq_ret = 0; 2207 int i, j = 0, idx = 0; 2208 2209 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2210 aq_ret = I40E_ERR_PARAM; 2211 goto error_param; 2212 } 2213 2214 if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) { 2215 aq_ret = I40E_ERR_PARAM; 2216 goto error_param; 2217 } 2218 2219 if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) { 2220 aq_ret = I40E_ERR_PARAM; 2221 goto error_param; 2222 } 2223 2224 if (vf->adq_enabled) { 2225 for (i = 0; i < I40E_MAX_VF_VSI; i++) 2226 num_qps_all += vf->ch[i].num_qps; 2227 if (num_qps_all != qci->num_queue_pairs) { 2228 aq_ret = I40E_ERR_PARAM; 2229 goto error_param; 2230 } 2231 } 2232 2233 vsi_id = qci->vsi_id; 2234 2235 for (i = 0; i < qci->num_queue_pairs; i++) { 2236 qpi = &qci->qpair[i]; 2237 2238 if (!vf->adq_enabled) { 2239 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, 2240 qpi->txq.queue_id)) { 2241 aq_ret = I40E_ERR_PARAM; 2242 goto error_param; 2243 } 2244 2245 vsi_queue_id = qpi->txq.queue_id; 2246 2247 if (qpi->txq.vsi_id != qci->vsi_id || 2248 qpi->rxq.vsi_id != qci->vsi_id || 2249 qpi->rxq.queue_id != vsi_queue_id) { 2250 aq_ret = I40E_ERR_PARAM; 2251 goto error_param; 2252 } 2253 } 2254 2255 if (vf->adq_enabled) { 2256 if (idx >= ARRAY_SIZE(vf->ch)) { 2257 aq_ret = I40E_ERR_NO_AVAILABLE_VSI; 2258 goto error_param; 2259 } 2260 vsi_id = vf->ch[idx].vsi_id; 2261 } 2262 2263 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id, 2264 &qpi->rxq) || 2265 i40e_config_vsi_tx_queue(vf, vsi_id, 
vsi_queue_id, 2266 &qpi->txq)) { 2267 aq_ret = I40E_ERR_PARAM; 2268 goto error_param; 2269 } 2270 2271 /* For ADq there can be up to 4 VSIs with max 4 queues each. 2272 * VF does not know about these additional VSIs and all 2273 * it cares is about its own queues. PF configures these queues 2274 * to its appropriate VSIs based on TC mapping 2275 */ 2276 if (vf->adq_enabled) { 2277 if (idx >= ARRAY_SIZE(vf->ch)) { 2278 aq_ret = I40E_ERR_NO_AVAILABLE_VSI; 2279 goto error_param; 2280 } 2281 if (j == (vf->ch[idx].num_qps - 1)) { 2282 idx++; 2283 j = 0; /* resetting the queue count */ 2284 vsi_queue_id = 0; 2285 } else { 2286 j++; 2287 vsi_queue_id++; 2288 } 2289 } 2290 } 2291 /* set vsi num_queue_pairs in use to num configured by VF */ 2292 if (!vf->adq_enabled) { 2293 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = 2294 qci->num_queue_pairs; 2295 } else { 2296 for (i = 0; i < vf->num_tc; i++) 2297 pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs = 2298 vf->ch[i].num_qps; 2299 } 2300 2301 error_param: 2302 /* send the response to the VF */ 2303 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, 2304 aq_ret); 2305 } 2306 2307 /** 2308 * i40e_validate_queue_map - check queue map is valid 2309 * @vf: the VF structure pointer 2310 * @vsi_id: vsi id 2311 * @queuemap: Tx or Rx queue map 2312 * 2313 * check if Tx or Rx queue map is valid 2314 **/ 2315 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id, 2316 unsigned long queuemap) 2317 { 2318 u16 vsi_queue_id, queue_id; 2319 2320 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) { 2321 if (vf->adq_enabled) { 2322 vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id; 2323 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF); 2324 } else { 2325 queue_id = vsi_queue_id; 2326 } 2327 2328 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) 2329 return -EINVAL; 2330 } 2331 2332 return 0; 2333 } 2334 2335 /** 2336 * i40e_vc_config_irq_map_msg 2337 * @vf: pointer to the VF info 2338 * @msg: pointer to the msg buffer 2339 * 2340 * called from the VF to configure the irq to 2341 * queue map 2342 **/ 2343 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) 2344 { 2345 struct virtchnl_irq_map_info *irqmap_info = 2346 (struct virtchnl_irq_map_info *)msg; 2347 struct virtchnl_vector_map *map; 2348 u16 vsi_id; 2349 i40e_status aq_ret = 0; 2350 int i; 2351 2352 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2353 aq_ret = I40E_ERR_PARAM; 2354 goto error_param; 2355 } 2356 2357 if (irqmap_info->num_vectors > 2358 vf->pf->hw.func_caps.num_msix_vectors_vf) { 2359 aq_ret = I40E_ERR_PARAM; 2360 goto error_param; 2361 } 2362 2363 for (i = 0; i < irqmap_info->num_vectors; i++) { 2364 map = &irqmap_info->vecmap[i]; 2365 /* validate msg params */ 2366 if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) || 2367 !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) { 2368 aq_ret = I40E_ERR_PARAM; 2369 goto error_param; 2370 } 2371 vsi_id = map->vsi_id; 2372 2373 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) { 2374 aq_ret = I40E_ERR_PARAM; 2375 goto error_param; 2376 } 2377 2378 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) { 2379 aq_ret = I40E_ERR_PARAM; 2380 goto error_param; 2381 } 2382 2383 i40e_config_irq_link_list(vf, vsi_id, map); 2384 } 2385 error_param: 2386 /* send the response to the VF */ 2387 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, 2388 aq_ret); 2389 } 2390 2391 /** 2392 * i40e_ctrl_vf_tx_rings 2393 * @vsi: the SRIOV VSI being configured 2394 * @q_map: bit map of the queues to be 
enabled 2395 * @enable: start or stop the queue 2396 **/ 2397 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map, 2398 bool enable) 2399 { 2400 struct i40e_pf *pf = vsi->back; 2401 int ret = 0; 2402 u16 q_id; 2403 2404 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { 2405 ret = i40e_control_wait_tx_q(vsi->seid, pf, 2406 vsi->base_queue + q_id, 2407 false /*is xdp*/, enable); 2408 if (ret) 2409 break; 2410 } 2411 return ret; 2412 } 2413 2414 /** 2415 * i40e_ctrl_vf_rx_rings 2416 * @vsi: the SRIOV VSI being configured 2417 * @q_map: bit map of the queues to be enabled 2418 * @enable: start or stop the queue 2419 **/ 2420 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map, 2421 bool enable) 2422 { 2423 struct i40e_pf *pf = vsi->back; 2424 int ret = 0; 2425 u16 q_id; 2426 2427 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { 2428 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id, 2429 enable); 2430 if (ret) 2431 break; 2432 } 2433 return ret; 2434 } 2435 2436 /** 2437 * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTHCHNL 2438 * @vqs: virtchnl_queue_select structure containing bitmaps to validate 2439 * 2440 * Returns true if validation was successful, else false. 2441 */ 2442 static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs) 2443 { 2444 if ((!vqs->rx_queues && !vqs->tx_queues) || 2445 vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) || 2446 vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES)) 2447 return false; 2448 2449 return true; 2450 } 2451 2452 /** 2453 * i40e_vc_enable_queues_msg 2454 * @vf: pointer to the VF info 2455 * @msg: pointer to the msg buffer 2456 * 2457 * called from the VF to enable all or specific queue(s) 2458 **/ 2459 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) 2460 { 2461 struct virtchnl_queue_select *vqs = 2462 (struct virtchnl_queue_select *)msg; 2463 struct i40e_pf *pf = vf->pf; 2464 i40e_status aq_ret = 0; 2465 int i; 2466 2467 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2468 aq_ret = I40E_ERR_PARAM; 2469 goto error_param; 2470 } 2471 2472 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2473 aq_ret = I40E_ERR_PARAM; 2474 goto error_param; 2475 } 2476 2477 if (!i40e_vc_validate_vqs_bitmaps(vqs)) { 2478 aq_ret = I40E_ERR_PARAM; 2479 goto error_param; 2480 } 2481 2482 /* Use the queue bit map sent by the VF */ 2483 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2484 true)) { 2485 aq_ret = I40E_ERR_TIMEOUT; 2486 goto error_param; 2487 } 2488 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, 2489 true)) { 2490 aq_ret = I40E_ERR_TIMEOUT; 2491 goto error_param; 2492 } 2493 2494 /* need to start the rings for additional ADq VSI's as well */ 2495 if (vf->adq_enabled) { 2496 /* zero belongs to LAN VSI */ 2497 for (i = 1; i < vf->num_tc; i++) { 2498 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx])) 2499 aq_ret = I40E_ERR_TIMEOUT; 2500 } 2501 } 2502 2503 error_param: 2504 /* send the response to the VF */ 2505 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, 2506 aq_ret); 2507 } 2508 2509 /** 2510 * i40e_vc_disable_queues_msg 2511 * @vf: pointer to the VF info 2512 * @msg: pointer to the msg buffer 2513 * 2514 * called from the VF to disable all or specific 2515 * queue(s) 2516 **/ 2517 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) 2518 { 2519 struct virtchnl_queue_select *vqs = 2520 (struct virtchnl_queue_select *)msg; 2521 struct i40e_pf *pf = vf->pf; 2522 i40e_status aq_ret = 
0; 2523 2524 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2525 aq_ret = I40E_ERR_PARAM; 2526 goto error_param; 2527 } 2528 2529 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2530 aq_ret = I40E_ERR_PARAM; 2531 goto error_param; 2532 } 2533 2534 if (!i40e_vc_validate_vqs_bitmaps(vqs)) { 2535 aq_ret = I40E_ERR_PARAM; 2536 goto error_param; 2537 } 2538 2539 /* Use the queue bit map sent by the VF */ 2540 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, 2541 false)) { 2542 aq_ret = I40E_ERR_TIMEOUT; 2543 goto error_param; 2544 } 2545 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, 2546 false)) { 2547 aq_ret = I40E_ERR_TIMEOUT; 2548 goto error_param; 2549 } 2550 error_param: 2551 /* send the response to the VF */ 2552 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, 2553 aq_ret); 2554 } 2555 2556 /** 2557 * i40e_vc_request_queues_msg 2558 * @vf: pointer to the VF info 2559 * @msg: pointer to the msg buffer 2560 * 2561 * VFs get a default number of queues but can use this message to request a 2562 * different number. If the request is successful, PF will reset the VF and 2563 * return 0. If unsuccessful, PF will send message informing VF of number of 2564 * available queues and return result of sending VF a message. 2565 **/ 2566 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) 2567 { 2568 struct virtchnl_vf_res_request *vfres = 2569 (struct virtchnl_vf_res_request *)msg; 2570 u16 req_pairs = vfres->num_queue_pairs; 2571 u8 cur_pairs = vf->num_queue_pairs; 2572 struct i40e_pf *pf = vf->pf; 2573 2574 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) 2575 return -EINVAL; 2576 2577 if (req_pairs > I40E_MAX_VF_QUEUES) { 2578 dev_err(&pf->pdev->dev, 2579 "VF %d tried to request more than %d queues.\n", 2580 vf->vf_id, 2581 I40E_MAX_VF_QUEUES); 2582 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES; 2583 } else if (req_pairs - cur_pairs > pf->queues_left) { 2584 dev_warn(&pf->pdev->dev, 2585 "VF %d requested %d more queues, but only %d left.\n", 2586 vf->vf_id, 2587 req_pairs - cur_pairs, 2588 pf->queues_left); 2589 vfres->num_queue_pairs = pf->queues_left + cur_pairs; 2590 } else { 2591 /* successful request */ 2592 vf->num_req_queues = req_pairs; 2593 i40e_vc_notify_vf_reset(vf); 2594 i40e_reset_vf(vf, false); 2595 return 0; 2596 } 2597 2598 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0, 2599 (u8 *)vfres, sizeof(*vfres)); 2600 } 2601 2602 /** 2603 * i40e_vc_get_stats_msg 2604 * @vf: pointer to the VF info 2605 * @msg: pointer to the msg buffer 2606 * 2607 * called from the VF to get vsi stats 2608 **/ 2609 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) 2610 { 2611 struct virtchnl_queue_select *vqs = 2612 (struct virtchnl_queue_select *)msg; 2613 struct i40e_pf *pf = vf->pf; 2614 struct i40e_eth_stats stats; 2615 i40e_status aq_ret = 0; 2616 struct i40e_vsi *vsi; 2617 2618 memset(&stats, 0, sizeof(struct i40e_eth_stats)); 2619 2620 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2621 aq_ret = I40E_ERR_PARAM; 2622 goto error_param; 2623 } 2624 2625 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 2626 aq_ret = I40E_ERR_PARAM; 2627 goto error_param; 2628 } 2629 2630 vsi = pf->vsi[vf->lan_vsi_idx]; 2631 if (!vsi) { 2632 aq_ret = I40E_ERR_PARAM; 2633 goto error_param; 2634 } 2635 i40e_update_eth_stats(vsi); 2636 stats = vsi->eth_stats; 2637 2638 error_param: 2639 /* send the response back to the VF */ 2640 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret, 2641 (u8 *)&stats, 
sizeof(stats)); 2642 } 2643 2644 /* If the VF is not trusted restrict the number of MAC/VLAN it can program 2645 * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast 2646 */ 2647 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1) 2648 #define I40E_VC_MAX_VLAN_PER_VF 16 2649 2650 /** 2651 * i40e_check_vf_permission 2652 * @vf: pointer to the VF info 2653 * @al: MAC address list from virtchnl 2654 * 2655 * Check that the given list of MAC addresses is allowed. Will return -EPERM 2656 * if any address in the list is not valid. Checks the following conditions: 2657 * 2658 * 1) broadcast and zero addresses are never valid 2659 * 2) unicast addresses are not allowed if the VMM has administratively set 2660 * the VF MAC address, unless the VF is marked as privileged. 2661 * 3) There is enough space to add all the addresses. 2662 * 2663 * Note that to guarantee consistency, it is expected this function be called 2664 * while holding the mac_filter_hash_lock, as otherwise the current number of 2665 * addresses might not be accurate. 2666 **/ 2667 static inline int i40e_check_vf_permission(struct i40e_vf *vf, 2668 struct virtchnl_ether_addr_list *al) 2669 { 2670 struct i40e_pf *pf = vf->pf; 2671 struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; 2672 int mac2add_cnt = 0; 2673 int i; 2674 2675 for (i = 0; i < al->num_elements; i++) { 2676 struct i40e_mac_filter *f; 2677 u8 *addr = al->list[i].addr; 2678 2679 if (is_broadcast_ether_addr(addr) || 2680 is_zero_ether_addr(addr)) { 2681 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", 2682 addr); 2683 return I40E_ERR_INVALID_MAC_ADDR; 2684 } 2685 2686 /* If the host VMM administrator has set the VF MAC address 2687 * administratively via the ndo_set_vf_mac command then deny 2688 * permission to the VF to add or delete unicast MAC addresses. 2689 * Unless the VF is privileged and then it can do whatever. 2690 * The VF may request to set the MAC address filter already 2691 * assigned to it so do not return an error in that case. 2692 */ 2693 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && 2694 !is_multicast_ether_addr(addr) && vf->pf_set_mac && 2695 !ether_addr_equal(addr, vf->default_lan_addr.addr)) { 2696 dev_err(&pf->pdev->dev, 2697 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); 2698 return -EPERM; 2699 } 2700 2701 /*count filters that really will be added*/ 2702 f = i40e_find_mac(vsi, addr); 2703 if (!f) 2704 ++mac2add_cnt; 2705 } 2706 2707 /* If this VF is not privileged, then we can't add more than a limited 2708 * number of addresses. Check to make sure that the additions do not 2709 * push us over the limit. 
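 * For an untrusted VF the cap is I40E_VC_MAX_MAC_ADDR_PER_VF
 * (16 multicast + 1 unicast + 1 broadcast entries).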
2710 */ 2711 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && 2712 (i40e_count_filters(vsi) + mac2add_cnt) > 2713 I40E_VC_MAX_MAC_ADDR_PER_VF) { 2714 dev_err(&pf->pdev->dev, 2715 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); 2716 return -EPERM; 2717 } 2718 return 0; 2719 } 2720 2721 /** 2722 * i40e_vc_add_mac_addr_msg 2723 * @vf: pointer to the VF info 2724 * @msg: pointer to the msg buffer 2725 * 2726 * add guest mac address filter 2727 **/ 2728 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) 2729 { 2730 struct virtchnl_ether_addr_list *al = 2731 (struct virtchnl_ether_addr_list *)msg; 2732 struct i40e_pf *pf = vf->pf; 2733 struct i40e_vsi *vsi = NULL; 2734 i40e_status ret = 0; 2735 int i; 2736 2737 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2738 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { 2739 ret = I40E_ERR_PARAM; 2740 goto error_param; 2741 } 2742 2743 vsi = pf->vsi[vf->lan_vsi_idx]; 2744 2745 /* Lock once, because all function inside for loop accesses VSI's 2746 * MAC filter list which needs to be protected using same lock. 2747 */ 2748 spin_lock_bh(&vsi->mac_filter_hash_lock); 2749 2750 ret = i40e_check_vf_permission(vf, al); 2751 if (ret) { 2752 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2753 goto error_param; 2754 } 2755 2756 /* add new addresses to the list */ 2757 for (i = 0; i < al->num_elements; i++) { 2758 struct i40e_mac_filter *f; 2759 2760 f = i40e_find_mac(vsi, al->list[i].addr); 2761 if (!f) { 2762 f = i40e_add_mac_filter(vsi, al->list[i].addr); 2763 2764 if (!f) { 2765 dev_err(&pf->pdev->dev, 2766 "Unable to add MAC filter %pM for VF %d\n", 2767 al->list[i].addr, vf->vf_id); 2768 ret = I40E_ERR_PARAM; 2769 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2770 goto error_param; 2771 } 2772 if (is_valid_ether_addr(al->list[i].addr) && 2773 is_zero_ether_addr(vf->default_lan_addr.addr)) 2774 ether_addr_copy(vf->default_lan_addr.addr, 2775 al->list[i].addr); 2776 } 2777 } 2778 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2779 2780 /* program the updated filter list */ 2781 ret = i40e_sync_vsi_filters(vsi); 2782 if (ret) 2783 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 2784 vf->vf_id, ret); 2785 2786 error_param: 2787 /* send the response to the VF */ 2788 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, 2789 ret); 2790 } 2791 2792 /** 2793 * i40e_vc_del_mac_addr_msg 2794 * @vf: pointer to the VF info 2795 * @msg: pointer to the msg buffer 2796 * 2797 * remove guest mac address filter 2798 **/ 2799 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) 2800 { 2801 struct virtchnl_ether_addr_list *al = 2802 (struct virtchnl_ether_addr_list *)msg; 2803 bool was_unimac_deleted = false; 2804 struct i40e_pf *pf = vf->pf; 2805 struct i40e_vsi *vsi = NULL; 2806 i40e_status ret = 0; 2807 int i; 2808 2809 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2810 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { 2811 ret = I40E_ERR_PARAM; 2812 goto error_param; 2813 } 2814 2815 for (i = 0; i < al->num_elements; i++) { 2816 if (is_broadcast_ether_addr(al->list[i].addr) || 2817 is_zero_ether_addr(al->list[i].addr)) { 2818 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n", 2819 al->list[i].addr, vf->vf_id); 2820 ret = I40E_ERR_INVALID_MAC_ADDR; 2821 goto error_param; 2822 } 2823 if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr)) 2824 was_unimac_deleted = true; 2825 } 2826 vsi = pf->vsi[vf->lan_vsi_idx]; 2827 2828 
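/* Hold the same MAC filter hash lock as the add path so the VSI's
 * filter list stays consistent while entries are deleted below.
 */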
spin_lock_bh(&vsi->mac_filter_hash_lock); 2829 /* delete addresses from the list */ 2830 for (i = 0; i < al->num_elements; i++) 2831 if (i40e_del_mac_filter(vsi, al->list[i].addr)) { 2832 ret = I40E_ERR_INVALID_MAC_ADDR; 2833 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2834 goto error_param; 2835 } 2836 2837 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2838 2839 /* program the updated filter list */ 2840 ret = i40e_sync_vsi_filters(vsi); 2841 if (ret) 2842 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 2843 vf->vf_id, ret); 2844 2845 if (vf->trusted && was_unimac_deleted) { 2846 struct i40e_mac_filter *f; 2847 struct hlist_node *h; 2848 u8 *macaddr = NULL; 2849 int bkt; 2850 2851 /* set last unicast mac address as default */ 2852 spin_lock_bh(&vsi->mac_filter_hash_lock); 2853 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { 2854 if (is_valid_ether_addr(f->macaddr)) 2855 macaddr = f->macaddr; 2856 } 2857 if (macaddr) 2858 ether_addr_copy(vf->default_lan_addr.addr, macaddr); 2859 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2860 } 2861 error_param: 2862 /* send the response to the VF */ 2863 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret); 2864 } 2865 2866 /** 2867 * i40e_vc_add_vlan_msg 2868 * @vf: pointer to the VF info 2869 * @msg: pointer to the msg buffer 2870 * 2871 * program guest vlan id 2872 **/ 2873 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) 2874 { 2875 struct virtchnl_vlan_filter_list *vfl = 2876 (struct virtchnl_vlan_filter_list *)msg; 2877 struct i40e_pf *pf = vf->pf; 2878 struct i40e_vsi *vsi = NULL; 2879 i40e_status aq_ret = 0; 2880 int i; 2881 2882 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) && 2883 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 2884 dev_err(&pf->pdev->dev, 2885 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n"); 2886 goto error_param; 2887 } 2888 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2889 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { 2890 aq_ret = I40E_ERR_PARAM; 2891 goto error_param; 2892 } 2893 2894 for (i = 0; i < vfl->num_elements; i++) { 2895 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 2896 aq_ret = I40E_ERR_PARAM; 2897 dev_err(&pf->pdev->dev, 2898 "invalid VF VLAN id %d\n", vfl->vlan_id[i]); 2899 goto error_param; 2900 } 2901 } 2902 vsi = pf->vsi[vf->lan_vsi_idx]; 2903 if (vsi->info.pvid) { 2904 aq_ret = I40E_ERR_PARAM; 2905 goto error_param; 2906 } 2907 2908 i40e_vlan_stripping_enable(vsi); 2909 for (i = 0; i < vfl->num_elements; i++) { 2910 /* add new VLAN filter */ 2911 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); 2912 if (!ret) 2913 vf->num_vlan++; 2914 2915 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 2916 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 2917 true, 2918 vfl->vlan_id[i], 2919 NULL); 2920 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 2921 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 2922 true, 2923 vfl->vlan_id[i], 2924 NULL); 2925 2926 if (ret) 2927 dev_err(&pf->pdev->dev, 2928 "Unable to add VLAN filter %d for VF %d, error %d\n", 2929 vfl->vlan_id[i], vf->vf_id, ret); 2930 } 2931 2932 error_param: 2933 /* send the response to the VF */ 2934 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret); 2935 } 2936 2937 /** 2938 * i40e_vc_remove_vlan_msg 2939 * @vf: pointer to the VF info 2940 * @msg: pointer to the msg buffer 2941 * 2942 * remove programmed guest vlan id 2943 **/ 2944 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) 2945 { 2946 
struct virtchnl_vlan_filter_list *vfl = 2947 (struct virtchnl_vlan_filter_list *)msg; 2948 struct i40e_pf *pf = vf->pf; 2949 struct i40e_vsi *vsi = NULL; 2950 i40e_status aq_ret = 0; 2951 int i; 2952 2953 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 2954 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { 2955 aq_ret = I40E_ERR_PARAM; 2956 goto error_param; 2957 } 2958 2959 for (i = 0; i < vfl->num_elements; i++) { 2960 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 2961 aq_ret = I40E_ERR_PARAM; 2962 goto error_param; 2963 } 2964 } 2965 2966 vsi = pf->vsi[vf->lan_vsi_idx]; 2967 if (vsi->info.pvid) { 2968 if (vfl->num_elements > 1 || vfl->vlan_id[0]) 2969 aq_ret = I40E_ERR_PARAM; 2970 goto error_param; 2971 } 2972 2973 for (i = 0; i < vfl->num_elements; i++) { 2974 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); 2975 vf->num_vlan--; 2976 2977 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 2978 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 2979 false, 2980 vfl->vlan_id[i], 2981 NULL); 2982 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 2983 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 2984 false, 2985 vfl->vlan_id[i], 2986 NULL); 2987 } 2988 2989 error_param: 2990 /* send the response to the VF */ 2991 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret); 2992 } 2993 2994 /** 2995 * i40e_vc_iwarp_msg 2996 * @vf: pointer to the VF info 2997 * @msg: pointer to the msg buffer 2998 * @msglen: msg length 2999 * 3000 * called from the VF for the iwarp msgs 3001 **/ 3002 static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 3003 { 3004 struct i40e_pf *pf = vf->pf; 3005 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; 3006 i40e_status aq_ret = 0; 3007 3008 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 3009 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 3010 aq_ret = I40E_ERR_PARAM; 3011 goto error_param; 3012 } 3013 3014 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id, 3015 msg, msglen); 3016 3017 error_param: 3018 /* send the response to the VF */ 3019 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP, 3020 aq_ret); 3021 } 3022 3023 /** 3024 * i40e_vc_iwarp_qvmap_msg 3025 * @vf: pointer to the VF info 3026 * @msg: pointer to the msg buffer 3027 * @config: config qvmap or release it 3028 * 3029 * called from the VF for the iwarp msgs 3030 **/ 3031 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) 3032 { 3033 struct virtchnl_iwarp_qvlist_info *qvlist_info = 3034 (struct virtchnl_iwarp_qvlist_info *)msg; 3035 i40e_status aq_ret = 0; 3036 3037 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 3038 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { 3039 aq_ret = I40E_ERR_PARAM; 3040 goto error_param; 3041 } 3042 3043 if (config) { 3044 if (i40e_config_iwarp_qvlist(vf, qvlist_info)) 3045 aq_ret = I40E_ERR_PARAM; 3046 } else { 3047 i40e_release_iwarp_qvlist(vf); 3048 } 3049 3050 error_param: 3051 /* send the response to the VF */ 3052 return i40e_vc_send_resp_to_vf(vf, 3053 config ? 
VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP : 3054 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, 3055 aq_ret); 3056 } 3057 3058 /** 3059 * i40e_vc_config_rss_key 3060 * @vf: pointer to the VF info 3061 * @msg: pointer to the msg buffer 3062 * 3063 * Configure the VF's RSS key 3064 **/ 3065 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) 3066 { 3067 struct virtchnl_rss_key *vrk = 3068 (struct virtchnl_rss_key *)msg; 3069 struct i40e_pf *pf = vf->pf; 3070 struct i40e_vsi *vsi = NULL; 3071 i40e_status aq_ret = 0; 3072 3073 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 3074 !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) || 3075 (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { 3076 aq_ret = I40E_ERR_PARAM; 3077 goto err; 3078 } 3079 3080 vsi = pf->vsi[vf->lan_vsi_idx]; 3081 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0); 3082 err: 3083 /* send the response to the VF */ 3084 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, 3085 aq_ret); 3086 } 3087 3088 /** 3089 * i40e_vc_config_rss_lut 3090 * @vf: pointer to the VF info 3091 * @msg: pointer to the msg buffer 3092 * 3093 * Configure the VF's RSS LUT 3094 **/ 3095 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) 3096 { 3097 struct virtchnl_rss_lut *vrl = 3098 (struct virtchnl_rss_lut *)msg; 3099 struct i40e_pf *pf = vf->pf; 3100 struct i40e_vsi *vsi = NULL; 3101 i40e_status aq_ret = 0; 3102 u16 i; 3103 3104 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || 3105 !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) || 3106 (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { 3107 aq_ret = I40E_ERR_PARAM; 3108 goto err; 3109 } 3110 3111 for (i = 0; i < vrl->lut_entries; i++) 3112 if (vrl->lut[i] >= vf->num_queue_pairs) { 3113 aq_ret = I40E_ERR_PARAM; 3114 goto err; 3115 } 3116 3117 vsi = pf->vsi[vf->lan_vsi_idx]; 3118 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); 3119 /* send the response to the VF */ 3120 err: 3121 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, 3122 aq_ret); 3123 } 3124 3125 /** 3126 * i40e_vc_get_rss_hena 3127 * @vf: pointer to the VF info 3128 * @msg: pointer to the msg buffer 3129 * 3130 * Return the RSS HENA bits allowed by the hardware 3131 **/ 3132 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg) 3133 { 3134 struct virtchnl_rss_hena *vrh = NULL; 3135 struct i40e_pf *pf = vf->pf; 3136 i40e_status aq_ret = 0; 3137 int len = 0; 3138 3139 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3140 aq_ret = I40E_ERR_PARAM; 3141 goto err; 3142 } 3143 len = sizeof(struct virtchnl_rss_hena); 3144 3145 vrh = kzalloc(len, GFP_KERNEL); 3146 if (!vrh) { 3147 aq_ret = I40E_ERR_NO_MEMORY; 3148 len = 0; 3149 goto err; 3150 } 3151 vrh->hena = i40e_pf_get_default_rss_hena(pf); 3152 err: 3153 /* send the response back to the VF */ 3154 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, 3155 aq_ret, (u8 *)vrh, len); 3156 kfree(vrh); 3157 return aq_ret; 3158 } 3159 3160 /** 3161 * i40e_vc_set_rss_hena 3162 * @vf: pointer to the VF info 3163 * @msg: pointer to the msg buffer 3164 * 3165 * Set the RSS HENA bits for the VF 3166 **/ 3167 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg) 3168 { 3169 struct virtchnl_rss_hena *vrh = 3170 (struct virtchnl_rss_hena *)msg; 3171 struct i40e_pf *pf = vf->pf; 3172 struct i40e_hw *hw = &pf->hw; 3173 i40e_status aq_ret = 0; 3174 3175 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3176 aq_ret = I40E_ERR_PARAM; 3177 goto err; 3178 } 3179 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena); 3180 
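/* hena is 64 bits wide: the low word was written to VFQF_HENA1(0)
 * above, the high word goes to VFQF_HENA1(1) below.
 */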
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id), 3181 (u32)(vrh->hena >> 32)); 3182 3183 /* send the response to the VF */ 3184 err: 3185 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret); 3186 } 3187 3188 /** 3189 * i40e_vc_enable_vlan_stripping 3190 * @vf: pointer to the VF info 3191 * @msg: pointer to the msg buffer 3192 * 3193 * Enable vlan header stripping for the VF 3194 **/ 3195 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) 3196 { 3197 i40e_status aq_ret = 0; 3198 struct i40e_vsi *vsi; 3199 3200 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3201 aq_ret = I40E_ERR_PARAM; 3202 goto err; 3203 } 3204 3205 vsi = vf->pf->vsi[vf->lan_vsi_idx]; 3206 i40e_vlan_stripping_enable(vsi); 3207 3208 /* send the response to the VF */ 3209 err: 3210 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, 3211 aq_ret); 3212 } 3213 3214 /** 3215 * i40e_vc_disable_vlan_stripping 3216 * @vf: pointer to the VF info 3217 * @msg: pointer to the msg buffer 3218 * 3219 * Disable vlan header stripping for the VF 3220 **/ 3221 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) 3222 { 3223 i40e_status aq_ret = 0; 3224 struct i40e_vsi *vsi; 3225 3226 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3227 aq_ret = I40E_ERR_PARAM; 3228 goto err; 3229 } 3230 3231 vsi = vf->pf->vsi[vf->lan_vsi_idx]; 3232 i40e_vlan_stripping_disable(vsi); 3233 3234 /* send the response to the VF */ 3235 err: 3236 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 3237 aq_ret); 3238 } 3239 3240 /** 3241 * i40e_validate_cloud_filter 3242 * @vf: pointer to VF structure 3243 * @tc_filter: pointer to filter requested 3244 * 3245 * This function validates cloud filter programmed as TC filter for ADq 3246 **/ 3247 static int i40e_validate_cloud_filter(struct i40e_vf *vf, 3248 struct virtchnl_filter *tc_filter) 3249 { 3250 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec; 3251 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec; 3252 struct i40e_pf *pf = vf->pf; 3253 struct i40e_vsi *vsi = NULL; 3254 struct i40e_mac_filter *f; 3255 struct hlist_node *h; 3256 bool found = false; 3257 int bkt; 3258 3259 if (!tc_filter->action) { 3260 dev_info(&pf->pdev->dev, 3261 "VF %d: Currently ADq doesn't support Drop Action\n", 3262 vf->vf_id); 3263 goto err; 3264 } 3265 3266 /* action_meta is TC number here to which the filter is applied */ 3267 if (!tc_filter->action_meta || 3268 tc_filter->action_meta > I40E_MAX_VF_VSI) { 3269 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", 3270 vf->vf_id, tc_filter->action_meta); 3271 goto err; 3272 } 3273 3274 /* Check filter if it's programmed for advanced mode or basic mode. 3275 * There are two ADq modes (for VF only), 3276 * 1. Basic mode: intended to allow as many filter options as possible 3277 * to be added to a VF in Non-trusted mode. Main goal is 3278 * to add filters to its own MAC and VLAN id. 3279 * 2. Advanced mode: is for allowing filters to be applied other than 3280 * its own MAC or VLAN. This mode requires the VF to be 3281 * Trusted. 
3282 */ 3283 if (mask.dst_mac[0] && !mask.dst_ip[0]) { 3284 vsi = pf->vsi[vf->lan_vsi_idx]; 3285 f = i40e_find_mac(vsi, data.dst_mac); 3286 3287 if (!f) { 3288 dev_info(&pf->pdev->dev, 3289 "Destination MAC %pM doesn't belong to VF %d\n", 3290 data.dst_mac, vf->vf_id); 3291 goto err; 3292 } 3293 3294 if (mask.vlan_id) { 3295 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, 3296 hlist) { 3297 if (f->vlan == ntohs(data.vlan_id)) { 3298 found = true; 3299 break; 3300 } 3301 } 3302 if (!found) { 3303 dev_info(&pf->pdev->dev, 3304 "VF %d doesn't have any VLAN id %u\n", 3305 vf->vf_id, ntohs(data.vlan_id)); 3306 goto err; 3307 } 3308 } 3309 } else { 3310 /* Check if VF is trusted */ 3311 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 3312 dev_err(&pf->pdev->dev, 3313 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n", 3314 vf->vf_id); 3315 return I40E_ERR_CONFIG; 3316 } 3317 } 3318 3319 if (mask.dst_mac[0] & data.dst_mac[0]) { 3320 if (is_broadcast_ether_addr(data.dst_mac) || 3321 is_zero_ether_addr(data.dst_mac)) { 3322 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n", 3323 vf->vf_id, data.dst_mac); 3324 goto err; 3325 } 3326 } 3327 3328 if (mask.src_mac[0] & data.src_mac[0]) { 3329 if (is_broadcast_ether_addr(data.src_mac) || 3330 is_zero_ether_addr(data.src_mac)) { 3331 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n", 3332 vf->vf_id, data.src_mac); 3333 goto err; 3334 } 3335 } 3336 3337 if (mask.dst_port & data.dst_port) { 3338 if (!data.dst_port) { 3339 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n", 3340 vf->vf_id); 3341 goto err; 3342 } 3343 } 3344 3345 if (mask.src_port & data.src_port) { 3346 if (!data.src_port) { 3347 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n", 3348 vf->vf_id); 3349 goto err; 3350 } 3351 } 3352 3353 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW && 3354 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) { 3355 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n", 3356 vf->vf_id); 3357 goto err; 3358 } 3359 3360 if (mask.vlan_id & data.vlan_id) { 3361 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) { 3362 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n", 3363 vf->vf_id); 3364 goto err; 3365 } 3366 } 3367 3368 return I40E_SUCCESS; 3369 err: 3370 return I40E_ERR_CONFIG; 3371 } 3372 3373 /** 3374 * i40e_find_vsi_from_seid - searches for the vsi with the given seid 3375 * @vf: pointer to the VF info 3376 * @seid: seid of the vsi it is searching for 3377 **/ 3378 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid) 3379 { 3380 struct i40e_pf *pf = vf->pf; 3381 struct i40e_vsi *vsi = NULL; 3382 int i; 3383 3384 for (i = 0; i < vf->num_tc ; i++) { 3385 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id); 3386 if (vsi && vsi->seid == seid) 3387 return vsi; 3388 } 3389 return NULL; 3390 } 3391 3392 /** 3393 * i40e_del_all_cloud_filters 3394 * @vf: pointer to the VF info 3395 * 3396 * This function deletes all cloud filters 3397 **/ 3398 static void i40e_del_all_cloud_filters(struct i40e_vf *vf) 3399 { 3400 struct i40e_cloud_filter *cfilter = NULL; 3401 struct i40e_pf *pf = vf->pf; 3402 struct i40e_vsi *vsi = NULL; 3403 struct hlist_node *node; 3404 int ret; 3405 3406 hlist_for_each_entry_safe(cfilter, node, 3407 &vf->cloud_filter_list, cloud_node) { 3408 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid); 3409 3410 if (!vsi) { 3411 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n", 3412 vf->vf_id, cfilter->seid); 3413 
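/* no VSI to program, so this filter cannot be removed from
 * the hardware; skip it and keep walking the list
 */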
continue; 3414 } 3415 3416 if (cfilter->dst_port) 3417 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, 3418 false); 3419 else 3420 ret = i40e_add_del_cloud_filter(vsi, cfilter, false); 3421 if (ret) 3422 dev_err(&pf->pdev->dev, 3423 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", 3424 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3425 i40e_aq_str(&pf->hw, 3426 pf->hw.aq.asq_last_status)); 3427 3428 hlist_del(&cfilter->cloud_node); 3429 kfree(cfilter); 3430 vf->num_cloud_filters--; 3431 } 3432 } 3433 3434 /** 3435 * i40e_vc_del_cloud_filter 3436 * @vf: pointer to the VF info 3437 * @msg: pointer to the msg buffer 3438 * 3439 * This function deletes a cloud filter programmed as TC filter for ADq 3440 **/ 3441 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) 3442 { 3443 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 3444 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3445 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3446 struct i40e_cloud_filter cfilter, *cf = NULL; 3447 struct i40e_pf *pf = vf->pf; 3448 struct i40e_vsi *vsi = NULL; 3449 struct hlist_node *node; 3450 i40e_status aq_ret = 0; 3451 int i, ret; 3452 3453 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3454 aq_ret = I40E_ERR_PARAM; 3455 goto err; 3456 } 3457 3458 if (!vf->adq_enabled) { 3459 dev_info(&pf->pdev->dev, 3460 "VF %d: ADq not enabled, can't apply cloud filter\n", 3461 vf->vf_id); 3462 aq_ret = I40E_ERR_PARAM; 3463 goto err; 3464 } 3465 3466 if (i40e_validate_cloud_filter(vf, vcf)) { 3467 dev_info(&pf->pdev->dev, 3468 "VF %d: Invalid input, can't apply cloud filter\n", 3469 vf->vf_id); 3470 aq_ret = I40E_ERR_PARAM; 3471 goto err; 3472 } 3473 3474 memset(&cfilter, 0, sizeof(cfilter)); 3475 /* parse destination mac address */ 3476 for (i = 0; i < ETH_ALEN; i++) 3477 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; 3478 3479 /* parse source mac address */ 3480 for (i = 0; i < ETH_ALEN; i++) 3481 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 3482 3483 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id; 3484 cfilter.dst_port = mask.dst_port & tcf.dst_port; 3485 cfilter.src_port = mask.src_port & tcf.src_port; 3486 3487 switch (vcf->flow_type) { 3488 case VIRTCHNL_TCP_V4_FLOW: 3489 cfilter.n_proto = ETH_P_IP; 3490 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3491 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip, 3492 ARRAY_SIZE(tcf.dst_ip)); 3493 else if (mask.src_ip[0] & tcf.dst_ip[0]) 3494 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip, 3495 ARRAY_SIZE(tcf.dst_ip)); 3496 break; 3497 case VIRTCHNL_TCP_V6_FLOW: 3498 cfilter.n_proto = ETH_P_IPV6; 3499 if (mask.dst_ip[3] & tcf.dst_ip[3]) 3500 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip, 3501 sizeof(cfilter.ip.v6.dst_ip6)); 3502 if (mask.src_ip[3] & tcf.src_ip[3]) 3503 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip, 3504 sizeof(cfilter.ip.v6.src_ip6)); 3505 break; 3506 default: 3507 /* TC filter can be configured based on different combinations 3508 * and in this case IP is not a part of filter config 3509 */ 3510 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 3511 vf->vf_id); 3512 } 3513 3514 /* get the vsi to which the tc belongs to */ 3515 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 3516 cfilter.seid = vsi->seid; 3517 cfilter.flags = vcf->field_flags; 3518 3519 /* Deleting TC filter */ 3520 if (tcf.dst_port) 3521 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false); 3522 else 3523 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false); 3524 if (ret) { 3525 dev_err(&pf->pdev->dev, 3526 "VF %d: Failed to delete 
cloud filter, err %s aq_err %s\n", 3527 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3528 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 3529 goto err; 3530 } 3531 3532 hlist_for_each_entry_safe(cf, node, 3533 &vf->cloud_filter_list, cloud_node) { 3534 if (cf->seid != cfilter.seid) 3535 continue; 3536 if (mask.dst_port) 3537 if (cfilter.dst_port != cf->dst_port) 3538 continue; 3539 if (mask.dst_mac[0]) 3540 if (!ether_addr_equal(cf->src_mac, cfilter.src_mac)) 3541 continue; 3542 /* for ipv4 data to be valid, only first byte of mask is set */ 3543 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0]) 3544 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip, 3545 ARRAY_SIZE(tcf.dst_ip))) 3546 continue; 3547 /* for ipv6, mask is set for all sixteen bytes (4 words) */ 3548 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3]) 3549 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6, 3550 sizeof(cfilter.ip.v6.src_ip6))) 3551 continue; 3552 if (mask.vlan_id) 3553 if (cfilter.vlan_id != cf->vlan_id) 3554 continue; 3555 3556 hlist_del(&cf->cloud_node); 3557 kfree(cf); 3558 vf->num_cloud_filters--; 3559 } 3560 3561 err: 3562 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER, 3563 aq_ret); 3564 } 3565 3566 /** 3567 * i40e_vc_add_cloud_filter 3568 * @vf: pointer to the VF info 3569 * @msg: pointer to the msg buffer 3570 * 3571 * This function adds a cloud filter programmed as TC filter for ADq 3572 **/ 3573 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) 3574 { 3575 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 3576 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3577 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3578 struct i40e_cloud_filter *cfilter = NULL; 3579 struct i40e_pf *pf = vf->pf; 3580 struct i40e_vsi *vsi = NULL; 3581 i40e_status aq_ret = 0; 3582 int i, ret; 3583 3584 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3585 aq_ret = I40E_ERR_PARAM; 3586 goto err_out; 3587 } 3588 3589 if (!vf->adq_enabled) { 3590 dev_info(&pf->pdev->dev, 3591 "VF %d: ADq is not enabled, can't apply cloud filter\n", 3592 vf->vf_id); 3593 aq_ret = I40E_ERR_PARAM; 3594 goto err_out; 3595 } 3596 3597 if (i40e_validate_cloud_filter(vf, vcf)) { 3598 dev_info(&pf->pdev->dev, 3599 "VF %d: Invalid input/s, can't apply cloud filter\n", 3600 vf->vf_id); 3601 aq_ret = I40E_ERR_PARAM; 3602 goto err_out; 3603 } 3604 3605 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); 3606 if (!cfilter) 3607 return -ENOMEM; 3608 3609 /* parse destination mac address */ 3610 for (i = 0; i < ETH_ALEN; i++) 3611 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; 3612 3613 /* parse source mac address */ 3614 for (i = 0; i < ETH_ALEN; i++) 3615 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 3616 3617 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id; 3618 cfilter->dst_port = mask.dst_port & tcf.dst_port; 3619 cfilter->src_port = mask.src_port & tcf.src_port; 3620 3621 switch (vcf->flow_type) { 3622 case VIRTCHNL_TCP_V4_FLOW: 3623 cfilter->n_proto = ETH_P_IP; 3624 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3625 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip, 3626 ARRAY_SIZE(tcf.dst_ip)); 3627 else if (mask.src_ip[0] & tcf.dst_ip[0]) 3628 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip, 3629 ARRAY_SIZE(tcf.dst_ip)); 3630 break; 3631 case VIRTCHNL_TCP_V6_FLOW: 3632 cfilter->n_proto = ETH_P_IPV6; 3633 if (mask.dst_ip[3] & tcf.dst_ip[3]) 3634 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip, 3635 sizeof(cfilter->ip.v6.dst_ip6)); 3636 if (mask.src_ip[3] & tcf.src_ip[3]) 3637 memcpy(&cfilter->ip.v6.src_ip6, 
tcf.src_ip, 3638 sizeof(cfilter->ip.v6.src_ip6)); 3639 break; 3640 default: 3641 /* TC filter can be configured based on different combinations 3642 * and in this case IP is not a part of filter config 3643 */ 3644 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 3645 vf->vf_id); 3646 } 3647 3648 /* get the VSI to which the TC belongs to */ 3649 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 3650 cfilter->seid = vsi->seid; 3651 cfilter->flags = vcf->field_flags; 3652 3653 /* Adding cloud filter programmed as TC filter */ 3654 if (tcf.dst_port) 3655 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); 3656 else 3657 ret = i40e_add_del_cloud_filter(vsi, cfilter, true); 3658 if (ret) { 3659 dev_err(&pf->pdev->dev, 3660 "VF %d: Failed to add cloud filter, err %s aq_err %s\n", 3661 vf->vf_id, i40e_stat_str(&pf->hw, ret), 3662 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 3663 goto err_free; 3664 } 3665 3666 INIT_HLIST_NODE(&cfilter->cloud_node); 3667 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list); 3668 /* release the pointer passing it to the collection */ 3669 cfilter = NULL; 3670 vf->num_cloud_filters++; 3671 err_free: 3672 kfree(cfilter); 3673 err_out: 3674 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER, 3675 aq_ret); 3676 } 3677 3678 /** 3679 * i40e_vc_add_qch_msg: Add queue channel and enable ADq 3680 * @vf: pointer to the VF info 3681 * @msg: pointer to the msg buffer 3682 **/ 3683 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) 3684 { 3685 struct virtchnl_tc_info *tci = 3686 (struct virtchnl_tc_info *)msg; 3687 struct i40e_pf *pf = vf->pf; 3688 struct i40e_link_status *ls = &pf->hw.phy.link_info; 3689 int i, adq_request_qps = 0; 3690 i40e_status aq_ret = 0; 3691 u64 speed = 0; 3692 3693 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3694 aq_ret = I40E_ERR_PARAM; 3695 goto err; 3696 } 3697 3698 /* ADq cannot be applied if spoof check is ON */ 3699 if (vf->spoofchk) { 3700 dev_err(&pf->pdev->dev, 3701 "Spoof check is ON, turn it OFF to enable ADq\n"); 3702 aq_ret = I40E_ERR_PARAM; 3703 goto err; 3704 } 3705 3706 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) { 3707 dev_err(&pf->pdev->dev, 3708 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n", 3709 vf->vf_id); 3710 aq_ret = I40E_ERR_PARAM; 3711 goto err; 3712 } 3713 3714 /* max number of traffic classes for VF currently capped at 4 */ 3715 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) { 3716 dev_err(&pf->pdev->dev, 3717 "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n", 3718 vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI); 3719 aq_ret = I40E_ERR_PARAM; 3720 goto err; 3721 } 3722 3723 /* validate queues for each TC */ 3724 for (i = 0; i < tci->num_tc; i++) 3725 if (!tci->list[i].count || 3726 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) { 3727 dev_err(&pf->pdev->dev, 3728 "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n", 3729 vf->vf_id, i, tci->list[i].count, 3730 I40E_DEFAULT_QUEUES_PER_VF); 3731 aq_ret = I40E_ERR_PARAM; 3732 goto err; 3733 } 3734 3735 /* need Max VF queues but already have default number of queues */ 3736 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF; 3737 3738 if (pf->queues_left < adq_request_qps) { 3739 dev_err(&pf->pdev->dev, 3740 "No queues left to allocate to VF %d\n", 3741 vf->vf_id); 3742 aq_ret = I40E_ERR_PARAM; 3743 goto err; 3744 } else { 3745 /* we need to allocate max VF queues to enable ADq so as to 3746 * make sure ADq enabled VF always 
gets back queues when it 3747 * goes through a reset. 3748 */ 3749 vf->num_queue_pairs = I40E_MAX_VF_QUEUES; 3750 } 3751 3752 /* get link speed in MB to validate rate limit */ 3753 speed = i40e_vc_link_speed2mbps(ls->link_speed); 3754 if (speed == SPEED_UNKNOWN) { 3755 dev_err(&pf->pdev->dev, 3756 "Cannot detect link speed\n"); 3757 aq_ret = I40E_ERR_PARAM; 3758 goto err; 3759 } 3760 3761 /* parse data from the queue channel info */ 3762 vf->num_tc = tci->num_tc; 3763 for (i = 0; i < vf->num_tc; i++) { 3764 if (tci->list[i].max_tx_rate) { 3765 if (tci->list[i].max_tx_rate > speed) { 3766 dev_err(&pf->pdev->dev, 3767 "Invalid max tx rate %llu specified for VF %d.", 3768 tci->list[i].max_tx_rate, 3769 vf->vf_id); 3770 aq_ret = I40E_ERR_PARAM; 3771 goto err; 3772 } else { 3773 vf->ch[i].max_tx_rate = 3774 tci->list[i].max_tx_rate; 3775 } 3776 } 3777 vf->ch[i].num_qps = tci->list[i].count; 3778 } 3779 3780 /* set this flag only after making sure all inputs are sane */ 3781 vf->adq_enabled = true; 3782 /* num_req_queues is set when user changes number of queues via ethtool 3783 * and this causes issue for default VSI(which depends on this variable) 3784 * when ADq is enabled, hence reset it. 3785 */ 3786 vf->num_req_queues = 0; 3787 3788 /* reset the VF in order to allocate resources */ 3789 i40e_vc_notify_vf_reset(vf); 3790 i40e_reset_vf(vf, false); 3791 3792 return I40E_SUCCESS; 3793 3794 /* send the response to the VF */ 3795 err: 3796 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS, 3797 aq_ret); 3798 } 3799 3800 /** 3801 * i40e_vc_del_qch_msg 3802 * @vf: pointer to the VF info 3803 * @msg: pointer to the msg buffer 3804 **/ 3805 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) 3806 { 3807 struct i40e_pf *pf = vf->pf; 3808 i40e_status aq_ret = 0; 3809 3810 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3811 aq_ret = I40E_ERR_PARAM; 3812 goto err; 3813 } 3814 3815 if (vf->adq_enabled) { 3816 i40e_del_all_cloud_filters(vf); 3817 i40e_del_qch(vf); 3818 vf->adq_enabled = false; 3819 vf->num_tc = 0; 3820 dev_info(&pf->pdev->dev, 3821 "Deleting Queue Channels and cloud filters for ADq on VF %d\n", 3822 vf->vf_id); 3823 } else { 3824 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n", 3825 vf->vf_id); 3826 aq_ret = I40E_ERR_PARAM; 3827 } 3828 3829 /* reset the VF in order to allocate resources */ 3830 i40e_vc_notify_vf_reset(vf); 3831 i40e_reset_vf(vf, false); 3832 3833 return I40E_SUCCESS; 3834 3835 err: 3836 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS, 3837 aq_ret); 3838 } 3839 3840 /** 3841 * i40e_vc_process_vf_msg 3842 * @pf: pointer to the PF structure 3843 * @vf_id: source VF id 3844 * @v_opcode: operation code 3845 * @v_retval: unused return value code 3846 * @msg: pointer to the msg buffer 3847 * @msglen: msg length 3848 * 3849 * called from the common aeq/arq handler to 3850 * process request from VF 3851 **/ 3852 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, 3853 u32 __always_unused v_retval, u8 *msg, u16 msglen) 3854 { 3855 struct i40e_hw *hw = &pf->hw; 3856 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id; 3857 struct i40e_vf *vf; 3858 int ret; 3859 3860 pf->vf_aq_requests++; 3861 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs) 3862 return -EINVAL; 3863 vf = &(pf->vf[local_vf_id]); 3864 3865 /* Check if VF is disabled. 
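 * Requests from such a VF are rejected here without being processed.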
*/ 3866 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states)) 3867 return I40E_ERR_PARAM; 3868 3869 /* perform basic checks on the msg */ 3870 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); 3871 3872 if (ret) { 3873 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); 3874 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", 3875 local_vf_id, v_opcode, msglen); 3876 switch (ret) { 3877 case VIRTCHNL_STATUS_ERR_PARAM: 3878 return -EPERM; 3879 default: 3880 return -EINVAL; 3881 } 3882 } 3883 3884 switch (v_opcode) { 3885 case VIRTCHNL_OP_VERSION: 3886 ret = i40e_vc_get_version_msg(vf, msg); 3887 break; 3888 case VIRTCHNL_OP_GET_VF_RESOURCES: 3889 ret = i40e_vc_get_vf_resources_msg(vf, msg); 3890 i40e_vc_notify_vf_link_state(vf); 3891 break; 3892 case VIRTCHNL_OP_RESET_VF: 3893 i40e_vc_reset_vf_msg(vf); 3894 ret = 0; 3895 break; 3896 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 3897 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg); 3898 break; 3899 case VIRTCHNL_OP_CONFIG_VSI_QUEUES: 3900 ret = i40e_vc_config_queues_msg(vf, msg); 3901 break; 3902 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 3903 ret = i40e_vc_config_irq_map_msg(vf, msg); 3904 break; 3905 case VIRTCHNL_OP_ENABLE_QUEUES: 3906 ret = i40e_vc_enable_queues_msg(vf, msg); 3907 i40e_vc_notify_vf_link_state(vf); 3908 break; 3909 case VIRTCHNL_OP_DISABLE_QUEUES: 3910 ret = i40e_vc_disable_queues_msg(vf, msg); 3911 break; 3912 case VIRTCHNL_OP_ADD_ETH_ADDR: 3913 ret = i40e_vc_add_mac_addr_msg(vf, msg); 3914 break; 3915 case VIRTCHNL_OP_DEL_ETH_ADDR: 3916 ret = i40e_vc_del_mac_addr_msg(vf, msg); 3917 break; 3918 case VIRTCHNL_OP_ADD_VLAN: 3919 ret = i40e_vc_add_vlan_msg(vf, msg); 3920 break; 3921 case VIRTCHNL_OP_DEL_VLAN: 3922 ret = i40e_vc_remove_vlan_msg(vf, msg); 3923 break; 3924 case VIRTCHNL_OP_GET_STATS: 3925 ret = i40e_vc_get_stats_msg(vf, msg); 3926 break; 3927 case VIRTCHNL_OP_IWARP: 3928 ret = i40e_vc_iwarp_msg(vf, msg, msglen); 3929 break; 3930 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 3931 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true); 3932 break; 3933 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: 3934 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false); 3935 break; 3936 case VIRTCHNL_OP_CONFIG_RSS_KEY: 3937 ret = i40e_vc_config_rss_key(vf, msg); 3938 break; 3939 case VIRTCHNL_OP_CONFIG_RSS_LUT: 3940 ret = i40e_vc_config_rss_lut(vf, msg); 3941 break; 3942 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: 3943 ret = i40e_vc_get_rss_hena(vf, msg); 3944 break; 3945 case VIRTCHNL_OP_SET_RSS_HENA: 3946 ret = i40e_vc_set_rss_hena(vf, msg); 3947 break; 3948 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 3949 ret = i40e_vc_enable_vlan_stripping(vf, msg); 3950 break; 3951 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 3952 ret = i40e_vc_disable_vlan_stripping(vf, msg); 3953 break; 3954 case VIRTCHNL_OP_REQUEST_QUEUES: 3955 ret = i40e_vc_request_queues_msg(vf, msg); 3956 break; 3957 case VIRTCHNL_OP_ENABLE_CHANNELS: 3958 ret = i40e_vc_add_qch_msg(vf, msg); 3959 break; 3960 case VIRTCHNL_OP_DISABLE_CHANNELS: 3961 ret = i40e_vc_del_qch_msg(vf, msg); 3962 break; 3963 case VIRTCHNL_OP_ADD_CLOUD_FILTER: 3964 ret = i40e_vc_add_cloud_filter(vf, msg); 3965 break; 3966 case VIRTCHNL_OP_DEL_CLOUD_FILTER: 3967 ret = i40e_vc_del_cloud_filter(vf, msg); 3968 break; 3969 case VIRTCHNL_OP_UNKNOWN: 3970 default: 3971 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", 3972 v_opcode, local_vf_id); 3973 ret = i40e_vc_send_resp_to_vf(vf, v_opcode, 3974 I40E_ERR_NOT_IMPLEMENTED); 3975 break; 3976 } 3977 3978 return ret; 3979 } 3980 3981 /** 3982 * 
i40e_vc_process_vflr_event
3983 * @pf: pointer to the PF structure
3984 *
3985 * called from the vflr irq handler to
3986 * free up VF resources and state variables
3987 **/
3988 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
3989 {
3990 struct i40e_hw *hw = &pf->hw;
3991 u32 reg, reg_idx, bit_idx;
3992 struct i40e_vf *vf;
3993 int vf_id;
3994 
3995 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
3996 return 0;
3997 
3998 /* Re-enable the VFLR interrupt cause here, before looking for which
3999 * VF got reset. Otherwise, if another VF gets a reset while the
4000 * first one is being processed, that interrupt will be lost, and
4001 * that VF will be stuck in reset forever.
4002 */
4003 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4004 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
4005 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4006 i40e_flush(hw);
4007 
4008 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4009 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
4010 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
4011 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
4012 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
4013 vf = &pf->vf[vf_id];
4014 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
4015 if (reg & BIT(bit_idx))
4016 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
4017 i40e_reset_vf(vf, true);
4018 }
4019 
4020 return 0;
4021 }
4022 
4023 /**
4024 * i40e_validate_vf
4025 * @pf: the physical function
4026 * @vf_id: VF identifier
4027 *
4028 * Check that the VF is enabled and the VSI exists.
4029 *
4030 * Returns 0 on success, negative on failure
4031 **/
4032 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
4033 {
4034 struct i40e_vsi *vsi;
4035 struct i40e_vf *vf;
4036 int ret = 0;
4037 
4038 if (vf_id >= pf->num_alloc_vfs) {
4039 dev_err(&pf->pdev->dev,
4040 "Invalid VF Identifier %d\n", vf_id);
4041 ret = -EINVAL;
4042 goto err_out;
4043 }
4044 vf = &pf->vf[vf_id];
4045 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
4046 if (!vsi)
4047 ret = -EINVAL;
4048 err_out:
4049 return ret;
4050 }
4051 
4052 /**
4053 * i40e_ndo_set_vf_mac
4054 * @netdev: network interface device structure
4055 * @vf_id: VF identifier
4056 * @mac: mac address
4057 *
4058 * program VF mac address
4059 **/
4060 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
4061 {
4062 struct i40e_netdev_priv *np = netdev_priv(netdev);
4063 struct i40e_vsi *vsi = np->vsi;
4064 struct i40e_pf *pf = vsi->back;
4065 struct i40e_mac_filter *f;
4066 struct i40e_vf *vf;
4067 int ret = 0;
4068 struct hlist_node *h;
4069 int bkt;
4070 u8 i;
4071 
4072 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4073 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4074 return -EAGAIN;
4075 }
4076 
4077 /* validate the request */
4078 ret = i40e_validate_vf(pf, vf_id);
4079 if (ret)
4080 goto error_param;
4081 
4082 vf = &pf->vf[vf_id];
4083 
4084 /* When the VF is resetting wait until it is done.
4085 * It can take up to 200 milliseconds,
4086 * but wait for up to 300 milliseconds to be safe.
4087 * Acquire the VSI pointer only after the VF has been
4088 * properly initialized.
4089 */
4090 for (i = 0; i < 15; i++) {
4091 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
4092 break;
4093 msleep(20);
4094 }
4095 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4096 dev_err(&pf->pdev->dev, "VF %d still in reset.
Try again.\n", 4097 vf_id); 4098 ret = -EAGAIN; 4099 goto error_param; 4100 } 4101 vsi = pf->vsi[vf->lan_vsi_idx]; 4102 4103 if (is_multicast_ether_addr(mac)) { 4104 dev_err(&pf->pdev->dev, 4105 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); 4106 ret = -EINVAL; 4107 goto error_param; 4108 } 4109 4110 /* Lock once because below invoked function add/del_filter requires 4111 * mac_filter_hash_lock to be held 4112 */ 4113 spin_lock_bh(&vsi->mac_filter_hash_lock); 4114 4115 /* delete the temporary mac address */ 4116 if (!is_zero_ether_addr(vf->default_lan_addr.addr)) 4117 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr); 4118 4119 /* Delete all the filters for this VSI - we're going to kill it 4120 * anyway. 4121 */ 4122 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) 4123 __i40e_del_filter(vsi, f); 4124 4125 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4126 4127 /* program mac filter */ 4128 if (i40e_sync_vsi_filters(vsi)) { 4129 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 4130 ret = -EIO; 4131 goto error_param; 4132 } 4133 ether_addr_copy(vf->default_lan_addr.addr, mac); 4134 4135 if (is_zero_ether_addr(mac)) { 4136 vf->pf_set_mac = false; 4137 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id); 4138 } else { 4139 vf->pf_set_mac = true; 4140 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", 4141 mac, vf_id); 4142 } 4143 4144 /* Force the VF interface down so it has to bring up with new MAC 4145 * address 4146 */ 4147 i40e_vc_disable_vf(vf); 4148 dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n"); 4149 4150 error_param: 4151 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4152 return ret; 4153 } 4154 4155 /** 4156 * i40e_vsi_has_vlans - True if VSI has configured VLANs 4157 * @vsi: pointer to the vsi 4158 * 4159 * Check if a VSI has configured any VLANs. False if we have a port VLAN or if 4160 * we have no configured VLANs. Do not call while holding the 4161 * mac_filter_hash_lock. 4162 */ 4163 static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi) 4164 { 4165 bool have_vlans; 4166 4167 /* If we have a port VLAN, then the VSI cannot have any VLANs 4168 * configured, as all MAC/VLAN filters will be assigned to the PVID. 4169 */ 4170 if (vsi->info.pvid) 4171 return false; 4172 4173 /* Since we don't have a PVID, we know that if the device is in VLAN 4174 * mode it must be because of a VLAN filter configured on this VSI. 
4175 */
4176 spin_lock_bh(&vsi->mac_filter_hash_lock);
4177 have_vlans = i40e_is_vsi_in_vlan(vsi);
4178 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4179 
4180 return have_vlans;
4181 }
4182 
4183 /**
4184 * i40e_ndo_set_vf_port_vlan
4185 * @netdev: network interface device structure
4186 * @vf_id: VF identifier
4187 * @vlan_id: VLAN identifier
4188 * @qos: priority setting
4189 * @vlan_proto: vlan protocol
4190 *
4191 * program VF VLAN ID and/or QoS
4192 **/
4193 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4194 u16 vlan_id, u8 qos, __be16 vlan_proto)
4195 {
4196 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
4197 struct i40e_netdev_priv *np = netdev_priv(netdev);
4198 bool allmulti = false, alluni = false;
4199 struct i40e_pf *pf = np->vsi->back;
4200 struct i40e_vsi *vsi;
4201 struct i40e_vf *vf;
4202 int ret = 0;
4203 
4204 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4205 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4206 return -EAGAIN;
4207 }
4208 
4209 /* validate the request */
4210 ret = i40e_validate_vf(pf, vf_id);
4211 if (ret)
4212 goto error_pvid;
4213 
4214 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
4215 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4216 ret = -EINVAL;
4217 goto error_pvid;
4218 }
4219 
4220 if (vlan_proto != htons(ETH_P_8021Q)) {
4221 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4222 ret = -EPROTONOSUPPORT;
4223 goto error_pvid;
4224 }
4225 
4226 vf = &pf->vf[vf_id];
4227 vsi = pf->vsi[vf->lan_vsi_idx];
4228 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4229 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4230 vf_id);
4231 ret = -EAGAIN;
4232 goto error_pvid;
4233 }
4234 
4235 if (le16_to_cpu(vsi->info.pvid) == vlanprio)
4236 /* duplicate request, so just return success */
4237 goto error_pvid;
4238 
4239 if (i40e_vsi_has_vlans(vsi)) {
4240 dev_err(&pf->pdev->dev,
4241 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
4242 vf_id);
4243 /* Administrator error - knock the VF offline until the
4244 * administrator reconfigures the network correctly and then
4245 * reloads the VF driver.
4246 */
4247 i40e_vc_disable_vf(vf);
4248 /* During reset the VF got a new VSI, so refresh the pointer. */
4249 vsi = pf->vsi[vf->lan_vsi_idx];
4250 }
4251 
4252 /* Locked once because multiple functions below iterate list */
4253 spin_lock_bh(&vsi->mac_filter_hash_lock);
4254 
4255 /* Check for condition where there was already a port VLAN ID
4256 * filter set and now it is being deleted by setting it to zero.
4257 * Additionally check for the condition where there was a port
4258 * VLAN but now there is a new and different port VLAN being set.
4259 * Before deleting all the old VLAN filters we must add new ones
4260 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
4261 * MAC addresses deleted.
4262 */ 4263 if ((!(vlan_id || qos) || 4264 vlanprio != le16_to_cpu(vsi->info.pvid)) && 4265 vsi->info.pvid) { 4266 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY); 4267 if (ret) { 4268 dev_info(&vsi->back->pdev->dev, 4269 "add VF VLAN failed, ret=%d aq_err=%d\n", ret, 4270 vsi->back->hw.aq.asq_last_status); 4271 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4272 goto error_pvid; 4273 } 4274 } 4275 4276 if (vsi->info.pvid) { 4277 /* remove all filters on the old VLAN */ 4278 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) & 4279 VLAN_VID_MASK)); 4280 } 4281 4282 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4283 4284 /* disable promisc modes in case they were enabled */ 4285 ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, 4286 allmulti, alluni); 4287 if (ret) { 4288 dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n"); 4289 goto error_pvid; 4290 } 4291 4292 if (vlan_id || qos) 4293 ret = i40e_vsi_add_pvid(vsi, vlanprio); 4294 else 4295 i40e_vsi_remove_pvid(vsi); 4296 spin_lock_bh(&vsi->mac_filter_hash_lock); 4297 4298 if (vlan_id) { 4299 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", 4300 vlan_id, qos, vf_id); 4301 4302 /* add new VLAN filter for each MAC */ 4303 ret = i40e_add_vlan_all_mac(vsi, vlan_id); 4304 if (ret) { 4305 dev_info(&vsi->back->pdev->dev, 4306 "add VF VLAN failed, ret=%d aq_err=%d\n", ret, 4307 vsi->back->hw.aq.asq_last_status); 4308 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4309 goto error_pvid; 4310 } 4311 4312 /* remove the previously added non-VLAN MAC filters */ 4313 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY); 4314 } 4315 4316 spin_unlock_bh(&vsi->mac_filter_hash_lock); 4317 4318 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) 4319 alluni = true; 4320 4321 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) 4322 allmulti = true; 4323 4324 /* Schedule the worker thread to take care of applying changes */ 4325 i40e_service_event_schedule(vsi->back); 4326 4327 if (ret) { 4328 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n"); 4329 goto error_pvid; 4330 } 4331 4332 /* The Port VLAN needs to be saved across resets the same as the 4333 * default LAN MAC address. 
4334 */ 4335 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); 4336 4337 ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni); 4338 if (ret) { 4339 dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n"); 4340 goto error_pvid; 4341 } 4342 4343 ret = 0; 4344 4345 error_pvid: 4346 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4347 return ret; 4348 } 4349 4350 /** 4351 * i40e_ndo_set_vf_bw 4352 * @netdev: network interface device structure 4353 * @vf_id: VF identifier 4354 * @min_tx_rate: Minimum Tx rate 4355 * @max_tx_rate: Maximum Tx rate 4356 * 4357 * configure VF Tx rate 4358 **/ 4359 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, 4360 int max_tx_rate) 4361 { 4362 struct i40e_netdev_priv *np = netdev_priv(netdev); 4363 struct i40e_pf *pf = np->vsi->back; 4364 struct i40e_vsi *vsi; 4365 struct i40e_vf *vf; 4366 int ret = 0; 4367 4368 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4369 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4370 return -EAGAIN; 4371 } 4372 4373 /* validate the request */ 4374 ret = i40e_validate_vf(pf, vf_id); 4375 if (ret) 4376 goto error; 4377 4378 if (min_tx_rate) { 4379 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", 4380 min_tx_rate, vf_id); 4381 ret = -EINVAL; 4382 goto error; 4383 } 4384 4385 vf = &pf->vf[vf_id]; 4386 vsi = pf->vsi[vf->lan_vsi_idx]; 4387 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 4388 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", 4389 vf_id); 4390 ret = -EAGAIN; 4391 goto error; 4392 } 4393 4394 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); 4395 if (ret) 4396 goto error; 4397 4398 vf->tx_rate = max_tx_rate; 4399 error: 4400 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4401 return ret; 4402 } 4403 4404 /** 4405 * i40e_ndo_get_vf_config 4406 * @netdev: network interface device structure 4407 * @vf_id: VF identifier 4408 * @ivi: VF configuration structure 4409 * 4410 * return VF configuration 4411 **/ 4412 int i40e_ndo_get_vf_config(struct net_device *netdev, 4413 int vf_id, struct ifla_vf_info *ivi) 4414 { 4415 struct i40e_netdev_priv *np = netdev_priv(netdev); 4416 struct i40e_vsi *vsi = np->vsi; 4417 struct i40e_pf *pf = vsi->back; 4418 struct i40e_vf *vf; 4419 int ret = 0; 4420 4421 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4422 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4423 return -EAGAIN; 4424 } 4425 4426 /* validate the request */ 4427 ret = i40e_validate_vf(pf, vf_id); 4428 if (ret) 4429 goto error_param; 4430 4431 vf = &pf->vf[vf_id]; 4432 /* first vsi is always the LAN vsi */ 4433 vsi = pf->vsi[vf->lan_vsi_idx]; 4434 if (!vsi) { 4435 ret = -ENOENT; 4436 goto error_param; 4437 } 4438 4439 ivi->vf = vf_id; 4440 4441 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr); 4442 4443 ivi->max_tx_rate = vf->tx_rate; 4444 ivi->min_tx_rate = 0; 4445 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK; 4446 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >> 4447 I40E_VLAN_PRIORITY_SHIFT; 4448 if (vf->link_forced == false) 4449 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; 4450 else if (vf->link_up == true) 4451 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 4452 else 4453 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 4454 ivi->spoofchk = vf->spoofchk; 4455 ivi->trusted = vf->trusted; 4456 ret = 0; 4457 4458 error_param: 4459 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4460 
return ret; 4461 } 4462 4463 /** 4464 * i40e_ndo_set_vf_link_state 4465 * @netdev: network interface device structure 4466 * @vf_id: VF identifier 4467 * @link: required link state 4468 * 4469 * Set the link state of a specified VF, regardless of physical link state 4470 **/ 4471 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) 4472 { 4473 struct i40e_netdev_priv *np = netdev_priv(netdev); 4474 struct i40e_pf *pf = np->vsi->back; 4475 struct i40e_link_status *ls = &pf->hw.phy.link_info; 4476 struct virtchnl_pf_event pfe; 4477 struct i40e_hw *hw = &pf->hw; 4478 struct i40e_vf *vf; 4479 int abs_vf_id; 4480 int ret = 0; 4481 4482 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4483 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4484 return -EAGAIN; 4485 } 4486 4487 /* validate the request */ 4488 if (vf_id >= pf->num_alloc_vfs) { 4489 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 4490 ret = -EINVAL; 4491 goto error_out; 4492 } 4493 4494 vf = &pf->vf[vf_id]; 4495 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 4496 4497 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; 4498 pfe.severity = PF_EVENT_SEVERITY_INFO; 4499 4500 switch (link) { 4501 case IFLA_VF_LINK_STATE_AUTO: 4502 vf->link_forced = false; 4503 i40e_set_vf_link_state(vf, &pfe, ls); 4504 break; 4505 case IFLA_VF_LINK_STATE_ENABLE: 4506 vf->link_forced = true; 4507 vf->link_up = true; 4508 i40e_set_vf_link_state(vf, &pfe, ls); 4509 break; 4510 case IFLA_VF_LINK_STATE_DISABLE: 4511 vf->link_forced = true; 4512 vf->link_up = false; 4513 i40e_set_vf_link_state(vf, &pfe, ls); 4514 break; 4515 default: 4516 ret = -EINVAL; 4517 goto error_out; 4518 } 4519 /* Notify the VF of its new link state */ 4520 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 4521 0, (u8 *)&pfe, sizeof(pfe), NULL); 4522 4523 error_out: 4524 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4525 return ret; 4526 } 4527 4528 /** 4529 * i40e_ndo_set_vf_spoofchk 4530 * @netdev: network interface device structure 4531 * @vf_id: VF identifier 4532 * @enable: flag to enable or disable feature 4533 * 4534 * Enable or disable VF spoof checking 4535 **/ 4536 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) 4537 { 4538 struct i40e_netdev_priv *np = netdev_priv(netdev); 4539 struct i40e_vsi *vsi = np->vsi; 4540 struct i40e_pf *pf = vsi->back; 4541 struct i40e_vsi_context ctxt; 4542 struct i40e_hw *hw = &pf->hw; 4543 struct i40e_vf *vf; 4544 int ret = 0; 4545 4546 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4547 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4548 return -EAGAIN; 4549 } 4550 4551 /* validate the request */ 4552 if (vf_id >= pf->num_alloc_vfs) { 4553 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 4554 ret = -EINVAL; 4555 goto out; 4556 } 4557 4558 vf = &(pf->vf[vf_id]); 4559 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 4560 dev_err(&pf->pdev->dev, "VF %d still in reset. 
Try again.\n", 4561 vf_id); 4562 ret = -EAGAIN; 4563 goto out; 4564 } 4565 4566 if (enable == vf->spoofchk) 4567 goto out; 4568 4569 vf->spoofchk = enable; 4570 memset(&ctxt, 0, sizeof(ctxt)); 4571 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid; 4572 ctxt.pf_num = pf->hw.pf_id; 4573 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 4574 if (enable) 4575 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | 4576 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); 4577 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 4578 if (ret) { 4579 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", 4580 ret); 4581 ret = -EIO; 4582 } 4583 out: 4584 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4585 return ret; 4586 } 4587 4588 /** 4589 * i40e_ndo_set_vf_trust 4590 * @netdev: network interface device structure of the pf 4591 * @vf_id: VF identifier 4592 * @setting: trust setting 4593 * 4594 * Enable or disable VF trust setting 4595 **/ 4596 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) 4597 { 4598 struct i40e_netdev_priv *np = netdev_priv(netdev); 4599 struct i40e_pf *pf = np->vsi->back; 4600 struct i40e_vf *vf; 4601 int ret = 0; 4602 4603 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { 4604 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); 4605 return -EAGAIN; 4606 } 4607 4608 /* validate the request */ 4609 if (vf_id >= pf->num_alloc_vfs) { 4610 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 4611 ret = -EINVAL; 4612 goto out; 4613 } 4614 4615 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 4616 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n"); 4617 ret = -EINVAL; 4618 goto out; 4619 } 4620 4621 vf = &pf->vf[vf_id]; 4622 4623 if (setting == vf->trusted) 4624 goto out; 4625 4626 vf->trusted = setting; 4627 i40e_vc_disable_vf(vf); 4628 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", 4629 vf_id, setting ? "" : "un"); 4630 4631 if (vf->adq_enabled) { 4632 if (!vf->trusted) { 4633 dev_info(&pf->pdev->dev, 4634 "VF %u no longer Trusted, deleting all cloud filters\n", 4635 vf_id); 4636 i40e_del_all_cloud_filters(vf); 4637 } 4638 } 4639 4640 out: 4641 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); 4642 return ret; 4643 } 4644 4645 /** 4646 * i40e_get_vf_stats - populate some stats for the VF 4647 * @netdev: the netdev of the PF 4648 * @vf_id: the host OS identifier (0-127) 4649 * @vf_stats: pointer to the OS memory to be initialized 4650 */ 4651 int i40e_get_vf_stats(struct net_device *netdev, int vf_id, 4652 struct ifla_vf_stats *vf_stats) 4653 { 4654 struct i40e_netdev_priv *np = netdev_priv(netdev); 4655 struct i40e_pf *pf = np->vsi->back; 4656 struct i40e_eth_stats *stats; 4657 struct i40e_vsi *vsi; 4658 struct i40e_vf *vf; 4659 4660 /* validate the request */ 4661 if (i40e_validate_vf(pf, vf_id)) 4662 return -EINVAL; 4663 4664 vf = &pf->vf[vf_id]; 4665 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { 4666 dev_err(&pf->pdev->dev, "VF %d in reset. 
Try again.\n", vf_id); 4667 return -EBUSY; 4668 } 4669 4670 vsi = pf->vsi[vf->lan_vsi_idx]; 4671 if (!vsi) 4672 return -EINVAL; 4673 4674 i40e_update_eth_stats(vsi); 4675 stats = &vsi->eth_stats; 4676 4677 memset(vf_stats, 0, sizeof(*vf_stats)); 4678 4679 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast + 4680 stats->rx_multicast; 4681 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast + 4682 stats->tx_multicast; 4683 vf_stats->rx_bytes = stats->rx_bytes; 4684 vf_stats->tx_bytes = stats->tx_bytes; 4685 vf_stats->broadcast = stats->rx_broadcast; 4686 vf_stats->multicast = stats->rx_multicast; 4687 vf_stats->rx_dropped = stats->rx_discards; 4688 vf_stats->tx_dropped = stats->tx_discards; 4689 4690 return 0; 4691 } 4692