/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;
	if (vf->link_forced) {
		pfe.event_data.link_event.link_status = vf->link_up;
		pfe.event_data.link_event.link_speed =
			(vf->link_up ? I40E_LINK_SPEED_40GB : 0);
	} else {
		pfe.event_data.link_event.link_status =
			ls->link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			(enum virtchnl_link_speed)ls->link_speed;
	}
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}
/***********************misc routines*****************************/

/**
 * i40e_vc_disable_vf
 * @pf: pointer to the PF info
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset
 **/
static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
{
	i40e_vc_notify_vf_reset(vf);
	i40e_reset_vf(vf, false);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/
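
/* Note on per-VF interrupt register indexing (a summary of the scheme the
 * routines below already use, not new behavior): vector 0 of a VF is its
 * "miscellaneous" vector, addressed through the *0 register names (e.g.
 * VPINT_LNKLST0), while vectors 1..N-1 map into the *N register arrays at
 * index ((num_msix_vectors_vf - 1) * vf_id) + (vector_id - 1).
 */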

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	next_q = find_first_bit(&linklistmap,
				(I40E_MAX_VSI_QP *
				 I40E_VIRTCHNL_SUPPORTED_QTYPES));
	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap,
				       (I40E_MAX_VSI_QP *
					I40E_VIRTCHNL_SUPPORTED_QTYPES),
				       next_q + 1);
		if (next_q <
		    (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the vf is running in polling mode and using interrupt zero,
	 * need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}

/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
 **/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}

/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf, size;

	size = sizeof(struct virtchnl_iwarp_qvlist_info) +
	       (sizeof(struct virtchnl_iwarp_qv_info) *
	       (qvlist_info->num_vectors - 1));
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info)
		return -ENOMEM;
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, v_idx))
			goto err;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
			      I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
	return -EINVAL;
}

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
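	/* flush the posted register write so the queue to VF association
	 * takes effect in hardware before we return
	 */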
	i40e_flush(hw);

error_context:
	return ret;
}

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @type: type of VSI to allocate
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}
	if (type == I40E_VSI_SRIOV) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
	}

	/* program mac filter */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  vf->tx_rate / 50, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_queue_pairs = 0;
	int j;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	/* map PF queues to VF queues */
	for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);

		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
		total_queue_pairs++;
	}

	/* map PF queues to VSI */
	for (j = 0; j < 7; j++) {
		if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
			reg = 0x07FF07FF;	/* unused */
		} else {
			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
							  j * 2);
			reg = qid;
			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
						      (j * 2) + 1);
			reg |= qid << 16;
		}
		i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id),
				  reg);
	}

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
		vf->num_mac = 0;
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						       (vf->vf_id))
						      + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	vf->vf_states = 0;
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
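 *
 * In the VFLR case the hardware has already begun the reset, so the
 * VPGEN_VFRTRIG register is left alone and only the VFLR status bit is
 * acknowledged; otherwise the reset is kicked off through VPGEN_VFRTRIG.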
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;

	/* warn the VF */
	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give some time to finish to any VF config
	 * functions that may still be running at this point.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}

/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
				       &vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * reset the VF
 **/
void i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);
}

/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 **/
void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF that
		 * failed the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Hw may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);
}

/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf, false);
	return ret;
}

#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs.
 * Called when the user updates the number of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf,
					   BIT_ULL(__I40E_PF_RESET_REQUESTED));
		}
		return i40e_pci_sriov_enable(pdev, num_vfs);
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		return -EINVAL;
	}
	return 0;
}

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received. */
		vf->num_invalid_msgs = 0;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry.
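	 * Since a 1.0 VF does not negotiate capabilities, report the
	 * no-VF-caps minor version to it.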
	 */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	int len = 0;
	int ret;

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct virtchnl_vf_resource) +
	       sizeof(struct virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				vf->vf_id);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;

	if (vf->lan_vsi_idx) {
		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
		/* VFs only use TC 0 */
		vfres->vsi_res[0].qset_handle
				  = le16_to_cpu(vsi->info.qs_handle[0]);
		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
				vf->default_lan_addr.addr);
	}
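	/* Once marked active below, the VF may begin issuing the other
	 * virtchnl commands.
	 */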
	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself,
 * unlike other virtchnl messages, PF driver
 * doesn't send the response back to the VF
 **/
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		i40e_reset_vf(vf, false);
}

/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * called to get the number of VLANs offloaded on this VF
 **/
static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	int num_vlans = 0, bkt;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
			num_vlans++;
	}

	return num_vlans;
}

/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct virtchnl_promisc_info *info =
	    (struct virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_mac_filter *f;
	i40e_status aq_ret = 0;
	bool allmulti = false;
	struct i40e_vsi *vsi;
	bool alluni = false;
	int aq_err = 0;
	int bkt;

	vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    !vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);
		/* Lie to the VF on purpose.
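		 * Reply with success but change nothing, since an
		 * unprivileged VF may not alter promiscuous settings.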
		 */
		aq_ret = 0;
		goto error_param;
	}
	/* Multicast promiscuous handling*/
	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
							    allmulti,
							    vf->port_vlan_id,
							    NULL);
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
								    vsi->seid,
								    allmulti,
								    f->vlan,
								    NULL);
			aq_err = pf->hw.aq.asq_last_status;
			if (aq_ret) {
				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
				break;
			}
		}
	} else {
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
							       allmulti, NULL);
		aq_err = pf->hw.aq.asq_last_status;
		if (aq_ret) {
			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			goto error_param;
		}
	}

	if (!aq_ret) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully set multicast promiscuous mode\n",
			 vf->vf_id);
		if (allmulti)
			set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
		else
			clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	}

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;
	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
							    alluni,
							    vf->port_vlan_id,
							    NULL);
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
								    vsi->seid,
								    alluni,
								    f->vlan,
								    NULL);
			aq_err = pf->hw.aq.asq_last_status;
			if (aq_ret)
				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
		}
	} else {
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
							     alluni, NULL,
							     true);
		aq_err = pf->hw.aq.asq_last_status;
		if (aq_ret) {
			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
				vf->vf_id, info->flags,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			goto error_param;
		}
	}

	if (!aq_ret) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully set unicast promiscuous mode\n",
			 vf->vf_id);
		if (alluni)
			set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
		else
			clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}

/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the rx/tx
 * queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_vsi_queue_config_info *qci =
	    (struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *qpi;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id, vsi_queue_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi_id = qci->vsi_id;
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		vsi_queue_id = qpi->txq.queue_id;
		if ((qpi->txq.vsi_id != vsi_id) ||
		    (qpi->rxq.vsi_id != vsi_id) ||
		    (qpi->rxq.queue_id != vsi_queue_id) ||
		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}
	/* set vsi num_queue_pairs in use to num configured by VF */
	pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the irq to
 * queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_irq_map_info *irqmap_info =
	    (struct virtchnl_irq_map_info *)msg;
	struct virtchnl_vector_map *map;
	u16 vsi_id, vsi_queue_id, vector_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		tempmap = map->rxq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		tempmap = map->txq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}

/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!vqs->rx_queues && !vqs->tx_queues) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
		aq_ret = I40E_ERR_TIMEOUT;
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to disable all or specific
 * queue(s)
 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!vqs->rx_queues && !vqs->tx_queues) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to get vsi stats
 **/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_eth_stats stats;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;

	memset(&stats, 0, sizeof(struct i40e_eth_stats));

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	i40e_update_eth_stats(vsi);
	stats = vsi->eth_stats;

error_param:
	/* send the response back to the VF */
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
				      (u8 *)&stats, sizeof(stats));
}
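/* Illustrative note: the VIRTCHNL_OP_GET_STATS reply above carries the
 * VSI's struct i40e_eth_stats by value (e.g. stats.rx_bytes and
 * stats.tx_bytes), so the VF can read its counters without further admin
 * queue round trips.
 */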
/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
#define I40E_VC_MAX_VLAN_PER_VF 8

/**
 * i40e_check_vf_permission
 * @vf: pointer to the VF info
 * @macaddr: pointer to the MAC Address being checked
 *
 * Check if the VF has permission to add or delete unicast MAC address
 * filters and return error code -EPERM if not.  Then check if the
 * address filter requested is broadcast or zero and if so return
 * an invalid MAC address error code.
 **/
static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
{
	struct i40e_pf *pf = vf->pf;
	int ret = 0;

	if (is_broadcast_ether_addr(macaddr) ||
	    is_zero_ether_addr(macaddr)) {
		dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
		ret = I40E_ERR_INVALID_MAC_ADDR;
	} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
		   !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
		   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
		/* If the host VMM administrator has set the VF MAC address
		 * administratively via the ndo_set_vf_mac command then deny
		 * permission to the VF to add or delete unicast MAC addresses.
		 * Unless the VF is privileged and then it can do whatever.
		 * The VF may request to set the MAC address filter already
		 * assigned to it so do not return an error in that case.
		 */
		dev_err(&pf->pdev->dev,
			"VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
		ret = -EPERM;
	} else if ((vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF) &&
		   !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"VF is not trusted, switch the VF to trusted to add more functionality\n");
		ret = -EPERM;
	}
	return ret;
}
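/* Illustrative example (assumed scenario): an untrusted VF that already
 * owns I40E_VC_MAX_MAC_ADDR_PER_VF (12) unicast addresses is refused the
 * 13th with -EPERM by the check above; granting trust via
 * i40e_ndo_set_vf_trust() lifts the cap.
 */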
/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * add guest mac address filter
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		ret = i40e_check_vf_permission(vf, al->list[i].addr);
		if (ret)
			goto error_param;
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	/* Lock once; every function called in the loop below accesses the
	 * VSI's MAC filter list, which must be protected by the same lock.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		f = i40e_find_mac(vsi, al->list[i].addr);
		if (!f)
			f = i40e_add_mac_filter(vsi, al->list[i].addr);

		if (!f) {
			dev_err(&pf->pdev->dev,
				"Unable to add MAC filter %pM for VF %d\n",
				al->list[i].addr, vf->vf_id);
			ret = I40E_ERR_PARAM;
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_param;
		} else {
			vf->num_mac++;
		}
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program the updated filter list */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
			vf->vf_id, ret);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
				       ret);
}

/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove guest mac address filter
 **/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		if (is_broadcast_ether_addr(al->list[i].addr) ||
		    is_zero_ether_addr(al->list[i].addr)) {
			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
				al->list[i].addr, vf->vf_id);
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	/* delete addresses from the list */
	for (i = 0; i < al->num_elements; i++)
		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
			ret = I40E_ERR_INVALID_MAC_ADDR;
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_param;
		} else {
			vf->num_mac--;
		}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program the updated filter list */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
			vf->vf_id, ret);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
				       ret);
}
/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * program guest vlan id
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_vlan_filter_list *vfl =
	    (struct virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
		goto error_param;
	}
	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);

		if (!ret)
			vf->num_vlan++;

		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
							   true,
							   vfl->vlan_id[i],
							   NULL);
		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
							   true,
							   vfl->vlan_id[i],
							   NULL);

		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VLAN filter %d for VF %d, error %d\n",
				vfl->vlan_id[i], vf->vf_id, ret);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
}

/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove programmed guest vlan id
 **/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_vlan_filter_list *vfl =
	    (struct virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
		vf->num_vlan--;

		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
							   false,
							   vfl->vlan_id[i],
							   NULL);
		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
							   false,
							   vfl->vlan_id[i],
							   NULL);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
}
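/* Illustrative note: VLAN and promiscuous state interact above - e.g. a VF
 * with I40E_VF_STATE_UC_PROMISC set that removes VLAN 100 also loses its
 * per-VLAN unicast promiscuous entry for 100, keeping the hardware in step
 * with the filter list.
 */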
/**
 * i40e_vc_iwarp_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF for the iwarp msgs
 **/
static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf = vf->pf;
	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
				     msg, msglen);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
				       aq_ret);
}

/**
 * i40e_vc_iwarp_qvmap_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 * @config: config qvmap or release it
 *
 * called from the VF for the iwarp msgs
 **/
static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
				   bool config)
{
	struct virtchnl_iwarp_qvlist_info *qvlist_info =
	    (struct virtchnl_iwarp_qvlist_info *)msg;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (config) {
		if (i40e_config_iwarp_qvlist(vf, qvlist_info))
			aq_ret = I40E_ERR_PARAM;
	} else {
		i40e_release_iwarp_qvlist(vf);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
				       VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
				       aq_ret);
}

/**
 * i40e_vc_config_rss_key
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Configure the VF's RSS key
 **/
static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_rss_key *vrk =
	    (struct virtchnl_rss_key *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vrk->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
	    (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
err:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
				       aq_ret);
}

/**
 * i40e_vc_config_rss_lut
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Configure the VF's RSS LUT
 **/
static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_rss_lut *vrl =
	    (struct virtchnl_rss_lut *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vrl->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
	    (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
err:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
				       aq_ret);
}
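/* Illustrative example (assumed values): for a 4-queue VF, a LUT filled
 * with the repeating pattern { 0, 1, 2, 3 } spreads RSS hash buckets
 * round-robin across the queues; vrl->lut_entries must equal
 * I40E_VF_HLUT_ARRAY_SIZE or the request is rejected above.
 */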
/**
 * i40e_vc_get_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Return the RSS HENA bits allowed by the hardware
 **/
static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_rss_hena *vrh = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	int len = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}
	len = sizeof(struct virtchnl_rss_hena);

	vrh = kzalloc(len, GFP_KERNEL);
	if (!vrh) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	vrh->hena = i40e_pf_get_default_rss_hena(pf);
err:
	/* send the response back to the VF */
	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
					aq_ret, (u8 *)vrh, len);
	kfree(vrh);
	return aq_ret;
}

/**
 * i40e_vc_set_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Set the RSS HENA bits for the VF
 **/
static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_rss_hena *vrh =
	    (struct virtchnl_rss_hena *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
			  (u32)(vrh->hena >> 32));

err:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
}
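/* Illustrative example (assumed value): a 64-bit hena of 0x0000000500000003
 * is split above into VFQF_HENA1(0, vf_id) = 0x00000003 (low dword) and
 * VFQF_HENA1(1, vf_id) = 0x00000005 (high dword).
 */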
/**
 * i40e_vc_enable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Enable vlan header stripping for the VF
 **/
static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
					 u16 msglen)
{
	struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	i40e_vlan_stripping_enable(vsi);

err:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
				       aq_ret);
}

/**
 * i40e_vc_disable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Disable vlan header stripping for the VF
 **/
static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
					  u16 msglen)
{
	struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	i40e_vlan_stripping_disable(vsi);

err:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
				       aq_ret);
}

/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the PF structure
 * @vf_id: source VF id
 * @v_opcode: operation code
 * @v_retval: unused return value code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process request from VF
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
			   u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
	struct i40e_vf *vf;
	int ret;

	pf->vf_aq_requests++;
	if (local_vf_id >= pf->num_alloc_vfs)
		return -EINVAL;
	vf = &pf->vf[local_vf_id];

	/* Check if VF is disabled. */
	if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
		return I40E_ERR_PARAM;

	/* perform basic checks on the msg */
	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);

	/* perform additional checks specific to this driver */
	if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
		struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;

		if (vrk->key_len != I40E_HKEY_ARRAY_SIZE)
			ret = -EINVAL;
	} else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
		struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;

		if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)
			ret = -EINVAL;
	}

	if (ret) {
		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
			local_vf_id, v_opcode, msglen);
		switch (ret) {
		case VIRTCHNL_ERR_PARAM:
			return -EPERM;
		default:
			return -EINVAL;
		}
	}

	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf, msg);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf, msg);
		break;
	case VIRTCHNL_OP_RESET_VF:
		i40e_vc_reset_vf_msg(vf);
		ret = 0;
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
		i40e_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_IWARP:
		ret = i40e_vc_iwarp_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
		break;
	case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		ret = i40e_vc_config_rss_key(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		ret = i40e_vc_config_rss_lut(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
		ret = i40e_vc_get_rss_hena(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		ret = i40e_vc_set_rss_hena(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
			v_opcode, local_vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      I40E_ERR_NOT_IMPLEMENTED);
		break;
	}

	return ret;
}
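/* Illustrative note: each handler dispatched above sends its own virtchnl
 * response (e.g. VIRTCHNL_OP_ADD_ETH_ADDR ends in i40e_vc_send_resp_to_vf()),
 * so the return value of i40e_vc_process_vf_msg() reflects local message
 * handling, not the status code already delivered to the VF.
 */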
/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the PF structure
 *
 * called from the VFLR irq handler to
 * free up VF resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;
	struct i40e_vf *vf;
	int vf_id;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		return 0;

	/* Re-enable the VFLR interrupt cause here, before looking for which
	 * VF got reset. Otherwise, if another VF gets a reset while the
	 * first one is being processed, that interrupt will be lost, and
	 * that VF will be stuck in reset forever.
	 */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
			i40e_reset_vf(vf, true);
	}

	return 0;
}
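/* Illustrative example (assumed values): with vf_base_id = 64 and
 * vf_id = 3, the loop above computes reg_idx = (64 + 3) / 32 = 2 and
 * bit_idx = (64 + 3) % 32 = 3, then tests bit 3 of GLGEN_VFLRSTAT(2) to
 * see whether that VF went through a function level reset.
 */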
/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: mac address
 *
 * program VF mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;
	int bkt;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_param;
	}

	if (is_multicast_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	/* Lock once because the add/del filter functions invoked below
	 * require mac_filter_hash_lock to be held
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* delete the temporary mac address */
	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);

	/* Delete all the filters for this VSI - we're going to kill it
	 * anyway.
	 */
	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	ether_addr_copy(vf->default_lan_addr.addr, mac);

	if (is_zero_ether_addr(mac)) {
		vf->pf_set_mac = false;
		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
	} else {
		vf->pf_set_mac = true;
		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
			 mac, vf_id);
	}

	/* Force the VF driver stop so it has to reload with new MAC address */
	i40e_vc_disable_vf(pf, vf);
	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");

error_param:
	return ret;
}
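/* Illustrative example (assumed values): in i40e_ndo_set_vf_port_vlan()
 * below, vlan_id = 100 (0x064) with qos = 5 encodes to
 * vlanprio = 0x064 | (5 << I40E_VLAN_PRIORITY_SHIFT) = 0xA064, i.e. the
 * 802.1Q TCI layout with the PCP bits on top.
 */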
/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN identifier
 * @qos: priority setting
 * @vlan_proto: vlan protocol
 *
 * program VF vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
			      u16 vlan_id, u8 qos, __be16 vlan_proto)
{
	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vlan_proto != htons(ETH_P_8021Q)) {
		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
		ret = -EPROTONOSUPPORT;
		goto error_pvid;
	}

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_pvid;
	}

	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
		/* duplicate request, so just return success */
		goto error_pvid;

	/* Locked once because multiple functions below iterate list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) {
		dev_err(&pf->pdev->dev,
			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
			vf_id);
		/* Administrator error - knock the VF offline until the
		 * administrator reconfigures the guest network correctly
		 * and then reloads the VF driver.
		 */
		i40e_vc_disable_vf(pf, vf);
		/* During reset the VF got a new VSI, so refresh the pointer. */
		vsi = pf->vsi[vf->lan_vsi_idx];
	}

	/* Check for condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Additionally check for the condition where there was a port
	 * VLAN but now there is a new and different port VLAN being set.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if ((!(vlan_id || qos) ||
	     vlanprio != le16_to_cpu(vsi->info.pvid)) &&
	    vsi->info.pvid) {
		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}
	}

	if (vsi->info.pvid) {
		/* remove all filters on the old VLAN */
		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
					   VLAN_VID_MASK));
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi, vlanprio);
	else
		i40e_vsi_remove_pvid(vsi);
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter for each MAC */
		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}

		/* remove the previously added non-VLAN MAC filters */
		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* Schedule the worker thread to take care of applying changes */
	i40e_service_event_schedule(vsi->back);

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}

	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
	ret = 0;

error_pvid:
	return ret;
}
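/* Illustrative example (assumed value): a request of max_tx_rate = 975 in
 * i40e_ndo_set_vf_bw() below is programmed as
 * 975 / I40E_BW_CREDIT_DIVISOR = 19 credits, i.e. 950 Mbps, because the
 * hardware meters Tx bandwidth in 50 Mbps credits.
 */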
#define I40E_BW_CREDIT_DIVISOR 50     /* 50Mbps per BW credit */
#define I40E_MAX_BW_INACTIVE_ACCUM 4  /* device can accumulate 4 credits max */
/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: minimum Tx rate, must be zero (unsupported)
 * @max_tx_rate: maximum Tx rate
 *
 * configure VF Tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
		       int max_tx_rate)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int speed = 0;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
		ret = -EINVAL;
		goto error;
	}

	if (min_tx_rate) {
		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
			min_tx_rate, vf_id);
		return -EINVAL;
	}

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error;
	}

	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = 40000;
		break;
	case I40E_LINK_SPEED_25GB:
		speed = 25000;
		break;
	case I40E_LINK_SPEED_20GB:
		speed = 20000;
		break;
	case I40E_LINK_SPEED_10GB:
		speed = 10000;
		break;
	case I40E_LINK_SPEED_1GB:
		speed = 1000;
		break;
	default:
		break;
	}

	if (max_tx_rate > speed) {
		dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.\n",
			max_tx_rate, vf->vf_id);
		ret = -EINVAL;
		goto error;
	}

	if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
		dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
		max_tx_rate = 50;
	}

	/* Tx rate credits are in values of 50Mbps, 0 is disabled */
	ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
					  max_tx_rate / I40E_BW_CREDIT_DIVISOR,
					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
			ret);
		ret = -EIO;
		goto error;
	}
	vf->tx_rate = max_tx_rate;
error:
	return ret;
}
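/* Illustrative example (assumed value): reading back pvid = 0xA064 in
 * i40e_ndo_get_vf_config() below decodes to vlan = 100 and qos = 5, the
 * inverse of the encoding used when the port VLAN was set.
 */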
/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &pf->vf[vf_id];
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_param;
	}

	ivi->vf = vf_id;

	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);

	ivi->max_tx_rate = vf->tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->spoofchk = vf->spoofchk;
	ivi->trusted = vf->trusted;
	ret = 0;

error_param:
	return ret;
}

/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct virtchnl_pf_event pfe;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int abs_vf_id;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_out;
	}

	vf = &pf->vf[vf_id];
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		pfe.event_data.link_event.link_status =
			pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			pf->hw.phy.link_info.link_speed;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		pfe.event_data.link_event.link_status = true;
		pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		pfe.event_data.link_event.link_status = false;
		pfe.event_data.link_event.link_speed = 0;
		break;
	default:
		ret = -EINVAL;
		goto error_out;
	}
	/* Notify the VF of its new link state */
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);

error_out:
	return ret;
}
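/* Illustrative usage (host side): "ip link set <pf-netdev> vf 2 state
 * disable" lands in i40e_ndo_set_vf_link_state() above with
 * link = IFLA_VF_LINK_STATE_DISABLE, forcing link down for VF 2 regardless
 * of the physical link.
 */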
/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	vf = &pf->vf[vf_id];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto out;
	}

	if (enable == vf->spoofchk)
		goto out;

	vf->spoofchk = enable;
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
			ret);
		ret = -EIO;
	}
out:
	return ret;
}

/**
 * i40e_ndo_set_vf_trust
 * @netdev: network interface device structure of the pf
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Enable or disable VF trust setting
 **/
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		return -EINVAL;
	}

	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
		return -EINVAL;
	}

	vf = &pf->vf[vf_id];

	if (!vf)
		return -EINVAL;
	if (setting == vf->trusted)
		goto out;

	vf->trusted = setting;
	i40e_vc_notify_vf_reset(vf);
	i40e_reset_vf(vf, false);
	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
		 vf_id, setting ? "" : "un");
out:
	return ret;
}
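/* Illustrative usage (host side): "ip link set <pf-netdev> vf 0 trust on"
 * reaches i40e_ndo_set_vf_trust() above; the VF is reset so the new
 * privilege level takes effect, which is why trust changes require the VF
 * driver to reinitialize.
 */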