1 /******************************************************************************* 2 * 3 * Intel Ethernet Controller XL710 Family Linux Driver 4 * Copyright(c) 2013 - 2016 Intel Corporation. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms and conditions of the GNU General Public License, 8 * version 2, as published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 * more details. 14 * 15 * You should have received a copy of the GNU General Public License along 16 * with this program. If not, see <http://www.gnu.org/licenses/>. 17 * 18 * The full GNU General Public License is included in this distribution in 19 * the file called "COPYING". 20 * 21 * Contact Information: 22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 * 25 ******************************************************************************/ 26 27 #include "i40e.h" 28 29 /*********************notification routines***********************/ 30 31 /** 32 * i40e_vc_vf_broadcast 33 * @pf: pointer to the PF structure 34 * @opcode: operation code 35 * @retval: return value 36 * @msg: pointer to the msg buffer 37 * @msglen: msg length 38 * 39 * send a message to all VFs on a given PF 40 **/ 41 static void i40e_vc_vf_broadcast(struct i40e_pf *pf, 42 enum i40e_virtchnl_ops v_opcode, 43 i40e_status v_retval, u8 *msg, 44 u16 msglen) 45 { 46 struct i40e_hw *hw = &pf->hw; 47 struct i40e_vf *vf = pf->vf; 48 int i; 49 50 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { 51 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; 52 /* Not all vfs are enabled so skip the ones that are not */ 53 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && 54 !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) 55 continue; 56 57 /* Ignore return value on purpose - a given VF may fail, but 58 * we need to keep going and send to all of them 59 */ 60 i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, 61 msg, msglen, NULL); 62 } 63 } 64 65 /** 66 * i40e_vc_notify_vf_link_state 67 * @vf: pointer to the VF structure 68 * 69 * send a link status message to a single VF 70 **/ 71 static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) 72 { 73 struct i40e_virtchnl_pf_event pfe; 74 struct i40e_pf *pf = vf->pf; 75 struct i40e_hw *hw = &pf->hw; 76 struct i40e_link_status *ls = &pf->hw.phy.link_info; 77 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; 78 79 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; 80 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; 81 if (vf->link_forced) { 82 pfe.event_data.link_event.link_status = vf->link_up; 83 pfe.event_data.link_event.link_speed = 84 (vf->link_up ? I40E_LINK_SPEED_40GB : 0); 85 } else { 86 pfe.event_data.link_event.link_status = 87 ls->link_info & I40E_AQ_LINK_UP; 88 pfe.event_data.link_event.link_speed = ls->link_speed; 89 } 90 i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, 91 0, (u8 *)&pfe, sizeof(pfe), NULL); 92 } 93 94 /** 95 * i40e_vc_notify_link_state 96 * @pf: pointer to the PF structure 97 * 98 * send a link status message to all VFs on a given PF 99 **/ 100 void i40e_vc_notify_link_state(struct i40e_pf *pf) 101 { 102 int i; 103 104 for (i = 0; i < pf->num_alloc_vfs; i++) 105 i40e_vc_notify_vf_link_state(&pf->vf[i]); 106 } 107 108 /** 109 * i40e_vc_notify_reset 110 * @pf: pointer to the PF structure 111 * 112 * indicate a pending reset to all VFs on a given PF 113 **/ 114 void i40e_vc_notify_reset(struct i40e_pf *pf) 115 { 116 struct i40e_virtchnl_pf_event pfe; 117 118 pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; 119 pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; 120 i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, 0, 121 (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event)); 122 } 123 124 /** 125 * i40e_vc_notify_vf_reset 126 * @vf: pointer to the VF structure 127 * 128 * indicate a pending reset to the given VF 129 **/ 130 void i40e_vc_notify_vf_reset(struct i40e_vf *vf) 131 { 132 struct i40e_virtchnl_pf_event pfe; 133 int abs_vf_id; 134 135 /* validate the request */ 136 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) 137 return; 138 139 /* verify if the VF is in either init or active before proceeding */ 140 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && 141 !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) 142 return; 143 144 abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id; 145 146 pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; 147 pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; 148 i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, 149 0, (u8 *)&pfe, 150 sizeof(struct i40e_virtchnl_pf_event), NULL); 151 } 152 /***********************misc routines*****************************/ 153 154 /** 155 * i40e_vc_disable_vf 156 * @pf: pointer to the PF info 157 * @vf: pointer to the VF info 158 * 159 * Disable the VF through a SW reset 160 **/ 161 static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf) 162 { 163 i40e_vc_notify_vf_reset(vf); 164 i40e_reset_vf(vf, false); 165 } 166 167 /** 168 * i40e_vc_isvalid_vsi_id 169 * @vf: pointer to the VF info 170 * @vsi_id: VF relative VSI id 171 * 172 * check for the valid VSI id 173 **/ 174 static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id) 175 { 176 struct i40e_pf *pf = vf->pf; 177 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); 178 179 return (vsi && (vsi->vf_id == vf->vf_id)); 180 } 181 182 /** 183 * i40e_vc_isvalid_queue_id 184 * @vf: pointer to the VF info 185 * @vsi_id: vsi id 186 * @qid: vsi relative queue id 187 * 188 * check for the valid queue id 189 **/ 190 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id, 191 u8 qid) 192 { 193 struct i40e_pf *pf = vf->pf; 194 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); 195 196 return (vsi && (qid < vsi->alloc_queue_pairs)); 197 } 198 199 /** 200 * i40e_vc_isvalid_vector_id 201 * @vf: pointer to the VF info 202 * @vector_id: VF relative vector id 203 * 204 * check for the valid vector id 205 **/ 206 static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id) 207 { 208 struct i40e_pf *pf = vf->pf; 209 210 return vector_id < pf->hw.func_caps.num_msix_vectors_vf; 211 } 212 213 /***********************vf resource mgmt routines*****************/ 214 215 /** 216 * i40e_vc_get_pf_queue_id 217 * @vf: pointer to the VF info 218 * @vsi_id: id of VSI as provided by the FW 219 * @vsi_queue_id: vsi relative queue id 220 * 221 * return PF relative queue id 222 **/ 223 static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id, 224 u8 vsi_queue_id) 225 { 226 struct i40e_pf *pf = vf->pf; 227 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); 228 u16 pf_queue_id = I40E_QUEUE_END_OF_LIST; 229 230 if (!vsi) 231 return pf_queue_id; 232 233 if (le16_to_cpu(vsi->info.mapping_flags) & 234 I40E_AQ_VSI_QUE_MAP_NONCONTIG) 235 pf_queue_id = 236 le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]); 237 else 238 pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) + 239 vsi_queue_id; 240 241 return pf_queue_id; 242 } 243 244 /** 245 * i40e_config_irq_link_list 246 * @vf: pointer to the VF info 247 * @vsi_id: id of VSI as given by the FW 248 * @vecmap: irq map info 249 * 250 * configure irq link list from the map 251 **/ 252 static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, 253 struct i40e_virtchnl_vector_map *vecmap) 254 { 255 unsigned long linklistmap = 0, tempmap; 256 struct i40e_pf *pf = vf->pf; 257 struct i40e_hw *hw = &pf->hw; 258 u16 vsi_queue_id, pf_queue_id; 259 enum i40e_queue_type qtype; 260 u16 next_q, vector_id; 261 u32 reg, reg_idx; 262 u16 itr_idx = 0; 263 264 vector_id = vecmap->vector_id; 265 /* setup the head */ 266 if (0 == vector_id) 267 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); 268 else 269 reg_idx = I40E_VPINT_LNKLSTN( 270 ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) + 271 (vector_id - 1)); 272 273 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) { 274 /* Special case - No queues mapped on this vector */ 275 wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK); 276 goto irq_list_done; 277 } 278 tempmap = vecmap->rxq_map; 279 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 280 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES * 281 vsi_queue_id)); 282 } 283 284 tempmap = vecmap->txq_map; 285 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 286 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES * 287 vsi_queue_id + 1)); 288 } 289 290 next_q = find_first_bit(&linklistmap, 291 (I40E_MAX_VSI_QP * 292 I40E_VIRTCHNL_SUPPORTED_QTYPES)); 293 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; 294 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; 295 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); 296 reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id); 297 298 wr32(hw, reg_idx, reg); 299 300 while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) { 301 switch (qtype) { 302 case I40E_QUEUE_TYPE_RX: 303 reg_idx = I40E_QINT_RQCTL(pf_queue_id); 304 itr_idx = vecmap->rxitr_idx; 305 break; 306 case I40E_QUEUE_TYPE_TX: 307 reg_idx = I40E_QINT_TQCTL(pf_queue_id); 308 itr_idx = vecmap->txitr_idx; 309 break; 310 default: 311 break; 312 } 313 314 next_q = find_next_bit(&linklistmap, 315 (I40E_MAX_VSI_QP * 316 I40E_VIRTCHNL_SUPPORTED_QTYPES), 317 next_q + 1); 318 if (next_q < 319 (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) { 320 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; 321 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; 322 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, 323 vsi_queue_id); 324 } else { 325 pf_queue_id = I40E_QUEUE_END_OF_LIST; 326 qtype = 0; 327 } 328 329 /* format for the RQCTL & TQCTL regs is same */ 330 reg = (vector_id) | 331 (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | 332 (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | 333 BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | 334 (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT); 335 wr32(hw, reg_idx, reg); 336 } 337 338 /* if the vf is running in polling mode and using interrupt zero, 339 * need to disable auto-mask on enabling zero interrupt for VFs. 340 */ 341 if ((vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) && 342 (vector_id == 0)) { 343 reg = rd32(hw, I40E_GLINT_CTL); 344 if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) { 345 reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK; 346 wr32(hw, I40E_GLINT_CTL, reg); 347 } 348 } 349 350 irq_list_done: 351 i40e_flush(hw); 352 } 353 354 /** 355 * i40e_release_iwarp_qvlist 356 * @vf: pointer to the VF. 357 * 358 **/ 359 static void i40e_release_iwarp_qvlist(struct i40e_vf *vf) 360 { 361 struct i40e_pf *pf = vf->pf; 362 struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info; 363 u32 msix_vf; 364 u32 i; 365 366 if (!vf->qvlist_info) 367 return; 368 369 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 370 for (i = 0; i < qvlist_info->num_vectors; i++) { 371 struct i40e_virtchnl_iwarp_qv_info *qv_info; 372 u32 next_q_index, next_q_type; 373 struct i40e_hw *hw = &pf->hw; 374 u32 v_idx, reg_idx, reg; 375 376 qv_info = &qvlist_info->qv_info[i]; 377 if (!qv_info) 378 continue; 379 v_idx = qv_info->v_idx; 380 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) { 381 /* Figure out the queue after CEQ and make that the 382 * first queue. 383 */ 384 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx; 385 reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx)); 386 next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK) 387 >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT; 388 next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK) 389 >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT; 390 391 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); 392 reg = (next_q_index & 393 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) | 394 (next_q_type << 395 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); 396 397 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg); 398 } 399 } 400 kfree(vf->qvlist_info); 401 vf->qvlist_info = NULL; 402 } 403 404 /** 405 * i40e_config_iwarp_qvlist 406 * @vf: pointer to the VF info 407 * @qvlist_info: queue and vector list 408 * 409 * Return 0 on success or < 0 on error 410 **/ 411 static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, 412 struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info) 413 { 414 struct i40e_pf *pf = vf->pf; 415 struct i40e_hw *hw = &pf->hw; 416 struct i40e_virtchnl_iwarp_qv_info *qv_info; 417 u32 v_idx, i, reg_idx, reg; 418 u32 next_q_idx, next_q_type; 419 u32 msix_vf, size; 420 421 size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) + 422 (sizeof(struct i40e_virtchnl_iwarp_qv_info) * 423 (qvlist_info->num_vectors - 1)); 424 vf->qvlist_info = kzalloc(size, GFP_KERNEL); 425 vf->qvlist_info->num_vectors = qvlist_info->num_vectors; 426 427 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 428 for (i = 0; i < qvlist_info->num_vectors; i++) { 429 qv_info = &qvlist_info->qv_info[i]; 430 if (!qv_info) 431 continue; 432 v_idx = qv_info->v_idx; 433 434 /* Validate vector id belongs to this vf */ 435 if (!i40e_vc_isvalid_vector_id(vf, v_idx)) 436 goto err; 437 438 vf->qvlist_info->qv_info[i] = *qv_info; 439 440 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); 441 /* We might be sharing the interrupt, so get the first queue 442 * index and type, push it down the list by adding the new 443 * queue on top. Also link it with the new queue in CEQCTL. 444 */ 445 reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx)); 446 next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >> 447 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT); 448 next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >> 449 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); 450 451 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) { 452 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx; 453 reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK | 454 (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) | 455 (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) | 456 (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) | 457 (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)); 458 wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg); 459 460 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); 461 reg = (qv_info->ceq_idx & 462 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) | 463 (I40E_QUEUE_TYPE_PE_CEQ << 464 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); 465 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg); 466 } 467 468 if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) { 469 reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK | 470 (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) | 471 (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)); 472 473 wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg); 474 } 475 } 476 477 return 0; 478 err: 479 kfree(vf->qvlist_info); 480 vf->qvlist_info = NULL; 481 return -EINVAL; 482 } 483 484 /** 485 * i40e_config_vsi_tx_queue 486 * @vf: pointer to the VF info 487 * @vsi_id: id of VSI as provided by the FW 488 * @vsi_queue_id: vsi relative queue index 489 * @info: config. info 490 * 491 * configure tx queue 492 **/ 493 static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id, 494 u16 vsi_queue_id, 495 struct i40e_virtchnl_txq_info *info) 496 { 497 struct i40e_pf *pf = vf->pf; 498 struct i40e_hw *hw = &pf->hw; 499 struct i40e_hmc_obj_txq tx_ctx; 500 struct i40e_vsi *vsi; 501 u16 pf_queue_id; 502 u32 qtx_ctl; 503 int ret = 0; 504 505 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); 506 vsi = i40e_find_vsi_from_id(pf, vsi_id); 507 508 /* clear the context structure first */ 509 memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq)); 510 511 /* only set the required fields */ 512 tx_ctx.base = info->dma_ring_addr / 128; 513 tx_ctx.qlen = info->ring_len; 514 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]); 515 tx_ctx.rdylist_act = 0; 516 tx_ctx.head_wb_ena = info->headwb_enabled; 517 tx_ctx.head_wb_addr = info->dma_headwb_addr; 518 519 /* clear the context in the HMC */ 520 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); 521 if (ret) { 522 dev_err(&pf->pdev->dev, 523 "Failed to clear VF LAN Tx queue context %d, error: %d\n", 524 pf_queue_id, ret); 525 ret = -ENOENT; 526 goto error_context; 527 } 528 529 /* set the context in the HMC */ 530 ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx); 531 if (ret) { 532 dev_err(&pf->pdev->dev, 533 "Failed to set VF LAN Tx queue context %d error: %d\n", 534 pf_queue_id, ret); 535 ret = -ENOENT; 536 goto error_context; 537 } 538 539 /* associate this queue with the PCI VF function */ 540 qtx_ctl = I40E_QTX_CTL_VF_QUEUE; 541 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) 542 & I40E_QTX_CTL_PF_INDX_MASK); 543 qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id) 544 << I40E_QTX_CTL_VFVM_INDX_SHIFT) 545 & I40E_QTX_CTL_VFVM_INDX_MASK); 546 wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl); 547 i40e_flush(hw); 548 549 error_context: 550 return ret; 551 } 552 553 /** 554 * i40e_config_vsi_rx_queue 555 * @vf: pointer to the VF info 556 * @vsi_id: id of VSI as provided by the FW 557 * @vsi_queue_id: vsi relative queue index 558 * @info: config. info 559 * 560 * configure rx queue 561 **/ 562 static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, 563 u16 vsi_queue_id, 564 struct i40e_virtchnl_rxq_info *info) 565 { 566 struct i40e_pf *pf = vf->pf; 567 struct i40e_hw *hw = &pf->hw; 568 struct i40e_hmc_obj_rxq rx_ctx; 569 u16 pf_queue_id; 570 int ret = 0; 571 572 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); 573 574 /* clear the context structure first */ 575 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); 576 577 /* only set the required fields */ 578 rx_ctx.base = info->dma_ring_addr / 128; 579 rx_ctx.qlen = info->ring_len; 580 581 if (info->splithdr_enabled) { 582 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | 583 I40E_RX_SPLIT_IP | 584 I40E_RX_SPLIT_TCP_UDP | 585 I40E_RX_SPLIT_SCTP; 586 /* header length validation */ 587 if (info->hdr_size > ((2 * 1024) - 64)) { 588 ret = -EINVAL; 589 goto error_param; 590 } 591 rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; 592 593 /* set split mode 10b */ 594 rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT; 595 } 596 597 /* databuffer length validation */ 598 if (info->databuffer_size > ((16 * 1024) - 128)) { 599 ret = -EINVAL; 600 goto error_param; 601 } 602 rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT; 603 604 /* max pkt. length validation */ 605 if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) { 606 ret = -EINVAL; 607 goto error_param; 608 } 609 rx_ctx.rxmax = info->max_pkt_size; 610 611 /* enable 32bytes desc always */ 612 rx_ctx.dsize = 1; 613 614 /* default values */ 615 rx_ctx.lrxqthresh = 2; 616 rx_ctx.crcstrip = 1; 617 rx_ctx.prefena = 1; 618 rx_ctx.l2tsel = 1; 619 620 /* clear the context in the HMC */ 621 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id); 622 if (ret) { 623 dev_err(&pf->pdev->dev, 624 "Failed to clear VF LAN Rx queue context %d, error: %d\n", 625 pf_queue_id, ret); 626 ret = -ENOENT; 627 goto error_param; 628 } 629 630 /* set the context in the HMC */ 631 ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx); 632 if (ret) { 633 dev_err(&pf->pdev->dev, 634 "Failed to set VF LAN Rx queue context %d error: %d\n", 635 pf_queue_id, ret); 636 ret = -ENOENT; 637 goto error_param; 638 } 639 640 error_param: 641 return ret; 642 } 643 644 /** 645 * i40e_alloc_vsi_res 646 * @vf: pointer to the VF info 647 * @type: type of VSI to allocate 648 * 649 * alloc VF vsi context & resources 650 **/ 651 static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) 652 { 653 struct i40e_mac_filter *f = NULL; 654 struct i40e_pf *pf = vf->pf; 655 struct i40e_vsi *vsi; 656 int ret = 0; 657 658 vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id); 659 660 if (!vsi) { 661 dev_err(&pf->pdev->dev, 662 "add vsi failed for VF %d, aq_err %d\n", 663 vf->vf_id, pf->hw.aq.asq_last_status); 664 ret = -ENOENT; 665 goto error_alloc_vsi_res; 666 } 667 if (type == I40E_VSI_SRIOV) { 668 vf->lan_vsi_idx = vsi->idx; 669 vf->lan_vsi_id = vsi->id; 670 /* If the port VLAN has been configured and then the 671 * VF driver was removed then the VSI port VLAN 672 * configuration was destroyed. Check if there is 673 * a port VLAN and restore the VSI configuration if 674 * needed. 675 */ 676 if (vf->port_vlan_id) 677 i40e_vsi_add_pvid(vsi, vf->port_vlan_id); 678 679 spin_lock_bh(&vsi->mac_filter_list_lock); 680 if (is_valid_ether_addr(vf->default_lan_addr.addr)) { 681 f = i40e_add_filter(vsi, vf->default_lan_addr.addr, 682 vf->port_vlan_id ? vf->port_vlan_id : -1, 683 true, false); 684 if (!f) 685 dev_info(&pf->pdev->dev, 686 "Could not add MAC filter %pM for VF %d\n", 687 vf->default_lan_addr.addr, vf->vf_id); 688 } 689 spin_unlock_bh(&vsi->mac_filter_list_lock); 690 } 691 692 /* program mac filter */ 693 ret = i40e_sync_vsi_filters(vsi); 694 if (ret) 695 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 696 697 /* Set VF bandwidth if specified */ 698 if (vf->tx_rate) { 699 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, 700 vf->tx_rate / 50, 0, NULL); 701 if (ret) 702 dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n", 703 vf->vf_id, ret); 704 } 705 706 error_alloc_vsi_res: 707 return ret; 708 } 709 710 /** 711 * i40e_enable_vf_mappings 712 * @vf: pointer to the VF info 713 * 714 * enable VF mappings 715 **/ 716 static void i40e_enable_vf_mappings(struct i40e_vf *vf) 717 { 718 struct i40e_pf *pf = vf->pf; 719 struct i40e_hw *hw = &pf->hw; 720 u32 reg, total_queue_pairs = 0; 721 int j; 722 723 /* Tell the hardware we're using noncontiguous mapping. HW requires 724 * that VF queues be mapped using this method, even when they are 725 * contiguous in real life 726 */ 727 i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id), 728 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); 729 730 /* enable VF vplan_qtable mappings */ 731 reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK; 732 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg); 733 734 /* map PF queues to VF queues */ 735 for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) { 736 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j); 737 738 reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); 739 wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg); 740 total_queue_pairs++; 741 } 742 743 /* map PF queues to VSI */ 744 for (j = 0; j < 7; j++) { 745 if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) { 746 reg = 0x07FF07FF; /* unused */ 747 } else { 748 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, 749 j * 2); 750 reg = qid; 751 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, 752 (j * 2) + 1); 753 reg |= qid << 16; 754 } 755 i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), 756 reg); 757 } 758 759 i40e_flush(hw); 760 } 761 762 /** 763 * i40e_disable_vf_mappings 764 * @vf: pointer to the VF info 765 * 766 * disable VF mappings 767 **/ 768 static void i40e_disable_vf_mappings(struct i40e_vf *vf) 769 { 770 struct i40e_pf *pf = vf->pf; 771 struct i40e_hw *hw = &pf->hw; 772 int i; 773 774 /* disable qp mappings */ 775 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0); 776 for (i = 0; i < I40E_MAX_VSI_QP; i++) 777 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id), 778 I40E_QUEUE_END_OF_LIST); 779 i40e_flush(hw); 780 } 781 782 /** 783 * i40e_free_vf_res 784 * @vf: pointer to the VF info 785 * 786 * free VF resources 787 **/ 788 static void i40e_free_vf_res(struct i40e_vf *vf) 789 { 790 struct i40e_pf *pf = vf->pf; 791 struct i40e_hw *hw = &pf->hw; 792 u32 reg_idx, reg; 793 int i, msix_vf; 794 795 /* free vsi & disconnect it from the parent uplink */ 796 if (vf->lan_vsi_idx) { 797 i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]); 798 vf->lan_vsi_idx = 0; 799 vf->lan_vsi_id = 0; 800 } 801 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 802 803 /* disable interrupts so the VF starts in a known state */ 804 for (i = 0; i < msix_vf; i++) { 805 /* format is same for both registers */ 806 if (0 == i) 807 reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id); 808 else 809 reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) * 810 (vf->vf_id)) 811 + (i - 1)); 812 wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); 813 i40e_flush(hw); 814 } 815 816 /* clear the irq settings */ 817 for (i = 0; i < msix_vf; i++) { 818 /* format is same for both registers */ 819 if (0 == i) 820 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); 821 else 822 reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) * 823 (vf->vf_id)) 824 + (i - 1)); 825 reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK | 826 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); 827 wr32(hw, reg_idx, reg); 828 i40e_flush(hw); 829 } 830 /* reset some of the state varibles keeping 831 * track of the resources 832 */ 833 vf->num_queue_pairs = 0; 834 vf->vf_states = 0; 835 clear_bit(I40E_VF_STAT_INIT, &vf->vf_states); 836 } 837 838 /** 839 * i40e_alloc_vf_res 840 * @vf: pointer to the VF info 841 * 842 * allocate VF resources 843 **/ 844 static int i40e_alloc_vf_res(struct i40e_vf *vf) 845 { 846 struct i40e_pf *pf = vf->pf; 847 int total_queue_pairs = 0; 848 int ret; 849 850 /* allocate hw vsi context & associated resources */ 851 ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV); 852 if (ret) 853 goto error_alloc; 854 total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; 855 856 if (vf->trusted) 857 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 858 else 859 clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 860 861 /* store the total qps number for the runtime 862 * VF req validation 863 */ 864 vf->num_queue_pairs = total_queue_pairs; 865 866 /* VF is now completely initialized */ 867 set_bit(I40E_VF_STAT_INIT, &vf->vf_states); 868 869 error_alloc: 870 if (ret) 871 i40e_free_vf_res(vf); 872 873 return ret; 874 } 875 876 #define VF_DEVICE_STATUS 0xAA 877 #define VF_TRANS_PENDING_MASK 0x20 878 /** 879 * i40e_quiesce_vf_pci 880 * @vf: pointer to the VF structure 881 * 882 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO 883 * if the transactions never clear. 884 **/ 885 static int i40e_quiesce_vf_pci(struct i40e_vf *vf) 886 { 887 struct i40e_pf *pf = vf->pf; 888 struct i40e_hw *hw = &pf->hw; 889 int vf_abs_id, i; 890 u32 reg; 891 892 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; 893 894 wr32(hw, I40E_PF_PCI_CIAA, 895 VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); 896 for (i = 0; i < 100; i++) { 897 reg = rd32(hw, I40E_PF_PCI_CIAD); 898 if ((reg & VF_TRANS_PENDING_MASK) == 0) 899 return 0; 900 udelay(1); 901 } 902 return -EIO; 903 } 904 905 /** 906 * i40e_reset_vf 907 * @vf: pointer to the VF structure 908 * @flr: VFLR was issued or not 909 * 910 * reset the VF 911 **/ 912 void i40e_reset_vf(struct i40e_vf *vf, bool flr) 913 { 914 struct i40e_pf *pf = vf->pf; 915 struct i40e_hw *hw = &pf->hw; 916 u32 reg, reg_idx, bit_idx; 917 bool rsd = false; 918 int i; 919 920 if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) 921 return; 922 923 /* warn the VF */ 924 clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); 925 926 /* In the case of a VFLR, the HW has already reset the VF and we 927 * just need to clean up, so don't hit the VFRTRIG register. 928 */ 929 if (!flr) { 930 /* reset VF using VPGEN_VFRTRIG reg */ 931 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); 932 reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; 933 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 934 i40e_flush(hw); 935 } 936 /* clear the VFLR bit in GLGEN_VFLRSTAT */ 937 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; 938 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; 939 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 940 i40e_flush(hw); 941 942 if (i40e_quiesce_vf_pci(vf)) 943 dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", 944 vf->vf_id); 945 946 /* poll VPGEN_VFRSTAT reg to make sure 947 * that reset is complete 948 */ 949 for (i = 0; i < 10; i++) { 950 /* VF reset requires driver to first reset the VF and then 951 * poll the status register to make sure that the reset 952 * completed successfully. Due to internal HW FIFO flushes, 953 * we must wait 10ms before the register will be valid. 954 */ 955 usleep_range(10000, 20000); 956 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); 957 if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { 958 rsd = true; 959 break; 960 } 961 } 962 963 if (flr) 964 usleep_range(10000, 20000); 965 966 if (!rsd) 967 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", 968 vf->vf_id); 969 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED); 970 /* clear the reset bit in the VPGEN_VFRTRIG reg */ 971 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); 972 reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; 973 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 974 975 /* On initial reset, we won't have any queues */ 976 if (vf->lan_vsi_idx == 0) 977 goto complete_reset; 978 979 i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false); 980 complete_reset: 981 /* reallocate VF resources to reset the VSI state */ 982 i40e_free_vf_res(vf); 983 if (!i40e_alloc_vf_res(vf)) { 984 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 985 i40e_enable_vf_mappings(vf); 986 set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); 987 clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); 988 i40e_notify_client_of_vf_reset(pf, abs_vf_id); 989 } 990 /* tell the VF the reset is done */ 991 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); 992 993 i40e_flush(hw); 994 clear_bit(__I40E_VF_DISABLE, &pf->state); 995 } 996 997 /** 998 * i40e_free_vfs 999 * @pf: pointer to the PF structure 1000 * 1001 * free VF resources 1002 **/ 1003 void i40e_free_vfs(struct i40e_pf *pf) 1004 { 1005 struct i40e_hw *hw = &pf->hw; 1006 u32 reg_idx, bit_idx; 1007 int i, tmp, vf_id; 1008 1009 if (!pf->vf) 1010 return; 1011 while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) 1012 usleep_range(1000, 2000); 1013 1014 i40e_notify_client_of_vf_enable(pf, 0); 1015 for (i = 0; i < pf->num_alloc_vfs; i++) 1016 if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) 1017 i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx], 1018 false); 1019 1020 /* Disable IOV before freeing resources. This lets any VF drivers 1021 * running in the host get themselves cleaned up before we yank 1022 * the carpet out from underneath their feet. 1023 */ 1024 if (!pci_vfs_assigned(pf->pdev)) 1025 pci_disable_sriov(pf->pdev); 1026 else 1027 dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); 1028 1029 msleep(20); /* let any messages in transit get finished up */ 1030 1031 /* free up VF resources */ 1032 tmp = pf->num_alloc_vfs; 1033 pf->num_alloc_vfs = 0; 1034 for (i = 0; i < tmp; i++) { 1035 if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) 1036 i40e_free_vf_res(&pf->vf[i]); 1037 /* disable qp mappings */ 1038 i40e_disable_vf_mappings(&pf->vf[i]); 1039 } 1040 1041 kfree(pf->vf); 1042 pf->vf = NULL; 1043 1044 /* This check is for when the driver is unloaded while VFs are 1045 * assigned. Setting the number of VFs to 0 through sysfs is caught 1046 * before this function ever gets called. 1047 */ 1048 if (!pci_vfs_assigned(pf->pdev)) { 1049 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to 1050 * work correctly when SR-IOV gets re-enabled. 1051 */ 1052 for (vf_id = 0; vf_id < tmp; vf_id++) { 1053 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; 1054 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 1055 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 1056 } 1057 } 1058 clear_bit(__I40E_VF_DISABLE, &pf->state); 1059 } 1060 1061 #ifdef CONFIG_PCI_IOV 1062 /** 1063 * i40e_alloc_vfs 1064 * @pf: pointer to the PF structure 1065 * @num_alloc_vfs: number of VFs to allocate 1066 * 1067 * allocate VF resources 1068 **/ 1069 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) 1070 { 1071 struct i40e_vf *vfs; 1072 int i, ret = 0; 1073 1074 /* Disable interrupt 0 so we don't try to handle the VFLR. */ 1075 i40e_irq_dynamic_disable_icr0(pf); 1076 1077 /* Check to see if we're just allocating resources for extant VFs */ 1078 if (pci_num_vf(pf->pdev) != num_alloc_vfs) { 1079 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); 1080 if (ret) { 1081 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; 1082 pf->num_alloc_vfs = 0; 1083 goto err_iov; 1084 } 1085 } 1086 i40e_notify_client_of_vf_enable(pf, num_alloc_vfs); 1087 /* allocate memory */ 1088 vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL); 1089 if (!vfs) { 1090 ret = -ENOMEM; 1091 goto err_alloc; 1092 } 1093 pf->vf = vfs; 1094 1095 /* apply default profile */ 1096 for (i = 0; i < num_alloc_vfs; i++) { 1097 vfs[i].pf = pf; 1098 vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB; 1099 vfs[i].vf_id = i; 1100 1101 /* assign default capabilities */ 1102 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); 1103 vfs[i].spoofchk = true; 1104 /* VF resources get allocated during reset */ 1105 i40e_reset_vf(&vfs[i], false); 1106 1107 } 1108 pf->num_alloc_vfs = num_alloc_vfs; 1109 1110 err_alloc: 1111 if (ret) 1112 i40e_free_vfs(pf); 1113 err_iov: 1114 /* Re-enable interrupt 0. */ 1115 i40e_irq_dynamic_enable_icr0(pf, false); 1116 return ret; 1117 } 1118 1119 #endif 1120 /** 1121 * i40e_pci_sriov_enable 1122 * @pdev: pointer to a pci_dev structure 1123 * @num_vfs: number of VFs to allocate 1124 * 1125 * Enable or change the number of VFs 1126 **/ 1127 static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) 1128 { 1129 #ifdef CONFIG_PCI_IOV 1130 struct i40e_pf *pf = pci_get_drvdata(pdev); 1131 int pre_existing_vfs = pci_num_vf(pdev); 1132 int err = 0; 1133 1134 if (test_bit(__I40E_TESTING, &pf->state)) { 1135 dev_warn(&pdev->dev, 1136 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n"); 1137 err = -EPERM; 1138 goto err_out; 1139 } 1140 1141 if (pre_existing_vfs && pre_existing_vfs != num_vfs) 1142 i40e_free_vfs(pf); 1143 else if (pre_existing_vfs && pre_existing_vfs == num_vfs) 1144 goto out; 1145 1146 if (num_vfs > pf->num_req_vfs) { 1147 dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n", 1148 num_vfs, pf->num_req_vfs); 1149 err = -EPERM; 1150 goto err_out; 1151 } 1152 1153 dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs); 1154 err = i40e_alloc_vfs(pf, num_vfs); 1155 if (err) { 1156 dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err); 1157 goto err_out; 1158 } 1159 1160 out: 1161 return num_vfs; 1162 1163 err_out: 1164 return err; 1165 #endif 1166 return 0; 1167 } 1168 1169 /** 1170 * i40e_pci_sriov_configure 1171 * @pdev: pointer to a pci_dev structure 1172 * @num_vfs: number of VFs to allocate 1173 * 1174 * Enable or change the number of VFs. Called when the user updates the number 1175 * of VFs in sysfs. 1176 **/ 1177 int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 1178 { 1179 struct i40e_pf *pf = pci_get_drvdata(pdev); 1180 1181 if (num_vfs) { 1182 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { 1183 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; 1184 i40e_do_reset_safe(pf, 1185 BIT_ULL(__I40E_PF_RESET_REQUESTED)); 1186 } 1187 return i40e_pci_sriov_enable(pdev, num_vfs); 1188 } 1189 1190 if (!pci_vfs_assigned(pf->pdev)) { 1191 i40e_free_vfs(pf); 1192 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; 1193 i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); 1194 } else { 1195 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); 1196 return -EINVAL; 1197 } 1198 return 0; 1199 } 1200 1201 /***********************virtual channel routines******************/ 1202 1203 /** 1204 * i40e_vc_send_msg_to_vf 1205 * @vf: pointer to the VF info 1206 * @v_opcode: virtual channel opcode 1207 * @v_retval: virtual channel return value 1208 * @msg: pointer to the msg buffer 1209 * @msglen: msg length 1210 * 1211 * send msg to VF 1212 **/ 1213 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, 1214 u32 v_retval, u8 *msg, u16 msglen) 1215 { 1216 struct i40e_pf *pf; 1217 struct i40e_hw *hw; 1218 int abs_vf_id; 1219 i40e_status aq_ret; 1220 1221 /* validate the request */ 1222 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) 1223 return -EINVAL; 1224 1225 pf = vf->pf; 1226 hw = &pf->hw; 1227 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 1228 1229 /* single place to detect unsuccessful return values */ 1230 if (v_retval) { 1231 vf->num_invalid_msgs++; 1232 dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n", 1233 vf->vf_id, v_opcode, v_retval); 1234 if (vf->num_invalid_msgs > 1235 I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { 1236 dev_err(&pf->pdev->dev, 1237 "Number of invalid messages exceeded for VF %d\n", 1238 vf->vf_id); 1239 dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); 1240 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); 1241 } 1242 } else { 1243 vf->num_valid_msgs++; 1244 /* reset the invalid counter, if a valid message is received. */ 1245 vf->num_invalid_msgs = 0; 1246 } 1247 1248 aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, 1249 msg, msglen, NULL); 1250 if (aq_ret) { 1251 dev_info(&pf->pdev->dev, 1252 "Unable to send the message to VF %d aq_err %d\n", 1253 vf->vf_id, pf->hw.aq.asq_last_status); 1254 return -EIO; 1255 } 1256 1257 return 0; 1258 } 1259 1260 /** 1261 * i40e_vc_send_resp_to_vf 1262 * @vf: pointer to the VF info 1263 * @opcode: operation code 1264 * @retval: return value 1265 * 1266 * send resp msg to VF 1267 **/ 1268 static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, 1269 enum i40e_virtchnl_ops opcode, 1270 i40e_status retval) 1271 { 1272 return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0); 1273 } 1274 1275 /** 1276 * i40e_vc_get_version_msg 1277 * @vf: pointer to the VF info 1278 * 1279 * called from the VF to request the API version used by the PF 1280 **/ 1281 static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg) 1282 { 1283 struct i40e_virtchnl_version_info info = { 1284 I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR 1285 }; 1286 1287 vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg; 1288 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */ 1289 if (VF_IS_V10(vf)) 1290 info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; 1291 return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION, 1292 I40E_SUCCESS, (u8 *)&info, 1293 sizeof(struct 1294 i40e_virtchnl_version_info)); 1295 } 1296 1297 /** 1298 * i40e_vc_get_vf_resources_msg 1299 * @vf: pointer to the VF info 1300 * @msg: pointer to the msg buffer 1301 * @msglen: msg length 1302 * 1303 * called from the VF to request its resources 1304 **/ 1305 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) 1306 { 1307 struct i40e_virtchnl_vf_resource *vfres = NULL; 1308 struct i40e_pf *pf = vf->pf; 1309 i40e_status aq_ret = 0; 1310 struct i40e_vsi *vsi; 1311 int num_vsis = 1; 1312 int len = 0; 1313 int ret; 1314 1315 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 1316 aq_ret = I40E_ERR_PARAM; 1317 goto err; 1318 } 1319 1320 len = (sizeof(struct i40e_virtchnl_vf_resource) + 1321 sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis); 1322 1323 vfres = kzalloc(len, GFP_KERNEL); 1324 if (!vfres) { 1325 aq_ret = I40E_ERR_NO_MEMORY; 1326 len = 0; 1327 goto err; 1328 } 1329 if (VF_IS_V11(vf)) 1330 vf->driver_caps = *(u32 *)msg; 1331 else 1332 vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 | 1333 I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | 1334 I40E_VIRTCHNL_VF_OFFLOAD_VLAN; 1335 1336 vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2; 1337 vsi = pf->vsi[vf->lan_vsi_idx]; 1338 if (!vsi->info.pvid) 1339 vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN; 1340 1341 if (i40e_vf_client_capable(pf, vf->vf_id, I40E_CLIENT_IWARP) && 1342 (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) { 1343 vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP; 1344 set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states); 1345 } 1346 1347 if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) { 1348 vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF; 1349 } else { 1350 if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) && 1351 (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)) 1352 vfres->vf_offload_flags |= 1353 I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ; 1354 else 1355 vfres->vf_offload_flags |= 1356 I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG; 1357 } 1358 1359 if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) { 1360 if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) 1361 vfres->vf_offload_flags |= 1362 I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; 1363 } 1364 1365 if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) { 1366 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 1367 dev_err(&pf->pdev->dev, 1368 "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", 1369 vf->vf_id); 1370 ret = I40E_ERR_PARAM; 1371 goto err; 1372 } 1373 vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING; 1374 } 1375 1376 if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) { 1377 if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 1378 vfres->vf_offload_flags |= 1379 I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; 1380 } 1381 1382 vfres->num_vsis = num_vsis; 1383 vfres->num_queue_pairs = vf->num_queue_pairs; 1384 vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; 1385 vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; 1386 vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; 1387 1388 if (vf->lan_vsi_idx) { 1389 vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; 1390 vfres->vsi_res[0].vsi_type = I40E_VSI_SRIOV; 1391 vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs; 1392 /* VFs only use TC 0 */ 1393 vfres->vsi_res[0].qset_handle 1394 = le16_to_cpu(vsi->info.qs_handle[0]); 1395 ether_addr_copy(vfres->vsi_res[0].default_mac_addr, 1396 vf->default_lan_addr.addr); 1397 } 1398 set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); 1399 1400 err: 1401 /* send the response back to the VF */ 1402 ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, 1403 aq_ret, (u8 *)vfres, len); 1404 1405 kfree(vfres); 1406 return ret; 1407 } 1408 1409 /** 1410 * i40e_vc_reset_vf_msg 1411 * @vf: pointer to the VF info 1412 * @msg: pointer to the msg buffer 1413 * @msglen: msg length 1414 * 1415 * called from the VF to reset itself, 1416 * unlike other virtchnl messages, PF driver 1417 * doesn't send the response back to the VF 1418 **/ 1419 static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) 1420 { 1421 if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) 1422 i40e_reset_vf(vf, false); 1423 } 1424 1425 /** 1426 * i40e_getnum_vf_vsi_vlan_filters 1427 * @vsi: pointer to the vsi 1428 * 1429 * called to get the number of VLANs offloaded on this VF 1430 **/ 1431 static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) 1432 { 1433 struct i40e_mac_filter *f; 1434 int num_vlans = 0; 1435 1436 list_for_each_entry(f, &vsi->mac_filter_list, list) { 1437 if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) 1438 num_vlans++; 1439 } 1440 1441 return num_vlans; 1442 } 1443 1444 /** 1445 * i40e_vc_config_promiscuous_mode_msg 1446 * @vf: pointer to the VF info 1447 * @msg: pointer to the msg buffer 1448 * @msglen: msg length 1449 * 1450 * called from the VF to configure the promiscuous mode of 1451 * VF vsis 1452 **/ 1453 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, 1454 u8 *msg, u16 msglen) 1455 { 1456 struct i40e_virtchnl_promisc_info *info = 1457 (struct i40e_virtchnl_promisc_info *)msg; 1458 struct i40e_pf *pf = vf->pf; 1459 struct i40e_hw *hw = &pf->hw; 1460 struct i40e_mac_filter *f; 1461 i40e_status aq_ret = 0; 1462 bool allmulti = false; 1463 struct i40e_vsi *vsi; 1464 bool alluni = false; 1465 int aq_err = 0; 1466 1467 vsi = i40e_find_vsi_from_id(pf, info->vsi_id); 1468 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 1469 !i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) { 1470 aq_ret = I40E_ERR_PARAM; 1471 goto error_param; 1472 } 1473 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 1474 dev_err(&pf->pdev->dev, 1475 "Unprivileged VF %d is attempting to configure promiscuous mode\n", 1476 vf->vf_id); 1477 /* Lie to the VF on purpose. */ 1478 aq_ret = 0; 1479 goto error_param; 1480 } 1481 /* Multicast promiscuous handling*/ 1482 if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC) 1483 allmulti = true; 1484 1485 if (vf->port_vlan_id) { 1486 aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid, 1487 allmulti, 1488 vf->port_vlan_id, 1489 NULL); 1490 } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { 1491 list_for_each_entry(f, &vsi->mac_filter_list, list) { 1492 if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) 1493 continue; 1494 aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, 1495 vsi->seid, 1496 allmulti, 1497 f->vlan, 1498 NULL); 1499 aq_err = pf->hw.aq.asq_last_status; 1500 if (aq_ret) { 1501 dev_err(&pf->pdev->dev, 1502 "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n", 1503 f->vlan, 1504 i40e_stat_str(&pf->hw, aq_ret), 1505 i40e_aq_str(&pf->hw, aq_err)); 1506 break; 1507 } 1508 } 1509 } else { 1510 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, 1511 allmulti, NULL); 1512 aq_err = pf->hw.aq.asq_last_status; 1513 if (aq_ret) { 1514 dev_err(&pf->pdev->dev, 1515 "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", 1516 vf->vf_id, 1517 i40e_stat_str(&pf->hw, aq_ret), 1518 i40e_aq_str(&pf->hw, aq_err)); 1519 goto error_param_int; 1520 } 1521 } 1522 1523 if (!aq_ret) { 1524 dev_info(&pf->pdev->dev, 1525 "VF %d successfully set multicast promiscuous mode\n", 1526 vf->vf_id); 1527 if (allmulti) 1528 set_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states); 1529 else 1530 clear_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states); 1531 } 1532 1533 if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC) 1534 alluni = true; 1535 if (vf->port_vlan_id) { 1536 aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid, 1537 alluni, 1538 vf->port_vlan_id, 1539 NULL); 1540 } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { 1541 list_for_each_entry(f, &vsi->mac_filter_list, list) { 1542 aq_ret = 0; 1543 if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) { 1544 aq_ret = 1545 i40e_aq_set_vsi_uc_promisc_on_vlan(hw, 1546 vsi->seid, 1547 alluni, 1548 f->vlan, 1549 NULL); 1550 aq_err = pf->hw.aq.asq_last_status; 1551 } 1552 if (aq_ret) 1553 dev_err(&pf->pdev->dev, 1554 "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n", 1555 f->vlan, 1556 i40e_stat_str(&pf->hw, aq_ret), 1557 i40e_aq_str(&pf->hw, aq_err)); 1558 } 1559 } else { 1560 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, 1561 allmulti, NULL, 1562 true); 1563 aq_err = pf->hw.aq.asq_last_status; 1564 if (aq_ret) 1565 dev_err(&pf->pdev->dev, 1566 "VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n", 1567 vf->vf_id, info->flags, 1568 i40e_stat_str(&pf->hw, aq_ret), 1569 i40e_aq_str(&pf->hw, aq_err)); 1570 } 1571 1572 error_param_int: 1573 if (!aq_ret) { 1574 dev_info(&pf->pdev->dev, 1575 "VF %d successfully set unicast promiscuous mode\n", 1576 vf->vf_id); 1577 if (alluni) 1578 set_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states); 1579 else 1580 clear_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states); 1581 } 1582 1583 error_param: 1584 /* send the response to the VF */ 1585 return i40e_vc_send_resp_to_vf(vf, 1586 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 1587 aq_ret); 1588 } 1589 1590 /** 1591 * i40e_vc_config_queues_msg 1592 * @vf: pointer to the VF info 1593 * @msg: pointer to the msg buffer 1594 * @msglen: msg length 1595 * 1596 * called from the VF to configure the rx/tx 1597 * queues 1598 **/ 1599 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1600 { 1601 struct i40e_virtchnl_vsi_queue_config_info *qci = 1602 (struct i40e_virtchnl_vsi_queue_config_info *)msg; 1603 struct i40e_virtchnl_queue_pair_info *qpi; 1604 struct i40e_pf *pf = vf->pf; 1605 u16 vsi_id, vsi_queue_id; 1606 i40e_status aq_ret = 0; 1607 int i; 1608 1609 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { 1610 aq_ret = I40E_ERR_PARAM; 1611 goto error_param; 1612 } 1613 1614 vsi_id = qci->vsi_id; 1615 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 1616 aq_ret = I40E_ERR_PARAM; 1617 goto error_param; 1618 } 1619 for (i = 0; i < qci->num_queue_pairs; i++) { 1620 qpi = &qci->qpair[i]; 1621 vsi_queue_id = qpi->txq.queue_id; 1622 if ((qpi->txq.vsi_id != vsi_id) || 1623 (qpi->rxq.vsi_id != vsi_id) || 1624 (qpi->rxq.queue_id != vsi_queue_id) || 1625 !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) { 1626 aq_ret = I40E_ERR_PARAM; 1627 goto error_param; 1628 } 1629 1630 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id, 1631 &qpi->rxq) || 1632 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id, 1633 &qpi->txq)) { 1634 aq_ret = I40E_ERR_PARAM; 1635 goto error_param; 1636 } 1637 } 1638 /* set vsi num_queue_pairs in use to num configured by VF */ 1639 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs; 1640 1641 error_param: 1642 /* send the response to the VF */ 1643 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, 1644 aq_ret); 1645 } 1646 1647 /** 1648 * i40e_vc_config_irq_map_msg 1649 * @vf: pointer to the VF info 1650 * @msg: pointer to the msg buffer 1651 * @msglen: msg length 1652 * 1653 * called from the VF to configure the irq to 1654 * queue map 1655 **/ 1656 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1657 { 1658 struct i40e_virtchnl_irq_map_info *irqmap_info = 1659 (struct i40e_virtchnl_irq_map_info *)msg; 1660 struct i40e_virtchnl_vector_map *map; 1661 u16 vsi_id, vsi_queue_id, vector_id; 1662 i40e_status aq_ret = 0; 1663 unsigned long tempmap; 1664 int i; 1665 1666 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { 1667 aq_ret = I40E_ERR_PARAM; 1668 goto error_param; 1669 } 1670 1671 for (i = 0; i < irqmap_info->num_vectors; i++) { 1672 map = &irqmap_info->vecmap[i]; 1673 1674 vector_id = map->vector_id; 1675 vsi_id = map->vsi_id; 1676 /* validate msg params */ 1677 if (!i40e_vc_isvalid_vector_id(vf, vector_id) || 1678 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 1679 aq_ret = I40E_ERR_PARAM; 1680 goto error_param; 1681 } 1682 1683 /* lookout for the invalid queue index */ 1684 tempmap = map->rxq_map; 1685 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 1686 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, 1687 vsi_queue_id)) { 1688 aq_ret = I40E_ERR_PARAM; 1689 goto error_param; 1690 } 1691 } 1692 1693 tempmap = map->txq_map; 1694 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 1695 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, 1696 vsi_queue_id)) { 1697 aq_ret = I40E_ERR_PARAM; 1698 goto error_param; 1699 } 1700 } 1701 1702 i40e_config_irq_link_list(vf, vsi_id, map); 1703 } 1704 error_param: 1705 /* send the response to the VF */ 1706 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, 1707 aq_ret); 1708 } 1709 1710 /** 1711 * i40e_vc_enable_queues_msg 1712 * @vf: pointer to the VF info 1713 * @msg: pointer to the msg buffer 1714 * @msglen: msg length 1715 * 1716 * called from the VF to enable all or specific queue(s) 1717 **/ 1718 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1719 { 1720 struct i40e_virtchnl_queue_select *vqs = 1721 (struct i40e_virtchnl_queue_select *)msg; 1722 struct i40e_pf *pf = vf->pf; 1723 u16 vsi_id = vqs->vsi_id; 1724 i40e_status aq_ret = 0; 1725 1726 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { 1727 aq_ret = I40E_ERR_PARAM; 1728 goto error_param; 1729 } 1730 1731 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 1732 aq_ret = I40E_ERR_PARAM; 1733 goto error_param; 1734 } 1735 1736 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { 1737 aq_ret = I40E_ERR_PARAM; 1738 goto error_param; 1739 } 1740 1741 if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true)) 1742 aq_ret = I40E_ERR_TIMEOUT; 1743 error_param: 1744 /* send the response to the VF */ 1745 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, 1746 aq_ret); 1747 } 1748 1749 /** 1750 * i40e_vc_disable_queues_msg 1751 * @vf: pointer to the VF info 1752 * @msg: pointer to the msg buffer 1753 * @msglen: msg length 1754 * 1755 * called from the VF to disable all or specific 1756 * queue(s) 1757 **/ 1758 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1759 { 1760 struct i40e_virtchnl_queue_select *vqs = 1761 (struct i40e_virtchnl_queue_select *)msg; 1762 struct i40e_pf *pf = vf->pf; 1763 i40e_status aq_ret = 0; 1764 1765 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { 1766 aq_ret = I40E_ERR_PARAM; 1767 goto error_param; 1768 } 1769 1770 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 1771 aq_ret = I40E_ERR_PARAM; 1772 goto error_param; 1773 } 1774 1775 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { 1776 aq_ret = I40E_ERR_PARAM; 1777 goto error_param; 1778 } 1779 1780 if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false)) 1781 aq_ret = I40E_ERR_TIMEOUT; 1782 1783 error_param: 1784 /* send the response to the VF */ 1785 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, 1786 aq_ret); 1787 } 1788 1789 /** 1790 * i40e_vc_get_stats_msg 1791 * @vf: pointer to the VF info 1792 * @msg: pointer to the msg buffer 1793 * @msglen: msg length 1794 * 1795 * called from the VF to get vsi stats 1796 **/ 1797 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1798 { 1799 struct i40e_virtchnl_queue_select *vqs = 1800 (struct i40e_virtchnl_queue_select *)msg; 1801 struct i40e_pf *pf = vf->pf; 1802 struct i40e_eth_stats stats; 1803 i40e_status aq_ret = 0; 1804 struct i40e_vsi *vsi; 1805 1806 memset(&stats, 0, sizeof(struct i40e_eth_stats)); 1807 1808 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { 1809 aq_ret = I40E_ERR_PARAM; 1810 goto error_param; 1811 } 1812 1813 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 1814 aq_ret = I40E_ERR_PARAM; 1815 goto error_param; 1816 } 1817 1818 vsi = pf->vsi[vf->lan_vsi_idx]; 1819 if (!vsi) { 1820 aq_ret = I40E_ERR_PARAM; 1821 goto error_param; 1822 } 1823 i40e_update_eth_stats(vsi); 1824 stats = vsi->eth_stats; 1825 1826 error_param: 1827 /* send the response back to the VF */ 1828 return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret, 1829 (u8 *)&stats, sizeof(stats)); 1830 } 1831 1832 /* If the VF is not trusted restrict the number of MAC/VLAN it can program */ 1833 #define I40E_VC_MAX_MAC_ADDR_PER_VF 8 1834 #define I40E_VC_MAX_VLAN_PER_VF 8 1835 1836 /** 1837 * i40e_check_vf_permission 1838 * @vf: pointer to the VF info 1839 * @macaddr: pointer to the MAC Address being checked 1840 * 1841 * Check if the VF has permission to add or delete unicast MAC address 1842 * filters and return error code -EPERM if not. Then check if the 1843 * address filter requested is broadcast or zero and if so return 1844 * an invalid MAC address error code. 1845 **/ 1846 static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr) 1847 { 1848 struct i40e_pf *pf = vf->pf; 1849 int ret = 0; 1850 1851 if (is_broadcast_ether_addr(macaddr) || 1852 is_zero_ether_addr(macaddr)) { 1853 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr); 1854 ret = I40E_ERR_INVALID_MAC_ADDR; 1855 } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) && 1856 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && 1857 !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) { 1858 /* If the host VMM administrator has set the VF MAC address 1859 * administratively via the ndo_set_vf_mac command then deny 1860 * permission to the VF to add or delete unicast MAC addresses. 1861 * Unless the VF is privileged and then it can do whatever. 1862 * The VF may request to set the MAC address filter already 1863 * assigned to it so do not return an error in that case. 1864 */ 1865 dev_err(&pf->pdev->dev, 1866 "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n"); 1867 ret = -EPERM; 1868 } else if ((vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF) && 1869 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 1870 dev_err(&pf->pdev->dev, 1871 "VF is not trusted, switch the VF to trusted to add more functionality\n"); 1872 ret = -EPERM; 1873 } 1874 return ret; 1875 } 1876 1877 /** 1878 * i40e_vc_add_mac_addr_msg 1879 * @vf: pointer to the VF info 1880 * @msg: pointer to the msg buffer 1881 * @msglen: msg length 1882 * 1883 * add guest mac address filter 1884 **/ 1885 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1886 { 1887 struct i40e_virtchnl_ether_addr_list *al = 1888 (struct i40e_virtchnl_ether_addr_list *)msg; 1889 struct i40e_pf *pf = vf->pf; 1890 struct i40e_vsi *vsi = NULL; 1891 u16 vsi_id = al->vsi_id; 1892 i40e_status ret = 0; 1893 int i; 1894 1895 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 1896 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 1897 ret = I40E_ERR_PARAM; 1898 goto error_param; 1899 } 1900 1901 for (i = 0; i < al->num_elements; i++) { 1902 ret = i40e_check_vf_permission(vf, al->list[i].addr); 1903 if (ret) 1904 goto error_param; 1905 } 1906 vsi = pf->vsi[vf->lan_vsi_idx]; 1907 1908 /* Lock once, because all function inside for loop accesses VSI's 1909 * MAC filter list which needs to be protected using same lock. 1910 */ 1911 spin_lock_bh(&vsi->mac_filter_list_lock); 1912 1913 /* add new addresses to the list */ 1914 for (i = 0; i < al->num_elements; i++) { 1915 struct i40e_mac_filter *f; 1916 1917 f = i40e_find_mac(vsi, al->list[i].addr, true, false); 1918 if (!f) { 1919 if (i40e_is_vsi_in_vlan(vsi)) 1920 f = i40e_put_mac_in_vlan(vsi, al->list[i].addr, 1921 true, false); 1922 else 1923 f = i40e_add_filter(vsi, al->list[i].addr, -1, 1924 true, false); 1925 } 1926 1927 if (!f) { 1928 dev_err(&pf->pdev->dev, 1929 "Unable to add MAC filter %pM for VF %d\n", 1930 al->list[i].addr, vf->vf_id); 1931 ret = I40E_ERR_PARAM; 1932 spin_unlock_bh(&vsi->mac_filter_list_lock); 1933 goto error_param; 1934 } else { 1935 vf->num_mac++; 1936 } 1937 } 1938 spin_unlock_bh(&vsi->mac_filter_list_lock); 1939 1940 /* program the updated filter list */ 1941 ret = i40e_sync_vsi_filters(vsi); 1942 if (ret) 1943 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 1944 vf->vf_id, ret); 1945 1946 error_param: 1947 /* send the response to the VF */ 1948 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, 1949 ret); 1950 } 1951 1952 /** 1953 * i40e_vc_del_mac_addr_msg 1954 * @vf: pointer to the VF info 1955 * @msg: pointer to the msg buffer 1956 * @msglen: msg length 1957 * 1958 * remove guest mac address filter 1959 **/ 1960 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 1961 { 1962 struct i40e_virtchnl_ether_addr_list *al = 1963 (struct i40e_virtchnl_ether_addr_list *)msg; 1964 struct i40e_pf *pf = vf->pf; 1965 struct i40e_vsi *vsi = NULL; 1966 u16 vsi_id = al->vsi_id; 1967 i40e_status ret = 0; 1968 int i; 1969 1970 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 1971 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 1972 ret = I40E_ERR_PARAM; 1973 goto error_param; 1974 } 1975 1976 for (i = 0; i < al->num_elements; i++) { 1977 if (is_broadcast_ether_addr(al->list[i].addr) || 1978 is_zero_ether_addr(al->list[i].addr)) { 1979 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n", 1980 al->list[i].addr, vf->vf_id); 1981 ret = I40E_ERR_INVALID_MAC_ADDR; 1982 goto error_param; 1983 } 1984 } 1985 vsi = pf->vsi[vf->lan_vsi_idx]; 1986 1987 spin_lock_bh(&vsi->mac_filter_list_lock); 1988 /* delete addresses from the list */ 1989 for (i = 0; i < al->num_elements; i++) 1990 if (i40e_del_mac_all_vlan(vsi, al->list[i].addr, true, false)) { 1991 ret = I40E_ERR_INVALID_MAC_ADDR; 1992 spin_unlock_bh(&vsi->mac_filter_list_lock); 1993 goto error_param; 1994 } else { 1995 vf->num_mac--; 1996 } 1997 1998 spin_unlock_bh(&vsi->mac_filter_list_lock); 1999 2000 /* program the updated filter list */ 2001 ret = i40e_sync_vsi_filters(vsi); 2002 if (ret) 2003 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", 2004 vf->vf_id, ret); 2005 2006 error_param: 2007 /* send the response to the VF */ 2008 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, 2009 ret); 2010 } 2011 2012 /** 2013 * i40e_vc_add_vlan_msg 2014 * @vf: pointer to the VF info 2015 * @msg: pointer to the msg buffer 2016 * @msglen: msg length 2017 * 2018 * program guest vlan id 2019 **/ 2020 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 2021 { 2022 struct i40e_virtchnl_vlan_filter_list *vfl = 2023 (struct i40e_virtchnl_vlan_filter_list *)msg; 2024 struct i40e_pf *pf = vf->pf; 2025 struct i40e_vsi *vsi = NULL; 2026 u16 vsi_id = vfl->vsi_id; 2027 i40e_status aq_ret = 0; 2028 int i; 2029 2030 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) && 2031 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 2032 dev_err(&pf->pdev->dev, 2033 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n"); 2034 goto error_param; 2035 } 2036 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 2037 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2038 aq_ret = I40E_ERR_PARAM; 2039 goto error_param; 2040 } 2041 2042 for (i = 0; i < vfl->num_elements; i++) { 2043 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 2044 aq_ret = I40E_ERR_PARAM; 2045 dev_err(&pf->pdev->dev, 2046 "invalid VF VLAN id %d\n", vfl->vlan_id[i]); 2047 goto error_param; 2048 } 2049 } 2050 vsi = pf->vsi[vf->lan_vsi_idx]; 2051 if (vsi->info.pvid) { 2052 aq_ret = I40E_ERR_PARAM; 2053 goto error_param; 2054 } 2055 2056 i40e_vlan_stripping_enable(vsi); 2057 for (i = 0; i < vfl->num_elements; i++) { 2058 /* add new VLAN filter */ 2059 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); 2060 if (!ret) 2061 vf->num_vlan++; 2062 2063 if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states)) 2064 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 2065 true, 2066 vfl->vlan_id[i], 2067 NULL); 2068 if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states)) 2069 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 2070 true, 2071 vfl->vlan_id[i], 2072 NULL); 2073 2074 if (ret) 2075 dev_err(&pf->pdev->dev, 2076 "Unable to add VLAN filter %d for VF %d, error %d\n", 2077 vfl->vlan_id[i], vf->vf_id, ret); 2078 } 2079 2080 error_param: 2081 /* send the response to the VF */ 2082 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret); 2083 } 2084 2085 /** 2086 * i40e_vc_remove_vlan_msg 2087 * @vf: pointer to the VF info 2088 * @msg: pointer to the msg buffer 2089 * @msglen: msg length 2090 * 2091 * remove programmed guest vlan id 2092 **/ 2093 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 2094 { 2095 struct i40e_virtchnl_vlan_filter_list *vfl = 2096 (struct i40e_virtchnl_vlan_filter_list *)msg; 2097 struct i40e_pf *pf = vf->pf; 2098 struct i40e_vsi *vsi = NULL; 2099 u16 vsi_id = vfl->vsi_id; 2100 i40e_status aq_ret = 0; 2101 int i; 2102 2103 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 2104 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2105 aq_ret = I40E_ERR_PARAM; 2106 goto error_param; 2107 } 2108 2109 for (i = 0; i < vfl->num_elements; i++) { 2110 if (vfl->vlan_id[i] > I40E_MAX_VLANID) { 2111 aq_ret = I40E_ERR_PARAM; 2112 goto error_param; 2113 } 2114 } 2115 2116 vsi = pf->vsi[vf->lan_vsi_idx]; 2117 if (vsi->info.pvid) { 2118 aq_ret = I40E_ERR_PARAM; 2119 goto error_param; 2120 } 2121 2122 for (i = 0; i < vfl->num_elements; i++) { 2123 int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); 2124 if (!ret) 2125 vf->num_vlan--; 2126 2127 if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states)) 2128 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, 2129 false, 2130 vfl->vlan_id[i], 2131 NULL); 2132 if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states)) 2133 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, 2134 false, 2135 vfl->vlan_id[i], 2136 NULL); 2137 2138 if (ret) 2139 dev_err(&pf->pdev->dev, 2140 "Unable to delete VLAN filter %d for VF %d, error %d\n", 2141 vfl->vlan_id[i], vf->vf_id, ret); 2142 } 2143 2144 error_param: 2145 /* send the response to the VF */ 2146 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret); 2147 } 2148 2149 /** 2150 * i40e_vc_iwarp_msg 2151 * @vf: pointer to the VF info 2152 * @msg: pointer to the msg buffer 2153 * @msglen: msg length 2154 * 2155 * called from the VF for the iwarp msgs 2156 **/ 2157 static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) 2158 { 2159 struct i40e_pf *pf = vf->pf; 2160 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; 2161 i40e_status aq_ret = 0; 2162 2163 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 2164 !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) { 2165 aq_ret = I40E_ERR_PARAM; 2166 goto error_param; 2167 } 2168 2169 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id, 2170 msg, msglen); 2171 2172 error_param: 2173 /* send the response to the VF */ 2174 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_IWARP, 2175 aq_ret); 2176 } 2177 2178 /** 2179 * i40e_vc_iwarp_qvmap_msg 2180 * @vf: pointer to the VF info 2181 * @msg: pointer to the msg buffer 2182 * @msglen: msg length 2183 * @config: config qvmap or release it 2184 * 2185 * called from the VF for the iwarp msgs 2186 **/ 2187 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen, 2188 bool config) 2189 { 2190 struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info = 2191 (struct i40e_virtchnl_iwarp_qvlist_info *)msg; 2192 i40e_status aq_ret = 0; 2193 2194 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 2195 !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) { 2196 aq_ret = I40E_ERR_PARAM; 2197 goto error_param; 2198 } 2199 2200 if (config) { 2201 if (i40e_config_iwarp_qvlist(vf, qvlist_info)) 2202 aq_ret = I40E_ERR_PARAM; 2203 } else { 2204 i40e_release_iwarp_qvlist(vf); 2205 } 2206 2207 error_param: 2208 /* send the response to the VF */ 2209 return i40e_vc_send_resp_to_vf(vf, 2210 config ? I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP : 2211 I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, 2212 aq_ret); 2213 } 2214 2215 /** 2216 * i40e_vc_config_rss_key 2217 * @vf: pointer to the VF info 2218 * @msg: pointer to the msg buffer 2219 * @msglen: msg length 2220 * 2221 * Configure the VF's RSS key 2222 **/ 2223 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen) 2224 { 2225 struct i40e_virtchnl_rss_key *vrk = 2226 (struct i40e_virtchnl_rss_key *)msg; 2227 struct i40e_pf *pf = vf->pf; 2228 struct i40e_vsi *vsi = NULL; 2229 u16 vsi_id = vrk->vsi_id; 2230 i40e_status aq_ret = 0; 2231 2232 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 2233 !i40e_vc_isvalid_vsi_id(vf, vsi_id) || 2234 (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { 2235 aq_ret = I40E_ERR_PARAM; 2236 goto err; 2237 } 2238 2239 vsi = pf->vsi[vf->lan_vsi_idx]; 2240 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0); 2241 err: 2242 /* send the response to the VF */ 2243 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY, 2244 aq_ret); 2245 } 2246 2247 /** 2248 * i40e_vc_config_rss_lut 2249 * @vf: pointer to the VF info 2250 * @msg: pointer to the msg buffer 2251 * @msglen: msg length 2252 * 2253 * Configure the VF's RSS LUT 2254 **/ 2255 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen) 2256 { 2257 struct i40e_virtchnl_rss_lut *vrl = 2258 (struct i40e_virtchnl_rss_lut *)msg; 2259 struct i40e_pf *pf = vf->pf; 2260 struct i40e_vsi *vsi = NULL; 2261 u16 vsi_id = vrl->vsi_id; 2262 i40e_status aq_ret = 0; 2263 2264 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 2265 !i40e_vc_isvalid_vsi_id(vf, vsi_id) || 2266 (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { 2267 aq_ret = I40E_ERR_PARAM; 2268 goto err; 2269 } 2270 2271 vsi = pf->vsi[vf->lan_vsi_idx]; 2272 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); 2273 /* send the response to the VF */ 2274 err: 2275 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT, 2276 aq_ret); 2277 } 2278 2279 /** 2280 * i40e_vc_get_rss_hena 2281 * @vf: pointer to the VF info 2282 * @msg: pointer to the msg buffer 2283 * @msglen: msg length 2284 * 2285 * Return the RSS HENA bits allowed by the hardware 2286 **/ 2287 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) 2288 { 2289 struct i40e_virtchnl_rss_hena *vrh = NULL; 2290 struct i40e_pf *pf = vf->pf; 2291 i40e_status aq_ret = 0; 2292 int len = 0; 2293 2294 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { 2295 aq_ret = I40E_ERR_PARAM; 2296 goto err; 2297 } 2298 len = sizeof(struct i40e_virtchnl_rss_hena); 2299 2300 vrh = kzalloc(len, GFP_KERNEL); 2301 if (!vrh) { 2302 aq_ret = I40E_ERR_NO_MEMORY; 2303 len = 0; 2304 goto err; 2305 } 2306 vrh->hena = i40e_pf_get_default_rss_hena(pf); 2307 err: 2308 /* send the response back to the VF */ 2309 aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS, 2310 aq_ret, (u8 *)vrh, len); 2311 return aq_ret; 2312 } 2313 2314 /** 2315 * i40e_vc_set_rss_hena 2316 * @vf: pointer to the VF info 2317 * @msg: pointer to the msg buffer 2318 * @msglen: msg length 2319 * 2320 * Set the RSS HENA bits for the VF 2321 **/ 2322 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) 2323 { 2324 struct i40e_virtchnl_rss_hena *vrh = 2325 (struct i40e_virtchnl_rss_hena *)msg; 2326 struct i40e_pf *pf = vf->pf; 2327 struct i40e_hw *hw = &pf->hw; 2328 i40e_status aq_ret = 0; 2329 2330 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { 2331 aq_ret = I40E_ERR_PARAM; 2332 goto err; 2333 } 2334 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena); 2335 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id), 2336 (u32)(vrh->hena >> 32)); 2337 2338 /* send the response to the VF */ 2339 err: 2340 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_SET_RSS_HENA, 2341 aq_ret); 2342 } 2343 2344 /** 2345 * i40e_vc_validate_vf_msg 2346 * @vf: pointer to the VF info 2347 * @msg: pointer to the msg buffer 2348 * @msglen: msg length 2349 * @msghndl: msg handle 2350 * 2351 * validate msg 2352 **/ 2353 static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, 2354 u32 v_retval, u8 *msg, u16 msglen) 2355 { 2356 bool err_msg_format = false; 2357 int valid_len = 0; 2358 2359 /* Check if VF is disabled. */ 2360 if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states)) 2361 return I40E_ERR_PARAM; 2362 2363 /* Validate message length. */ 2364 switch (v_opcode) { 2365 case I40E_VIRTCHNL_OP_VERSION: 2366 valid_len = sizeof(struct i40e_virtchnl_version_info); 2367 break; 2368 case I40E_VIRTCHNL_OP_RESET_VF: 2369 break; 2370 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: 2371 if (VF_IS_V11(vf)) 2372 valid_len = sizeof(u32); 2373 break; 2374 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: 2375 valid_len = sizeof(struct i40e_virtchnl_txq_info); 2376 break; 2377 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE: 2378 valid_len = sizeof(struct i40e_virtchnl_rxq_info); 2379 break; 2380 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: 2381 valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info); 2382 if (msglen >= valid_len) { 2383 struct i40e_virtchnl_vsi_queue_config_info *vqc = 2384 (struct i40e_virtchnl_vsi_queue_config_info *)msg; 2385 valid_len += (vqc->num_queue_pairs * 2386 sizeof(struct 2387 i40e_virtchnl_queue_pair_info)); 2388 if (vqc->num_queue_pairs == 0) 2389 err_msg_format = true; 2390 } 2391 break; 2392 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: 2393 valid_len = sizeof(struct i40e_virtchnl_irq_map_info); 2394 if (msglen >= valid_len) { 2395 struct i40e_virtchnl_irq_map_info *vimi = 2396 (struct i40e_virtchnl_irq_map_info *)msg; 2397 valid_len += (vimi->num_vectors * 2398 sizeof(struct i40e_virtchnl_vector_map)); 2399 if (vimi->num_vectors == 0) 2400 err_msg_format = true; 2401 } 2402 break; 2403 case I40E_VIRTCHNL_OP_ENABLE_QUEUES: 2404 case I40E_VIRTCHNL_OP_DISABLE_QUEUES: 2405 valid_len = sizeof(struct i40e_virtchnl_queue_select); 2406 break; 2407 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: 2408 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: 2409 valid_len = sizeof(struct i40e_virtchnl_ether_addr_list); 2410 if (msglen >= valid_len) { 2411 struct i40e_virtchnl_ether_addr_list *veal = 2412 (struct i40e_virtchnl_ether_addr_list *)msg; 2413 valid_len += veal->num_elements * 2414 sizeof(struct i40e_virtchnl_ether_addr); 2415 if (veal->num_elements == 0) 2416 err_msg_format = true; 2417 } 2418 break; 2419 case I40E_VIRTCHNL_OP_ADD_VLAN: 2420 case I40E_VIRTCHNL_OP_DEL_VLAN: 2421 valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list); 2422 if (msglen >= valid_len) { 2423 struct i40e_virtchnl_vlan_filter_list *vfl = 2424 (struct i40e_virtchnl_vlan_filter_list *)msg; 2425 valid_len += vfl->num_elements * sizeof(u16); 2426 if (vfl->num_elements == 0) 2427 err_msg_format = true; 2428 } 2429 break; 2430 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 2431 valid_len = sizeof(struct i40e_virtchnl_promisc_info); 2432 break; 2433 case I40E_VIRTCHNL_OP_GET_STATS: 2434 valid_len = sizeof(struct i40e_virtchnl_queue_select); 2435 break; 2436 case I40E_VIRTCHNL_OP_IWARP: 2437 /* These messages are opaque to us and will be validated in 2438 * the RDMA client code. We just need to check for nonzero 2439 * length. The firmware will enforce max length restrictions. 2440 */ 2441 if (msglen) 2442 valid_len = msglen; 2443 else 2444 err_msg_format = true; 2445 break; 2446 case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: 2447 valid_len = 0; 2448 break; 2449 case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 2450 valid_len = sizeof(struct i40e_virtchnl_iwarp_qvlist_info); 2451 if (msglen >= valid_len) { 2452 struct i40e_virtchnl_iwarp_qvlist_info *qv = 2453 (struct i40e_virtchnl_iwarp_qvlist_info *)msg; 2454 if (qv->num_vectors == 0) { 2455 err_msg_format = true; 2456 break; 2457 } 2458 valid_len += ((qv->num_vectors - 1) * 2459 sizeof(struct i40e_virtchnl_iwarp_qv_info)); 2460 } 2461 break; 2462 case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY: 2463 valid_len = sizeof(struct i40e_virtchnl_rss_key); 2464 if (msglen >= valid_len) { 2465 struct i40e_virtchnl_rss_key *vrk = 2466 (struct i40e_virtchnl_rss_key *)msg; 2467 if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) { 2468 err_msg_format = true; 2469 break; 2470 } 2471 valid_len += vrk->key_len - 1; 2472 } 2473 break; 2474 case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT: 2475 valid_len = sizeof(struct i40e_virtchnl_rss_lut); 2476 if (msglen >= valid_len) { 2477 struct i40e_virtchnl_rss_lut *vrl = 2478 (struct i40e_virtchnl_rss_lut *)msg; 2479 if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) { 2480 err_msg_format = true; 2481 break; 2482 } 2483 valid_len += vrl->lut_entries - 1; 2484 } 2485 break; 2486 case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: 2487 break; 2488 case I40E_VIRTCHNL_OP_SET_RSS_HENA: 2489 valid_len = sizeof(struct i40e_virtchnl_rss_hena); 2490 break; 2491 /* These are always errors coming from the VF. */ 2492 case I40E_VIRTCHNL_OP_EVENT: 2493 case I40E_VIRTCHNL_OP_UNKNOWN: 2494 default: 2495 return -EPERM; 2496 } 2497 /* few more checks */ 2498 if ((valid_len != msglen) || (err_msg_format)) { 2499 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); 2500 return -EINVAL; 2501 } else { 2502 return 0; 2503 } 2504 } 2505 2506 /** 2507 * i40e_vc_process_vf_msg 2508 * @pf: pointer to the PF structure 2509 * @vf_id: source VF id 2510 * @msg: pointer to the msg buffer 2511 * @msglen: msg length 2512 * @msghndl: msg handle 2513 * 2514 * called from the common aeq/arq handler to 2515 * process request from VF 2516 **/ 2517 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, 2518 u32 v_retval, u8 *msg, u16 msglen) 2519 { 2520 struct i40e_hw *hw = &pf->hw; 2521 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id; 2522 struct i40e_vf *vf; 2523 int ret; 2524 2525 pf->vf_aq_requests++; 2526 if (local_vf_id >= pf->num_alloc_vfs) 2527 return -EINVAL; 2528 vf = &(pf->vf[local_vf_id]); 2529 /* perform basic checks on the msg */ 2530 ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen); 2531 2532 if (ret) { 2533 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", 2534 local_vf_id, v_opcode, msglen); 2535 return ret; 2536 } 2537 2538 switch (v_opcode) { 2539 case I40E_VIRTCHNL_OP_VERSION: 2540 ret = i40e_vc_get_version_msg(vf, msg); 2541 break; 2542 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: 2543 ret = i40e_vc_get_vf_resources_msg(vf, msg); 2544 break; 2545 case I40E_VIRTCHNL_OP_RESET_VF: 2546 i40e_vc_reset_vf_msg(vf); 2547 ret = 0; 2548 break; 2549 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 2550 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen); 2551 break; 2552 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: 2553 ret = i40e_vc_config_queues_msg(vf, msg, msglen); 2554 break; 2555 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: 2556 ret = i40e_vc_config_irq_map_msg(vf, msg, msglen); 2557 break; 2558 case I40E_VIRTCHNL_OP_ENABLE_QUEUES: 2559 ret = i40e_vc_enable_queues_msg(vf, msg, msglen); 2560 i40e_vc_notify_vf_link_state(vf); 2561 break; 2562 case I40E_VIRTCHNL_OP_DISABLE_QUEUES: 2563 ret = i40e_vc_disable_queues_msg(vf, msg, msglen); 2564 break; 2565 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: 2566 ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen); 2567 break; 2568 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: 2569 ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen); 2570 break; 2571 case I40E_VIRTCHNL_OP_ADD_VLAN: 2572 ret = i40e_vc_add_vlan_msg(vf, msg, msglen); 2573 break; 2574 case I40E_VIRTCHNL_OP_DEL_VLAN: 2575 ret = i40e_vc_remove_vlan_msg(vf, msg, msglen); 2576 break; 2577 case I40E_VIRTCHNL_OP_GET_STATS: 2578 ret = i40e_vc_get_stats_msg(vf, msg, msglen); 2579 break; 2580 case I40E_VIRTCHNL_OP_IWARP: 2581 ret = i40e_vc_iwarp_msg(vf, msg, msglen); 2582 break; 2583 case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 2584 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true); 2585 break; 2586 case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: 2587 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false); 2588 break; 2589 case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY: 2590 ret = i40e_vc_config_rss_key(vf, msg, msglen); 2591 break; 2592 case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT: 2593 ret = i40e_vc_config_rss_lut(vf, msg, msglen); 2594 break; 2595 case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: 2596 ret = i40e_vc_get_rss_hena(vf, msg, msglen); 2597 break; 2598 case I40E_VIRTCHNL_OP_SET_RSS_HENA: 2599 ret = i40e_vc_set_rss_hena(vf, msg, msglen); 2600 break; 2601 2602 case I40E_VIRTCHNL_OP_UNKNOWN: 2603 default: 2604 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", 2605 v_opcode, local_vf_id); 2606 ret = i40e_vc_send_resp_to_vf(vf, v_opcode, 2607 I40E_ERR_NOT_IMPLEMENTED); 2608 break; 2609 } 2610 2611 return ret; 2612 } 2613 2614 /** 2615 * i40e_vc_process_vflr_event 2616 * @pf: pointer to the PF structure 2617 * 2618 * called from the vlfr irq handler to 2619 * free up VF resources and state variables 2620 **/ 2621 int i40e_vc_process_vflr_event(struct i40e_pf *pf) 2622 { 2623 struct i40e_hw *hw = &pf->hw; 2624 u32 reg, reg_idx, bit_idx; 2625 struct i40e_vf *vf; 2626 int vf_id; 2627 2628 if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) 2629 return 0; 2630 2631 /* Re-enable the VFLR interrupt cause here, before looking for which 2632 * VF got reset. Otherwise, if another VF gets a reset while the 2633 * first one is being processed, that interrupt will be lost, and 2634 * that VF will be stuck in reset forever. 2635 */ 2636 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 2637 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; 2638 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 2639 i40e_flush(hw); 2640 2641 clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); 2642 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { 2643 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; 2644 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 2645 /* read GLGEN_VFLRSTAT register to find out the flr VFs */ 2646 vf = &pf->vf[vf_id]; 2647 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); 2648 if (reg & BIT(bit_idx)) 2649 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */ 2650 i40e_reset_vf(vf, true); 2651 } 2652 2653 return 0; 2654 } 2655 2656 /** 2657 * i40e_ndo_set_vf_mac 2658 * @netdev: network interface device structure 2659 * @vf_id: VF identifier 2660 * @mac: mac address 2661 * 2662 * program VF mac address 2663 **/ 2664 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 2665 { 2666 struct i40e_netdev_priv *np = netdev_priv(netdev); 2667 struct i40e_vsi *vsi = np->vsi; 2668 struct i40e_pf *pf = vsi->back; 2669 struct i40e_mac_filter *f; 2670 struct i40e_vf *vf; 2671 int ret = 0; 2672 2673 /* validate the request */ 2674 if (vf_id >= pf->num_alloc_vfs) { 2675 dev_err(&pf->pdev->dev, 2676 "Invalid VF Identifier %d\n", vf_id); 2677 ret = -EINVAL; 2678 goto error_param; 2679 } 2680 2681 vf = &(pf->vf[vf_id]); 2682 vsi = pf->vsi[vf->lan_vsi_idx]; 2683 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 2684 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", 2685 vf_id); 2686 ret = -EAGAIN; 2687 goto error_param; 2688 } 2689 2690 if (is_multicast_ether_addr(mac)) { 2691 dev_err(&pf->pdev->dev, 2692 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); 2693 ret = -EINVAL; 2694 goto error_param; 2695 } 2696 2697 /* Lock once because below invoked function add/del_filter requires 2698 * mac_filter_list_lock to be held 2699 */ 2700 spin_lock_bh(&vsi->mac_filter_list_lock); 2701 2702 /* delete the temporary mac address */ 2703 if (!is_zero_ether_addr(vf->default_lan_addr.addr)) 2704 i40e_del_filter(vsi, vf->default_lan_addr.addr, 2705 vf->port_vlan_id ? vf->port_vlan_id : -1, 2706 true, false); 2707 2708 /* Delete all the filters for this VSI - we're going to kill it 2709 * anyway. 2710 */ 2711 list_for_each_entry(f, &vsi->mac_filter_list, list) 2712 i40e_del_filter(vsi, f->macaddr, f->vlan, true, false); 2713 2714 spin_unlock_bh(&vsi->mac_filter_list_lock); 2715 2716 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); 2717 /* program mac filter */ 2718 if (i40e_sync_vsi_filters(vsi)) { 2719 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 2720 ret = -EIO; 2721 goto error_param; 2722 } 2723 ether_addr_copy(vf->default_lan_addr.addr, mac); 2724 vf->pf_set_mac = true; 2725 /* Force the VF driver stop so it has to reload with new MAC address */ 2726 i40e_vc_disable_vf(pf, vf); 2727 dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); 2728 2729 error_param: 2730 return ret; 2731 } 2732 2733 /** 2734 * i40e_ndo_set_vf_port_vlan 2735 * @netdev: network interface device structure 2736 * @vf_id: VF identifier 2737 * @vlan_id: mac address 2738 * @qos: priority setting 2739 * 2740 * program VF vlan id and/or qos 2741 **/ 2742 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, 2743 int vf_id, u16 vlan_id, u8 qos) 2744 { 2745 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT); 2746 struct i40e_netdev_priv *np = netdev_priv(netdev); 2747 struct i40e_pf *pf = np->vsi->back; 2748 bool is_vsi_in_vlan = false; 2749 struct i40e_vsi *vsi; 2750 struct i40e_vf *vf; 2751 int ret = 0; 2752 2753 /* validate the request */ 2754 if (vf_id >= pf->num_alloc_vfs) { 2755 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 2756 ret = -EINVAL; 2757 goto error_pvid; 2758 } 2759 2760 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) { 2761 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); 2762 ret = -EINVAL; 2763 goto error_pvid; 2764 } 2765 2766 vf = &(pf->vf[vf_id]); 2767 vsi = pf->vsi[vf->lan_vsi_idx]; 2768 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 2769 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", 2770 vf_id); 2771 ret = -EAGAIN; 2772 goto error_pvid; 2773 } 2774 2775 if (le16_to_cpu(vsi->info.pvid) == vlanprio) 2776 /* duplicate request, so just return success */ 2777 goto error_pvid; 2778 2779 spin_lock_bh(&vsi->mac_filter_list_lock); 2780 is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi); 2781 spin_unlock_bh(&vsi->mac_filter_list_lock); 2782 2783 if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) { 2784 dev_err(&pf->pdev->dev, 2785 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", 2786 vf_id); 2787 /* Administrator Error - knock the VF offline until he does 2788 * the right thing by reconfiguring his network correctly 2789 * and then reloading the VF driver. 2790 */ 2791 i40e_vc_disable_vf(pf, vf); 2792 /* During reset the VF got a new VSI, so refresh the pointer. */ 2793 vsi = pf->vsi[vf->lan_vsi_idx]; 2794 } 2795 2796 /* Check for condition where there was already a port VLAN ID 2797 * filter set and now it is being deleted by setting it to zero. 2798 * Additionally check for the condition where there was a port 2799 * VLAN but now there is a new and different port VLAN being set. 2800 * Before deleting all the old VLAN filters we must add new ones 2801 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our 2802 * MAC addresses deleted. 2803 */ 2804 if ((!(vlan_id || qos) || 2805 vlanprio != le16_to_cpu(vsi->info.pvid)) && 2806 vsi->info.pvid) 2807 ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY); 2808 2809 if (vsi->info.pvid) { 2810 /* kill old VLAN */ 2811 ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) & 2812 VLAN_VID_MASK)); 2813 if (ret) { 2814 dev_info(&vsi->back->pdev->dev, 2815 "remove VLAN failed, ret=%d, aq_err=%d\n", 2816 ret, pf->hw.aq.asq_last_status); 2817 } 2818 } 2819 if (vlan_id || qos) 2820 ret = i40e_vsi_add_pvid(vsi, vlanprio); 2821 else 2822 i40e_vsi_remove_pvid(vsi); 2823 2824 if (vlan_id) { 2825 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", 2826 vlan_id, qos, vf_id); 2827 2828 /* add new VLAN filter */ 2829 ret = i40e_vsi_add_vlan(vsi, vlan_id); 2830 if (ret) { 2831 dev_info(&vsi->back->pdev->dev, 2832 "add VF VLAN failed, ret=%d aq_err=%d\n", ret, 2833 vsi->back->hw.aq.asq_last_status); 2834 goto error_pvid; 2835 } 2836 /* Kill non-vlan MAC filters - ignore error return since 2837 * there might not be any non-vlan MAC filters. 2838 */ 2839 i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY); 2840 } 2841 2842 if (ret) { 2843 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n"); 2844 goto error_pvid; 2845 } 2846 /* The Port VLAN needs to be saved across resets the same as the 2847 * default LAN MAC address. 2848 */ 2849 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); 2850 ret = 0; 2851 2852 error_pvid: 2853 return ret; 2854 } 2855 2856 #define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */ 2857 #define I40E_MAX_BW_INACTIVE_ACCUM 4 /* device can accumulate 4 credits max */ 2858 /** 2859 * i40e_ndo_set_vf_bw 2860 * @netdev: network interface device structure 2861 * @vf_id: VF identifier 2862 * @tx_rate: Tx rate 2863 * 2864 * configure VF Tx rate 2865 **/ 2866 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, 2867 int max_tx_rate) 2868 { 2869 struct i40e_netdev_priv *np = netdev_priv(netdev); 2870 struct i40e_pf *pf = np->vsi->back; 2871 struct i40e_vsi *vsi; 2872 struct i40e_vf *vf; 2873 int speed = 0; 2874 int ret = 0; 2875 2876 /* validate the request */ 2877 if (vf_id >= pf->num_alloc_vfs) { 2878 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id); 2879 ret = -EINVAL; 2880 goto error; 2881 } 2882 2883 if (min_tx_rate) { 2884 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", 2885 min_tx_rate, vf_id); 2886 return -EINVAL; 2887 } 2888 2889 vf = &(pf->vf[vf_id]); 2890 vsi = pf->vsi[vf->lan_vsi_idx]; 2891 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 2892 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", 2893 vf_id); 2894 ret = -EAGAIN; 2895 goto error; 2896 } 2897 2898 switch (pf->hw.phy.link_info.link_speed) { 2899 case I40E_LINK_SPEED_40GB: 2900 speed = 40000; 2901 break; 2902 case I40E_LINK_SPEED_20GB: 2903 speed = 20000; 2904 break; 2905 case I40E_LINK_SPEED_10GB: 2906 speed = 10000; 2907 break; 2908 case I40E_LINK_SPEED_1GB: 2909 speed = 1000; 2910 break; 2911 default: 2912 break; 2913 } 2914 2915 if (max_tx_rate > speed) { 2916 dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.", 2917 max_tx_rate, vf->vf_id); 2918 ret = -EINVAL; 2919 goto error; 2920 } 2921 2922 if ((max_tx_rate < 50) && (max_tx_rate > 0)) { 2923 dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n"); 2924 max_tx_rate = 50; 2925 } 2926 2927 /* Tx rate credits are in values of 50Mbps, 0 is disabled*/ 2928 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, 2929 max_tx_rate / I40E_BW_CREDIT_DIVISOR, 2930 I40E_MAX_BW_INACTIVE_ACCUM, NULL); 2931 if (ret) { 2932 dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n", 2933 ret); 2934 ret = -EIO; 2935 goto error; 2936 } 2937 vf->tx_rate = max_tx_rate; 2938 error: 2939 return ret; 2940 } 2941 2942 /** 2943 * i40e_ndo_get_vf_config 2944 * @netdev: network interface device structure 2945 * @vf_id: VF identifier 2946 * @ivi: VF configuration structure 2947 * 2948 * return VF configuration 2949 **/ 2950 int i40e_ndo_get_vf_config(struct net_device *netdev, 2951 int vf_id, struct ifla_vf_info *ivi) 2952 { 2953 struct i40e_netdev_priv *np = netdev_priv(netdev); 2954 struct i40e_vsi *vsi = np->vsi; 2955 struct i40e_pf *pf = vsi->back; 2956 struct i40e_vf *vf; 2957 int ret = 0; 2958 2959 /* validate the request */ 2960 if (vf_id >= pf->num_alloc_vfs) { 2961 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 2962 ret = -EINVAL; 2963 goto error_param; 2964 } 2965 2966 vf = &(pf->vf[vf_id]); 2967 /* first vsi is always the LAN vsi */ 2968 vsi = pf->vsi[vf->lan_vsi_idx]; 2969 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 2970 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", 2971 vf_id); 2972 ret = -EAGAIN; 2973 goto error_param; 2974 } 2975 2976 ivi->vf = vf_id; 2977 2978 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr); 2979 2980 ivi->max_tx_rate = vf->tx_rate; 2981 ivi->min_tx_rate = 0; 2982 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK; 2983 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >> 2984 I40E_VLAN_PRIORITY_SHIFT; 2985 if (vf->link_forced == false) 2986 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; 2987 else if (vf->link_up == true) 2988 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 2989 else 2990 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 2991 ivi->spoofchk = vf->spoofchk; 2992 ret = 0; 2993 2994 error_param: 2995 return ret; 2996 } 2997 2998 /** 2999 * i40e_ndo_set_vf_link_state 3000 * @netdev: network interface device structure 3001 * @vf_id: VF identifier 3002 * @link: required link state 3003 * 3004 * Set the link state of a specified VF, regardless of physical link state 3005 **/ 3006 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) 3007 { 3008 struct i40e_netdev_priv *np = netdev_priv(netdev); 3009 struct i40e_pf *pf = np->vsi->back; 3010 struct i40e_virtchnl_pf_event pfe; 3011 struct i40e_hw *hw = &pf->hw; 3012 struct i40e_vf *vf; 3013 int abs_vf_id; 3014 int ret = 0; 3015 3016 /* validate the request */ 3017 if (vf_id >= pf->num_alloc_vfs) { 3018 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 3019 ret = -EINVAL; 3020 goto error_out; 3021 } 3022 3023 vf = &pf->vf[vf_id]; 3024 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 3025 3026 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; 3027 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; 3028 3029 switch (link) { 3030 case IFLA_VF_LINK_STATE_AUTO: 3031 vf->link_forced = false; 3032 pfe.event_data.link_event.link_status = 3033 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; 3034 pfe.event_data.link_event.link_speed = 3035 pf->hw.phy.link_info.link_speed; 3036 break; 3037 case IFLA_VF_LINK_STATE_ENABLE: 3038 vf->link_forced = true; 3039 vf->link_up = true; 3040 pfe.event_data.link_event.link_status = true; 3041 pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB; 3042 break; 3043 case IFLA_VF_LINK_STATE_DISABLE: 3044 vf->link_forced = true; 3045 vf->link_up = false; 3046 pfe.event_data.link_event.link_status = false; 3047 pfe.event_data.link_event.link_speed = 0; 3048 break; 3049 default: 3050 ret = -EINVAL; 3051 goto error_out; 3052 } 3053 /* Notify the VF of its new link state */ 3054 i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, 3055 0, (u8 *)&pfe, sizeof(pfe), NULL); 3056 3057 error_out: 3058 return ret; 3059 } 3060 3061 /** 3062 * i40e_ndo_set_vf_spoofchk 3063 * @netdev: network interface device structure 3064 * @vf_id: VF identifier 3065 * @enable: flag to enable or disable feature 3066 * 3067 * Enable or disable VF spoof checking 3068 **/ 3069 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) 3070 { 3071 struct i40e_netdev_priv *np = netdev_priv(netdev); 3072 struct i40e_vsi *vsi = np->vsi; 3073 struct i40e_pf *pf = vsi->back; 3074 struct i40e_vsi_context ctxt; 3075 struct i40e_hw *hw = &pf->hw; 3076 struct i40e_vf *vf; 3077 int ret = 0; 3078 3079 /* validate the request */ 3080 if (vf_id >= pf->num_alloc_vfs) { 3081 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 3082 ret = -EINVAL; 3083 goto out; 3084 } 3085 3086 vf = &(pf->vf[vf_id]); 3087 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 3088 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", 3089 vf_id); 3090 ret = -EAGAIN; 3091 goto out; 3092 } 3093 3094 if (enable == vf->spoofchk) 3095 goto out; 3096 3097 vf->spoofchk = enable; 3098 memset(&ctxt, 0, sizeof(ctxt)); 3099 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid; 3100 ctxt.pf_num = pf->hw.pf_id; 3101 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 3102 if (enable) 3103 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | 3104 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); 3105 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 3106 if (ret) { 3107 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", 3108 ret); 3109 ret = -EIO; 3110 } 3111 out: 3112 return ret; 3113 } 3114 3115 /** 3116 * i40e_ndo_set_vf_trust 3117 * @netdev: network interface device structure of the pf 3118 * @vf_id: VF identifier 3119 * @setting: trust setting 3120 * 3121 * Enable or disable VF trust setting 3122 **/ 3123 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) 3124 { 3125 struct i40e_netdev_priv *np = netdev_priv(netdev); 3126 struct i40e_pf *pf = np->vsi->back; 3127 struct i40e_vf *vf; 3128 int ret = 0; 3129 3130 /* validate the request */ 3131 if (vf_id >= pf->num_alloc_vfs) { 3132 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); 3133 return -EINVAL; 3134 } 3135 3136 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 3137 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n"); 3138 return -EINVAL; 3139 } 3140 3141 vf = &pf->vf[vf_id]; 3142 3143 if (!vf) 3144 return -EINVAL; 3145 if (setting == vf->trusted) 3146 goto out; 3147 3148 vf->trusted = setting; 3149 i40e_vc_notify_vf_reset(vf); 3150 i40e_reset_vf(vf, false); 3151 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", 3152 vf_id, setting ? "" : "un"); 3153 out: 3154 return ret; 3155 } 3156