/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

/* IOV ramrods */
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
        struct vf_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;
        u8 fp_minor;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_vf->opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_VF_START,
                                 PROTOCOLID_COMMON, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.vf_start;

        p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
        p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);

        switch (p_hwfn->hw_info.personality) {
        case QED_PCI_ETH:
                p_ramrod->personality = PERSONALITY_ETH;
                break;
        case QED_PCI_ETH_ROCE:
                p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
                          p_hwfn->hw_info.personality);
                return -EINVAL;
        }

        fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
        if (fp_minor > ETH_HSI_VER_MINOR &&
            fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
                           p_vf->abs_vf_id,
                           ETH_HSI_VER_MAJOR,
                           fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
                fp_minor = ETH_HSI_VER_MINOR;
        }

        p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
        p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "VF[%d] - Starting using HSI %02x.%02x\n",
                   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
                          u32 concrete_vfid, u16 opaque_vfid)
{
        struct vf_stop_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_vfid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_VF_STOP,
                                 PROTOCOLID_COMMON, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.vf_stop;

        p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
                                  int rel_vf_id, bool b_enabled_only)
{
        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->cdev, "No iov info\n");
                return false;
        }

        if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
            (rel_vf_id < 0))
                return false;
        if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
            b_enabled_only)
                return false;

        return true;
}

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
                                               u16 relative_vf_id,
                                               bool b_enabled_only)
{
        struct qed_vf_info *vf = NULL;

        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->cdev, "No iov info\n");
                return NULL;
        }

        if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
                vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
        else
                DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
                       relative_vf_id);

        return vf;
}

static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
                                 struct qed_vf_info *p_vf, u16 rx_qid)
{
        if (rx_qid >= p_vf->num_rxqs)
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
                           p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
        return rx_qid < p_vf->num_rxqs;
}

static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
                                 struct qed_vf_info *p_vf, u16 tx_qid)
{
        if (tx_qid >= p_vf->num_txqs)
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
                           p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
        return tx_qid < p_vf->num_txqs;
}

static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
                                struct qed_vf_info *p_vf, u16 sb_idx)
{
        int i;

        for (i = 0; i < p_vf->num_sbs; i++)
                if (p_vf->igu_sbs[i] == sb_idx)
                        return true;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
                   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

        return false;
}

static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
                                    int vfid, struct qed_ptt *p_ptt)
{
        struct qed_bulletin_content *p_bulletin;
        int crc_size = sizeof(p_bulletin->crc);
        struct qed_dmae_params params;
        struct qed_vf_info *p_vf;

        p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
        if (!p_vf)
                return -EINVAL;

        if (!p_vf->vf_bulletin)
                return -EINVAL;

        p_bulletin = p_vf->bulletin.p_virt;

        /* Increment bulletin board version and compute crc */
        p_bulletin->version++;
        p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
                                p_vf->bulletin.size - crc_size);

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
                   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

        /* propagate bulletin board via dmae to vm memory */
        memset(&params, 0, sizeof(params));
        params.flags = QED_DMAE_FLAG_VF_DST;
        params.dst_vfid = p_vf->abs_vf_id;
        return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
                                  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
                                  &params);
}

static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
        struct qed_hw_sriov_info *iov = cdev->p_iov_info;
        int pos = iov->pos;

        DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
        pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

        pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
        if (iov->num_vfs) {
                DP_VERBOSE(cdev,
                           QED_MSG_IOV,
                           "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
                iov->num_vfs = 0;
        }

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

        pci_read_config_dword(cdev->pdev,
                              pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

        pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

        pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

        DP_VERBOSE(cdev,
                   QED_MSG_IOV,
                   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
                   iov->nres,
                   iov->cap,
                   iov->ctrl,
                   iov->total_vfs,
                   iov->initial_vfs,
                   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

        /* Some sanity checks */
        if (iov->num_vfs > NUM_OF_VFS(cdev) ||
            iov->total_vfs > NUM_OF_VFS(cdev)) {
                /* This can happen only due to a bug. In this case we set
                 * num_vfs to zero to avoid memory corruption in the code that
                 * assumes max number of vfs
                 */
                DP_NOTICE(cdev,
                          "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
                          iov->num_vfs);

                iov->num_vfs = 0;
                iov->total_vfs = 0;
        }

        return 0;
}

static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
                                        struct qed_ptt *p_ptt)
{
        struct qed_igu_block *p_sb;
        u16 sb_id;
        u32 val;

        if (!p_hwfn->hw_info.p_igu_info) {
                DP_ERR(p_hwfn,
                       "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
                return;
        }

        for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
             sb_id++) {
                p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
                if ((p_sb->status & QED_IGU_STATUS_FREE) &&
                    !(p_sb->status & QED_IGU_STATUS_PF)) {
                        val = qed_rd(p_hwfn, p_ptt,
                                     IGU_REG_MAPPING_MEMORY + sb_id * 4);
                        SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
                        qed_wr(p_hwfn, p_ptt,
                               IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
                }
        }
}

static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
        struct qed_bulletin_content *p_bulletin_virt;
        dma_addr_t req_p, rply_p, bulletin_p;
        union pfvf_tlvs *p_reply_virt_addr;
        union vfpf_tlvs *p_req_virt_addr;
        u8 idx = 0;

        memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

        p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
        req_p = p_iov_info->mbx_msg_phys_addr;
        p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
        rply_p = p_iov_info->mbx_reply_phys_addr;
        p_bulletin_virt = p_iov_info->p_bulletins;
        bulletin_p = p_iov_info->bulletins_phys;
        if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
                DP_ERR(p_hwfn,
                       "qed_iov_setup_vfdb called without allocating mem first\n");
                return;
        }

        for (idx = 0; idx < p_iov->total_vfs; idx++) {
                struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
                u32 concrete;

                vf->vf_mbx.req_virt = p_req_virt_addr + idx;
                vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
                vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
                vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

                vf->state = VF_STOPPED;
                vf->b_init = false;

                vf->bulletin.phys = idx *
                                    sizeof(struct qed_bulletin_content) +
                                    bulletin_p;
                vf->bulletin.p_virt = p_bulletin_virt + idx;
                vf->bulletin.size = sizeof(struct qed_bulletin_content);

                vf->relative_vf_id = idx;
                vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
                concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
                vf->concrete_fid = concrete;
                vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
                                 (vf->abs_vf_id << 8);
                vf->vport_id = idx + 1;

                vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
                vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
        }
}

static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
        void **p_v_addr;
        u16 num_vfs = 0;

        num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

        /* Allocate PF Mailbox buffer (per-VF) */
        p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
        p_v_addr = &p_iov_info->mbx_msg_virt_addr;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->mbx_msg_size,
                                       &p_iov_info->mbx_msg_phys_addr,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        /* Allocate PF Mailbox Reply buffer (per-VF) */
        p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
        p_v_addr = &p_iov_info->mbx_reply_virt_addr;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->mbx_reply_size,
                                       &p_iov_info->mbx_reply_phys_addr,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
                                     num_vfs;
        p_v_addr = &p_iov_info->p_bulletins;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->bulletins_size,
                                       &p_iov_info->bulletins_phys,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
                   p_iov_info->mbx_msg_virt_addr,
                   (u64) p_iov_info->mbx_msg_phys_addr,
                   p_iov_info->mbx_reply_virt_addr,
                   (u64) p_iov_info->mbx_reply_phys_addr,
                   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

        return 0;
}

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

        if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->mbx_msg_size,
                                  p_iov_info->mbx_msg_virt_addr,
                                  p_iov_info->mbx_msg_phys_addr);

        if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->mbx_reply_size,
                                  p_iov_info->mbx_reply_virt_addr,
                                  p_iov_info->mbx_reply_phys_addr);

        if (p_iov_info->p_bulletins)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->bulletins_size,
                                  p_iov_info->p_bulletins,
                                  p_iov_info->bulletins_phys);
}

int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_sriov;

        if (!IS_PF_SRIOV(p_hwfn)) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "No SR-IOV - no need for IOV db\n");
                return 0;
        }

        p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
        if (!p_sriov)
                return -ENOMEM;

        p_hwfn->pf_iov_info = p_sriov;

        return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
                return;

        qed_iov_setup_vfdb(p_hwfn);
        qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
        if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
                qed_iov_free_vfdb(p_hwfn);
                kfree(p_hwfn->pf_iov_info);
        }
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
        kfree(cdev->p_iov_info);
        cdev->p_iov_info = NULL;
}

int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        int pos;
        int rc;

        if (IS_VF(p_hwfn->cdev))
                return 0;

        /* Learn the PCI configuration */
        pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
                                      PCI_EXT_CAP_ID_SRIOV);
        if (!pos) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
                return 0;
        }

        /* Allocate a new struct for IOV information */
        cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
        if (!cdev->p_iov_info)
                return -ENOMEM;

        cdev->p_iov_info->pos = pos;

        rc = qed_iov_pci_cfg_info(cdev);
        if (rc)
                return rc;

        /* We want PF IOV to be synonymous with the existence of p_iov_info;
         * In case the capability is published but there are no VFs, simply
         * de-allocate the struct.
         */
        if (!cdev->p_iov_info->total_vfs) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "IOV capabilities, but no VFs are published\n");
                kfree(cdev->p_iov_info);
                cdev->p_iov_info = NULL;
                return 0;
        }

        /* Calculate the first VF index - this is a bit tricky; Basically,
         * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
         * after the first engine's VFs.
         */
        cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
                                           p_hwfn->abs_pf_id - 16;
        if (QED_PATH_ID(p_hwfn))
                cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "First VF in hwfn 0x%08x\n",
                   cdev->p_iov_info->first_vf_in_pf);

        return 0;
}

static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
        /* Check PF supports sriov */
        if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
            !IS_PF_SRIOV_ALLOC(p_hwfn))
                return false;

        /* Check VF validity */
        if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
                return false;

        return true;
}

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
                                      u16 rel_vf_id, u8 to_disable)
{
        struct qed_vf_info *vf;
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
                if (!vf)
                        continue;

                vf->to_disable = to_disable;
        }
}

static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
        u16 i;

        if (!IS_QED_SRIOV(cdev))
                return;

        for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
                qed_iov_set_vf_to_disable(cdev, i, to_disable);
}

static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
                                       struct qed_ptt *p_ptt, u8 abs_vfid)
{
        qed_wr(p_hwfn, p_ptt,
               PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
               1 << (abs_vfid & 0x1f));
}

static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
        int i;

        /* Set VF masks and configuration - pretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

        qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

        /* unpretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

        /* iterate over all queues, clear sb consumer */
        for (i = 0; i < vf->num_sbs; i++)
                qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
                                                vf->igu_sbs[i],
                                                vf->opaque_fid, true);
}

static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_vf_info *vf, bool enable)
{
        u32 igu_vf_conf;

        qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

        igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

        if (enable)
                igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
        else
                igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

        qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

        /* unpretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}

static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt,
                                    struct qed_vf_info *vf)
{
        u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
        int rc;

        if (vf->to_disable)
                return 0;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "Enable internal access for vf %x [abs %x]\n",
                   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

        qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

        qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

        rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
        if (rc)
                return rc;

        qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

        SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
        STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

        qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
                     p_hwfn->hw_info.hw_mode);

        /* unpretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

        vf->state = VF_FREE;

        return rc;
}

/**
 * @brief qed_iov_config_perm_table - configure the permission
 *      zone table.
 *      In E4, queue zone permission table size is 320x9. There
 *      are 320 VF queues for single engine device (256 for dual
 *      engine device), and each entry has the following format:
 *      {Valid, VF[7:0]}
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_vf_info *vf, u8 enable)
{
        u32 reg_addr, val;
        u16 qzone_id = 0;
        int qid;

        for (qid = 0; qid < vf->num_rxqs; qid++) {
                qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
                                &qzone_id);

                reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
                val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
                qed_wr(p_hwfn, p_ptt, reg_addr, val);
        }
}

static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_vf_info *vf)
{
        /* Reset vf in IGU - interrupts are still disabled */
        qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

        qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

        /* Permission Table */
        qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}

static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_vf_info *vf, u16 num_rx_queues)
{
        struct qed_igu_block *igu_blocks;
        int qid = 0, igu_id = 0;
        u32 val = 0;

        igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;

        if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
                num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
        p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;

        SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
        SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
        SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

        while ((qid < num_rx_queues) &&
               (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
                if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
                        struct cau_sb_entry sb_entry;

                        vf->igu_sbs[qid] = (u16)igu_id;
                        igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;

                        SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

                        qed_wr(p_hwfn, p_ptt,
                               IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
                               val);

                        /* Configure igu sb in CAU which were marked valid */
                        qed_init_cau_sb_entry(p_hwfn, &sb_entry,
                                              p_hwfn->rel_pf_id,
                                              vf->abs_vf_id, 1);
                        qed_dmae_host2grc(p_hwfn, p_ptt,
                                          (u64)(uintptr_t)&sb_entry,
                                          CAU_REG_SB_VAR_MEMORY +
                                          igu_id * sizeof(u64), 2, 0);
                        qid++;
                }
                igu_id++;
        }

        vf->num_sbs = (u8) num_rx_queues;

        return vf->num_sbs;
}

static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt,
                                    struct qed_vf_info *vf)
{
        struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
        int idx, igu_id;
        u32 addr, val;

        /* Invalidate igu CAM lines and mark them as free */
        for (idx = 0; idx < vf->num_sbs; idx++) {
                igu_id = vf->igu_sbs[idx];
                addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

                val = qed_rd(p_hwfn, p_ptt, addr);
                SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
                qed_wr(p_hwfn, p_ptt, addr, val);

                p_info->igu_map.igu_blocks[igu_id].status |=
                    QED_IGU_STATUS_FREE;

                p_hwfn->hw_info.p_igu_info->free_blks++;
        }

        vf->num_sbs = 0;
}

static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
                                  u16 rel_vf_id, u16 num_rx_queues)
{
        u8 num_of_vf_avaiable_chains = 0;
        struct qed_vf_info *vf = NULL;
        int rc = 0;
        u32 cids;
        u8 i;

        vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
        if (!vf) {
                DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
                return -EINVAL;
        }

        if (vf->b_init) {
                DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
                return -EINVAL;
        }

        /* Limit number of queues according to number of CIDs */
        qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
                   vf->relative_vf_id, num_rx_queues, (u16) cids);
        num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));
        num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
                                                             p_ptt,
                                                             vf,
                                                             num_rx_queues);
        if (!num_of_vf_avaiable_chains) {
                DP_ERR(p_hwfn, "no available igu sbs\n");
                return -ENOMEM;
        }

        /* Choose queue number and index ranges */
        vf->num_rxqs = num_of_vf_avaiable_chains;
        vf->num_txqs = num_of_vf_avaiable_chains;

        for (i = 0; i < vf->num_rxqs; i++) {
                u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
                                                           vf->igu_sbs[i]);

                if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
                        DP_NOTICE(p_hwfn,
                                  "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
                                  vf->relative_vf_id, queue_id);
                        return -EINVAL;
                }

                /* CIDs are per-VF, so no problem having them 0-based. */
                vf->vf_queues[i].fw_rx_qid = queue_id;
                vf->vf_queues[i].fw_tx_qid = queue_id;
                vf->vf_queues[i].fw_cid = i;

                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
                           vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
        }
        rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
        if (!rc) {
                vf->b_init = true;

                if (IS_LEAD_HWFN(p_hwfn))
                        p_hwfn->cdev->p_iov_info->num_vfs++;
        }

        return rc;
}

static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
                             u16 vfid,
                             struct qed_mcp_link_params *params,
                             struct qed_mcp_link_state *link,
                             struct qed_mcp_link_capabilities *p_caps)
{
        struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
                                                       vfid,
                                                       false);
        struct qed_bulletin_content *p_bulletin;

        if (!p_vf)
                return;

        p_bulletin = p_vf->bulletin.p_virt;
        p_bulletin->req_autoneg = params->speed.autoneg;
        p_bulletin->req_adv_speed = params->speed.advertised_speeds;
        p_bulletin->req_forced_speed = params->speed.forced_speed;
        p_bulletin->req_autoneg_pause = params->pause.autoneg;
        p_bulletin->req_forced_rx = params->pause.forced_rx;
        p_bulletin->req_forced_tx = params->pause.forced_tx;
        p_bulletin->req_loopback = params->loopback_mode;

        p_bulletin->link_up = link->link_up;
        p_bulletin->speed = link->speed;
        p_bulletin->full_duplex = link->full_duplex;
        p_bulletin->autoneg = link->an;
        p_bulletin->autoneg_complete = link->an_complete;
        p_bulletin->parallel_detection = link->parallel_detection;
        p_bulletin->pfc_enabled = link->pfc_enabled;
        p_bulletin->partner_adv_speed = link->partner_adv_speed;
        p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
        p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
        p_bulletin->partner_adv_pause = link->partner_adv_pause;
        p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

        p_bulletin->capability_speed = p_caps->speed_capabilities;
}

static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
        struct qed_mcp_link_capabilities caps;
        struct qed_mcp_link_params params;
        struct qed_mcp_link_state link;
        struct qed_vf_info *vf = NULL;

        vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
        if (!vf) {
                DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
                return -EINVAL;
        }

        if (vf->bulletin.p_virt)
                memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

        memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

        /* Get the link configuration back in bulletin so
         * that when VFs are re-enabled they get the actual
         * link configuration.
         */
        memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
        memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
        memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
        qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

        /* Forget the VF's acquisition message */
        memset(&vf->acquire, 0, sizeof(vf->acquire));

        /* disabling interrupts and resetting permission table was done during
         * vf-close, however, we could get here without going through vf_close
         */
        /* Disable Interrupts for VF */
        qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

        /* Reset Permission table */
        qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

        vf->num_rxqs = 0;
        vf->num_txqs = 0;
        qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

        if (vf->b_init) {
                vf->b_init = false;

                if (IS_LEAD_HWFN(p_hwfn))
                        p_hwfn->cdev->p_iov_info->num_vfs--;
        }

        return 0;
}

static bool qed_iov_tlv_supported(u16 tlvtype)
{
        return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
        struct channel_tlv *tl = (struct channel_tlv *)*offset;

        tl->type = type;
        tl->length = length;

        /* Offset should keep pointing to next TLV (the end of the last) */
        *offset += length;

        /* Return a pointer to the start of the added tlv */
        return *offset - length;
}

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
        u16 i = 1, total_length = 0;
        struct channel_tlv *tlv;

        do {
                tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

                /* output tlv */
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "TLV number %d: type %d, length %d\n",
                           i, tlv->type, tlv->length);

                if (tlv->type == CHANNEL_TLV_LIST_END)
                        return;

                /* Validate entry - protect against malicious VFs */
                if (!tlv->length) {
                        DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
                        return;
                }

                total_length += tlv->length;

                if (total_length >= sizeof(struct tlv_buffer_size)) {
                        DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
                        return;
                }

                i++;
        } while (1);
}

static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
                                  struct qed_vf_info *p_vf,
                                  u16 length, u8 status)
{
        struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
        struct qed_dmae_params params;
        u8 eng_vf_id;

        mbx->reply_virt->default_resp.hdr.status = status;

        qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

        eng_vf_id = p_vf->abs_vf_id;

        memset(&params, 0, sizeof(struct qed_dmae_params));
        params.flags = QED_DMAE_FLAG_VF_DST;
        params.dst_vfid = eng_vf_id;

        qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
                           mbx->req_virt->first_tlv.reply_address +
                           sizeof(u64),
                           (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
                           &params);

        qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
                           mbx->req_virt->first_tlv.reply_address,
                           sizeof(u64) / 4, &params);

        REG_WR(p_hwfn,
               GTT_BAR0_MAP_REG_USDM_RAM +
               USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
}

static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
                                enum qed_iov_vport_update_flag flag)
{
        switch (flag) {
        case QED_IOV_VP_UPDATE_ACTIVATE:
                return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
        case QED_IOV_VP_UPDATE_VLAN_STRIP:
                return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
        case QED_IOV_VP_UPDATE_TX_SWITCH:
                return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
        case QED_IOV_VP_UPDATE_MCAST:
                return CHANNEL_TLV_VPORT_UPDATE_MCAST;
        case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
                return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
        case QED_IOV_VP_UPDATE_RSS:
                return CHANNEL_TLV_VPORT_UPDATE_RSS;
        case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
                return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
        case QED_IOV_VP_UPDATE_SGE_TPA:
                return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
        default:
                return 0;
        }
}

static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
                                            struct qed_vf_info *p_vf,
                                            struct qed_iov_vf_mbx *p_mbx,
                                            u8 status,
                                            u16 tlvs_mask, u16 tlvs_accepted)
{
        struct pfvf_def_resp_tlv *resp;
        u16 size, total_len, i;

        memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
        p_mbx->offset = (u8 *)p_mbx->reply_virt;
        size = sizeof(struct pfvf_def_resp_tlv);
        total_len = size;

        qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

        /* Prepare response for all extended tlvs if they are found by PF */
        for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
                if (!(tlvs_mask & BIT(i)))
                        continue;

                resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
                                   qed_iov_vport_to_tlv(p_hwfn, i), size);

                if (tlvs_accepted & BIT(i))
                        resp->hdr.status = status;
                else
                        resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[%d] - vport_update response: TLV %d, status %02x\n",
                           p_vf->relative_vf_id,
                           qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

                total_len += size;
        }

        qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));

        return total_len;
}

static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 struct qed_vf_info *vf_info,
                                 u16 type, u16 length, u8 status)
{
        struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

        mbx->offset = (u8 *)mbx->reply_virt;

        qed_add_tlv(p_hwfn, &mbx->offset, type, length);
        qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));

        qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

static struct
qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
                                               u16 relative_vf_id,
                                               bool b_enabled_only)
{
        struct qed_vf_info *vf = NULL;

        vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
        if (!vf)
                return NULL;

        return &vf->p_vf_info;
}

static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
        struct qed_public_vf_info *vf_info;

        vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);

        if (!vf_info)
                return;

        /* Clear the VF mac */
        memset(vf_info->mac, 0, ETH_ALEN);
}

static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
                               struct qed_vf_info *p_vf)
{
        u32 i;

        p_vf->vf_bulletin = 0;
        p_vf->vport_instance = 0;
        p_vf->configured_features = 0;

        /* If VF previously requested less resources, go back to default */
        p_vf->num_rxqs = p_vf->num_sbs;
        p_vf->num_txqs = p_vf->num_sbs;

        p_vf->num_active_rxqs = 0;

        for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
                p_vf->vf_queues[i].rxq_active = 0;

        memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
        memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
        qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}

static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_vf_info *p_vf,
                                      struct vf_pf_resc_request *p_req,
                                      struct pf_vf_resc *p_resp)
{
        int i;

        /* Queue related information */
        p_resp->num_rxqs = p_vf->num_rxqs;
        p_resp->num_txqs = p_vf->num_txqs;
        p_resp->num_sbs = p_vf->num_sbs;

        for (i = 0; i < p_resp->num_sbs; i++) {
                p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
                p_resp->hw_sbs[i].sb_qid = 0;
        }

        /* These fields are filled for backward compatibility.
         * Unused by modern vfs.
         */
        for (i = 0; i < p_resp->num_rxqs; i++) {
                qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
                                (u16 *)&p_resp->hw_qid[i]);
                p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
        }

        /* Filter related information */
        p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
                                        p_req->num_mac_filters);
        p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
                                         p_req->num_vlan_filters);

        /* This isn't really needed/enforced, but some legacy VFs might depend
         * on the correct filling of this field.
         */
        p_resp->num_mc_filters = QED_MAX_MC_ADDRS;

        /* Validate sufficient resources for VF */
        if (p_resp->num_rxqs < p_req->num_rxqs ||
            p_resp->num_txqs < p_req->num_txqs ||
            p_resp->num_sbs < p_req->num_sbs ||
            p_resp->num_mac_filters < p_req->num_mac_filters ||
            p_resp->num_vlan_filters < p_req->num_vlan_filters ||
            p_resp->num_mc_filters < p_req->num_mc_filters) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
                           p_vf->abs_vf_id,
                           p_req->num_rxqs,
                           p_resp->num_rxqs,
                           p_req->num_txqs,
                           p_resp->num_txqs,
                           p_req->num_sbs,
                           p_resp->num_sbs,
                           p_req->num_mac_filters,
                           p_resp->num_mac_filters,
                           p_req->num_vlan_filters,
                           p_resp->num_vlan_filters,
                           p_req->num_mc_filters, p_resp->num_mc_filters);

                /* Some legacy OSes are incapable of correctly handling this
                 * failure.
                 */
                if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
                     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
                    (p_vf->acquire.vfdev_info.os_type ==
                     VFPF_ACQUIRE_OS_WINDOWS))
                        return PFVF_STATUS_SUCCESS;

                return PFVF_STATUS_NO_RESOURCE;
        }

        return PFVF_STATUS_SUCCESS;
}

static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
                                         struct pfvf_stats_info *p_stats)
{
        p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
                                  offsetof(struct mstorm_vf_zone,
                                           non_trigger.eth_queue_stat);
        p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
        p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
                                  offsetof(struct ustorm_vf_zone,
                                           non_trigger.eth_queue_stat);
        p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
        p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
                                  offsetof(struct pstorm_vf_zone,
                                           non_trigger.eth_queue_stat);
        p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
        p_stats->tstats.address = 0;
        p_stats->tstats.len = 0;
}

static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_vf_info *vf)
{
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
        struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
        struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
        u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
        struct pf_vf_resc *resc = &resp->resc;
        int rc;

        memset(resp, 0, sizeof(*resp));

        /* Write the PF version so that VF would know which version
         * is supported - might be later overridden. This guarantees that
         * VF could recognize legacy PF based on lack of versions in reply.
         */
        pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
        pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

        if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
                           vf->abs_vf_id, vf->state);
                goto out;
        }

        /* Validate FW compatibility */
        if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
                if (req->vfdev_info.capabilities &
                    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
                        struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                                   "VF[%d] is pre-fastpath HSI\n",
                                   vf->abs_vf_id);
                        p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
                        p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
                } else {
                        DP_INFO(p_hwfn,
                                "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
                                vf->abs_vf_id,
                                req->vfdev_info.eth_fp_hsi_major,
                                req->vfdev_info.eth_fp_hsi_minor,
                                ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

                        goto out;
                }
        }

        /* On 100g PFs, prevent old VFs from loading */
        if ((p_hwfn->cdev->num_hwfns > 1) &&
            !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
                DP_INFO(p_hwfn,
                        "VF[%d] is running an old driver that doesn't support 100g\n",
                        vf->abs_vf_id);
                goto out;
        }

        /* Store the acquire message */
        memcpy(&vf->acquire, req, sizeof(vf->acquire));

        vf->opaque_fid = req->vfdev_info.opaque_fid;

        vf->vf_bulletin = req->bulletin_addr;
        vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
                            vf->bulletin.size : req->bulletin_size;
        /* fill in pfdev info */
        pfdev_info->chip_num = p_hwfn->cdev->chip_num;
        pfdev_info->db_size = 0;
        pfdev_info->indices_per_sb = PIS_PER_SB;

        pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
                                   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
        if (p_hwfn->cdev->num_hwfns > 1)
                pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

        qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);

        memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

        pfdev_info->fw_major = FW_MAJOR_VERSION;
        pfdev_info->fw_minor = FW_MINOR_VERSION;
        pfdev_info->fw_rev = FW_REVISION_VERSION;
        pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

        /* Incorrect when legacy, but doesn't matter as legacy isn't reading
         * this field.
         */
        pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
                                         req->vfdev_info.eth_fp_hsi_minor);
        pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
        qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

        pfdev_info->dev_type = p_hwfn->cdev->type;
        pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

        /* Fill resources available to VF; Make sure there are enough to
         * satisfy the VF's request.
         */
        vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
                                                  &req->resc_request, resc);
        if (vfpf_status != PFVF_STATUS_SUCCESS)
                goto out;

        /* Start the VF in FW */
        rc = qed_sp_vf_start(p_hwfn, vf);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
                vfpf_status = PFVF_STATUS_FAILURE;
                goto out;
        }

        /* Fill agreed size of bulletin board in response */
        resp->bulletin_size = vf->bulletin.size;
        qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
                   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
                   vf->abs_vf_id,
                   resp->pfdev_info.chip_num,
                   resp->pfdev_info.db_size,
                   resp->pfdev_info.indices_per_sb,
                   resp->pfdev_info.capabilities,
                   resc->num_rxqs,
                   resc->num_txqs,
                   resc->num_sbs,
                   resc->num_mac_filters,
                   resc->num_vlan_filters);
        vf->state = VF_ACQUIRED;

        /* Prepare Response */
out:
        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
                             sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
}

static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
                                  struct qed_vf_info *p_vf, bool val)
{
        struct qed_sp_vport_update_params params;
        int rc;

        if (val == p_vf->spoof_chk) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "Spoofchk value[%d] is already configured\n", val);
                return 0;
        }

        memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
        params.opaque_fid = p_vf->opaque_fid;
        params.vport_id = p_vf->vport_id;
        params.update_anti_spoofing_en_flg = 1;
        params.anti_spoofing_en = val;

        rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
        if (!rc) {
                p_vf->spoof_chk = val;
                p_vf->req_spoofchk_val = p_vf->spoof_chk;
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "Spoofchk val[%d] configured\n", val);
        } else {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "Spoofchk configuration[val:%d] failed for VF[%d]\n",
                           val, p_vf->relative_vf_id);
        }

        return rc;
}
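/* Note: the helpers below replay a VF's unicast shadow configuration.
 * qed_iov_reconfigure_unicast_vlan() re-adds every VLAN filter recorded in
 * p_vf->shadow_config, and qed_iov_reconfigure_unicast_shadow() invokes it
 * once a forced-VLAN feature is no longer in effect, so the VF gets back the
 * filters it had configured on its own.
 */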
static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
                                            struct qed_vf_info *p_vf)
{
        struct qed_filter_ucast filter;
        int rc = 0;
        int i;

        memset(&filter, 0, sizeof(filter));
        filter.is_rx_filter = 1;
        filter.is_tx_filter = 1;
        filter.vport_to_add_to = p_vf->vport_id;
        filter.opcode = QED_FILTER_ADD;

        /* Reconfigure vlans */
        for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
                if (!p_vf->shadow_config.vlans[i].used)
                        continue;

                filter.type = QED_FILTER_VLAN;
                filter.vlan = p_vf->shadow_config.vlans[i].vid;
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
                           filter.vlan, p_vf->relative_vf_id);
                rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
                                             &filter, QED_SPQ_MODE_CB, NULL);
                if (rc) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to configure VLAN [%04x] to VF [%04x]\n",
                                  filter.vlan, p_vf->relative_vf_id);
                        break;
                }
        }

        return rc;
}

static int
qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
                                   struct qed_vf_info *p_vf, u64 events)
{
        int rc = 0;

        if ((events & BIT(VLAN_ADDR_FORCED)) &&
            !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
                rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

        return rc;
}

static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
                                          struct qed_vf_info *p_vf, u64 events)
{
        int rc = 0;
        struct qed_filter_ucast filter;

        if (!p_vf->vport_instance)
                return -EINVAL;

        if (events & BIT(MAC_ADDR_FORCED)) {
                /* Since there's no way [currently] of removing the MAC,
                 * we can always assume this means we need to force it.
                 */
                memset(&filter, 0, sizeof(filter));
                filter.type = QED_FILTER_MAC;
                filter.opcode = QED_FILTER_REPLACE;
                filter.is_rx_filter = 1;
                filter.is_tx_filter = 1;
                filter.vport_to_add_to = p_vf->vport_id;
                ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);

                rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
                                             &filter, QED_SPQ_MODE_CB, NULL);
                if (rc) {
                        DP_NOTICE(p_hwfn,
                                  "PF failed to configure MAC for VF\n");
                        return rc;
                }

                p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
        }

        if (events & BIT(VLAN_ADDR_FORCED)) {
                struct qed_sp_vport_update_params vport_update;
                u8 removal;
                int i;

                memset(&filter, 0, sizeof(filter));
                filter.type = QED_FILTER_VLAN;
                filter.is_rx_filter = 1;
                filter.is_tx_filter = 1;
                filter.vport_to_add_to = p_vf->vport_id;
                filter.vlan = p_vf->bulletin.p_virt->pvid;
                filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
                                              QED_FILTER_FLUSH;

                /* Send the ramrod */
                rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
                                             &filter, QED_SPQ_MODE_CB, NULL);
                if (rc) {
                        DP_NOTICE(p_hwfn,
                                  "PF failed to configure VLAN for VF\n");
                        return rc;
                }

                /* Update the default-vlan & silent vlan stripping */
                memset(&vport_update, 0, sizeof(vport_update));
                vport_update.opaque_fid = p_vf->opaque_fid;
                vport_update.vport_id = p_vf->vport_id;
                vport_update.update_default_vlan_enable_flg = 1;
                vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
                vport_update.update_default_vlan_flg = 1;
                vport_update.default_vlan = filter.vlan;

                vport_update.update_inner_vlan_removal_flg = 1;
                removal = filter.vlan ? 1
                                      : p_vf->shadow_config.inner_vlan_removal;
                vport_update.inner_vlan_removal_flg = removal;
                vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
                rc = qed_sp_vport_update(p_hwfn,
                                         &vport_update,
                                         QED_SPQ_MODE_EBLOCK, NULL);
                if (rc) {
                        DP_NOTICE(p_hwfn,
                                  "PF failed to configure VF vport for vlan\n");
                        return rc;
                }

                /* Update all the Rx queues */
                for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
                        u16 qid;

                        if (!p_vf->vf_queues[i].rxq_active)
                                continue;

                        qid = p_vf->vf_queues[i].fw_rx_qid;

                        rc = qed_sp_eth_rx_queues_update(p_hwfn, qid,
                                                         1, 0, 1,
                                                         QED_SPQ_MODE_EBLOCK,
                                                         NULL);
                        if (rc) {
                                DP_NOTICE(p_hwfn,
                                          "Failed to send Rx update for queue[0x%04x]\n",
                                          qid);
                                return rc;
                        }
                }

                if (filter.vlan)
                        p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
                else
                        p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
        }

        /* If forced features are terminated, we need to configure the shadow
         * configuration back again.
         */
        if (events)
                qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

        return rc;
}

static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
                                       struct qed_ptt *p_ptt,
                                       struct qed_vf_info *vf)
{
        struct qed_sp_vport_start_params params = { 0 };
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        struct vfpf_vport_start_tlv *start;
        u8 status = PFVF_STATUS_SUCCESS;
        struct qed_vf_info *vf_info;
        u64 *p_bitmap;
        int sb_id;
        int rc;

        vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
        if (!vf_info) {
                DP_NOTICE(p_hwfn->cdev,
                          "Failed to get VF info, invalid vfid [%d]\n",
                          vf->relative_vf_id);
                return;
        }

        vf->state = VF_ENABLED;
        start = &mbx->req_virt->start_vport;

        /* Initialize Status block in CAU */
        for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
                if (!start->sb_addr[sb_id]) {
                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                                   "VF[%d] did not fill the address of SB %d\n",
                                   vf->relative_vf_id, sb_id);
                        break;
                }

                qed_int_cau_conf_sb(p_hwfn, p_ptt,
                                    start->sb_addr[sb_id],
                                    vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
        }
        qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

        vf->mtu = start->mtu;
        vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

        /* Take into consideration configuration forced by hypervisor;
         * If none is configured, use the supplied VF values [for old
         * vfs that would still be fine, since they passed '0' as padding].
         */
        p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
        if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
                u8 vf_req = start->only_untagged;

                vf_info->bulletin.p_virt->default_only_untagged = vf_req;
                *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
        }

        params.tpa_mode = start->tpa_mode;
        params.remove_inner_vlan = start->inner_vlan_removal;
        params.tx_switching = true;

        params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
        params.drop_ttl0 = false;
        params.concrete_fid = vf->concrete_fid;
        params.opaque_fid = vf->opaque_fid;
        params.vport_id = vf->vport_id;
        params.max_buffers_per_cqe = start->max_buffers_per_cqe;
        params.mtu = vf->mtu;
        params.check_mac = true;

        rc = qed_sp_eth_vport_start(p_hwfn, &params);
        if (rc) {
                DP_ERR(p_hwfn,
                       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
                status = PFVF_STATUS_FAILURE;
        } else {
                vf->vport_instance++;

                /* Force configuration if needed on the newly opened vport */
                qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);

                __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
        }
        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
                             sizeof(struct pfvf_def_resp_tlv), status);
}

static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_vf_info *vf)
{
        u8 status = PFVF_STATUS_SUCCESS;
        int rc;

        vf->vport_instance--;
        vf->spoof_chk = false;

        rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
        if (rc) {
                DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
                       rc);
                status = PFVF_STATUS_FAILURE;
        }

        /* Forget the configuration on the vport */
        vf->configured_features = 0;
        memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));

        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
                             sizeof(struct pfvf_def_resp_tlv), status);
}

static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
                                          struct qed_ptt *p_ptt,
                                          struct qed_vf_info *vf,
                                          u8 status, bool b_legacy)
{
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        struct pfvf_start_queue_resp_tlv *p_tlv;
        struct vfpf_start_rxq_tlv *req;
        u16 length;

        mbx->offset = (u8 *)mbx->reply_virt;

        /* Taking a bigger struct instead of adding a TLV to list was a
         * mistake, but one which we're now stuck with, as some older
         * clients assume the size of the previous response.
         */
        if (!b_legacy)
                length = sizeof(*p_tlv);
        else
                length = sizeof(struct pfvf_def_resp_tlv);

        p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
                            length);
        qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));

        /* Update the TLV with the response */
        if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
                req = &mbx->req_virt->start_rxq;
                p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
                                offsetof(struct mstorm_vf_zone,
                                         non_trigger.eth_rx_queue_producers) +
                                sizeof(struct eth_rx_prod_data) * req->rx_qid;
        }

        qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}

static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt,
                                     struct qed_vf_info *vf)
{
        struct qed_queue_start_common_params params;
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        u8 status = PFVF_STATUS_NO_RESOURCE;
        struct vfpf_start_rxq_tlv *req;
        bool b_legacy_vf = false;
        int rc;

        memset(&params, 0, sizeof(params));
        req = &mbx->req_virt->start_rxq;

        if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
            !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
                goto out;

        params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
        params.vf_qid = req->rx_qid;
        params.vport_id = vf->vport_id;
        params.sb = req->hw_sb;
        params.sb_idx = req->sb_index;

        /* Legacy VFs have their Producers in a different location, which they
         * calculate on their own and clean the producer prior to this.
         */
        if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
            ETH_HSI_VER_NO_PKT_LEN_TUNN) {
                b_legacy_vf = true;
        } else {
                REG_WR(p_hwfn,
                       GTT_BAR0_MAP_REG_MSDM_RAM +
                       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
                       0);
        }

        rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
                                         vf->vf_queues[req->rx_qid].fw_cid,
                                         &params,
                                         vf->abs_vf_id + 0x10,
                                         req->bd_max_bytes,
                                         req->rxq_addr,
                                         req->cqe_pbl_addr, req->cqe_pbl_size,
                                         b_legacy_vf);

        if (rc) {
                status = PFVF_STATUS_FAILURE;
        } else {
                status = PFVF_STATUS_SUCCESS;
                vf->vf_queues[req->rx_qid].rxq_active = true;
                vf->num_active_rxqs++;
        }

out:
        qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
}

static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
                                          struct qed_ptt *p_ptt,
                                          struct qed_vf_info *p_vf, u8 status)
{
        struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
        struct pfvf_start_queue_resp_tlv *p_tlv;
        bool b_legacy = false;
        u16 length;

        mbx->offset = (u8 *)mbx->reply_virt;

        /* Taking a bigger struct instead of adding a TLV to list was a
         * mistake, but one which we're now stuck with, as some older
         * clients assume the size of the previous response.
         */
        if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
            ETH_HSI_VER_NO_PKT_LEN_TUNN)
                b_legacy = true;

        if (!b_legacy)
                length = sizeof(*p_tlv);
        else
                length = sizeof(struct pfvf_def_resp_tlv);

        p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
                            length);
        qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));

        /* Update the TLV with the response */
        if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
                u16 qid = mbx->req_virt->start_txq.tx_qid;

                p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid,
                                               DQ_DEMS_LEGACY);
        }

        qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}

static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt,
                                     struct qed_vf_info *vf)
{
        struct qed_queue_start_common_params params;
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        u8 status = PFVF_STATUS_NO_RESOURCE;
        union qed_qm_pq_params pq_params;
        struct vfpf_start_txq_tlv *req;
        int rc;

        /* Prepare the parameters which would choose the right PQ */
        memset(&pq_params, 0, sizeof(pq_params));
        pq_params.eth.is_vf = 1;
        pq_params.eth.vf_id = vf->relative_vf_id;

        memset(&params, 0, sizeof(params));
        req = &mbx->req_virt->start_txq;

        if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
            !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
                goto out;

        params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
        params.vport_id = vf->vport_id;
        params.sb = req->hw_sb;
        params.sb_idx = req->sb_index;

        rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
                                         vf->opaque_fid,
                                         vf->vf_queues[req->tx_qid].fw_cid,
                                         &params,
                                         vf->abs_vf_id + 0x10,
                                         req->pbl_addr,
                                         req->pbl_size, &pq_params);

        if (rc) {
                status = PFVF_STATUS_FAILURE;
        } else {
                status = PFVF_STATUS_SUCCESS;
                vf->vf_queues[req->tx_qid].txq_active = true;
        }

out:
        qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
}

static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
                                struct qed_vf_info *vf,
                                u16 rxq_id, u8 num_rxqs, bool cqe_completion)
{
        int rc = 0;
        int qid;

        if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
                return -EINVAL;

        for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
                if (vf->vf_queues[qid].rxq_active) {
                        rc = qed_sp_eth_rx_queue_stop(p_hwfn,
                                                      vf->vf_queues[qid].
                                                      fw_rx_qid, false,
                                                      cqe_completion);

                        if (rc)
                                return rc;
                }
                vf->vf_queues[qid].rxq_active = false;
                vf->num_active_rxqs--;
        }

        return rc;
}

static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
                                struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
{
        int rc = 0;
        int qid;

        if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
                return -EINVAL;

        for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
                if (vf->vf_queues[qid].txq_active) {
                        rc = qed_sp_eth_tx_queue_stop(p_hwfn,
                                                      vf->vf_queues[qid].
                                                      fw_tx_qid);
1948 fw_tx_qid); 1949 1950 if (rc) 1951 return rc; 1952 } 1953 vf->vf_queues[qid].txq_active = false; 1954 } 1955 return rc; 1956 } 1957 1958 static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn, 1959 struct qed_ptt *p_ptt, 1960 struct qed_vf_info *vf) 1961 { 1962 u16 length = sizeof(struct pfvf_def_resp_tlv); 1963 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 1964 u8 status = PFVF_STATUS_SUCCESS; 1965 struct vfpf_stop_rxqs_tlv *req; 1966 int rc; 1967 1968 /* We give the option of starting from qid != 0, in this case we 1969 * need to make sure that qid + num_qs doesn't exceed the actual 1970 * amount of queues that exist. 1971 */ 1972 req = &mbx->req_virt->stop_rxqs; 1973 rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid, 1974 req->num_rxqs, req->cqe_completion); 1975 if (rc) 1976 status = PFVF_STATUS_FAILURE; 1977 1978 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS, 1979 length, status); 1980 } 1981 1982 static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn, 1983 struct qed_ptt *p_ptt, 1984 struct qed_vf_info *vf) 1985 { 1986 u16 length = sizeof(struct pfvf_def_resp_tlv); 1987 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 1988 u8 status = PFVF_STATUS_SUCCESS; 1989 struct vfpf_stop_txqs_tlv *req; 1990 int rc; 1991 1992 /* We give the option of starting from qid != 0, in this case we 1993 * need to make sure that qid + num_qs doesn't exceed the actual 1994 * amount of queues that exist. 1995 */ 1996 req = &mbx->req_virt->stop_txqs; 1997 rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs); 1998 if (rc) 1999 status = PFVF_STATUS_FAILURE; 2000 2001 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS, 2002 length, status); 2003 } 2004 2005 static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, 2006 struct qed_ptt *p_ptt, 2007 struct qed_vf_info *vf) 2008 { 2009 u16 length = sizeof(struct pfvf_def_resp_tlv); 2010 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 2011 struct vfpf_update_rxq_tlv *req; 2012 u8 status = PFVF_STATUS_SUCCESS; 2013 u8 complete_event_flg; 2014 u8 complete_cqe_flg; 2015 u16 qid; 2016 int rc; 2017 u8 i; 2018 2019 req = &mbx->req_virt->update_rxq; 2020 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG); 2021 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); 2022 2023 for (i = 0; i < req->num_rxqs; i++) { 2024 qid = req->rx_qid + i; 2025 2026 if (!vf->vf_queues[qid].rxq_active) { 2027 DP_NOTICE(p_hwfn, "VF rx_qid = %d isn`t active!\n", 2028 qid); 2029 status = PFVF_STATUS_FAILURE; 2030 break; 2031 } 2032 2033 rc = qed_sp_eth_rx_queues_update(p_hwfn, 2034 vf->vf_queues[qid].fw_rx_qid, 2035 1, 2036 complete_cqe_flg, 2037 complete_event_flg, 2038 QED_SPQ_MODE_EBLOCK, NULL); 2039 2040 if (rc) { 2041 status = PFVF_STATUS_FAILURE; 2042 break; 2043 } 2044 } 2045 2046 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ, 2047 length, status); 2048 } 2049 2050 void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn, 2051 void *p_tlvs_list, u16 req_type) 2052 { 2053 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list; 2054 int len = 0; 2055 2056 do { 2057 if (!p_tlv->length) { 2058 DP_NOTICE(p_hwfn, "Zero length TLV found\n"); 2059 return NULL; 2060 } 2061 2062 if (p_tlv->type == req_type) { 2063 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2064 "Extended tlv type %d, length %d found\n", 2065 p_tlv->type, p_tlv->length); 2066 return p_tlv; 2067 } 2068 2069 len += p_tlv->length; 2070 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length); 2071 2072 if ((len + p_tlv->length) > 
TLV_BUFFER_SIZE) { 2073 DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n"); 2074 return NULL; 2075 } 2076 } while (p_tlv->type != CHANNEL_TLV_LIST_END); 2077 2078 return NULL; 2079 } 2080 2081 static void 2082 qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn, 2083 struct qed_sp_vport_update_params *p_data, 2084 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2085 { 2086 struct vfpf_vport_update_activate_tlv *p_act_tlv; 2087 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; 2088 2089 p_act_tlv = (struct vfpf_vport_update_activate_tlv *) 2090 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2091 if (!p_act_tlv) 2092 return; 2093 2094 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx; 2095 p_data->vport_active_rx_flg = p_act_tlv->active_rx; 2096 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx; 2097 p_data->vport_active_tx_flg = p_act_tlv->active_tx; 2098 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE; 2099 } 2100 2101 static void 2102 qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn, 2103 struct qed_sp_vport_update_params *p_data, 2104 struct qed_vf_info *p_vf, 2105 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2106 { 2107 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv; 2108 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; 2109 2110 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *) 2111 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2112 if (!p_vlan_tlv) 2113 return; 2114 2115 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan; 2116 2117 /* Ignore the VF request if we're forcing a vlan */ 2118 if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) { 2119 p_data->update_inner_vlan_removal_flg = 1; 2120 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan; 2121 } 2122 2123 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP; 2124 } 2125 2126 static void 2127 qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn, 2128 struct qed_sp_vport_update_params *p_data, 2129 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2130 { 2131 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; 2132 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; 2133 2134 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *) 2135 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 2136 tlv); 2137 if (!p_tx_switch_tlv) 2138 return; 2139 2140 p_data->update_tx_switching_flg = 1; 2141 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching; 2142 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH; 2143 } 2144 2145 static void 2146 qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn, 2147 struct qed_sp_vport_update_params *p_data, 2148 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2149 { 2150 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; 2151 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST; 2152 2153 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *) 2154 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2155 if (!p_mcast_tlv) 2156 return; 2157 2158 p_data->update_approx_mcast_flg = 1; 2159 memcpy(p_data->bins, p_mcast_tlv->bins, 2160 sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); 2161 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; 2162 } 2163 2164 static void 2165 qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn, 2166 struct qed_sp_vport_update_params *p_data, 2167 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2168 { 2169 struct qed_filter_accept_flags *p_flags = &p_data->accept_flags; 2170 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; 2171 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; 2172 2173 p_accept_tlv = (struct 
vfpf_vport_update_accept_param_tlv *) 2174 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2175 if (!p_accept_tlv) 2176 return; 2177 2178 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode; 2179 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter; 2180 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode; 2181 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter; 2182 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM; 2183 } 2184 2185 static void 2186 qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn, 2187 struct qed_sp_vport_update_params *p_data, 2188 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2189 { 2190 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan; 2191 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; 2192 2193 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *) 2194 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 2195 tlv); 2196 if (!p_accept_any_vlan) 2197 return; 2198 2199 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan; 2200 p_data->update_accept_any_vlan_flg = 2201 p_accept_any_vlan->update_accept_any_vlan_flg; 2202 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN; 2203 } 2204 2205 static void 2206 qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, 2207 struct qed_vf_info *vf, 2208 struct qed_sp_vport_update_params *p_data, 2209 struct qed_rss_params *p_rss, 2210 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2211 { 2212 struct vfpf_vport_update_rss_tlv *p_rss_tlv; 2213 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS; 2214 u16 i, q_idx, max_q_idx; 2215 u16 table_size; 2216 2217 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *) 2218 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2219 if (!p_rss_tlv) { 2220 p_data->rss_params = NULL; 2221 return; 2222 } 2223 2224 memset(p_rss, 0, sizeof(struct qed_rss_params)); 2225 2226 p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags & 2227 VFPF_UPDATE_RSS_CONFIG_FLAG); 2228 p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags & 2229 VFPF_UPDATE_RSS_CAPS_FLAG); 2230 p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags & 2231 VFPF_UPDATE_RSS_IND_TABLE_FLAG); 2232 p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags & 2233 VFPF_UPDATE_RSS_KEY_FLAG); 2234 2235 p_rss->rss_enable = p_rss_tlv->rss_enable; 2236 p_rss->rss_eng_id = vf->relative_vf_id + 1; 2237 p_rss->rss_caps = p_rss_tlv->rss_caps; 2238 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log; 2239 memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table, 2240 sizeof(p_rss->rss_ind_table)); 2241 memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key)); 2242 2243 table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table), 2244 (1 << p_rss_tlv->rss_table_size_log)); 2245 2246 max_q_idx = ARRAY_SIZE(vf->vf_queues); 2247 2248 for (i = 0; i < table_size; i++) { 2249 u16 index = vf->vf_queues[0].fw_rx_qid; 2250 2251 q_idx = p_rss->rss_ind_table[i]; 2252 if (q_idx >= max_q_idx) 2253 DP_NOTICE(p_hwfn, 2254 "rss_ind_table[%d] = %d, rxq is out of range\n", 2255 i, q_idx); 2256 else if (!vf->vf_queues[q_idx].rxq_active) 2257 DP_NOTICE(p_hwfn, 2258 "rss_ind_table[%d] = %d, rxq is not active\n", 2259 i, q_idx); 2260 else 2261 index = vf->vf_queues[q_idx].fw_rx_qid; 2262 p_rss->rss_ind_table[i] = index; 2263 } 2264 2265 p_data->rss_params = p_rss; 2266 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS; 2267 } 2268 2269 static void 2270 qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn, 2271 struct qed_vf_info *vf, 2272 struct qed_sp_vport_update_params *p_data, 
				struct qed_sge_tpa_params *p_sge_tpa,
				struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;

	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	if (!p_sge_tpa_tlv) {
		p_data->sge_tpa_params = NULL;
		return;
	}

	memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));

	p_sge_tpa->update_tpa_en_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
	p_sge_tpa->update_tpa_param_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
	       VFPF_UPDATE_TPA_PARAM_FLAG);

	p_sge_tpa->tpa_ipv4_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
	p_sge_tpa->tpa_ipv6_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
	p_sge_tpa->tpa_pkt_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
	p_sge_tpa->tpa_hdr_data_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
	p_sge_tpa->tpa_gro_consistent_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);

	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;

	p_data->sge_tpa_params = p_sge_tpa;

	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
}

static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct qed_vf_info *vf)
{
	struct qed_sp_vport_update_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct qed_sge_tpa_params sge_tpa_params;
	struct qed_rss_params rss_params;
	u8 status = PFVF_STATUS_SUCCESS;
	u16 tlvs_mask = 0;
	u16 length;
	int rc;

	/* Validate that the VF has a vport instance to update */
	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "No VPORT instance available for VF[%d], failing vport update\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	memset(&params, 0, sizeof(params));
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.rss_params = NULL;

	/* Search for extended tlvs list and update values
	 * from VF in struct qed_sp_vport_update_params.
	 */
	qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
	qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
				    mbx, &tlvs_mask);
	qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
					&sge_tpa_params, mbx, &tlvs_mask);

	/* Just log a message if there isn't a single extended TLV in the
	 * buffer. Once VFs request all vport-update features as extended
	 * TLVs, a missing extended TLV can be treated as an error in the
	 * response.
2363 */ 2364 if (!tlvs_mask) { 2365 DP_NOTICE(p_hwfn, 2366 "No feature tlvs found for vport update\n"); 2367 status = PFVF_STATUS_NOT_SUPPORTED; 2368 goto out; 2369 } 2370 2371 rc = qed_sp_vport_update(p_hwfn, ¶ms, QED_SPQ_MODE_EBLOCK, NULL); 2372 2373 if (rc) 2374 status = PFVF_STATUS_FAILURE; 2375 2376 out: 2377 length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status, 2378 tlvs_mask, tlvs_mask); 2379 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); 2380 } 2381 2382 static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn, 2383 struct qed_vf_info *p_vf, 2384 struct qed_filter_ucast *p_params) 2385 { 2386 int i; 2387 2388 /* First remove entries and then add new ones */ 2389 if (p_params->opcode == QED_FILTER_REMOVE) { 2390 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) 2391 if (p_vf->shadow_config.vlans[i].used && 2392 p_vf->shadow_config.vlans[i].vid == 2393 p_params->vlan) { 2394 p_vf->shadow_config.vlans[i].used = false; 2395 break; 2396 } 2397 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) { 2398 DP_VERBOSE(p_hwfn, 2399 QED_MSG_IOV, 2400 "VF [%d] - Tries to remove a non-existing vlan\n", 2401 p_vf->relative_vf_id); 2402 return -EINVAL; 2403 } 2404 } else if (p_params->opcode == QED_FILTER_REPLACE || 2405 p_params->opcode == QED_FILTER_FLUSH) { 2406 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) 2407 p_vf->shadow_config.vlans[i].used = false; 2408 } 2409 2410 /* In forced mode, we're willing to remove entries - but we don't add 2411 * new ones. 2412 */ 2413 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)) 2414 return 0; 2415 2416 if (p_params->opcode == QED_FILTER_ADD || 2417 p_params->opcode == QED_FILTER_REPLACE) { 2418 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { 2419 if (p_vf->shadow_config.vlans[i].used) 2420 continue; 2421 2422 p_vf->shadow_config.vlans[i].used = true; 2423 p_vf->shadow_config.vlans[i].vid = p_params->vlan; 2424 break; 2425 } 2426 2427 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) { 2428 DP_VERBOSE(p_hwfn, 2429 QED_MSG_IOV, 2430 "VF [%d] - Tries to configure more than %d vlan filters\n", 2431 p_vf->relative_vf_id, 2432 QED_ETH_VF_NUM_VLAN_FILTERS + 1); 2433 return -EINVAL; 2434 } 2435 } 2436 2437 return 0; 2438 } 2439 2440 static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, 2441 struct qed_vf_info *p_vf, 2442 struct qed_filter_ucast *p_params) 2443 { 2444 int i; 2445 2446 /* If we're in forced-mode, we don't allow any change */ 2447 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) 2448 return 0; 2449 2450 /* First remove entries and then add new ones */ 2451 if (p_params->opcode == QED_FILTER_REMOVE) { 2452 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { 2453 if (ether_addr_equal(p_vf->shadow_config.macs[i], 2454 p_params->mac)) { 2455 memset(p_vf->shadow_config.macs[i], 0, 2456 ETH_ALEN); 2457 break; 2458 } 2459 } 2460 2461 if (i == QED_ETH_VF_NUM_MAC_FILTERS) { 2462 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2463 "MAC isn't configured\n"); 2464 return -EINVAL; 2465 } 2466 } else if (p_params->opcode == QED_FILTER_REPLACE || 2467 p_params->opcode == QED_FILTER_FLUSH) { 2468 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) 2469 memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN); 2470 } 2471 2472 /* List the new MAC address */ 2473 if (p_params->opcode != QED_FILTER_ADD && 2474 p_params->opcode != QED_FILTER_REPLACE) 2475 return 0; 2476 2477 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { 2478 if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) { 2479 
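			/* Claim the first empty slot in the shadow MAC
			 * table for the newly added address.
			 */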
ether_addr_copy(p_vf->shadow_config.macs[i], 2480 p_params->mac); 2481 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2482 "Added MAC at %d entry in shadow\n", i); 2483 break; 2484 } 2485 } 2486 2487 if (i == QED_ETH_VF_NUM_MAC_FILTERS) { 2488 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n"); 2489 return -EINVAL; 2490 } 2491 2492 return 0; 2493 } 2494 2495 static int 2496 qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn, 2497 struct qed_vf_info *p_vf, 2498 struct qed_filter_ucast *p_params) 2499 { 2500 int rc = 0; 2501 2502 if (p_params->type == QED_FILTER_MAC) { 2503 rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params); 2504 if (rc) 2505 return rc; 2506 } 2507 2508 if (p_params->type == QED_FILTER_VLAN) 2509 rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params); 2510 2511 return rc; 2512 } 2513 2514 static int qed_iov_chk_ucast(struct qed_hwfn *hwfn, 2515 int vfid, struct qed_filter_ucast *params) 2516 { 2517 struct qed_public_vf_info *vf; 2518 2519 vf = qed_iov_get_public_vf_info(hwfn, vfid, true); 2520 if (!vf) 2521 return -EINVAL; 2522 2523 /* No real decision to make; Store the configured MAC */ 2524 if (params->type == QED_FILTER_MAC || 2525 params->type == QED_FILTER_MAC_VLAN) 2526 ether_addr_copy(vf->mac, params->mac); 2527 2528 return 0; 2529 } 2530 2531 static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, 2532 struct qed_ptt *p_ptt, 2533 struct qed_vf_info *vf) 2534 { 2535 struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt; 2536 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 2537 struct vfpf_ucast_filter_tlv *req; 2538 u8 status = PFVF_STATUS_SUCCESS; 2539 struct qed_filter_ucast params; 2540 int rc; 2541 2542 /* Prepare the unicast filter params */ 2543 memset(¶ms, 0, sizeof(struct qed_filter_ucast)); 2544 req = &mbx->req_virt->ucast_filter; 2545 params.opcode = (enum qed_filter_opcode)req->opcode; 2546 params.type = (enum qed_filter_ucast_type)req->type; 2547 2548 params.is_rx_filter = 1; 2549 params.is_tx_filter = 1; 2550 params.vport_to_remove_from = vf->vport_id; 2551 params.vport_to_add_to = vf->vport_id; 2552 memcpy(params.mac, req->mac, ETH_ALEN); 2553 params.vlan = req->vlan; 2554 2555 DP_VERBOSE(p_hwfn, 2556 QED_MSG_IOV, 2557 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n", 2558 vf->abs_vf_id, params.opcode, params.type, 2559 params.is_rx_filter ? "RX" : "", 2560 params.is_tx_filter ? "TX" : "", 2561 params.vport_to_add_to, 2562 params.mac[0], params.mac[1], 2563 params.mac[2], params.mac[3], 2564 params.mac[4], params.mac[5], params.vlan); 2565 2566 if (!vf->vport_instance) { 2567 DP_VERBOSE(p_hwfn, 2568 QED_MSG_IOV, 2569 "No VPORT instance available for VF[%d], failing ucast MAC configuration\n", 2570 vf->abs_vf_id); 2571 status = PFVF_STATUS_FAILURE; 2572 goto out; 2573 } 2574 2575 /* Update shadow copy of the VF configuration */ 2576 if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, ¶ms)) { 2577 status = PFVF_STATUS_FAILURE; 2578 goto out; 2579 } 2580 2581 /* Determine if the unicast filtering is acceptible by PF */ 2582 if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) && 2583 (params.type == QED_FILTER_VLAN || 2584 params.type == QED_FILTER_MAC_VLAN)) { 2585 /* Once VLAN is forced or PVID is set, do not allow 2586 * to add/replace any further VLANs. 
2587 */ 2588 if (params.opcode == QED_FILTER_ADD || 2589 params.opcode == QED_FILTER_REPLACE) 2590 status = PFVF_STATUS_FORCED; 2591 goto out; 2592 } 2593 2594 if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) && 2595 (params.type == QED_FILTER_MAC || 2596 params.type == QED_FILTER_MAC_VLAN)) { 2597 if (!ether_addr_equal(p_bulletin->mac, params.mac) || 2598 (params.opcode != QED_FILTER_ADD && 2599 params.opcode != QED_FILTER_REPLACE)) 2600 status = PFVF_STATUS_FORCED; 2601 goto out; 2602 } 2603 2604 rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, ¶ms); 2605 if (rc) { 2606 status = PFVF_STATUS_FAILURE; 2607 goto out; 2608 } 2609 2610 rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, ¶ms, 2611 QED_SPQ_MODE_CB, NULL); 2612 if (rc) 2613 status = PFVF_STATUS_FAILURE; 2614 2615 out: 2616 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER, 2617 sizeof(struct pfvf_def_resp_tlv), status); 2618 } 2619 2620 static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn, 2621 struct qed_ptt *p_ptt, 2622 struct qed_vf_info *vf) 2623 { 2624 int i; 2625 2626 /* Reset the SBs */ 2627 for (i = 0; i < vf->num_sbs; i++) 2628 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 2629 vf->igu_sbs[i], 2630 vf->opaque_fid, false); 2631 2632 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP, 2633 sizeof(struct pfvf_def_resp_tlv), 2634 PFVF_STATUS_SUCCESS); 2635 } 2636 2637 static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn, 2638 struct qed_ptt *p_ptt, struct qed_vf_info *vf) 2639 { 2640 u16 length = sizeof(struct pfvf_def_resp_tlv); 2641 u8 status = PFVF_STATUS_SUCCESS; 2642 2643 /* Disable Interrupts for VF */ 2644 qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); 2645 2646 /* Reset Permission table */ 2647 qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); 2648 2649 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE, 2650 length, status); 2651 } 2652 2653 static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn, 2654 struct qed_ptt *p_ptt, 2655 struct qed_vf_info *p_vf) 2656 { 2657 u16 length = sizeof(struct pfvf_def_resp_tlv); 2658 u8 status = PFVF_STATUS_SUCCESS; 2659 int rc = 0; 2660 2661 qed_iov_vf_cleanup(p_hwfn, p_vf); 2662 2663 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) { 2664 /* Stopping the VF */ 2665 rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid, 2666 p_vf->opaque_fid); 2667 2668 if (rc) { 2669 DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n", 2670 rc); 2671 status = PFVF_STATUS_FAILURE; 2672 } 2673 2674 p_vf->state = VF_STOPPED; 2675 } 2676 2677 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE, 2678 length, status); 2679 } 2680 2681 static int 2682 qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, 2683 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 2684 { 2685 int cnt; 2686 u32 val; 2687 2688 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid); 2689 2690 for (cnt = 0; cnt < 50; cnt++) { 2691 val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); 2692 if (!val) 2693 break; 2694 msleep(20); 2695 } 2696 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 2697 2698 if (cnt == 50) { 2699 DP_ERR(p_hwfn, 2700 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n", 2701 p_vf->abs_vf_id, val); 2702 return -EBUSY; 2703 } 2704 2705 return 0; 2706 } 2707 2708 static int 2709 qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, 2710 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 2711 { 2712 u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS]; 2713 int i, cnt; 2714 2715 /* Read initial consumers & producers */ 2716 for (i = 0; i < 
MAX_NUM_VOQS; i++) { 2717 u32 prod; 2718 2719 cons[i] = qed_rd(p_hwfn, p_ptt, 2720 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + 2721 i * 0x40); 2722 prod = qed_rd(p_hwfn, p_ptt, 2723 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 + 2724 i * 0x40); 2725 distance[i] = prod - cons[i]; 2726 } 2727 2728 /* Wait for consumers to pass the producers */ 2729 i = 0; 2730 for (cnt = 0; cnt < 50; cnt++) { 2731 for (; i < MAX_NUM_VOQS; i++) { 2732 u32 tmp; 2733 2734 tmp = qed_rd(p_hwfn, p_ptt, 2735 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + 2736 i * 0x40); 2737 if (distance[i] > tmp - cons[i]) 2738 break; 2739 } 2740 2741 if (i == MAX_NUM_VOQS) 2742 break; 2743 2744 msleep(20); 2745 } 2746 2747 if (cnt == 50) { 2748 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n", 2749 p_vf->abs_vf_id, i); 2750 return -EBUSY; 2751 } 2752 2753 return 0; 2754 } 2755 2756 static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn, 2757 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 2758 { 2759 int rc; 2760 2761 rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt); 2762 if (rc) 2763 return rc; 2764 2765 rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt); 2766 if (rc) 2767 return rc; 2768 2769 return 0; 2770 } 2771 2772 static int 2773 qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn, 2774 struct qed_ptt *p_ptt, 2775 u16 rel_vf_id, u32 *ack_vfs) 2776 { 2777 struct qed_vf_info *p_vf; 2778 int rc = 0; 2779 2780 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); 2781 if (!p_vf) 2782 return 0; 2783 2784 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & 2785 (1ULL << (rel_vf_id % 64))) { 2786 u16 vfid = p_vf->abs_vf_id; 2787 2788 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2789 "VF[%d] - Handling FLR\n", vfid); 2790 2791 qed_iov_vf_cleanup(p_hwfn, p_vf); 2792 2793 /* If VF isn't active, no need for anything but SW */ 2794 if (!p_vf->b_init) 2795 goto cleanup; 2796 2797 rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt); 2798 if (rc) 2799 goto cleanup; 2800 2801 rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true); 2802 if (rc) { 2803 DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid); 2804 return rc; 2805 } 2806 2807 /* VF_STOPPED has to be set only after final cleanup 2808 * but prior to re-enabling the VF. 2809 */ 2810 p_vf->state = VF_STOPPED; 2811 2812 rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf); 2813 if (rc) { 2814 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n", 2815 vfid); 2816 return rc; 2817 } 2818 cleanup: 2819 /* Mark VF for ack and clean pending state */ 2820 if (p_vf->state == VF_RESET) 2821 p_vf->state = VF_STOPPED; 2822 ack_vfs[vfid / 32] |= BIT((vfid % 32)); 2823 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= 2824 ~(1ULL << (rel_vf_id % 64)); 2825 p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &= 2826 ~(1ULL << (rel_vf_id % 64)); 2827 } 2828 2829 return rc; 2830 } 2831 2832 static int 2833 qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2834 { 2835 u32 ack_vfs[VF_MAX_STATIC / 32]; 2836 int rc = 0; 2837 u16 i; 2838 2839 memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32)); 2840 2841 /* Since BRB <-> PRS interface can't be tested as part of the flr 2842 * polling due to HW limitations, simply sleep a bit. And since 2843 * there's no need to wait per-vf, do it before looping. 
2844 */ 2845 msleep(100); 2846 2847 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) 2848 qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs); 2849 2850 rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs); 2851 return rc; 2852 } 2853 2854 int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) 2855 { 2856 u16 i, found = 0; 2857 2858 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n"); 2859 for (i = 0; i < (VF_MAX_STATIC / 32); i++) 2860 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2861 "[%08x,...,%08x]: %08x\n", 2862 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]); 2863 2864 if (!p_hwfn->cdev->p_iov_info) { 2865 DP_NOTICE(p_hwfn, "VF flr but no IOV\n"); 2866 return 0; 2867 } 2868 2869 /* Mark VFs */ 2870 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) { 2871 struct qed_vf_info *p_vf; 2872 u8 vfid; 2873 2874 p_vf = qed_iov_get_vf_info(p_hwfn, i, false); 2875 if (!p_vf) 2876 continue; 2877 2878 vfid = p_vf->abs_vf_id; 2879 if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) { 2880 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr; 2881 u16 rel_vf_id = p_vf->relative_vf_id; 2882 2883 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2884 "VF[%d] [rel %d] got FLR-ed\n", 2885 vfid, rel_vf_id); 2886 2887 p_vf->state = VF_RESET; 2888 2889 /* No need to lock here, since pending_flr should 2890 * only change here and before ACKing MFw. Since 2891 * MFW will not trigger an additional attention for 2892 * VF flr until ACKs, we're safe. 2893 */ 2894 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64); 2895 found = 1; 2896 } 2897 } 2898 2899 return found; 2900 } 2901 2902 static void qed_iov_get_link(struct qed_hwfn *p_hwfn, 2903 u16 vfid, 2904 struct qed_mcp_link_params *p_params, 2905 struct qed_mcp_link_state *p_link, 2906 struct qed_mcp_link_capabilities *p_caps) 2907 { 2908 struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, 2909 vfid, 2910 false); 2911 struct qed_bulletin_content *p_bulletin; 2912 2913 if (!p_vf) 2914 return; 2915 2916 p_bulletin = p_vf->bulletin.p_virt; 2917 2918 if (p_params) 2919 __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin); 2920 if (p_link) 2921 __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin); 2922 if (p_caps) 2923 __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); 2924 } 2925 2926 static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, 2927 struct qed_ptt *p_ptt, int vfid) 2928 { 2929 struct qed_iov_vf_mbx *mbx; 2930 struct qed_vf_info *p_vf; 2931 2932 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 2933 if (!p_vf) 2934 return; 2935 2936 mbx = &p_vf->vf_mbx; 2937 2938 /* qed_iov_process_mbx_request */ 2939 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2940 "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id); 2941 2942 mbx->first_tlv = mbx->req_virt->first_tlv; 2943 2944 /* check if tlv type is known */ 2945 if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { 2946 switch (mbx->first_tlv.tl.type) { 2947 case CHANNEL_TLV_ACQUIRE: 2948 qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); 2949 break; 2950 case CHANNEL_TLV_VPORT_START: 2951 qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf); 2952 break; 2953 case CHANNEL_TLV_VPORT_TEARDOWN: 2954 qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf); 2955 break; 2956 case CHANNEL_TLV_START_RXQ: 2957 qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf); 2958 break; 2959 case CHANNEL_TLV_START_TXQ: 2960 qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf); 2961 break; 2962 case CHANNEL_TLV_STOP_RXQS: 2963 qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf); 2964 break; 2965 case CHANNEL_TLV_STOP_TXQS: 2966 qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, 
p_vf); 2967 break; 2968 case CHANNEL_TLV_UPDATE_RXQ: 2969 qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf); 2970 break; 2971 case CHANNEL_TLV_VPORT_UPDATE: 2972 qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf); 2973 break; 2974 case CHANNEL_TLV_UCAST_FILTER: 2975 qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf); 2976 break; 2977 case CHANNEL_TLV_CLOSE: 2978 qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf); 2979 break; 2980 case CHANNEL_TLV_INT_CLEANUP: 2981 qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf); 2982 break; 2983 case CHANNEL_TLV_RELEASE: 2984 qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); 2985 break; 2986 } 2987 } else { 2988 /* unknown TLV - this may belong to a VF driver from the future 2989 * - a version written after this PF driver was written, which 2990 * supports features unknown as of yet. Too bad since we don't 2991 * support them. Or this may be because someone wrote a crappy 2992 * VF driver and is sending garbage over the channel. 2993 */ 2994 DP_NOTICE(p_hwfn, 2995 "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n", 2996 p_vf->abs_vf_id, 2997 mbx->first_tlv.tl.type, 2998 mbx->first_tlv.tl.length, 2999 mbx->first_tlv.padding, mbx->first_tlv.reply_address); 3000 3001 /* Try replying in case reply address matches the acquisition's 3002 * posted address. 3003 */ 3004 if (p_vf->acquire.first_tlv.reply_address && 3005 (mbx->first_tlv.reply_address == 3006 p_vf->acquire.first_tlv.reply_address)) { 3007 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, 3008 mbx->first_tlv.tl.type, 3009 sizeof(struct pfvf_def_resp_tlv), 3010 PFVF_STATUS_NOT_SUPPORTED); 3011 } else { 3012 DP_VERBOSE(p_hwfn, 3013 QED_MSG_IOV, 3014 "VF[%02x]: Can't respond to TLV - no valid reply address\n", 3015 p_vf->abs_vf_id); 3016 } 3017 } 3018 } 3019 3020 static void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid) 3021 { 3022 u64 add_bit = 1ULL << (vfid % 64); 3023 3024 p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit; 3025 } 3026 3027 static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn, 3028 u64 *events) 3029 { 3030 u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events; 3031 3032 memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH); 3033 memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH); 3034 } 3035 3036 static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, 3037 u16 abs_vfid, struct regpair *vf_msg) 3038 { 3039 u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf; 3040 struct qed_vf_info *p_vf; 3041 3042 if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) { 3043 DP_VERBOSE(p_hwfn, 3044 QED_MSG_IOV, 3045 "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n", 3046 abs_vfid); 3047 return 0; 3048 } 3049 p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min]; 3050 3051 /* List the physical address of the request so that handler 3052 * could later on copy the message from it. 
	 */
	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

	/* Mark the event and schedule the workqueue */
	qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);

	return 0;
}

int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			u8 opcode, __le16 echo, union event_ring_data *data)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
					  &data->vf_pf_channel.msg_addr);
	default:
		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return -EINVAL;
	}
}

u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	u16 i;

	if (!p_iov)
		goto out;

	/* Return the first enabled VF at or after rel_vf_id */
	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
		if (qed_iov_is_valid_vfid(p_hwfn, i, true))
			return i;

out:
	return MAX_NUM_VFS;
}

static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
			       int vfid)
{
	struct qed_dmae_params params;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return -EINVAL;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
	params.src_vfid = vf_info->abs_vf_id;

	if (qed_dmae_host2host(p_hwfn, ptt,
			       vf_info->vf_mbx.pending_req,
			       vf_info->vf_mbx.req_phys,
			       sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);

		return -EIO;
	}

	return 0;
}

static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
					    u8 *mac, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
		return;
	}

	feature = 1 << MAC_ADDR_FORCED;
	memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;
	/* Forced MAC will disable MAC_ADDR */
	vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);

	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
					     u16 pvid, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can not set forced vlan, invalid vfid [%d]\n", vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *p_vf_info;

	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}

static bool qed_iov_is_vf_stopped(struct
qed_hwfn *p_hwfn, int vfid) 3178 { 3179 struct qed_vf_info *p_vf_info; 3180 3181 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 3182 if (!p_vf_info) 3183 return true; 3184 3185 return p_vf_info->state == VF_STOPPED; 3186 } 3187 3188 static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid) 3189 { 3190 struct qed_vf_info *vf_info; 3191 3192 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 3193 if (!vf_info) 3194 return false; 3195 3196 return vf_info->spoof_chk; 3197 } 3198 3199 static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val) 3200 { 3201 struct qed_vf_info *vf; 3202 int rc = -EINVAL; 3203 3204 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { 3205 DP_NOTICE(p_hwfn, 3206 "SR-IOV sanity check failed, can't set spoofchk\n"); 3207 goto out; 3208 } 3209 3210 vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 3211 if (!vf) 3212 goto out; 3213 3214 if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) { 3215 /* After VF VPORT start PF will configure spoof check */ 3216 vf->req_spoofchk_val = val; 3217 rc = 0; 3218 goto out; 3219 } 3220 3221 rc = __qed_iov_spoofchk_set(p_hwfn, vf, val); 3222 3223 out: 3224 return rc; 3225 } 3226 3227 static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn, 3228 u16 rel_vf_id) 3229 { 3230 struct qed_vf_info *p_vf; 3231 3232 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 3233 if (!p_vf || !p_vf->bulletin.p_virt) 3234 return NULL; 3235 3236 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) 3237 return NULL; 3238 3239 return p_vf->bulletin.p_virt->mac; 3240 } 3241 3242 static u16 3243 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id) 3244 { 3245 struct qed_vf_info *p_vf; 3246 3247 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 3248 if (!p_vf || !p_vf->bulletin.p_virt) 3249 return 0; 3250 3251 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))) 3252 return 0; 3253 3254 return p_vf->bulletin.p_virt->pvid; 3255 } 3256 3257 static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, 3258 struct qed_ptt *p_ptt, int vfid, int val) 3259 { 3260 struct qed_vf_info *vf; 3261 u8 abs_vp_id = 0; 3262 int rc; 3263 3264 vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 3265 if (!vf) 3266 return -EINVAL; 3267 3268 rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id); 3269 if (rc) 3270 return rc; 3271 3272 return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val); 3273 } 3274 3275 static int 3276 qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate) 3277 { 3278 struct qed_vf_info *vf; 3279 u8 vport_id; 3280 int i; 3281 3282 for_each_hwfn(cdev, i) { 3283 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 3284 3285 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { 3286 DP_NOTICE(p_hwfn, 3287 "SR-IOV sanity check failed, can't set min rate\n"); 3288 return -EINVAL; 3289 } 3290 } 3291 3292 vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true); 3293 vport_id = vf->vport_id; 3294 3295 return qed_configure_vport_wfq(cdev, vport_id, rate); 3296 } 3297 3298 static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid) 3299 { 3300 struct qed_wfq_data *vf_vp_wfq; 3301 struct qed_vf_info *vf_info; 3302 3303 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 3304 if (!vf_info) 3305 return 0; 3306 3307 vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id]; 3308 3309 if (vf_vp_wfq->configured) 3310 return vf_vp_wfq->min_speed; 3311 else 3312 return 0; 3313 } 3314 3315 /** 3316 * qed_schedule_iov - schedules IOV task for VF and PF 
3317 * @hwfn: hardware function pointer 3318 * @flag: IOV flag for VF/PF 3319 */ 3320 void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag) 3321 { 3322 smp_mb__before_atomic(); 3323 set_bit(flag, &hwfn->iov_task_flags); 3324 smp_mb__after_atomic(); 3325 DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); 3326 queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0); 3327 } 3328 3329 void qed_vf_start_iov_wq(struct qed_dev *cdev) 3330 { 3331 int i; 3332 3333 for_each_hwfn(cdev, i) 3334 queue_delayed_work(cdev->hwfns[i].iov_wq, 3335 &cdev->hwfns[i].iov_task, 0); 3336 } 3337 3338 int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) 3339 { 3340 int i, j; 3341 3342 for_each_hwfn(cdev, i) 3343 if (cdev->hwfns[i].iov_wq) 3344 flush_workqueue(cdev->hwfns[i].iov_wq); 3345 3346 /* Mark VFs for disablement */ 3347 qed_iov_set_vfs_to_disable(cdev, true); 3348 3349 if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled) 3350 pci_disable_sriov(cdev->pdev); 3351 3352 for_each_hwfn(cdev, i) { 3353 struct qed_hwfn *hwfn = &cdev->hwfns[i]; 3354 struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 3355 3356 /* Failure to acquire the ptt in 100g creates an odd error 3357 * where the first engine has already relased IOV. 3358 */ 3359 if (!ptt) { 3360 DP_ERR(hwfn, "Failed to acquire ptt\n"); 3361 return -EBUSY; 3362 } 3363 3364 /* Clean WFQ db and configure equal weight for all vports */ 3365 qed_clean_wfq_db(hwfn, ptt); 3366 3367 qed_for_each_vf(hwfn, j) { 3368 int k; 3369 3370 if (!qed_iov_is_valid_vfid(hwfn, j, true)) 3371 continue; 3372 3373 /* Wait until VF is disabled before releasing */ 3374 for (k = 0; k < 100; k++) { 3375 if (!qed_iov_is_vf_stopped(hwfn, j)) 3376 msleep(20); 3377 else 3378 break; 3379 } 3380 3381 if (k < 100) 3382 qed_iov_release_hw_for_vf(&cdev->hwfns[i], 3383 ptt, j); 3384 else 3385 DP_ERR(hwfn, 3386 "Timeout waiting for VF's FLR to end\n"); 3387 } 3388 3389 qed_ptt_release(hwfn, ptt); 3390 } 3391 3392 qed_iov_set_vfs_to_disable(cdev, false); 3393 3394 return 0; 3395 } 3396 3397 static int qed_sriov_enable(struct qed_dev *cdev, int num) 3398 { 3399 struct qed_sb_cnt_info sb_cnt_info; 3400 int i, j, rc; 3401 3402 if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { 3403 DP_NOTICE(cdev, "Can start at most %d VFs\n", 3404 RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1); 3405 return -EINVAL; 3406 } 3407 3408 /* Initialize HW for VF access */ 3409 for_each_hwfn(cdev, j) { 3410 struct qed_hwfn *hwfn = &cdev->hwfns[j]; 3411 struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 3412 int num_sbs = 0, limit = 16; 3413 3414 if (!ptt) { 3415 DP_ERR(hwfn, "Failed to acquire ptt\n"); 3416 rc = -EBUSY; 3417 goto err; 3418 } 3419 3420 if (IS_MF_DEFAULT(hwfn)) 3421 limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine; 3422 3423 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); 3424 qed_int_get_num_sbs(hwfn, &sb_cnt_info); 3425 num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit); 3426 3427 for (i = 0; i < num; i++) { 3428 if (!qed_iov_is_valid_vfid(hwfn, i, false)) 3429 continue; 3430 3431 rc = qed_iov_init_hw_for_vf(hwfn, 3432 ptt, i, num_sbs / num); 3433 if (rc) { 3434 DP_ERR(cdev, "Failed to enable VF[%d]\n", i); 3435 qed_ptt_release(hwfn, ptt); 3436 goto err; 3437 } 3438 } 3439 3440 qed_ptt_release(hwfn, ptt); 3441 } 3442 3443 /* Enable SRIOV PCIe functions */ 3444 rc = pci_enable_sriov(cdev->pdev, num); 3445 if (rc) { 3446 DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc); 3447 goto err; 3448 } 3449 3450 return num; 3451 3452 err: 3453 qed_sriov_disable(cdev, false); 3454 
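	/* Roll back any per-VF initialization done before the failure so
	 * the PF is left in a consistent non-SRIOV state.
	 */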
	return rc;
}

static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
{
	if (!IS_QED_SRIOV(cdev)) {
		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
		return -EOPNOTSUPP;
	}

	if (num_vfs_param)
		return qed_sriov_enable(cdev, num_vfs_param);
	else
		return qed_sriov_disable(cdev, true);
}

static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF MAC; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced MAC, and schedule the IOV task */
		ether_addr_copy(vf_info->forced_mac, mac);
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}

static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF vlan; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] vlan (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced vlan, and schedule the IOV task */
		vf_info->forced_vlan = vid;
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}

static int qed_get_vf_config(struct qed_dev *cdev,
			     int vf_id, struct ifla_vf_info *ivi)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_public_vf_info *vf_info;
	struct qed_mcp_link_state link;
	u32 tx_rate;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);

	qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);

	/* Fill information about VF */
	ivi->vf = vf_id;

	if (is_valid_ether_addr(vf_info->forced_mac))
		ether_addr_copy(ivi->mac, vf_info->forced_mac);
	else
		ether_addr_copy(ivi->mac, vf_info->mac);

	ivi->vlan = vf_info->forced_vlan;
	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
	ivi->linkstate = vf_info->link_state;
	tx_rate = vf_info->tx_rate;
	ivi->max_tx_rate = tx_rate ?
tx_rate : link.speed; 3569 ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id); 3570 3571 return 0; 3572 } 3573 3574 void qed_inform_vf_link_state(struct qed_hwfn *hwfn) 3575 { 3576 struct qed_mcp_link_capabilities caps; 3577 struct qed_mcp_link_params params; 3578 struct qed_mcp_link_state link; 3579 int i; 3580 3581 if (!hwfn->pf_iov_info) 3582 return; 3583 3584 /* Update bulletin of all future possible VFs with link configuration */ 3585 for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) { 3586 struct qed_public_vf_info *vf_info; 3587 3588 vf_info = qed_iov_get_public_vf_info(hwfn, i, false); 3589 if (!vf_info) 3590 continue; 3591 3592 memcpy(¶ms, qed_mcp_get_link_params(hwfn), sizeof(params)); 3593 memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link)); 3594 memcpy(&caps, qed_mcp_get_link_capabilities(hwfn), 3595 sizeof(caps)); 3596 3597 /* Modify link according to the VF's configured link state */ 3598 switch (vf_info->link_state) { 3599 case IFLA_VF_LINK_STATE_DISABLE: 3600 link.link_up = false; 3601 break; 3602 case IFLA_VF_LINK_STATE_ENABLE: 3603 link.link_up = true; 3604 /* Set speed according to maximum supported by HW. 3605 * that is 40G for regular devices and 100G for CMT 3606 * mode devices. 3607 */ 3608 link.speed = (hwfn->cdev->num_hwfns > 1) ? 3609 100000 : 40000; 3610 default: 3611 /* In auto mode pass PF link image to VF */ 3612 break; 3613 } 3614 3615 if (link.link_up && vf_info->tx_rate) { 3616 struct qed_ptt *ptt; 3617 int rate; 3618 3619 rate = min_t(int, vf_info->tx_rate, link.speed); 3620 3621 ptt = qed_ptt_acquire(hwfn); 3622 if (!ptt) { 3623 DP_NOTICE(hwfn, "Failed to acquire PTT\n"); 3624 return; 3625 } 3626 3627 if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) { 3628 vf_info->tx_rate = rate; 3629 link.speed = rate; 3630 } 3631 3632 qed_ptt_release(hwfn, ptt); 3633 } 3634 3635 qed_iov_set_link(hwfn, i, ¶ms, &link, &caps); 3636 } 3637 3638 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 3639 } 3640 3641 static int qed_set_vf_link_state(struct qed_dev *cdev, 3642 int vf_id, int link_state) 3643 { 3644 int i; 3645 3646 /* Sanitize request */ 3647 if (IS_VF(cdev)) 3648 return -EINVAL; 3649 3650 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) { 3651 DP_VERBOSE(cdev, QED_MSG_IOV, 3652 "VF index [%d] isn't active\n", vf_id); 3653 return -EINVAL; 3654 } 3655 3656 /* Handle configuration of link state */ 3657 for_each_hwfn(cdev, i) { 3658 struct qed_hwfn *hwfn = &cdev->hwfns[i]; 3659 struct qed_public_vf_info *vf; 3660 3661 vf = qed_iov_get_public_vf_info(hwfn, vf_id, true); 3662 if (!vf) 3663 continue; 3664 3665 if (vf->link_state == link_state) 3666 continue; 3667 3668 vf->link_state = link_state; 3669 qed_inform_vf_link_state(&cdev->hwfns[i]); 3670 } 3671 3672 return 0; 3673 } 3674 3675 static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val) 3676 { 3677 int i, rc = -EINVAL; 3678 3679 for_each_hwfn(cdev, i) { 3680 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 3681 3682 rc = qed_iov_spoofchk_set(p_hwfn, vfid, val); 3683 if (rc) 3684 break; 3685 } 3686 3687 return rc; 3688 } 3689 3690 static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate) 3691 { 3692 int i; 3693 3694 for_each_hwfn(cdev, i) { 3695 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 3696 struct qed_public_vf_info *vf; 3697 3698 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { 3699 DP_NOTICE(p_hwfn, 3700 "SR-IOV sanity check failed, can't set tx rate\n"); 3701 return -EINVAL; 3702 } 3703 3704 vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true); 
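		/* Cache the requested ceiling; qed_inform_vf_link_state()
		 * below applies it via the vport rate limiter and reflects
		 * the resulting speed in the VF's bulletin.
		 */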
3705 3706 vf->tx_rate = rate; 3707 3708 qed_inform_vf_link_state(p_hwfn); 3709 } 3710 3711 return 0; 3712 } 3713 3714 static int qed_set_vf_rate(struct qed_dev *cdev, 3715 int vfid, u32 min_rate, u32 max_rate) 3716 { 3717 int rc_min = 0, rc_max = 0; 3718 3719 if (max_rate) 3720 rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate); 3721 3722 if (min_rate) 3723 rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate); 3724 3725 if (rc_max | rc_min) 3726 return -EINVAL; 3727 3728 return 0; 3729 } 3730 3731 static void qed_handle_vf_msg(struct qed_hwfn *hwfn) 3732 { 3733 u64 events[QED_VF_ARRAY_LENGTH]; 3734 struct qed_ptt *ptt; 3735 int i; 3736 3737 ptt = qed_ptt_acquire(hwfn); 3738 if (!ptt) { 3739 DP_VERBOSE(hwfn, QED_MSG_IOV, 3740 "Can't acquire PTT; re-scheduling\n"); 3741 qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG); 3742 return; 3743 } 3744 3745 qed_iov_pf_get_and_clear_pending_events(hwfn, events); 3746 3747 DP_VERBOSE(hwfn, QED_MSG_IOV, 3748 "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n", 3749 events[0], events[1], events[2]); 3750 3751 qed_for_each_vf(hwfn, i) { 3752 /* Skip VFs with no pending messages */ 3753 if (!(events[i / 64] & (1ULL << (i % 64)))) 3754 continue; 3755 3756 DP_VERBOSE(hwfn, QED_MSG_IOV, 3757 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", 3758 i, hwfn->cdev->p_iov_info->first_vf_in_pf + i); 3759 3760 /* Copy VF's message to PF's request buffer for that VF */ 3761 if (qed_iov_copy_vf_msg(hwfn, ptt, i)) 3762 continue; 3763 3764 qed_iov_process_mbx_req(hwfn, ptt, i); 3765 } 3766 3767 qed_ptt_release(hwfn, ptt); 3768 } 3769 3770 static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn) 3771 { 3772 int i; 3773 3774 qed_for_each_vf(hwfn, i) { 3775 struct qed_public_vf_info *info; 3776 bool update = false; 3777 u8 *mac; 3778 3779 info = qed_iov_get_public_vf_info(hwfn, i, true); 3780 if (!info) 3781 continue; 3782 3783 /* Update data on bulletin board */ 3784 mac = qed_iov_bulletin_get_forced_mac(hwfn, i); 3785 if (is_valid_ether_addr(info->forced_mac) && 3786 (!mac || !ether_addr_equal(mac, info->forced_mac))) { 3787 DP_VERBOSE(hwfn, 3788 QED_MSG_IOV, 3789 "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n", 3790 i, 3791 hwfn->cdev->p_iov_info->first_vf_in_pf + i); 3792 3793 /* Update bulletin board with forced MAC */ 3794 qed_iov_bulletin_set_forced_mac(hwfn, 3795 info->forced_mac, i); 3796 update = true; 3797 } 3798 3799 if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^ 3800 info->forced_vlan) { 3801 DP_VERBOSE(hwfn, 3802 QED_MSG_IOV, 3803 "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n", 3804 info->forced_vlan, 3805 i, 3806 hwfn->cdev->p_iov_info->first_vf_in_pf + i); 3807 qed_iov_bulletin_set_forced_vlan(hwfn, 3808 info->forced_vlan, i); 3809 update = true; 3810 } 3811 3812 if (update) 3813 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 3814 } 3815 } 3816 3817 static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) 3818 { 3819 struct qed_ptt *ptt; 3820 int i; 3821 3822 ptt = qed_ptt_acquire(hwfn); 3823 if (!ptt) { 3824 DP_NOTICE(hwfn, "Failed allocating a ptt entry\n"); 3825 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 3826 return; 3827 } 3828 3829 qed_for_each_vf(hwfn, i) 3830 qed_iov_post_vf_bulletin(hwfn, i, ptt); 3831 3832 qed_ptt_release(hwfn, ptt); 3833 } 3834 3835 static void qed_iov_pf_task(struct work_struct *work) 3836 3837 { 3838 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, 3839 iov_task.work); 3840 int rc; 3841 3842 if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, 
&hwfn->iov_task_flags)) 3843 return; 3844 3845 if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) { 3846 struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 3847 3848 if (!ptt) { 3849 qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); 3850 return; 3851 } 3852 3853 rc = qed_iov_vf_flr_cleanup(hwfn, ptt); 3854 if (rc) 3855 qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); 3856 3857 qed_ptt_release(hwfn, ptt); 3858 } 3859 3860 if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags)) 3861 qed_handle_vf_msg(hwfn); 3862 3863 if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG, 3864 &hwfn->iov_task_flags)) 3865 qed_handle_pf_set_vf_unicast(hwfn); 3866 3867 if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG, 3868 &hwfn->iov_task_flags)) 3869 qed_handle_bulletin_post(hwfn); 3870 } 3871 3872 void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) 3873 { 3874 int i; 3875 3876 for_each_hwfn(cdev, i) { 3877 if (!cdev->hwfns[i].iov_wq) 3878 continue; 3879 3880 if (schedule_first) { 3881 qed_schedule_iov(&cdev->hwfns[i], 3882 QED_IOV_WQ_STOP_WQ_FLAG); 3883 cancel_delayed_work_sync(&cdev->hwfns[i].iov_task); 3884 } 3885 3886 flush_workqueue(cdev->hwfns[i].iov_wq); 3887 destroy_workqueue(cdev->hwfns[i].iov_wq); 3888 } 3889 } 3890 3891 int qed_iov_wq_start(struct qed_dev *cdev) 3892 { 3893 char name[NAME_SIZE]; 3894 int i; 3895 3896 for_each_hwfn(cdev, i) { 3897 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 3898 3899 /* PFs needs a dedicated workqueue only if they support IOV. 3900 * VFs always require one. 3901 */ 3902 if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn)) 3903 continue; 3904 3905 snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x", 3906 cdev->pdev->bus->number, 3907 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id); 3908 3909 p_hwfn->iov_wq = create_singlethread_workqueue(name); 3910 if (!p_hwfn->iov_wq) { 3911 DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n"); 3912 return -ENOMEM; 3913 } 3914 3915 if (IS_PF(cdev)) 3916 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task); 3917 else 3918 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task); 3919 } 3920 3921 return 0; 3922 } 3923 3924 const struct qed_iov_hv_ops qed_iov_ops_pass = { 3925 .configure = &qed_sriov_configure, 3926 .set_mac = &qed_sriov_pf_set_mac, 3927 .set_vlan = &qed_sriov_pf_set_vlan, 3928 .get_config = &qed_get_vf_config, 3929 .set_link_state = &qed_set_vf_link_state, 3930 .set_spoof = &qed_spoof_configure, 3931 .set_rate = &qed_set_vf_rate, 3932 }; 3933
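
/* Illustrative sketch only (not part of the driver): a PF Ethernet driver
 * layered on qed would typically reach the callbacks above through a pointer
 * to this ops table. Assuming a hypothetical 'iov_ops' pointer referencing
 * &qed_iov_ops_pass, the PCI sriov_configure path reduces to roughly:
 *
 *	static int example_sriov_configure(struct qed_dev *cdev, int num_vfs)
 *	{
 *		return iov_ops->configure(cdev, num_vfs);
 *	}
 *
 * The remaining callbacks (set_mac, set_vlan, get_config, set_link_state,
 * set_spoof, set_rate) map onto the corresponding ndo_set_vf_* and
 * ndo_get_vf_config operations in the same manner.
 */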