// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			       u8 opcode,
			       __le16 echo,
			       union event_ring_data *data, u8 fw_return_code);
static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);

static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
{
	u8 legacy = 0;

	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		legacy |= QED_QCID_LEGACY_VF_RX_PROD;

	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		legacy |= QED_QCID_LEGACY_VF_CID;

	return legacy;
}

/* IOV ramrods */
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u8 fp_minor;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_ETH_ROCE:
	case QED_PCI_ETH_IWARP:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		qed_sp_destroy_request(p_hwfn, p_ent);
		return -EINVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PF's version\n",
			   p_vf->abs_vf_id,
			   ETH_HSI_VER_MAJOR,
			   fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
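/* Counterpart of qed_sp_vf_start(): posts a VF_STOP ramrod so the firmware
 * tears down the VF context, presumably on the VF release path.
 */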
static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
			  u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_STOP,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id,
			   bool b_enabled_only, bool b_non_malicious)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
	    b_non_malicious)
		return false;

	return true;
}

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return NULL;
	}

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				  b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}

static struct qed_queue_cid *
qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue)
{
	int i;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx)
			return p_queue->cids[i].p_cid;
	}

	return NULL;
}
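/* Queue-state validation modes: _NA means the caller doesn't care about the
 * current state of the queue, _ENABLE requires that some CID of the requested
 * direction is already configured, and _DISABLE requires that none is.
 */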
enum qed_iov_validate_q_mode {
	QED_IOV_VALIDATE_Q_NA,
	QED_IOV_VALIDATE_Q_ENABLE,
	QED_IOV_VALIDATE_Q_DISABLE,
};

static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf,
					u16 qid,
					enum qed_iov_validate_q_mode mode,
					bool b_is_tx)
{
	int i;

	if (mode == QED_IOV_VALIDATE_Q_NA)
		return true;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		struct qed_vf_queue_cid *p_qcid;

		p_qcid = &p_vf->vf_queues[qid].cids[i];

		if (!p_qcid->p_cid)
			continue;

		if (p_qcid->b_is_tx != b_is_tx)
			continue;

		return mode == QED_IOV_VALIDATE_Q_ENABLE;
	}

	/* In case we haven't found any valid cid, the queue is disabled */
	return mode == QED_IOV_VALIDATE_Q_DISABLE;
}

static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 u16 rx_qid,
				 enum qed_iov_validate_q_mode mode)
{
	if (rx_qid >= p_vf->num_rxqs) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
		return false;
	}

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
}

static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 u16 tx_qid,
				 enum qed_iov_validate_q_mode mode)
{
	if (tx_qid >= p_vf->num_txqs) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
		return false;
	}

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
}

static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *p_vf, u16 sb_idx)
{
	int i;

	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)
			return true;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

	return false;
}

static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_rxqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						false))
			return true;

	return false;
}

static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_txqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						true))
			return true;

	return false;
}

static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
				    int vfid, struct qed_ptt *p_ptt)
{
	struct qed_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct qed_dmae_params params;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return -EINVAL;

	if (!p_vf->vf_bulletin)
		return -EINVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
				p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	memset(&params, 0, sizeof(params));
	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
	params.dst_vfid = p_vf->abs_vf_id;
	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				  &params);
}
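/* Read the PF's SR-IOV extended capability out of PCI configuration space
 * and cache it in cdev->p_iov_info. Any pre-existing PCI num_vfs value is
 * deliberately discarded; VFs are counted only once this driver enables them.
 */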
static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Number of VFs is already set to a non-zero value. Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(cdev,
		   QED_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres,
		   iov->cap,
		   iov->ctrl,
		   iov->total_vfs,
		   iov->initial_vfs,
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
	    iov->total_vfs > NUM_OF_VFS(cdev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes the max number of vfs.
		 */
		DP_NOTICE(cdev,
			  "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return 0;
}

static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct qed_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "qed_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

		vf->state = VF_STOPPED;
		vf->b_init = false;

		vf->bulletin.phys = idx *
				    sizeof(struct qed_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct qed_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
		vf->vport_id = idx + 1;

		vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
	}
}
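/* Allocate the three DMA-coherent arrays backing the PF<->VF channel: one
 * request mailbox and one reply mailbox per VF, plus one bulletin board per
 * VF. qed_iov_setup_vfdb() later carves per-VF slices out of these arrays.
 */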
static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64) p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64) p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

	return 0;
}

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
}

int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return 0;
	}

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
	if (!p_sriov)
		return -ENOMEM;

	p_hwfn->pf_iov_info = p_sriov;

	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
				  qed_sriov_eqe_event);

	return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	qed_iov_setup_vfdb(p_hwfn);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);

	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);
	}
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;
}
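/* Probe-time discovery of SR-IOV support: locate the SR-IOV extended
 * capability, read its contents and derive first_vf_in_pf. Bails out early
 * in a kdump kernel and on VF devices, where PF-side IOV isn't used.
 */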
int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int pos;
	int rc;

	if (is_kdump_kernel())
		return 0;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
		return 0;
	}

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info)
		return -ENOMEM;

	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;
		return 0;
	}

	/* First VF index based on offset is tricky:
	 *  - If ARI is supported [likely], offset - (16 - pf_id) would
	 *    provide the number for eng0. 2nd engine VFs would begin
	 *    after the first engine's VFs.
	 *  - If !ARI, VFs would start on the next device,
	 *    so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is achieved only in the
	 * latter case to differentiate between the two.
	 */

	if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 16;

		cdev->p_iov_info->first_vf_in_pf = first;

		if (QED_PATH_ID(p_hwfn))
			cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
	} else {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 256;

		cdev->p_iov_info->first_vf_in_pf = first;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);

	return 0;
}

static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
				     int vfid, bool b_fail_malicious)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
		return false;

	return true;
}

static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
}

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
				      u16 rel_vf_id, u8 to_disable)
{
	struct qed_vf_info *vf;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
	u16 i;

	if (!IS_QED_SRIOV(cdev))
		return;

	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
		qed_iov_set_vf_to_disable(cdev, i, to_disable);
}

static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 abs_vfid)
{
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
	       1 << (abs_vfid & 0x1f));
}
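/* Several of the IGU helpers below use the "pretend" mechanism: the PF
 * temporarily assumes the VF's concrete FID so register accesses hit the
 * VF's register view, then pretends back to itself ("unpretend").
 */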
static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, true);
}

static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}

static int
qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs)
{
	u8 current_max = 0;
	int i;

	/* For AH onward, configuration is per-PF. Find maximum of all
	 * the currently enabled child VFs, and set the number to be that.
	 */
	if (!QED_IS_BB(p_hwfn->cdev)) {
		qed_for_each_vf(p_hwfn, i) {
			struct qed_vf_info *p_vf;

			p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true);
			if (!p_vf)
				continue;

			current_max = max_t(u8, current_max, p_vf->num_sbs);
		}
	}

	if (num_sbs > current_max)
		return qed_mcp_config_vf_msix(p_hwfn, p_ptt,
					      abs_vf_id, num_sbs);

	return 0;
}
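/* Bring a VF out of reset: clear any pending PGLUE error, reset its IGU
 * state, make sure enough MSI-X vectors are configured via the MFW, and run
 * the per-VF init phase. On success the VF moves to the VF_FREE state.
 */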
static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	int rc;

	/* It's possible VF was previously considered malicious -
	 * clear the indication even if we're only going to disable VF.
	 */
	vf->b_malicious = false;

	if (vf->to_disable)
		return 0;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt,
					   vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	vf->state = VF_FREE;

	return rc;
}

/**
 * qed_iov_config_perm_table() - Configure the permission zone table.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window for writing the registers.
 * @vf: VF info data.
 * @enable: The actual permission for this VF.
 *
 * In E4, queue zone permission table size is 320x9. There
 * are 320 VF queues for single engine device (256 for dual
 * engine device), and each entry has the following format:
 * {Valid, VF[7:0]}
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				&qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
		qed_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}

static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}

static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, u16 num_rx_queues)
{
	struct qed_igu_block *p_block;
	struct cau_sb_entry sb_entry;
	int qid = 0;
	u32 val = 0;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	for (qid = 0; qid < num_rx_queues; qid++) {
		p_block = qed_get_igu_free_sb(p_hwfn, false);
		vf->igu_sbs[qid] = p_block->igu_sb_id;
		p_block->status &= ~QED_IGU_STATUS_FREE;
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

		qed_wr(p_hwfn, p_ptt,
		       IGU_REG_MAPPING_MEMORY +
		       sizeof(u32) * p_block->igu_sb_id, val);

		/* Configure the IGU SB in CAU that was marked valid */
		qed_init_cau_sb_entry(p_hwfn, &sb_entry,
				      p_hwfn->rel_pf_id, vf->abs_vf_id, 1);

		qed_dmae_host2grc(p_hwfn, p_ptt,
				  (u64)(uintptr_t)&sb_entry,
				  CAU_REG_SB_VAR_MEMORY +
				  p_block->igu_sb_id * sizeof(u64), 2, NULL);
	}

	vf->num_sbs = (u8) num_rx_queues;

	return vf->num_sbs;
}

static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		qed_wr(p_hwfn, p_ptt, addr, val);

		p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
		p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
	}

	vf->num_sbs = 0;
}
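/* Mirror the PF's link parameters, state and capabilities into the VF's
 * bulletin board; the VF picks them up the next time the bulletin is posted.
 */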
static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}
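/* Initialize HW for a VF being enabled: validate the requested queue zones,
 * allocate IGU status blocks, seed the bulletin with the current link
 * configuration and finally enable the VF's internal access.
 */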
static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_iov_vf_init_params *p_params)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params link_params;
	struct qed_mcp_link_state link_state;
	u8 num_of_vf_available_chains = 0;
	struct qed_vf_info *vf = NULL;
	u16 qid, num_irqs;
	int rc = 0;
	u32 cids;
	u8 i;

	vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
			  p_params->rel_vf_id);
		return -EINVAL;
	}

	/* Perform sanity checking on the requested queue_id */
	for (i = 0; i < p_params->num_queues; i++) {
		u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
		u16 max_vf_qzone = min_vf_qzone +
				   FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;

		qid = p_params->req_rx_queue[i];
		if (qid < min_vf_qzone || qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
				  qid,
				  p_params->rel_vf_id,
				  min_vf_qzone, max_vf_qzone);
			return -EINVAL;
		}

		qid = p_params->req_tx_queue[i];
		if (qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
				  qid, p_params->rel_vf_id, max_vf_qzone);
			return -EINVAL;
		}

		/* If client *really* wants, Tx qid can be shared with PF */
		if (qid < min_vf_qzone)
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
				   p_params->rel_vf_id, qid, i);
	}

	/* Limit number of queues according to number of CIDs */
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));

	num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
							      p_ptt,
							      vf, num_irqs);
	if (!num_of_vf_available_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return -ENOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		struct qed_vf_queue *p_queue = &vf->vf_queues[i];

		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
		p_queue->fw_tx_qid = p_params->req_tx_queue[i];

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i],
			   p_queue->fw_rx_qid, p_queue->fw_tx_qid);
	}

	/* Update the link configuration in bulletin */
	memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
	       sizeof(link_params));
	memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
	memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
	       sizeof(link_caps));
	qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
			 &link_params, &link_state, &link_caps);

	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (!rc) {
		vf->b_init = true;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs++;
	}

	return rc;
}
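/* Counterpart of qed_iov_init_hw_for_vf(): clear the bulletin and the VF's
 * acquisition state, disable its interrupts and permission-table entries,
 * and return its IGU status blocks to the free pool.
 */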
static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->bulletin.p_virt)
		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	memset(&vf->acquire, 0, sizeof(vf->acquire));

	/* Disabling interrupts and resetting the permission table were done
	 * during vf-close; however, we could get here without going through
	 * vf-close, so do it again here.
	 */
	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_rxqs = 0;
	vf->num_txqs = 0;
	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs--;
	}

	return 0;
}

static bool qed_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}
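/* Copy a prepared reply into the VF's mailbox. Everything after the first
 * quadword is copied first; the first quadword, which carries the status,
 * is copied last, after the channel is marked ready, so the VF only ever
 * observes a complete response.
 */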
static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf,
				  u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct qed_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

	eng_vf_id = p_vf->abs_vf_id;

	memset(&params, 0, sizeof(params));
	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
	params.dst_vfid = eng_vf_id;

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   sizeof(u64),
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			   &params);

	/* Once PF copies the rc to the VF, the latter can continue
	 * and send an additional message. So we have to make sure the
	 * channel would be re-set to ready prior to that.
	 */
	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			   mbx->req_virt->first_tlv.reply_address,
			   sizeof(u64) / 4, &params);
}

static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
				enum qed_iov_vport_update_flag flag)
{
	switch (flag) {
	case QED_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case QED_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case QED_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case QED_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case QED_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case QED_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}

static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf,
					    struct qed_iov_vf_mbx *p_mbx,
					    u8 status,
					    u16 tlvs_mask, u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & BIT(i)))
			continue;

		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
				   qed_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & BIT(i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

		total_len += size;
	}

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	return total_len;
}

static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *vf_info,
				 u16 type, u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}
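/* qed_public_vf_info is the portion of per-VF state shared with code outside
 * this file (forced MAC, accept modes and the like), while qed_vf_info as a
 * whole remains private to the PF-side IOV logic.
 */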
static struct
qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return NULL;

	return &vf->p_vf_info;
}

static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
	struct qed_public_vf_info *vf_info;

	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);

	if (!vf_info)
		return;

	/* Clear the VF mac */
	eth_zero_addr(vf_info->mac);

	vf_info->rx_accept_mode = 0;
	vf_info->tx_accept_mode = 0;
}

static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf)
{
	u32 i, j;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
		struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];

		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
			if (!p_queue->cids[j].p_cid)
				continue;

			qed_eth_queue_cid_release(p_hwfn,
						  p_queue->cids[j].p_cid);
			p_queue->cids[j].p_cid = NULL;
		}
	}

	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}

/* Returns either 0, or log(size) */
static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);

	if (val)
		return val + 11;
	return 0;
}

static void
qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *p_vf,
				 struct vf_pf_resc_request *p_req,
				 struct pf_vf_resc *p_resp)
{
	u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
	u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) -
		     qed_db_addr_vf(0, DQ_DEMS_LEGACY);
	u32 bar_size;

	p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons);

	/* If VF didn't bother asking for QIDs then don't bother limiting
	 * number of CIDs. The VF doesn't care about the number, and this
	 * has the likely result of causing an additional acquisition.
	 */
	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		return;

	/* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
	 * that would make sure doorbells for all CIDs fall within the bar.
	 * If it doesn't, make sure regview window is sufficient.
	 */
	if (p_vf->acquire.vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
		bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
		if (bar_size)
			bar_size = 1 << bar_size;

		if (p_hwfn->cdev->num_hwfns > 1)
			bar_size /= 2;
	} else {
		bar_size = PXP_VF_BAR0_DQ_LENGTH;
	}

	if (bar_size / db_size < 256)
		p_resp->num_cids = min_t(u8, p_resp->num_cids,
					 (u8)(bar_size / db_size));
}
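/* Resource negotiation for ACQUIRE: the PF answers with what the VF was
 * actually given, clamped by its own limits. If any resource falls short of
 * the request, the VF is told PFVF_STATUS_NO_RESOURCE and is expected to
 * retry with a smaller request - except legacy Windows VFs, which can't
 * handle that failure and get PFVF_STATUS_SUCCESS regardless.
 */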
static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *p_vf,
				      struct vf_pf_resc_request *p_req,
				      struct pf_vf_resc *p_resp)
{
	u8 i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				(u16 *)&p_resp->hw_qid[i]);
		p_resp->cid[i] = i;
	}

	/* Filter related information */
	p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
					p_req->num_mac_filters);
	p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
					 p_req->num_vlan_filters);

	qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = QED_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters ||
	    p_resp->num_cids < p_req->num_cids) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs,
			   p_resp->num_rxqs,
			   p_req->num_txqs,
			   p_resp->num_txqs,
			   p_req->num_sbs,
			   p_resp->num_sbs,
			   p_req->num_mac_filters,
			   p_resp->num_mac_filters,
			   p_req->num_vlan_filters,
			   p_resp->num_vlan_filters,
			   p_req->num_mc_filters,
			   p_resp->num_mc_filters,
			   p_req->num_cids, p_resp->num_cids);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}

static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
					 struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  offsetof(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  offsetof(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  offsetof(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}
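/* Handle the VF's ACQUIRE message - the first step of the PF<->VF handshake.
 * The PF validates HSI compatibility, stores the request, fills in device
 * information and negotiated resources, starts the VF in firmware and posts
 * an initial bulletin before replying.
 */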
static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	int rc;

	memset(resp, 0, sizeof(*resp));

	/* Write the PF version so that VF would know which version
	 * is supported - might be later overridden. This guarantees that
	 * VF could recognize legacy PF based on lack of versions in reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);
		goto out;
	}

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
				   vf->abs_vf_id);
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
		} else {
			DP_INFO(p_hwfn,
				"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
				vf->abs_vf_id,
				req->vfdev_info.eth_fp_hsi_major,
				req->vfdev_info.eth_fp_hsi_minor,
				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

			goto out;
		}
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->cdev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support 100g\n",
			vf->abs_vf_id);
		goto out;
	}

	/* Store the acquire message */
	memcpy(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
	pfdev_info->db_size = 0;
	pfdev_info->indices_per_sb = PIS_PER_SB_E4;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->cdev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	/* Share our ability to use multiple queue-ids only with VFs
	 * that request it.
	 */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;

	/* Share the sizes of the bars with VF */
	resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);

	qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);

	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter since legacy VFs don't
	 * read this field.
	 */
	pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
					 req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

	pfdev_info->dev_type = p_hwfn->cdev->type;
	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						  &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)
		goto out;

	/* Start the VF in FW */
	rc = qed_sp_vf_start(p_hwfn, vf);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response */
	resp->bulletin_size = vf->bulletin.size;
	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
		   vf->abs_vf_id,
		   resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size,
		   resp->pfdev_info.indices_per_sb,
		   resp->pfdev_info.capabilities,
		   resc->num_rxqs,
		   resc->num_txqs,
		   resc->num_sbs,
		   resc->num_mac_filters,
		   resc->num_vlan_filters);
	vf->state = VF_ACQUIRED;

	/* Prepare Response */
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
}

static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
				  struct qed_vf_info *p_vf, bool val)
{
	struct qed_sp_vport_update_params params;
	int rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return 0;
	}

	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
	if (!rc) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}

static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf)
{
	struct qed_filter_ucast filter;
	int rc = 0;
	int i;

	memset(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = QED_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = QED_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}
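/* Re-apply the VF's shadow (VF-requested) unicast configuration after a
 * forced feature was removed, e.g. restore the shadowed VLAN filters once a
 * forced VLAN is no longer in effect.
 */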
static int
qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
				   struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;

	if ((events & BIT(VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}

static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
					  struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;
	struct qed_filter_ucast filter;

	if (!p_vf->vport_instance)
		return -EINVAL;

	if ((events & BIT(MAC_ADDR_FORCED)) ||
	    p_vf->p_vf_info.is_trusted_configured) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_MAC;
		filter.opcode = QED_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);

		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure MAC for VF\n");
			return rc;
		}
		if (p_vf->p_vf_info.is_trusted_configured)
			p_vf->configured_features |=
				BIT(VFPF_BULLETIN_MAC_ADDR);
		else
			p_vf->configured_features |=
				BIT(MAC_ADDR_FORCED);
	}

	if (events & BIT(VLAN_ADDR_FORCED)) {
		struct qed_sp_vport_update_params vport_update;
		u8 removal;
		int i;

		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
					      QED_FILTER_FLUSH;

		/* Send the ramrod */
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VLAN for VF\n");
			return rc;
		}

		/* Update the default-vlan & silent vlan stripping */
		memset(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ? 1
				      : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = qed_sp_vport_update(p_hwfn,
					 &vport_update,
					 QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VF vport for vlan\n");
			return rc;
		}

		/* Update all the Rx queues */
		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
			struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
			struct qed_queue_cid *p_cid = NULL;

			/* There can be at most one Rx queue per qzone.
			 * Find it.
			 */
			p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
			if (!p_cid)
				continue;

			rc = qed_sp_eth_rx_queues_update(p_hwfn,
							 (void **)&p_cid,
							 1, 0, 1,
							 QED_SPQ_MODE_EBLOCK,
							 NULL);
			if (rc) {
				DP_NOTICE(p_hwfn,
					  "Failed to send Rx update for queue[0x%04x]\n",
					  p_cid->rel.queue_id);
				return rc;
			}
		}

		if (filter.vlan)
			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		else
			p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
	}

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	if (events)
		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

	return rc;
}
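/* Handle the VF's VPORT_START message: program its status blocks in the CAU,
 * honor any hypervisor-forced settings from the bulletin, start the vport in
 * firmware and re-apply the requested spoof-checking configuration.
 */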
static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	struct qed_sp_vport_start_params params = { 0 };
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_vport_start_tlv *start;
	u8 status = PFVF_STATUS_SUCCESS;
	struct qed_vf_info *vf_info;
	u64 *p_bitmap;
	int sb_id;
	int rc;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Failed to get VF info, invalid vfid [%d]\n",
			  vf->relative_vf_id);
		return;
	}

	vf->state = VF_ENABLED;
	start = &mbx->req_virt->start_vport;

	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

	/* Initialize Status block in CAU */
	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
		if (!start->sb_addr[sb_id]) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] did not fill the address of SB %d\n",
				   vf->relative_vf_id, sb_id);
			break;
		}

		qed_int_cau_conf_sb(p_hwfn, p_ptt,
				    start->sb_addr[sb_id],
				    vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
	}

	vf->mtu = start->mtu;
	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

	/* Take into consideration configuration forced by hypervisor;
	 * If none is configured, use the supplied VF values [for old
	 * vfs that would still be fine, since they passed '0' as padding].
	 */
	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
	if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
		u8 vf_req = start->only_untagged;

		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
	}

	params.tpa_mode = start->tpa_mode;
	params.remove_inner_vlan = start->inner_vlan_removal;
	params.tx_switching = true;

	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
	params.drop_ttl0 = false;
	params.concrete_fid = vf->concrete_fid;
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
	params.mtu = vf->mtu;

	/* Non trusted VFs should enable control frame filtering */
	params.check_mac = !vf->p_vf_info.is_trusted_configured;

	rc = qed_sp_eth_vport_start(p_hwfn, &params);
	if (rc) {
		DP_ERR(p_hwfn,
		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	} else {
		vf->vport_instance++;

		/* Force configuration if needed on the newly opened vport */
		qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);

		__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
	}
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
			     sizeof(struct pfvf_def_resp_tlv), status);
}
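/* Handle the VF's VPORT_TEARDOWN message. A VF that still has active Rx/Tx
 * queues at this point is flagged as malicious and answered with
 * PFVF_STATUS_MALICIOUS instead of having its vport stopped.
 */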
1933 */ 1934 p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap; 1935 if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) { 1936 u8 vf_req = start->only_untagged; 1937 1938 vf_info->bulletin.p_virt->default_only_untagged = vf_req; 1939 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT; 1940 } 1941 1942 params.tpa_mode = start->tpa_mode; 1943 params.remove_inner_vlan = start->inner_vlan_removal; 1944 params.tx_switching = true; 1945 1946 params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged; 1947 params.drop_ttl0 = false; 1948 params.concrete_fid = vf->concrete_fid; 1949 params.opaque_fid = vf->opaque_fid; 1950 params.vport_id = vf->vport_id; 1951 params.max_buffers_per_cqe = start->max_buffers_per_cqe; 1952 params.mtu = vf->mtu; 1953 1954 /* Non-trusted VFs should enable control frame filtering */ 1955 params.check_mac = !vf->p_vf_info.is_trusted_configured; 1956 1957 rc = qed_sp_eth_vport_start(p_hwfn, &params); 1958 if (rc) { 1959 DP_ERR(p_hwfn, 1960 "qed_iov_vf_mbx_start_vport returned error %d\n", rc); 1961 status = PFVF_STATUS_FAILURE; 1962 } else { 1963 vf->vport_instance++; 1964 1965 /* Force configuration if needed on the newly opened vport */ 1966 qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap); 1967 1968 __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val); 1969 } 1970 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START, 1971 sizeof(struct pfvf_def_resp_tlv), status); 1972 } 1973 1974 static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, 1975 struct qed_ptt *p_ptt, 1976 struct qed_vf_info *vf) 1977 { 1978 u8 status = PFVF_STATUS_SUCCESS; 1979 int rc; 1980 1981 vf->vport_instance--; 1982 vf->spoof_chk = false; 1983 1984 if ((qed_iov_validate_active_rxq(p_hwfn, vf)) || 1985 (qed_iov_validate_active_txq(p_hwfn, vf))) { 1986 vf->b_malicious = true; 1987 DP_NOTICE(p_hwfn, 1988 "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n", 1989 vf->abs_vf_id); 1990 status = PFVF_STATUS_MALICIOUS; 1991 goto out; 1992 } 1993 1994 rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); 1995 if (rc) { 1996 DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n", 1997 rc); 1998 status = PFVF_STATUS_FAILURE; 1999 } 2000 2001 /* Forget the configuration on the vport */ 2002 vf->configured_features = 0; 2003 memset(&vf->shadow_config, 0, sizeof(vf->shadow_config)); 2004 2005 out: 2006 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN, 2007 sizeof(struct pfvf_def_resp_tlv), status); 2008 } 2009 2010 static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, 2011 struct qed_ptt *p_ptt, 2012 struct qed_vf_info *vf, 2013 u8 status, bool b_legacy) 2014 { 2015 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 2016 struct pfvf_start_queue_resp_tlv *p_tlv; 2017 struct vfpf_start_rxq_tlv *req; 2018 u16 length; 2019 2020 mbx->offset = (u8 *)mbx->reply_virt; 2021 2022 /* Taking a bigger struct instead of adding a TLV to list was a 2023 * mistake, but one which we're now stuck with, as some older 2024 * clients assume the size of the previous response.
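 * A legacy client therefore gets a pfvf_def_resp_tlv-sized reply,
 * while a newer one also receives the Rx producer offset in the
 * larger TLV below.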
2025 */ 2026 if (!b_legacy) 2027 length = sizeof(*p_tlv); 2028 else 2029 length = sizeof(struct pfvf_def_resp_tlv); 2030 2031 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ, 2032 length); 2033 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 2034 sizeof(struct channel_list_end_tlv)); 2035 2036 /* Update the TLV with the response */ 2037 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { 2038 req = &mbx->req_virt->start_rxq; 2039 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B + 2040 offsetof(struct mstorm_vf_zone, 2041 non_trigger.eth_rx_queue_producers) + 2042 sizeof(struct eth_rx_prod_data) * req->rx_qid; 2043 } 2044 2045 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); 2046 } 2047 2048 static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn, 2049 struct qed_vf_info *p_vf, bool b_is_tx) 2050 { 2051 struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx; 2052 struct vfpf_qid_tlv *p_qid_tlv; 2053 2054 /* Search for the qid if the VF published that it's going to provide it */ 2055 if (!(p_vf->acquire.vfdev_info.capabilities & 2056 VFPF_ACQUIRE_CAP_QUEUE_QIDS)) { 2057 if (b_is_tx) 2058 return QED_IOV_LEGACY_QID_TX; 2059 else 2060 return QED_IOV_LEGACY_QID_RX; 2061 } 2062 2063 p_qid_tlv = (struct vfpf_qid_tlv *) 2064 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 2065 CHANNEL_TLV_QID); 2066 if (!p_qid_tlv) { 2067 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2068 "VF[%2x]: Failed to provide qid\n", 2069 p_vf->relative_vf_id); 2070 2071 return QED_IOV_QID_INVALID; 2072 } 2073 2074 if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) { 2075 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2076 "VF[%02x]: Provided qid out-of-bounds %02x\n", 2077 p_vf->relative_vf_id, p_qid_tlv->qid); 2078 return QED_IOV_QID_INVALID; 2079 } 2080 2081 return p_qid_tlv->qid; 2082 } 2083 2084 static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, 2085 struct qed_ptt *p_ptt, 2086 struct qed_vf_info *vf) 2087 { 2088 struct qed_queue_start_common_params params; 2089 struct qed_queue_cid_vf_params vf_params; 2090 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 2091 u8 status = PFVF_STATUS_NO_RESOURCE; 2092 u8 qid_usage_idx, vf_legacy = 0; 2093 struct vfpf_start_rxq_tlv *req; 2094 struct qed_vf_queue *p_queue; 2095 struct qed_queue_cid *p_cid; 2096 struct qed_sb_info sb_dummy; 2097 int rc; 2098 2099 req = &mbx->req_virt->start_rxq; 2100 2101 if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid, 2102 QED_IOV_VALIDATE_Q_DISABLE) || 2103 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) 2104 goto out; 2105 2106 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); 2107 if (qid_usage_idx == QED_IOV_QID_INVALID) 2108 goto out; 2109 2110 p_queue = &vf->vf_queues[req->rx_qid]; 2111 if (p_queue->cids[qid_usage_idx].p_cid) 2112 goto out; 2113 2114 vf_legacy = qed_vf_calculate_legacy(vf); 2115 2116 /* Acquire a new queue-cid */ 2117 memset(&params, 0, sizeof(params)); 2118 params.queue_id = p_queue->fw_rx_qid; 2119 params.vport_id = vf->vport_id; 2120 params.stats_id = vf->abs_vf_id + 0x10; 2121 /* Since IGU index is passed via sb_info, construct a dummy one */ 2122 memset(&sb_dummy, 0, sizeof(sb_dummy)); 2123 sb_dummy.igu_sb_id = req->hw_sb; 2124 params.p_sb = &sb_dummy; 2125 params.sb_idx = req->sb_index; 2126 2127 memset(&vf_params, 0, sizeof(vf_params)); 2128 vf_params.vfid = vf->relative_vf_id; 2129 vf_params.vf_qid = (u8)req->rx_qid; 2130 vf_params.vf_legacy = vf_legacy; 2131 vf_params.qid_usage_idx = qid_usage_idx; 2132 p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid, 2133 &params, true, &vf_params); 2134 if (!p_cid) 2135 goto out; 2136 2137 /* Legacy VFs
have their Producers in a different location, which they 2138 * calculate on their own and clean the producer prior to this. 2139 */ 2140 if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD)) 2141 REG_WR(p_hwfn, 2142 GTT_BAR0_MAP_REG_MSDM_RAM + 2143 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid), 2144 0); 2145 2146 rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid, 2147 req->bd_max_bytes, 2148 req->rxq_addr, 2149 req->cqe_pbl_addr, req->cqe_pbl_size); 2150 if (rc) { 2151 status = PFVF_STATUS_FAILURE; 2152 qed_eth_queue_cid_release(p_hwfn, p_cid); 2153 } else { 2154 p_queue->cids[qid_usage_idx].p_cid = p_cid; 2155 p_queue->cids[qid_usage_idx].b_is_tx = false; 2156 status = PFVF_STATUS_SUCCESS; 2157 vf->num_active_rxqs++; 2158 } 2159 2160 out: 2161 qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, 2162 !!(vf_legacy & 2163 QED_QCID_LEGACY_VF_RX_PROD)); 2164 } 2165 2166 static void 2167 qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp, 2168 struct qed_tunnel_info *p_tun, 2169 u16 tunn_feature_mask) 2170 { 2171 p_resp->tunn_feature_mask = tunn_feature_mask; 2172 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled; 2173 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled; 2174 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled; 2175 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled; 2176 p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled; 2177 p_resp->vxlan_clss = p_tun->vxlan.tun_cls; 2178 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls; 2179 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls; 2180 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls; 2181 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls; 2182 p_resp->geneve_udp_port = p_tun->geneve_port.port; 2183 p_resp->vxlan_udp_port = p_tun->vxlan_port.port; 2184 } 2185 2186 static void 2187 __qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, 2188 struct qed_tunn_update_type *p_tun, 2189 enum qed_tunn_mode mask, u8 tun_cls) 2190 { 2191 if (p_req->tun_mode_update_mask & BIT(mask)) { 2192 p_tun->b_update_mode = true; 2193 2194 if (p_req->tunn_mode & BIT(mask)) 2195 p_tun->b_mode_enabled = true; 2196 } 2197 2198 p_tun->tun_cls = tun_cls; 2199 } 2200 2201 static void 2202 qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, 2203 struct qed_tunn_update_type *p_tun, 2204 struct qed_tunn_update_udp_port *p_port, 2205 enum qed_tunn_mode mask, 2206 u8 tun_cls, u8 update_port, u16 port) 2207 { 2208 if (update_port) { 2209 p_port->b_update_port = true; 2210 p_port->port = port; 2211 } 2212 2213 __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls); 2214 } 2215 2216 static bool 2217 qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req) 2218 { 2219 bool b_update_requested = false; 2220 2221 if (p_req->tun_mode_update_mask || p_req->update_tun_cls || 2222 p_req->update_geneve_port || p_req->update_vxlan_port) 2223 b_update_requested = true; 2224 2225 return b_update_requested; 2226 } 2227 2228 static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc) 2229 { 2230 if (tun->b_update_mode && !tun->b_mode_enabled) { 2231 tun->b_update_mode = false; 2232 *rc = -EINVAL; 2233 } 2234 } 2235 2236 static int 2237 qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn, 2238 u16 *tun_features, bool *update, 2239 struct qed_tunnel_info *tun_src) 2240 { 2241 struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth; 2242 struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel; 2243 u16 bultn_vxlan_port, bultn_geneve_port; 2244 void *cookie = p_hwfn->cdev->ops_cookie;
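/* Sanity-filter the VF request against PF state: tunnel modes may only
 * be updated to 'enabled', classification must remain MAC/VLAN, and any
 * accepted UDP-port change is propagated to every VF bulletin below.
 */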
2245 int i, rc = 0; 2246 2247 *tun_features = p_hwfn->cdev->tunn_feature_mask; 2248 bultn_vxlan_port = tun->vxlan_port.port; 2249 bultn_geneve_port = tun->geneve_port.port; 2250 qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc); 2251 qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc); 2252 qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc); 2253 qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc); 2254 qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc); 2255 2256 if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) && 2257 (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2258 tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2259 tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2260 tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2261 tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) { 2262 tun_src->b_update_rx_cls = false; 2263 tun_src->b_update_tx_cls = false; 2264 rc = -EINVAL; 2265 } 2266 2267 if (tun_src->vxlan_port.b_update_port) { 2268 if (tun_src->vxlan_port.port == tun->vxlan_port.port) { 2269 tun_src->vxlan_port.b_update_port = false; 2270 } else { 2271 *update = true; 2272 bultn_vxlan_port = tun_src->vxlan_port.port; 2273 } 2274 } 2275 2276 if (tun_src->geneve_port.b_update_port) { 2277 if (tun_src->geneve_port.port == tun->geneve_port.port) { 2278 tun_src->geneve_port.b_update_port = false; 2279 } else { 2280 *update = true; 2281 bultn_geneve_port = tun_src->geneve_port.port; 2282 } 2283 } 2284 2285 qed_for_each_vf(p_hwfn, i) { 2286 qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port, 2287 bultn_geneve_port); 2288 } 2289 2290 qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 2291 ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port); 2292 2293 return rc; 2294 } 2295 2296 static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn, 2297 struct qed_ptt *p_ptt, 2298 struct qed_vf_info *p_vf) 2299 { 2300 struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; 2301 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; 2302 struct pfvf_update_tunn_param_tlv *p_resp; 2303 struct vfpf_update_tunn_param_tlv *p_req; 2304 u8 status = PFVF_STATUS_SUCCESS; 2305 bool b_update_required = false; 2306 struct qed_tunnel_info tunn; 2307 u16 tunn_feature_mask = 0; 2308 int i, rc = 0; 2309 2310 mbx->offset = (u8 *)mbx->reply_virt; 2311 2312 memset(&tunn, 0, sizeof(tunn)); 2313 p_req = &mbx->req_virt->tunn_param_update; 2314 2315 if (!qed_iov_pf_validate_tunn_param(p_req)) { 2316 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2317 "No tunnel update requested by VF\n"); 2318 status = PFVF_STATUS_FAILURE; 2319 goto send_resp; 2320 } 2321 2322 tunn.b_update_rx_cls = p_req->update_tun_cls; 2323 tunn.b_update_tx_cls = p_req->update_tun_cls; 2324 2325 qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port, 2326 QED_MODE_VXLAN_TUNN, p_req->vxlan_clss, 2327 p_req->update_vxlan_port, 2328 p_req->vxlan_port); 2329 qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port, 2330 QED_MODE_L2GENEVE_TUNN, 2331 p_req->l2geneve_clss, 2332 p_req->update_geneve_port, 2333 p_req->geneve_port); 2334 __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve, 2335 QED_MODE_IPGENEVE_TUNN, 2336 p_req->ipgeneve_clss); 2337 __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre, 2338 QED_MODE_L2GRE_TUNN, p_req->l2gre_clss); 2339 __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre, 2340 QED_MODE_IPGRE_TUNN, p_req->ipgre_clss); 2341 2342 /* If PF modifies VF's req then it should 2343 * still return an error in case of partial configuration 2344 * or modified configuration as opposed to 
the requested one. 2345 */ 2346 rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask, 2347 &b_update_required, &tunn); 2348 2349 if (rc) 2350 status = PFVF_STATUS_FAILURE; 2351 2352 /* Apply the update if the QED client accepted any change */ 2353 if (b_update_required) { 2354 u16 geneve_port; 2355 2356 rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn, 2357 QED_SPQ_MODE_EBLOCK, NULL); 2358 if (rc) 2359 status = PFVF_STATUS_FAILURE; 2360 2361 geneve_port = p_tun->geneve_port.port; 2362 qed_for_each_vf(p_hwfn, i) { 2363 qed_iov_bulletin_set_udp_ports(p_hwfn, i, 2364 p_tun->vxlan_port.port, 2365 geneve_port); 2366 } 2367 } 2368 2369 send_resp: 2370 p_resp = qed_add_tlv(p_hwfn, &mbx->offset, 2371 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp)); 2372 2373 qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask); 2374 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 2375 sizeof(struct channel_list_end_tlv)); 2376 2377 qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); 2378 } 2379 2380 static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, 2381 struct qed_ptt *p_ptt, 2382 struct qed_vf_info *p_vf, 2383 u32 cid, u8 status) 2384 { 2385 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; 2386 struct pfvf_start_queue_resp_tlv *p_tlv; 2387 bool b_legacy = false; 2388 u16 length; 2389 2390 mbx->offset = (u8 *)mbx->reply_virt; 2391 2392 /* Taking a bigger struct instead of adding a TLV to list was a 2393 * mistake, but one which we're now stuck with, as some older 2394 * clients assume the size of the previous response. 2395 */ 2396 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == 2397 ETH_HSI_VER_NO_PKT_LEN_TUNN) 2398 b_legacy = true; 2399 2400 if (!b_legacy) 2401 length = sizeof(*p_tlv); 2402 else 2403 length = sizeof(struct pfvf_def_resp_tlv); 2404 2405 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ, 2406 length); 2407 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 2408 sizeof(struct channel_list_end_tlv)); 2409 2410 /* Update the TLV with the response */ 2411 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) 2412 p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY); 2413 2414 qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status); 2415 } 2416 2417 static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, 2418 struct qed_ptt *p_ptt, 2419 struct qed_vf_info *vf) 2420 { 2421 struct qed_queue_start_common_params params; 2422 struct qed_queue_cid_vf_params vf_params; 2423 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 2424 u8 status = PFVF_STATUS_NO_RESOURCE; 2425 struct vfpf_start_txq_tlv *req; 2426 struct qed_vf_queue *p_queue; 2427 struct qed_queue_cid *p_cid; 2428 struct qed_sb_info sb_dummy; 2429 u8 qid_usage_idx, vf_legacy; 2430 u32 cid = 0; 2431 int rc; 2432 u16 pq; 2433 2434 memset(&params, 0, sizeof(params)); 2435 req = &mbx->req_virt->start_txq; 2436 2437 if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid, 2438 QED_IOV_VALIDATE_Q_NA) || 2439 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) 2440 goto out; 2441 2442 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true); 2443 if (qid_usage_idx == QED_IOV_QID_INVALID) 2444 goto out; 2445 2446 p_queue = &vf->vf_queues[req->tx_qid]; 2447 if (p_queue->cids[qid_usage_idx].p_cid) 2448 goto out; 2449 2450 vf_legacy = qed_vf_calculate_legacy(vf); 2451 2452 /* Acquire a new queue-cid */ 2453 params.queue_id = p_queue->fw_tx_qid; 2454 params.vport_id = vf->vport_id; 2455 params.stats_id = vf->abs_vf_id + 0x10; 2456 2457 /* Since IGU index is passed via sb_info, construct a dummy one */ 2458
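/* The dummy SB is zeroed and carries only the VF-provided IGU SB id */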
memset(&sb_dummy, 0, sizeof(sb_dummy)); 2459 sb_dummy.igu_sb_id = req->hw_sb; 2460 params.p_sb = &sb_dummy; 2461 params.sb_idx = req->sb_index; 2462 2463 memset(&vf_params, 0, sizeof(vf_params)); 2464 vf_params.vfid = vf->relative_vf_id; 2465 vf_params.vf_qid = (u8)req->tx_qid; 2466 vf_params.vf_legacy = vf_legacy; 2467 vf_params.qid_usage_idx = qid_usage_idx; 2468 2469 p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid, 2470 &params, false, &vf_params); 2471 if (!p_cid) 2472 goto out; 2473 2474 pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id); 2475 rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid, 2476 req->pbl_addr, req->pbl_size, pq); 2477 if (rc) { 2478 status = PFVF_STATUS_FAILURE; 2479 qed_eth_queue_cid_release(p_hwfn, p_cid); 2480 } else { 2481 status = PFVF_STATUS_SUCCESS; 2482 p_queue->cids[qid_usage_idx].p_cid = p_cid; 2483 p_queue->cids[qid_usage_idx].b_is_tx = true; 2484 cid = p_cid->cid; 2485 } 2486 2487 out: 2488 qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status); 2489 } 2490 2491 static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, 2492 struct qed_vf_info *vf, 2493 u16 rxq_id, 2494 u8 qid_usage_idx, bool cqe_completion) 2495 { 2496 struct qed_vf_queue *p_queue; 2497 int rc = 0; 2498 2499 if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) { 2500 DP_VERBOSE(p_hwfn, 2501 QED_MSG_IOV, 2502 "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n", 2503 vf->relative_vf_id, rxq_id, qid_usage_idx); 2504 return -EINVAL; 2505 } 2506 2507 p_queue = &vf->vf_queues[rxq_id]; 2508 2509 /* We've validated the index and the existence of the active RXQ - 2510 * now we need to make sure that it's using the correct qid. 2511 */ 2512 if (!p_queue->cids[qid_usage_idx].p_cid || 2513 p_queue->cids[qid_usage_idx].b_is_tx) { 2514 struct qed_queue_cid *p_cid; 2515 2516 p_cid = qed_iov_get_vf_rx_queue_cid(p_queue); /* may be NULL */ 2517 DP_VERBOSE(p_hwfn, 2518 QED_MSG_IOV, 2519 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n", 2520 vf->relative_vf_id, 2521 rxq_id, qid_usage_idx, rxq_id, p_cid ? p_cid->qid_usage_idx : 0xff); 2522 return -EINVAL; 2523 } 2524 2525 /* Now that we know we have a valid Rx-queue - close it */ 2526 rc = qed_eth_rx_queue_stop(p_hwfn, 2527 p_queue->cids[qid_usage_idx].p_cid, 2528 false, cqe_completion); 2529 if (rc) 2530 return rc; 2531 2532 p_queue->cids[qid_usage_idx].p_cid = NULL; 2533 vf->num_active_rxqs--; 2534 2535 return 0; 2536 } 2537 2538 static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn, 2539 struct qed_vf_info *vf, 2540 u16 txq_id, u8 qid_usage_idx) 2541 { 2542 struct qed_vf_queue *p_queue; 2543 int rc = 0; 2544 2545 if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA)) 2546 return -EINVAL; 2547 2548 p_queue = &vf->vf_queues[txq_id]; 2549 if (!p_queue->cids[qid_usage_idx].p_cid || 2550 !p_queue->cids[qid_usage_idx].b_is_tx) 2551 return -EINVAL; 2552 2553 rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid); 2554 if (rc) 2555 return rc; 2556 2557 p_queue->cids[qid_usage_idx].p_cid = NULL; 2558 return 0; 2559 } 2560 2561 static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn, 2562 struct qed_ptt *p_ptt, 2563 struct qed_vf_info *vf) 2564 { 2565 u16 length = sizeof(struct pfvf_def_resp_tlv); 2566 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 2567 u8 status = PFVF_STATUS_FAILURE; 2568 struct vfpf_stop_rxqs_tlv *req; 2569 u8 qid_usage_idx; 2570 int rc; 2571 2572 /* There has never been an official driver that used this interface 2573 * for stopping multiple queues, and it is now considered deprecated.
2574 * Validate this isn't used here. 2575 */ 2576 req = &mbx->req_virt->stop_rxqs; 2577 if (req->num_rxqs != 1) { 2578 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2579 "Odd; VF[%d] tried stopping multiple Rx queues\n", 2580 vf->relative_vf_id); 2581 status = PFVF_STATUS_NOT_SUPPORTED; 2582 goto out; 2583 } 2584 2585 /* Find which qid-index is associated with the queue */ 2586 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); 2587 if (qid_usage_idx == QED_IOV_QID_INVALID) 2588 goto out; 2589 2590 rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid, 2591 qid_usage_idx, req->cqe_completion); 2592 if (!rc) 2593 status = PFVF_STATUS_SUCCESS; 2594 out: 2595 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS, 2596 length, status); 2597 } 2598 2599 static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn, 2600 struct qed_ptt *p_ptt, 2601 struct qed_vf_info *vf) 2602 { 2603 u16 length = sizeof(struct pfvf_def_resp_tlv); 2604 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 2605 u8 status = PFVF_STATUS_FAILURE; 2606 struct vfpf_stop_txqs_tlv *req; 2607 u8 qid_usage_idx; 2608 int rc; 2609 2610 /* There has never been an official driver that used this interface 2611 * for stopping multiple queues, and it is now considered deprecated. 2612 * Validate this isn't used here. 2613 */ 2614 req = &mbx->req_virt->stop_txqs; 2615 if (req->num_txqs != 1) { 2616 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2617 "Odd; VF[%d] tried stopping multiple Tx queues\n", 2618 vf->relative_vf_id); 2619 status = PFVF_STATUS_NOT_SUPPORTED; 2620 goto out; 2621 } 2622 2623 /* Find which qid-index is associated with the queue */ 2624 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true); 2625 if (qid_usage_idx == QED_IOV_QID_INVALID) 2626 goto out; 2627 2628 rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx); 2629 if (!rc) 2630 status = PFVF_STATUS_SUCCESS; 2631 2632 out: 2633 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS, 2634 length, status); 2635 } 2636 2637 static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, 2638 struct qed_ptt *p_ptt, 2639 struct qed_vf_info *vf) 2640 { 2641 struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF]; 2642 u16 length = sizeof(struct pfvf_def_resp_tlv); 2643 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 2644 struct vfpf_update_rxq_tlv *req; 2645 u8 status = PFVF_STATUS_FAILURE; 2646 u8 complete_event_flg; 2647 u8 complete_cqe_flg; 2648 u8 qid_usage_idx; 2649 int rc; 2650 u8 i; 2651 2652 req = &mbx->req_virt->update_rxq; 2653 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG); 2654 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); 2655 2656 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); 2657 if (qid_usage_idx == QED_IOV_QID_INVALID) 2658 goto out; 2659 2660 /* There shouldn't exist a VF that uses queue-qids yet uses this 2661 * API with multiple Rx queues. Validate this. 2662 */ 2663 if ((vf->acquire.vfdev_info.capabilities & 2664 VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) { 2665 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2666 "VF[%d] supports QIDs but sends multiple queues\n", 2667 vf->relative_vf_id); 2668 goto out; 2669 } 2670 2671 /* Validate inputs - for the legacy case this is still true since 2672 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX. 
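 * The single qid_usage_idx obtained above therefore applies to every
 * queue in the range validated below.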
2673 */ 2674 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) { 2675 if (!qed_iov_validate_rxq(p_hwfn, vf, i, 2676 QED_IOV_VALIDATE_Q_NA) || 2677 !vf->vf_queues[i].cids[qid_usage_idx].p_cid || 2678 vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) { 2679 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2680 "VF[%d]: Incorrect Rxqs [%04x, %02x]\n", 2681 vf->relative_vf_id, req->rx_qid, 2682 req->num_rxqs); 2683 goto out; 2684 } 2685 } 2686 2687 /* Prepare the handlers */ 2688 for (i = 0; i < req->num_rxqs; i++) { 2689 u16 qid = req->rx_qid + i; 2690 2691 handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid; 2692 } 2693 2694 rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers, 2695 req->num_rxqs, 2696 complete_cqe_flg, 2697 complete_event_flg, 2698 QED_SPQ_MODE_EBLOCK, NULL); 2699 if (rc) 2700 goto out; 2701 2702 status = PFVF_STATUS_SUCCESS; 2703 out: 2704 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ, 2705 length, status); 2706 } 2707 2708 void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn, 2709 void *p_tlvs_list, u16 req_type) 2710 { 2711 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list; 2712 int len = 0; 2713 2714 do { 2715 if (!p_tlv->length) { 2716 DP_NOTICE(p_hwfn, "Zero length TLV found\n"); 2717 return NULL; 2718 } 2719 2720 if (p_tlv->type == req_type) { 2721 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2722 "Extended tlv type %d, length %d found\n", 2723 p_tlv->type, p_tlv->length); 2724 return p_tlv; 2725 } 2726 2727 len += p_tlv->length; 2728 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length); 2729 2730 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) { 2731 DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n"); 2732 return NULL; 2733 } 2734 } while (p_tlv->type != CHANNEL_TLV_LIST_END); 2735 2736 return NULL; 2737 } 2738 2739 static void 2740 qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn, 2741 struct qed_sp_vport_update_params *p_data, 2742 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2743 { 2744 struct vfpf_vport_update_activate_tlv *p_act_tlv; 2745 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; 2746 2747 p_act_tlv = (struct vfpf_vport_update_activate_tlv *) 2748 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2749 if (!p_act_tlv) 2750 return; 2751 2752 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx; 2753 p_data->vport_active_rx_flg = p_act_tlv->active_rx; 2754 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx; 2755 p_data->vport_active_tx_flg = p_act_tlv->active_tx; 2756 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE; 2757 } 2758 2759 static void 2760 qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn, 2761 struct qed_sp_vport_update_params *p_data, 2762 struct qed_vf_info *p_vf, 2763 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2764 { 2765 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv; 2766 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; 2767 2768 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *) 2769 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2770 if (!p_vlan_tlv) 2771 return; 2772 2773 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan; 2774 2775 /* Ignore the VF request if we're forcing a vlan */ 2776 if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) { 2777 p_data->update_inner_vlan_removal_flg = 1; 2778 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan; 2779 } 2780 2781 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP; 2782 } 2783 2784 static void 2785 qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn, 2786 struct qed_sp_vport_update_params *p_data,
2787 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2788 { 2789 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; 2790 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; 2791 2792 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *) 2793 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 2794 tlv); 2795 if (!p_tx_switch_tlv) 2796 return; 2797 2798 p_data->update_tx_switching_flg = 1; 2799 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching; 2800 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH; 2801 } 2802 2803 static void 2804 qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn, 2805 struct qed_sp_vport_update_params *p_data, 2806 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2807 { 2808 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; 2809 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST; 2810 2811 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *) 2812 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2813 if (!p_mcast_tlv) 2814 return; 2815 2816 p_data->update_approx_mcast_flg = 1; 2817 memcpy(p_data->bins, p_mcast_tlv->bins, 2818 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); 2819 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; 2820 } 2821 2822 static void 2823 qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn, 2824 struct qed_sp_vport_update_params *p_data, 2825 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2826 { 2827 struct qed_filter_accept_flags *p_flags = &p_data->accept_flags; 2828 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; 2829 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; 2830 2831 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *) 2832 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2833 if (!p_accept_tlv) 2834 return; 2835 2836 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode; 2837 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter; 2838 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode; 2839 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter; 2840 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM; 2841 } 2842 2843 static void 2844 qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn, 2845 struct qed_sp_vport_update_params *p_data, 2846 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2847 { 2848 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan; 2849 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; 2850 2851 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *) 2852 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 2853 tlv); 2854 if (!p_accept_any_vlan) 2855 return; 2856 2857 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan; 2858 p_data->update_accept_any_vlan_flg = 2859 p_accept_any_vlan->update_accept_any_vlan_flg; 2860 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN; 2861 } 2862 2863 static void 2864 qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, 2865 struct qed_vf_info *vf, 2866 struct qed_sp_vport_update_params *p_data, 2867 struct qed_rss_params *p_rss, 2868 struct qed_iov_vf_mbx *p_mbx, 2869 u16 *tlvs_mask, u16 *tlvs_accepted) 2870 { 2871 struct vfpf_vport_update_rss_tlv *p_rss_tlv; 2872 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS; 2873 bool b_reject = false; 2874 u16 table_size; 2875 u16 i, q_idx; 2876 2877 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *) 2878 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2879 if (!p_rss_tlv) { 2880 p_data->rss_params = NULL; 2881 return; 2882 } 2883 2884 memset(p_rss, 0, sizeof(struct qed_rss_params)); 2885 2886 p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags 
& 2887 VFPF_UPDATE_RSS_CONFIG_FLAG); 2888 p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags & 2889 VFPF_UPDATE_RSS_CAPS_FLAG); 2890 p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags & 2891 VFPF_UPDATE_RSS_IND_TABLE_FLAG); 2892 p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags & 2893 VFPF_UPDATE_RSS_KEY_FLAG); 2894 2895 p_rss->rss_enable = p_rss_tlv->rss_enable; 2896 p_rss->rss_eng_id = vf->relative_vf_id + 1; 2897 p_rss->rss_caps = p_rss_tlv->rss_caps; 2898 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log; 2899 memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key)); 2900 2901 table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table), 2902 (1 << p_rss_tlv->rss_table_size_log)); 2903 2904 for (i = 0; i < table_size; i++) { 2905 struct qed_queue_cid *p_cid; 2906 2907 q_idx = p_rss_tlv->rss_ind_table[i]; 2908 if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx, 2909 QED_IOV_VALIDATE_Q_ENABLE)) { 2910 DP_VERBOSE(p_hwfn, 2911 QED_MSG_IOV, 2912 "VF[%d]: Omitting RSS due to wrong queue %04x\n", 2913 vf->relative_vf_id, q_idx); 2914 b_reject = true; 2915 goto out; 2916 } 2917 2918 p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]); 2919 p_rss->rss_ind_table[i] = p_cid; 2920 } 2921 2922 p_data->rss_params = p_rss; 2923 out: 2924 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS; 2925 if (!b_reject) 2926 *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS; 2927 } 2928 2929 static void 2930 qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn, 2931 struct qed_vf_info *vf, 2932 struct qed_sp_vport_update_params *p_data, 2933 struct qed_sge_tpa_params *p_sge_tpa, 2934 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2935 { 2936 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv; 2937 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; 2938 2939 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *) 2940 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2941 2942 if (!p_sge_tpa_tlv) { 2943 p_data->sge_tpa_params = NULL; 2944 return; 2945 } 2946 2947 memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params)); 2948 2949 p_sge_tpa->update_tpa_en_flg = 2950 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG); 2951 p_sge_tpa->update_tpa_param_flg = 2952 !!(p_sge_tpa_tlv->update_sge_tpa_flags & 2953 VFPF_UPDATE_TPA_PARAM_FLAG); 2954 2955 p_sge_tpa->tpa_ipv4_en_flg = 2956 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG); 2957 p_sge_tpa->tpa_ipv6_en_flg = 2958 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG); 2959 p_sge_tpa->tpa_pkt_split_flg = 2960 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG); 2961 p_sge_tpa->tpa_hdr_data_split_flg = 2962 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG); 2963 p_sge_tpa->tpa_gro_consistent_flg = 2964 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG); 2965 2966 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num; 2967 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size; 2968 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start; 2969 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont; 2970 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe; 2971 2972 p_data->sge_tpa_params = p_sge_tpa; 2973 2974 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA; 2975 } 2976 2977 static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn, 2978 u8 vfid, 2979 struct qed_sp_vport_update_params *params, 2980 u16 *tlvs) 2981 { 2982 u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; 2983 struct qed_filter_accept_flags *flags = 
&params->accept_flags; 2984 struct qed_public_vf_info *vf_info; 2985 2986 /* Untrusted VFs can't even be trusted to know that fact. 2987 * Simply indicate everything is configured fine, and trace 2988 * configuration 'behind their back'. 2989 */ 2990 if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM))) 2991 return 0; 2992 2993 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); 2994 2995 if (flags->update_rx_mode_config) { 2996 vf_info->rx_accept_mode = flags->rx_accept_filter; 2997 if (!vf_info->is_trusted_configured) 2998 flags->rx_accept_filter &= ~mask; 2999 } 3000 3001 if (flags->update_tx_mode_config) { 3002 vf_info->tx_accept_mode = flags->tx_accept_filter; 3003 if (!vf_info->is_trusted_configured) 3004 flags->tx_accept_filter &= ~mask; 3005 } 3006 3007 return 0; 3008 } 3009 3010 static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, 3011 struct qed_ptt *p_ptt, 3012 struct qed_vf_info *vf) 3013 { 3014 struct qed_rss_params *p_rss_params = NULL; 3015 struct qed_sp_vport_update_params params; 3016 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 3017 struct qed_sge_tpa_params sge_tpa_params; 3018 u16 tlvs_mask = 0, tlvs_accepted = 0; 3019 u8 status = PFVF_STATUS_SUCCESS; 3020 u16 length; 3021 int rc; 3022 3023 /* Validate that the VF can send such a request */ 3024 if (!vf->vport_instance) { 3025 DP_VERBOSE(p_hwfn, 3026 QED_MSG_IOV, 3027 "No VPORT instance available for VF[%d], failing vport update\n", 3028 vf->abs_vf_id); 3029 status = PFVF_STATUS_FAILURE; 3030 goto out; 3031 } 3032 p_rss_params = vzalloc(sizeof(*p_rss_params)); 3033 if (!p_rss_params) { 3034 status = PFVF_STATUS_FAILURE; 3035 goto out; 3036 } 3037 3038 memset(&params, 0, sizeof(params)); 3039 params.opaque_fid = vf->opaque_fid; 3040 params.vport_id = vf->vport_id; 3041 params.rss_params = NULL; 3042 3043 /* Search for extended tlvs list and update values 3044 * from VF in struct qed_sp_vport_update_params. 3045 */ 3046 qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask); 3047 qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask); 3048 qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask); 3049 qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask); 3050 qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask); 3051 qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask); 3052 qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params, 3053 &sge_tpa_params, mbx, &tlvs_mask); 3054 3055 tlvs_accepted = tlvs_mask; 3056 3057 /* Some of the extended TLVs need to be validated first; In that case, 3058 * they can update the mask without updating the accepted [so that 3059 * the PF can communicate to the VF that it has rejected the request].
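 * RSS is currently the only such TLV: a bad indirection-table entry
 * sets the RSS bit in tlvs_mask but leaves it out of tlvs_accepted.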
3060 */ 3061 qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params, 3062 mbx, &tlvs_mask, &tlvs_accepted); 3063 3064 if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id, 3065 &params, &tlvs_accepted)) { 3066 tlvs_accepted = 0; 3067 status = PFVF_STATUS_NOT_SUPPORTED; 3068 goto out; 3069 } 3070 3071 if (!tlvs_accepted) { 3072 if (tlvs_mask) 3073 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3074 "Upper-layer prevents VF vport configuration\n"); 3075 else 3076 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3077 "No feature tlvs found for vport update\n"); 3078 status = PFVF_STATUS_NOT_SUPPORTED; 3079 goto out; 3080 } 3081 3082 rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL); 3083 3084 if (rc) 3085 status = PFVF_STATUS_FAILURE; 3086 3087 out: 3088 vfree(p_rss_params); 3089 length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status, 3090 tlvs_mask, tlvs_accepted); 3091 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); 3092 } 3093 3094 static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn, 3095 struct qed_vf_info *p_vf, 3096 struct qed_filter_ucast *p_params) 3097 { 3098 int i; 3099 3100 /* First remove entries and then add new ones */ 3101 if (p_params->opcode == QED_FILTER_REMOVE) { 3102 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) 3103 if (p_vf->shadow_config.vlans[i].used && 3104 p_vf->shadow_config.vlans[i].vid == 3105 p_params->vlan) { 3106 p_vf->shadow_config.vlans[i].used = false; 3107 break; 3108 } 3109 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) { 3110 DP_VERBOSE(p_hwfn, 3111 QED_MSG_IOV, 3112 "VF [%d] - Tried to remove a non-existing vlan\n", 3113 p_vf->relative_vf_id); 3114 return -EINVAL; 3115 } 3116 } else if (p_params->opcode == QED_FILTER_REPLACE || 3117 p_params->opcode == QED_FILTER_FLUSH) { 3118 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) 3119 p_vf->shadow_config.vlans[i].used = false; 3120 } 3121 3122 /* In forced mode, we're willing to remove entries - but we don't add 3123 * new ones. 3124 */ 3125 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)) 3126 return 0; 3127 3128 if (p_params->opcode == QED_FILTER_ADD || 3129 p_params->opcode == QED_FILTER_REPLACE) { 3130 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { 3131 if (p_vf->shadow_config.vlans[i].used) 3132 continue; 3133 3134 p_vf->shadow_config.vlans[i].used = true; 3135 p_vf->shadow_config.vlans[i].vid = p_params->vlan; 3136 break; 3137 } 3138 3139 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) { 3140 DP_VERBOSE(p_hwfn, 3141 QED_MSG_IOV, 3142 "VF [%d] - Tried to configure more than %d vlan filters\n", 3143 p_vf->relative_vf_id, 3144 QED_ETH_VF_NUM_VLAN_FILTERS + 1); 3145 return -EINVAL; 3146 } 3147 } 3148 3149 return 0; 3150 } 3151 3152 static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, 3153 struct qed_vf_info *p_vf, 3154 struct qed_filter_ucast *p_params) 3155 { 3156 int i; 3157 3158 /* If we're in forced-mode, we don't allow any change */ 3159 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) 3160 return 0; 3161 3162 /* Don't keep track of shadow copy since we don't intend to restore.
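 * For trusted VFs the active MAC is instead reflected to the VF
 * through the bulletin board (see qed_iov_chk_ucast).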
*/ 3163 if (p_vf->p_vf_info.is_trusted_configured) 3164 return 0; 3165 3166 /* First remove entries and then add new ones */ 3167 if (p_params->opcode == QED_FILTER_REMOVE) { 3168 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { 3169 if (ether_addr_equal(p_vf->shadow_config.macs[i], 3170 p_params->mac)) { 3171 eth_zero_addr(p_vf->shadow_config.macs[i]); 3172 break; 3173 } 3174 } 3175 3176 if (i == QED_ETH_VF_NUM_MAC_FILTERS) { 3177 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3178 "MAC isn't configured\n"); 3179 return -EINVAL; 3180 } 3181 } else if (p_params->opcode == QED_FILTER_REPLACE || 3182 p_params->opcode == QED_FILTER_FLUSH) { 3183 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) 3184 eth_zero_addr(p_vf->shadow_config.macs[i]); 3185 } 3186 3187 /* List the new MAC address */ 3188 if (p_params->opcode != QED_FILTER_ADD && 3189 p_params->opcode != QED_FILTER_REPLACE) 3190 return 0; 3191 3192 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { 3193 if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) { 3194 ether_addr_copy(p_vf->shadow_config.macs[i], 3195 p_params->mac); 3196 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3197 "Added MAC at %d entry in shadow\n", i); 3198 break; 3199 } 3200 } 3201 3202 if (i == QED_ETH_VF_NUM_MAC_FILTERS) { 3203 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n"); 3204 return -EINVAL; 3205 } 3206 3207 return 0; 3208 } 3209 3210 static int 3211 qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn, 3212 struct qed_vf_info *p_vf, 3213 struct qed_filter_ucast *p_params) 3214 { 3215 int rc = 0; 3216 3217 if (p_params->type == QED_FILTER_MAC) { 3218 rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params); 3219 if (rc) 3220 return rc; 3221 } 3222 3223 if (p_params->type == QED_FILTER_VLAN) 3224 rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params); 3225 3226 return rc; 3227 } 3228 3229 static int qed_iov_chk_ucast(struct qed_hwfn *hwfn, 3230 int vfid, struct qed_filter_ucast *params) 3231 { 3232 struct qed_public_vf_info *vf; 3233 3234 vf = qed_iov_get_public_vf_info(hwfn, vfid, true); 3235 if (!vf) 3236 return -EINVAL; 3237 3238 /* No real decision to make; Store the configured MAC */ 3239 if (params->type == QED_FILTER_MAC || 3240 params->type == QED_FILTER_MAC_VLAN) { 3241 ether_addr_copy(vf->mac, params->mac); 3242 3243 if (vf->is_trusted_configured) { 3244 qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid); 3245 3246 /* Update and post bulletin again */ 3247 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 3248 } 3249 } 3250 3251 return 0; 3252 } 3253 3254 static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, 3255 struct qed_ptt *p_ptt, 3256 struct qed_vf_info *vf) 3257 { 3258 struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt; 3259 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 3260 struct vfpf_ucast_filter_tlv *req; 3261 u8 status = PFVF_STATUS_SUCCESS; 3262 struct qed_filter_ucast params; 3263 int rc; 3264 3265 /* Prepare the unicast filter params */ 3266 memset(&params, 0, sizeof(struct qed_filter_ucast)); 3267 req = &mbx->req_virt->ucast_filter; 3268 params.opcode = (enum qed_filter_opcode)req->opcode; 3269 params.type = (enum qed_filter_ucast_type)req->type; 3270 3271 params.is_rx_filter = 1; 3272 params.is_tx_filter = 1; 3273 params.vport_to_remove_from = vf->vport_id; 3274 params.vport_to_add_to = vf->vport_id; 3275 memcpy(params.mac, req->mac, ETH_ALEN); 3276 params.vlan = req->vlan; 3277 3278 DP_VERBOSE(p_hwfn, 3279 QED_MSG_IOV, 3280 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %pM, vlan 0x%04x\n", 3281
vf->abs_vf_id, params.opcode, params.type, 3282 params.is_rx_filter ? "RX" : "", 3283 params.is_tx_filter ? "TX" : "", 3284 params.vport_to_add_to, 3285 params.mac, params.vlan); 3286 3287 if (!vf->vport_instance) { 3288 DP_VERBOSE(p_hwfn, 3289 QED_MSG_IOV, 3290 "No VPORT instance available for VF[%d], failing ucast MAC configuration\n", 3291 vf->abs_vf_id); 3292 status = PFVF_STATUS_FAILURE; 3293 goto out; 3294 } 3295 3296 /* Update shadow copy of the VF configuration */ 3297 if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) { 3298 status = PFVF_STATUS_FAILURE; 3299 goto out; 3300 } 3301 3302 /* Determine if the unicast filtering is acceptable to the PF */ 3303 if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) && 3304 (params.type == QED_FILTER_VLAN || 3305 params.type == QED_FILTER_MAC_VLAN)) { 3306 /* Once VLAN is forced or PVID is set, do not allow 3307 * to add/replace any further VLANs. 3308 */ 3309 if (params.opcode == QED_FILTER_ADD || 3310 params.opcode == QED_FILTER_REPLACE) 3311 status = PFVF_STATUS_FORCED; 3312 goto out; 3313 } 3314 3315 if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) && 3316 (params.type == QED_FILTER_MAC || 3317 params.type == QED_FILTER_MAC_VLAN)) { 3318 if (!ether_addr_equal(p_bulletin->mac, params.mac) || 3319 (params.opcode != QED_FILTER_ADD && 3320 params.opcode != QED_FILTER_REPLACE)) 3321 status = PFVF_STATUS_FORCED; 3322 goto out; 3323 } 3324 3325 rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params); 3326 if (rc) { 3327 status = PFVF_STATUS_FAILURE; 3328 goto out; 3329 } 3330 3331 rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params, 3332 QED_SPQ_MODE_CB, NULL); 3333 if (rc) 3334 status = PFVF_STATUS_FAILURE; 3335 3336 out: 3337 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER, 3338 sizeof(struct pfvf_def_resp_tlv), status); 3339 } 3340 3341 static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn, 3342 struct qed_ptt *p_ptt, 3343 struct qed_vf_info *vf) 3344 { 3345 int i; 3346 3347 /* Reset the SBs */ 3348 for (i = 0; i < vf->num_sbs; i++) 3349 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 3350 vf->igu_sbs[i], 3351 vf->opaque_fid, false); 3352 3353 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP, 3354 sizeof(struct pfvf_def_resp_tlv), 3355 PFVF_STATUS_SUCCESS); 3356 } 3357 3358 static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn, 3359 struct qed_ptt *p_ptt, struct qed_vf_info *vf) 3360 { 3361 u16 length = sizeof(struct pfvf_def_resp_tlv); 3362 u8 status = PFVF_STATUS_SUCCESS; 3363 3364 /* Disable Interrupts for VF */ 3365 qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); 3366 3367 /* Reset Permission table */ 3368 qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); 3369 3370 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE, 3371 length, status); 3372 } 3373 3374 static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn, 3375 struct qed_ptt *p_ptt, 3376 struct qed_vf_info *p_vf) 3377 { 3378 u16 length = sizeof(struct pfvf_def_resp_tlv); 3379 u8 status = PFVF_STATUS_SUCCESS; 3380 int rc = 0; 3381 3382 qed_iov_vf_cleanup(p_hwfn, p_vf); 3383 3384 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) { 3385 /* Stopping the VF */ 3386 rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid, 3387 p_vf->opaque_fid); 3388 3389 if (rc) { 3390 DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n", 3391 rc); 3392 status = PFVF_STATUS_FAILURE; 3393 } 3394 3395 p_vf->state = VF_STOPPED; 3396 } 3397 3398 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE, 3399 length, status); 3400 } 3401 3402
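/* VF-requested read-back of a queue's coalescing setting. The request
 * names the queue and direction; the PF validates the queue, locates the
 * matching queue-cid [for Tx, the first Tx cid in the qzone], queries the
 * value and returns it in a CHANNEL_TLV_COALESCE_READ response.
 */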
static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, 3403 struct qed_ptt *p_ptt, 3404 struct qed_vf_info *p_vf) 3405 { 3406 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; 3407 struct pfvf_read_coal_resp_tlv *p_resp; 3408 struct vfpf_read_coal_req_tlv *req; 3409 u8 status = PFVF_STATUS_FAILURE; 3410 struct qed_vf_queue *p_queue; 3411 struct qed_queue_cid *p_cid; 3412 u16 coal = 0, qid, i; 3413 bool b_is_rx; 3414 int rc = 0; 3415 3416 mbx->offset = (u8 *)mbx->reply_virt; 3417 req = &mbx->req_virt->read_coal_req; 3418 3419 qid = req->qid; 3420 b_is_rx = req->is_rx ? true : false; 3421 3422 if (b_is_rx) { 3423 if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid, 3424 QED_IOV_VALIDATE_Q_ENABLE)) { 3425 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3426 "VF[%d]: Invalid Rx queue_id = %d\n", 3427 p_vf->abs_vf_id, qid); 3428 goto send_resp; 3429 } 3430 3431 p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]); 3432 rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal); 3433 if (rc) 3434 goto send_resp; 3435 } else { 3436 if (!qed_iov_validate_txq(p_hwfn, p_vf, qid, 3437 QED_IOV_VALIDATE_Q_ENABLE)) { 3438 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3439 "VF[%d]: Invalid Tx queue_id = %d\n", 3440 p_vf->abs_vf_id, qid); 3441 goto send_resp; 3442 } 3443 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { 3444 p_queue = &p_vf->vf_queues[qid]; 3445 if ((!p_queue->cids[i].p_cid) || 3446 (!p_queue->cids[i].b_is_tx)) 3447 continue; 3448 3449 p_cid = p_queue->cids[i].p_cid; 3450 3451 rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal); 3452 if (rc) 3453 goto send_resp; 3454 break; 3455 } 3456 } 3457 3458 status = PFVF_STATUS_SUCCESS; 3459 3460 send_resp: 3461 p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ, 3462 sizeof(*p_resp)); 3463 p_resp->coal = coal; 3464 3465 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 3466 sizeof(struct channel_list_end_tlv)); 3467 3468 qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); 3469 } 3470 3471 static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, 3472 struct qed_ptt *p_ptt, 3473 struct qed_vf_info *vf) 3474 { 3475 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 3476 struct vfpf_update_coalesce *req; 3477 u8 status = PFVF_STATUS_FAILURE; 3478 struct qed_queue_cid *p_cid; 3479 u16 rx_coal, tx_coal; 3480 int rc = 0, i; 3481 u16 qid; 3482 3483 req = &mbx->req_virt->update_coalesce; 3484 3485 rx_coal = req->rx_coal; 3486 tx_coal = req->tx_coal; 3487 qid = req->qid; 3488 3489 if (!qed_iov_validate_rxq(p_hwfn, vf, qid, 3490 QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) { 3491 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3492 "VF[%d]: Invalid Rx queue_id = %d\n", 3493 vf->abs_vf_id, qid); 3494 goto out; 3495 } 3496 3497 if (!qed_iov_validate_txq(p_hwfn, vf, qid, 3498 QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) { 3499 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3500 "VF[%d]: Invalid Tx queue_id = %d\n", 3501 vf->abs_vf_id, qid); 3502 goto out; 3503 } 3504 3505 DP_VERBOSE(p_hwfn, 3506 QED_MSG_IOV, 3507 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n", 3508 vf->abs_vf_id, rx_coal, tx_coal, qid); 3509 3510 if (rx_coal) { 3511 p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]); 3512 3513 rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); 3514 if (rc) { 3515 DP_VERBOSE(p_hwfn, 3516 QED_MSG_IOV, 3517 "VF[%d]: Unable to set rx queue = %d coalesce\n", 3518 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid); 3519 goto out; 3520 } 3521 vf->rx_coal = rx_coal; 3522 } 3523 3524 if (tx_coal) { 3525 struct qed_vf_queue *p_queue = &vf->vf_queues[qid]; 3526 3527 for (i 
= 0; i < MAX_QUEUES_PER_QZONE; i++) { 3528 if (!p_queue->cids[i].p_cid) 3529 continue; 3530 3531 if (!p_queue->cids[i].b_is_tx) 3532 continue; 3533 3534 rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, 3535 p_queue->cids[i].p_cid); 3536 3537 if (rc) { 3538 DP_VERBOSE(p_hwfn, 3539 QED_MSG_IOV, 3540 "VF[%d]: Unable to set tx queue coalesce\n", 3541 vf->abs_vf_id); 3542 goto out; 3543 } 3544 } 3545 vf->tx_coal = tx_coal; 3546 } 3547 3548 status = PFVF_STATUS_SUCCESS; 3549 out: 3550 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE, 3551 sizeof(struct pfvf_def_resp_tlv), status); 3552 } 3553 static int 3554 qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, 3555 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 3556 { 3557 int cnt; 3558 u32 val; 3559 3560 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid); 3561 3562 for (cnt = 0; cnt < 50; cnt++) { 3563 val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); 3564 if (!val) 3565 break; 3566 msleep(20); 3567 } 3568 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 3569 3570 if (cnt == 50) { 3571 DP_ERR(p_hwfn, 3572 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n", 3573 p_vf->abs_vf_id, val); 3574 return -EBUSY; 3575 } 3576 3577 return 0; 3578 } 3579 3580 static int 3581 qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, 3582 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 3583 { 3584 u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4]; 3585 int i, cnt; 3586 3587 /* Read initial consumers & producers */ 3588 for (i = 0; i < MAX_NUM_VOQS_E4; i++) { 3589 u32 prod; 3590 3591 cons[i] = qed_rd(p_hwfn, p_ptt, 3592 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + 3593 i * 0x40); 3594 prod = qed_rd(p_hwfn, p_ptt, 3595 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 + 3596 i * 0x40); 3597 distance[i] = prod - cons[i]; 3598 } 3599 3600 /* Wait for consumers to pass the producers */ 3601 i = 0; 3602 for (cnt = 0; cnt < 50; cnt++) { 3603 for (; i < MAX_NUM_VOQS_E4; i++) { 3604 u32 tmp; 3605 3606 tmp = qed_rd(p_hwfn, p_ptt, 3607 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + 3608 i * 0x40); 3609 if (distance[i] > tmp - cons[i]) 3610 break; 3611 } 3612 3613 if (i == MAX_NUM_VOQS_E4) 3614 break; 3615 3616 msleep(20); 3617 } 3618 3619 if (cnt == 50) { 3620 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n", 3621 p_vf->abs_vf_id, i); 3622 return -EBUSY; 3623 } 3624 3625 return 0; 3626 } 3627 3628 static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn, 3629 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 3630 { 3631 int rc; 3632 3633 rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt); 3634 if (rc) 3635 return rc; 3636 3637 rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt); 3638 if (rc) 3639 return rc; 3640 3641 return 0; 3642 } 3643 3644 static int 3645 qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn, 3646 struct qed_ptt *p_ptt, 3647 u16 rel_vf_id, u32 *ack_vfs) 3648 { 3649 struct qed_vf_info *p_vf; 3650 int rc = 0; 3651 3652 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); 3653 if (!p_vf) 3654 return 0; 3655 3656 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & 3657 (1ULL << (rel_vf_id % 64))) { 3658 u16 vfid = p_vf->abs_vf_id; 3659 3660 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3661 "VF[%d] - Handling FLR\n", vfid); 3662 3663 qed_iov_vf_cleanup(p_hwfn, p_vf); 3664 3665 /* If VF isn't active, no need for anything but SW */ 3666 if (!p_vf->b_init) 3667 goto cleanup; 3668 3669 rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt); 3670 if (rc) 3671 goto cleanup; 3672 3673 rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true); 3674 if (rc) { 3675 
DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid); 3676 return rc; 3677 } 3678 3679 /* Workaround to make VF-PF channel ready, as FW 3680 * doesn't do that as a part of FLR. 3681 */ 3682 REG_WR(p_hwfn, 3683 GTT_BAR0_MAP_REG_USDM_RAM + 3684 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1); 3685 3686 /* VF_STOPPED has to be set only after final cleanup 3687 * but prior to re-enabling the VF. 3688 */ 3689 p_vf->state = VF_STOPPED; 3690 3691 rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf); 3692 if (rc) { 3693 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n", 3694 vfid); 3695 return rc; 3696 } 3697 cleanup: 3698 /* Mark VF for ack and clean pending state */ 3699 if (p_vf->state == VF_RESET) 3700 p_vf->state = VF_STOPPED; 3701 ack_vfs[vfid / 32] |= BIT((vfid % 32)); 3702 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= 3703 ~(1ULL << (rel_vf_id % 64)); 3704 p_vf->vf_mbx.b_pending_msg = false; 3705 } 3706 3707 return rc; 3708 } 3709 3710 static int 3711 qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 3712 { 3713 u32 ack_vfs[VF_MAX_STATIC / 32]; 3714 int rc = 0; 3715 u16 i; 3716 3717 memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32)); 3718 3719 /* Since BRB <-> PRS interface can't be tested as part of the flr 3720 * polling due to HW limitations, simply sleep a bit. And since 3721 * there's no need to wait per-vf, do it before looping. 3722 */ 3723 msleep(100); 3724 3725 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) 3726 qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs); 3727 3728 rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs); 3729 return rc; 3730 } 3731 3732 bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) 3733 { 3734 bool found = false; 3735 u16 i; 3736 3737 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n"); 3738 for (i = 0; i < (VF_MAX_STATIC / 32); i++) 3739 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3740 "[%08x,...,%08x]: %08x\n", 3741 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]); 3742 3743 if (!p_hwfn->cdev->p_iov_info) { 3744 DP_NOTICE(p_hwfn, "VF flr but no IOV\n"); 3745 return false; 3746 } 3747 3748 /* Mark VFs */ 3749 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) { 3750 struct qed_vf_info *p_vf; 3751 u8 vfid; 3752 3753 p_vf = qed_iov_get_vf_info(p_hwfn, i, false); 3754 if (!p_vf) 3755 continue; 3756 3757 vfid = p_vf->abs_vf_id; 3758 if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) { 3759 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr; 3760 u16 rel_vf_id = p_vf->relative_vf_id; 3761 3762 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3763 "VF[%d] [rel %d] got FLR-ed\n", 3764 vfid, rel_vf_id); 3765 3766 p_vf->state = VF_RESET; 3767 3768 /* No need to lock here, since pending_flr should 3769 * only change here and before ACKing MFw. Since 3770 * MFW will not trigger an additional attention for 3771 * VF flr until ACKs, we're safe. 
3772 */ 3773 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64); 3774 found = true; 3775 } 3776 } 3777 3778 return found; 3779 } 3780 3781 static void qed_iov_get_link(struct qed_hwfn *p_hwfn, 3782 u16 vfid, 3783 struct qed_mcp_link_params *p_params, 3784 struct qed_mcp_link_state *p_link, 3785 struct qed_mcp_link_capabilities *p_caps) 3786 { 3787 struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, 3788 vfid, 3789 false); 3790 struct qed_bulletin_content *p_bulletin; 3791 3792 if (!p_vf) 3793 return; 3794 3795 p_bulletin = p_vf->bulletin.p_virt; 3796 3797 if (p_params) 3798 __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin); 3799 if (p_link) 3800 __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin); 3801 if (p_caps) 3802 __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); 3803 } 3804 3805 static int 3806 qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, 3807 struct qed_ptt *p_ptt, 3808 struct qed_vf_info *p_vf) 3809 { 3810 struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt; 3811 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; 3812 struct vfpf_bulletin_update_mac_tlv *p_req; 3813 u8 status = PFVF_STATUS_SUCCESS; 3814 int rc = 0; 3815 3816 if (!p_vf->p_vf_info.is_trusted_configured) { 3817 DP_VERBOSE(p_hwfn, 3818 QED_MSG_IOV, 3819 "Blocking bulletin update request from untrusted VF[%d]\n", 3820 p_vf->abs_vf_id); 3821 status = PFVF_STATUS_NOT_SUPPORTED; 3822 rc = -EINVAL; 3823 goto send_status; 3824 } 3825 3826 p_req = &mbx->req_virt->bulletin_update_mac; 3827 ether_addr_copy(p_bulletin->mac, p_req->mac); 3828 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3829 "Updated bulletin of VF[%d] with requested MAC[%pM]\n", 3830 p_vf->abs_vf_id, p_req->mac); 3831 3832 send_status: 3833 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, 3834 CHANNEL_TLV_BULLETIN_UPDATE_MAC, 3835 sizeof(struct pfvf_def_resp_tlv), status); 3836 return rc; 3837 } 3838 3839 static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, 3840 struct qed_ptt *p_ptt, int vfid) 3841 { 3842 struct qed_iov_vf_mbx *mbx; 3843 struct qed_vf_info *p_vf; 3844 3845 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 3846 if (!p_vf) 3847 return; 3848 3849 mbx = &p_vf->vf_mbx; 3850 3851 /* qed_iov_process_mbx_request */ 3852 if (!mbx->b_pending_msg) { 3853 DP_NOTICE(p_hwfn, 3854 "VF[%02x]: Trying to process mailbox message when none is pending\n", 3855 p_vf->abs_vf_id); 3856 return; 3857 } 3858 mbx->b_pending_msg = false; 3859 3860 mbx->first_tlv = mbx->req_virt->first_tlv; 3861 3862 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3863 "VF[%02x]: Processing mailbox message [type %04x]\n", 3864 p_vf->abs_vf_id, mbx->first_tlv.tl.type); 3865 3866 /* check if tlv type is known */ 3867 if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) && 3868 !p_vf->b_malicious) { 3869 switch (mbx->first_tlv.tl.type) { 3870 case CHANNEL_TLV_ACQUIRE: 3871 qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); 3872 break; 3873 case CHANNEL_TLV_VPORT_START: 3874 qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf); 3875 break; 3876 case CHANNEL_TLV_VPORT_TEARDOWN: 3877 qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf); 3878 break; 3879 case CHANNEL_TLV_START_RXQ: 3880 qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf); 3881 break; 3882 case CHANNEL_TLV_START_TXQ: 3883 qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf); 3884 break; 3885 case CHANNEL_TLV_STOP_RXQS: 3886 qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf); 3887 break; 3888 case CHANNEL_TLV_STOP_TXQS: 3889 qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf); 3890 break; 3891 case CHANNEL_TLV_UPDATE_RXQ: 3892 qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, 
p_vf); 3893 break; 3894 case CHANNEL_TLV_VPORT_UPDATE: 3895 qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf); 3896 break; 3897 case CHANNEL_TLV_UCAST_FILTER: 3898 qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf); 3899 break; 3900 case CHANNEL_TLV_CLOSE: 3901 qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf); 3902 break; 3903 case CHANNEL_TLV_INT_CLEANUP: 3904 qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf); 3905 break; 3906 case CHANNEL_TLV_RELEASE: 3907 qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); 3908 break; 3909 case CHANNEL_TLV_UPDATE_TUNN_PARAM: 3910 qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf); 3911 break; 3912 case CHANNEL_TLV_COALESCE_UPDATE: 3913 qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf); 3914 break; 3915 case CHANNEL_TLV_COALESCE_READ: 3916 qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf); 3917 break; 3918 case CHANNEL_TLV_BULLETIN_UPDATE_MAC: 3919 qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf); 3920 break; 3921 } 3922 } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { 3923 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3924 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n", 3925 p_vf->abs_vf_id, mbx->first_tlv.tl.type); 3926 3927 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, 3928 mbx->first_tlv.tl.type, 3929 sizeof(struct pfvf_def_resp_tlv), 3930 PFVF_STATUS_MALICIOUS); 3931 } else { 3932 /* unknown TLV - this may belong to a VF driver from the future 3933 * - a version written after this PF driver was written, which 3934 * supports features unknown as of yet. Too bad since we don't 3935 * support them. Or this may be because someone wrote a crappy 3936 * VF driver and is sending garbage over the channel. 3937 */ 3938 DP_NOTICE(p_hwfn, 3939 "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n", 3940 p_vf->abs_vf_id, 3941 mbx->first_tlv.tl.type, 3942 mbx->first_tlv.tl.length, 3943 mbx->first_tlv.padding, mbx->first_tlv.reply_address); 3944 3945 /* Try replying in case reply address matches the acquisition's 3946 * posted address. 
3947 */ 3948 if (p_vf->acquire.first_tlv.reply_address && 3949 (mbx->first_tlv.reply_address == 3950 p_vf->acquire.first_tlv.reply_address)) { 3951 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, 3952 mbx->first_tlv.tl.type, 3953 sizeof(struct pfvf_def_resp_tlv), 3954 PFVF_STATUS_NOT_SUPPORTED); 3955 } else { 3956 DP_VERBOSE(p_hwfn, 3957 QED_MSG_IOV, 3958 "VF[%02x]: Can't respond to TLV - no valid reply address\n", 3959 p_vf->abs_vf_id); 3960 } 3961 } 3962 } 3963 3964 static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events) 3965 { 3966 int i; 3967 3968 memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH); 3969 3970 qed_for_each_vf(p_hwfn, i) { 3971 struct qed_vf_info *p_vf; 3972 3973 p_vf = &p_hwfn->pf_iov_info->vfs_array[i]; 3974 if (p_vf->vf_mbx.b_pending_msg) 3975 events[i / 64] |= 1ULL << (i % 64); 3976 } 3977 } 3978 3979 static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn, 3980 u16 abs_vfid) 3981 { 3982 u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf; 3983 3984 if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) { 3985 DP_VERBOSE(p_hwfn, 3986 QED_MSG_IOV, 3987 "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n", 3988 abs_vfid); 3989 return NULL; 3990 } 3991 3992 return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min]; 3993 } 3994 3995 static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, 3996 u16 abs_vfid, struct regpair *vf_msg) 3997 { 3998 struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn, 3999 abs_vfid); 4000 4001 if (!p_vf) 4002 return 0; 4003 4004 /* Record the physical address of the request so that the handler 4005 * can later copy the message from it. 4006 */ 4007 p_vf->vf_mbx.pending_req = HILO_64(vf_msg->hi, vf_msg->lo); 4008 4009 /* Mark the event and schedule the workqueue */ 4010 p_vf->vf_mbx.b_pending_msg = true; 4011 qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG); 4012 4013 return 0; 4014 } 4015 4016 static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, 4017 struct malicious_vf_eqe_data *p_data) 4018 { 4019 struct qed_vf_info *p_vf; 4020 4021 p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id); 4022 4023 if (!p_vf) 4024 return; 4025 4026 if (!p_vf->b_malicious) { 4027 DP_NOTICE(p_hwfn, 4028 "VF [%d] - Malicious behavior [%02x]\n", 4029 p_vf->abs_vf_id, p_data->err_id); 4030 4031 p_vf->b_malicious = true; 4032 } else { 4033 DP_INFO(p_hwfn, 4034 "VF [%d] - Malicious behavior [%02x]\n", 4035 p_vf->abs_vf_id, p_data->err_id); 4036 } 4037 } 4038 4039 static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, 4040 union event_ring_data *data, u8 fw_return_code) 4041 { 4042 switch (opcode) { 4043 case COMMON_EVENT_VF_PF_CHANNEL: 4044 return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo), 4045 &data->vf_pf_channel.msg_addr); 4046 case COMMON_EVENT_MALICIOUS_VF: 4047 qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf); 4048 return 0; 4049 default: 4050 DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n", 4051 opcode); 4052 return -EINVAL; 4053 } 4054 } 4055 4056 u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) 4057 { 4058 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; 4059 u16 i; 4060 4061 if (!p_iov) 4062 goto out; 4063 4064 for (i = rel_vf_id; i < p_iov->total_vfs; i++) 4065 if (qed_iov_is_valid_vfid(p_hwfn, i, true, false)) 4066 return i; 4067 4068 out: 4069 return MAX_NUM_VFS; 4070 } 4071 4072 static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt, 4073 int vfid) 4074 { 4075
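/* DMA the request from the address the VF posted (pending_req)
 * into the PF-side mailbox buffer (req_phys), with the VF set
 * as the DMA source so the read is performed on the VF's behalf.
 */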
struct qed_dmae_params params; 4076 struct qed_vf_info *vf_info; 4077 4078 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 4079 if (!vf_info) 4080 return -EINVAL; 4081 4082 memset(&params, 0, sizeof(params)); 4083 SET_FIELD(params.flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1); 4084 SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 0x1); 4085 params.src_vfid = vf_info->abs_vf_id; 4086 4087 if (qed_dmae_host2host(p_hwfn, ptt, 4088 vf_info->vf_mbx.pending_req, 4089 vf_info->vf_mbx.req_phys, 4090 sizeof(union vfpf_tlvs) / 4, &params)) { 4091 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 4092 "Failed to copy message from VF 0x%02x\n", vfid); 4093 4094 return -EIO; 4095 } 4096 4097 return 0; 4098 } 4099 4100 static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn, 4101 u8 *mac, int vfid) 4102 { 4103 struct qed_vf_info *vf_info; 4104 u64 feature; 4105 4106 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4107 if (!vf_info) { 4108 DP_NOTICE(p_hwfn->cdev, 4109 "Can not set forced MAC, invalid vfid [%d]\n", vfid); 4110 return; 4111 } 4112 4113 if (vf_info->b_malicious) { 4114 DP_NOTICE(p_hwfn->cdev, 4115 "Can't set forced MAC to malicious VF [%d]\n", vfid); 4116 return; 4117 } 4118 4119 if (vf_info->p_vf_info.is_trusted_configured) { 4120 feature = BIT(VFPF_BULLETIN_MAC_ADDR); 4121 /* Trust mode will disable Forced MAC */ 4122 vf_info->bulletin.p_virt->valid_bitmap &= 4123 ~BIT(MAC_ADDR_FORCED); 4124 } else { 4125 feature = BIT(MAC_ADDR_FORCED); 4126 /* Forced MAC will disable MAC_ADDR */ 4127 vf_info->bulletin.p_virt->valid_bitmap &= 4128 ~BIT(VFPF_BULLETIN_MAC_ADDR); 4129 } 4130 4131 memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); 4132 4133 vf_info->bulletin.p_virt->valid_bitmap |= feature; 4134 4135 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); 4136 } 4137 4138 static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid) 4139 { 4140 struct qed_vf_info *vf_info; 4141 u64 feature; 4142 4143 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4144 if (!vf_info) { 4145 DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n", 4146 vfid); 4147 return -EINVAL; 4148 } 4149 4150 if (vf_info->b_malicious) { 4151 DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n", 4152 vfid); 4153 return -EINVAL; 4154 } 4155 4156 if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) { 4157 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 4158 "Can not set MAC, Forced MAC is configured\n"); 4159 return -EINVAL; 4160 } 4161 4162 feature = BIT(VFPF_BULLETIN_MAC_ADDR); 4163 ether_addr_copy(vf_info->bulletin.p_virt->mac, mac); 4164 4165 vf_info->bulletin.p_virt->valid_bitmap |= feature; 4166 4167 if (vf_info->p_vf_info.is_trusted_configured) 4168 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); 4169 4170 return 0; 4171 } 4172 4173 static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn, 4174 u16 pvid, int vfid) 4175 { 4176 struct qed_vf_info *vf_info; 4177 u64 feature; 4178 4179 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 4180 if (!vf_info) { 4181 DP_NOTICE(p_hwfn->cdev, 4182 "Can not set forced vlan, invalid vfid [%d]\n", vfid); 4183 return; 4184 } 4185 4186 if (vf_info->b_malicious) { 4187 DP_NOTICE(p_hwfn->cdev, 4188 "Can't set forced vlan to malicious VF [%d]\n", vfid); 4189 return; 4190 } 4191 4192 feature = BIT(VLAN_ADDR_FORCED); 4193 vf_info->bulletin.p_virt->pvid = pvid; 4194 if (pvid) 4195 vf_info->bulletin.p_virt->valid_bitmap |= feature; 4196 else 4197 vf_info->bulletin.p_virt->valid_bitmap &= ~feature; 4198 4199
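/* Apply the forced-VLAN change to the VF's vport as well;
 * the bulletin update above only informs the VF of the new pvid.
 */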
qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); 4200 } 4201 4202 void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, 4203 int vfid, u16 vxlan_port, u16 geneve_port) 4204 { 4205 struct qed_vf_info *vf_info; 4206 4207 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4208 if (!vf_info) { 4209 DP_NOTICE(p_hwfn->cdev, 4210 "Can not set udp ports, invalid vfid [%d]\n", vfid); 4211 return; 4212 } 4213 4214 if (vf_info->b_malicious) { 4215 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 4216 "Can not set udp ports to malicious VF [%d]\n", 4217 vfid); 4218 return; 4219 } 4220 4221 vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port; 4222 vf_info->bulletin.p_virt->geneve_udp_port = geneve_port; 4223 } 4224 4225 static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid) 4226 { 4227 struct qed_vf_info *p_vf_info; 4228 4229 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 4230 if (!p_vf_info) 4231 return false; 4232 4233 return !!p_vf_info->vport_instance; 4234 } 4235 4236 static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) 4237 { 4238 struct qed_vf_info *p_vf_info; 4239 4240 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 4241 if (!p_vf_info) 4242 return true; 4243 4244 return p_vf_info->state == VF_STOPPED; 4245 } 4246 4247 static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid) 4248 { 4249 struct qed_vf_info *vf_info; 4250 4251 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 4252 if (!vf_info) 4253 return false; 4254 4255 return vf_info->spoof_chk; 4256 } 4257 4258 static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val) 4259 { 4260 struct qed_vf_info *vf; 4261 int rc = -EINVAL; 4262 4263 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { 4264 DP_NOTICE(p_hwfn, 4265 "SR-IOV sanity check failed, can't set spoofchk\n"); 4266 goto out; 4267 } 4268 4269 vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 4270 if (!vf) 4271 goto out; 4272 4273 if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) { 4274 /* After VF VPORT start PF will configure spoof check */ 4275 vf->req_spoofchk_val = val; 4276 rc = 0; 4277 goto out; 4278 } 4279 4280 rc = __qed_iov_spoofchk_set(p_hwfn, vf, val); 4281 4282 out: 4283 return rc; 4284 } 4285 4286 static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id) 4287 { 4288 struct qed_vf_info *p_vf; 4289 4290 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4291 if (!p_vf || !p_vf->bulletin.p_virt) 4292 return NULL; 4293 4294 if (!(p_vf->bulletin.p_virt->valid_bitmap & 4295 BIT(VFPF_BULLETIN_MAC_ADDR))) 4296 return NULL; 4297 4298 return p_vf->bulletin.p_virt->mac; 4299 } 4300 4301 static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn, 4302 u16 rel_vf_id) 4303 { 4304 struct qed_vf_info *p_vf; 4305 4306 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4307 if (!p_vf || !p_vf->bulletin.p_virt) 4308 return NULL; 4309 4310 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) 4311 return NULL; 4312 4313 return p_vf->bulletin.p_virt->mac; 4314 } 4315 4316 static u16 4317 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id) 4318 { 4319 struct qed_vf_info *p_vf; 4320 4321 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4322 if (!p_vf || !p_vf->bulletin.p_virt) 4323 return 0; 4324 4325 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))) 4326 return 0; 4327 4328 return p_vf->bulletin.p_virt->pvid; 4329 } 4330 4331 static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, 4332 struct qed_ptt 
*p_ptt, int vfid, int val) 4333 { 4334 struct qed_vf_info *vf; 4335 u8 abs_vp_id = 0; 4336 u16 rl_id; 4337 int rc; 4338 4339 vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4340 if (!vf) 4341 return -EINVAL; 4342 4343 rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id); 4344 if (rc) 4345 return rc; 4346 4347 rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */ 4348 return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val); 4349 } 4350 4351 static int 4352 qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate) 4353 { 4354 struct qed_vf_info *vf; 4355 u8 vport_id; 4356 int i; 4357 4358 for_each_hwfn(cdev, i) { 4359 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 4360 4361 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { 4362 DP_NOTICE(p_hwfn, 4363 "SR-IOV sanity check failed, can't set min rate\n"); 4364 return -EINVAL; 4365 } 4366 } 4367 4368 vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true); if (!vf) return -EINVAL; 4369 vport_id = vf->vport_id; 4370 4371 return qed_configure_vport_wfq(cdev, vport_id, rate); 4372 } 4373 4374 static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid) 4375 { 4376 struct qed_wfq_data *vf_vp_wfq; 4377 struct qed_vf_info *vf_info; 4378 4379 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 4380 if (!vf_info) 4381 return 0; 4382 4383 vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id]; 4384 4385 if (vf_vp_wfq->configured) 4386 return vf_vp_wfq->min_speed; 4387 else 4388 return 0; 4389 } 4390 4391 /** 4392 * qed_schedule_iov - schedules IOV task for VF and PF 4393 * @hwfn: hardware function pointer 4394 * @flag: IOV flag for VF/PF 4395 */ 4396 void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag) 4397 { 4398 smp_mb__before_atomic(); 4399 set_bit(flag, &hwfn->iov_task_flags); 4400 smp_mb__after_atomic(); 4401 DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); 4402 queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0); 4403 } 4404 4405 void qed_vf_start_iov_wq(struct qed_dev *cdev) 4406 { 4407 int i; 4408 4409 for_each_hwfn(cdev, i) 4410 queue_delayed_work(cdev->hwfns[i].iov_wq, 4411 &cdev->hwfns[i].iov_task, 0); 4412 } 4413 4414 int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) 4415 { 4416 int i, j; 4417 4418 for_each_hwfn(cdev, i) 4419 if (cdev->hwfns[i].iov_wq) 4420 flush_workqueue(cdev->hwfns[i].iov_wq); 4421 4422 /* Mark VFs for disablement */ 4423 qed_iov_set_vfs_to_disable(cdev, true); 4424 4425 if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled) 4426 pci_disable_sriov(cdev->pdev); 4427 4428 if (cdev->recov_in_prog) { 4429 DP_VERBOSE(cdev, 4430 QED_MSG_IOV, 4431 "Skip SRIOV disable operations in the device since a recovery is in progress\n"); 4432 goto out; 4433 } 4434 4435 for_each_hwfn(cdev, i) { 4436 struct qed_hwfn *hwfn = &cdev->hwfns[i]; 4437 struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 4438 4439 /* Failure to acquire the ptt in 100g creates an odd error 4440 * where the first engine has already released IOV.
4441 */ 4442 if (!ptt) { 4443 DP_ERR(hwfn, "Failed to acquire ptt\n"); 4444 return -EBUSY; 4445 } 4446 4447 /* Clean WFQ db and configure equal weight for all vports */ 4448 qed_clean_wfq_db(hwfn, ptt); 4449 4450 qed_for_each_vf(hwfn, j) { 4451 int k; 4452 4453 if (!qed_iov_is_valid_vfid(hwfn, j, true, false)) 4454 continue; 4455 4456 /* Wait until VF is disabled before releasing */ 4457 for (k = 0; k < 100; k++) { 4458 if (!qed_iov_is_vf_stopped(hwfn, j)) 4459 msleep(20); 4460 else 4461 break; 4462 } 4463 4464 if (k < 100) 4465 qed_iov_release_hw_for_vf(&cdev->hwfns[i], 4466 ptt, j); 4467 else 4468 DP_ERR(hwfn, 4469 "Timeout waiting for VF's FLR to end\n"); 4470 } 4471 4472 qed_ptt_release(hwfn, ptt); 4473 } 4474 out: 4475 qed_iov_set_vfs_to_disable(cdev, false); 4476 4477 return 0; 4478 } 4479 4480 static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn, 4481 u16 vfid, 4482 struct qed_iov_vf_init_params *params) 4483 { 4484 u16 base, i; 4485 4486 /* Since we have an equal resource distribution per-VF, and we assume 4487 * PF has acquired the QED_PF_L2_QUE first queues, we start setting 4488 * sequentially from there. 4489 */ 4490 base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues; 4491 4492 params->rel_vf_id = vfid; 4493 for (i = 0; i < params->num_queues; i++) { 4494 params->req_rx_queue[i] = base + i; 4495 params->req_tx_queue[i] = base + i; 4496 } 4497 } 4498 4499 static int qed_sriov_enable(struct qed_dev *cdev, int num) 4500 { 4501 struct qed_iov_vf_init_params params; 4502 struct qed_hwfn *hwfn; 4503 struct qed_ptt *ptt; 4504 int i, j, rc; 4505 4506 if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { 4507 DP_NOTICE(cdev, "Can start at most %d VFs\n", 4508 RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1); 4509 return -EINVAL; 4510 } 4511 4512 memset(&params, 0, sizeof(params)); 4513 4514 /* Initialize HW for VF access */ 4515 for_each_hwfn(cdev, j) { 4516 hwfn = &cdev->hwfns[j]; 4517 ptt = qed_ptt_acquire(hwfn); 4518 4519 /* Make sure not to use more than 16 queues per VF */ 4520 params.num_queues = min_t(int, 4521 FEAT_NUM(hwfn, QED_VF_L2_QUE) / num, 4522 16); 4523 4524 if (!ptt) { 4525 DP_ERR(hwfn, "Failed to acquire ptt\n"); 4526 rc = -EBUSY; 4527 goto err; 4528 } 4529 4530 for (i = 0; i < num; i++) { 4531 if (!qed_iov_is_valid_vfid(hwfn, i, false, true)) 4532 continue; 4533 4534 qed_sriov_enable_qid_config(hwfn, i, &params); 4535 rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params); 4536 if (rc) { 4537 DP_ERR(cdev, "Failed to enable VF[%d]\n", i); 4538 qed_ptt_release(hwfn, ptt); 4539 goto err; 4540 } 4541 } 4542 4543 qed_ptt_release(hwfn, ptt); 4544 } 4545 4546 /* Enable SRIOV PCIe functions */ 4547 rc = pci_enable_sriov(cdev->pdev, num); 4548 if (rc) { 4549 DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc); 4550 goto err; 4551 } 4552 4553 hwfn = QED_LEADING_HWFN(cdev); 4554 ptt = qed_ptt_acquire(hwfn); 4555 if (!ptt) { 4556 DP_ERR(hwfn, "Failed to acquire ptt\n"); 4557 rc = -EBUSY; 4558 goto err; 4559 } 4560 4561 rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB); 4562 if (rc) 4563 DP_INFO(cdev, "Failed to update eswitch mode\n"); 4564 qed_ptt_release(hwfn, ptt); 4565 4566 return num; 4567 4568 err: 4569 qed_sriov_disable(cdev, false); 4570 return rc; 4571 } 4572 4573 static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param) 4574 { 4575 if (!IS_QED_SRIOV(cdev)) { 4576 DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n"); 4577 return -EOPNOTSUPP; 4578 } 4579 4580 if (num_vfs_param) 4581 return qed_sriov_enable(cdev, num_vfs_param); 4582 else 4583
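/* A zero num_vfs_param requests teardown of all VFs - typically
 * reached when 0 is written to the PF's sriov_numvfs sysfs node.
 */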
return qed_sriov_disable(cdev, true); 4584 } 4585 4586 static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid) 4587 { 4588 int i; 4589 4590 if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { 4591 DP_VERBOSE(cdev, QED_MSG_IOV, 4592 "Cannot set a VF MAC; SR-IOV is not enabled\n"); 4593 return -EINVAL; 4594 } 4595 4596 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) { 4597 DP_VERBOSE(cdev, QED_MSG_IOV, 4598 "Cannot set VF[%d] MAC (VF is not active)\n", vfid); 4599 return -EINVAL; 4600 } 4601 4602 for_each_hwfn(cdev, i) { 4603 struct qed_hwfn *hwfn = &cdev->hwfns[i]; 4604 struct qed_public_vf_info *vf_info; 4605 4606 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); 4607 if (!vf_info) 4608 continue; 4609 4610 /* Set the MAC, and schedule the IOV task */ 4611 if (vf_info->is_trusted_configured) 4612 ether_addr_copy(vf_info->mac, mac); 4613 else 4614 ether_addr_copy(vf_info->forced_mac, mac); 4615 4616 qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); 4617 } 4618 4619 return 0; 4620 } 4621 4622 static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid) 4623 { 4624 int i; 4625 4626 if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { 4627 DP_VERBOSE(cdev, QED_MSG_IOV, 4628 "Cannot set a VF VLAN; SR-IOV is not enabled\n"); 4629 return -EINVAL; 4630 } 4631 4632 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) { 4633 DP_VERBOSE(cdev, QED_MSG_IOV, 4634 "Cannot set VF[%d] VLAN (VF is not active)\n", vfid); 4635 return -EINVAL; 4636 } 4637 4638 for_each_hwfn(cdev, i) { 4639 struct qed_hwfn *hwfn = &cdev->hwfns[i]; 4640 struct qed_public_vf_info *vf_info; 4641 4642 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); 4643 if (!vf_info) 4644 continue; 4645 4646 /* Set the forced VLAN, and schedule the IOV task */ 4647 vf_info->forced_vlan = vid; 4648 qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); 4649 } 4650 4651 return 0; 4652 } 4653 4654 static int qed_get_vf_config(struct qed_dev *cdev, 4655 int vf_id, struct ifla_vf_info *ivi) 4656 { 4657 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 4658 struct qed_public_vf_info *vf_info; 4659 struct qed_mcp_link_state link; 4660 u32 tx_rate; 4661 4662 /* Sanitize request */ 4663 if (IS_VF(cdev)) 4664 return -EINVAL; 4665 4666 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) { 4667 DP_VERBOSE(cdev, QED_MSG_IOV, 4668 "VF index [%d] isn't active\n", vf_id); 4669 return -EINVAL; 4670 } 4671 4672 vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); 4673 4674 qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL); 4675 4676 /* Fill information about VF */ 4677 ivi->vf = vf_id; 4678 4679 if (is_valid_ether_addr(vf_info->forced_mac)) 4680 ether_addr_copy(ivi->mac, vf_info->forced_mac); 4681 else 4682 ether_addr_copy(ivi->mac, vf_info->mac); 4683 4684 ivi->vlan = vf_info->forced_vlan; 4685 ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id); 4686 ivi->linkstate = vf_info->link_state; 4687 tx_rate = vf_info->tx_rate; 4688 ivi->max_tx_rate = tx_rate ?
tx_rate : link.speed; 4689 ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id); 4690 4691 return 0; 4692 } 4693 4694 void qed_inform_vf_link_state(struct qed_hwfn *hwfn) 4695 { 4696 struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev); 4697 struct qed_mcp_link_capabilities caps; 4698 struct qed_mcp_link_params params; 4699 struct qed_mcp_link_state link; 4700 int i; 4701 4702 if (!hwfn->pf_iov_info) 4703 return; 4704 4705 /* Update bulletin of all future possible VFs with link configuration */ 4706 for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) { 4707 struct qed_public_vf_info *vf_info; 4708 4709 vf_info = qed_iov_get_public_vf_info(hwfn, i, false); 4710 if (!vf_info) 4711 continue; 4712 4713 /* Only hwfn0 is actually interested in the link speed. 4714 * But since only it would receive an MFW indication of link, 4715 * need to take configuration from it - otherwise things like 4716 * rate limiting for hwfn1 VF would not work. 4717 */ 4718 memcpy(&params, qed_mcp_get_link_params(lead_hwfn), 4719 sizeof(params)); 4720 memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link)); 4721 memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn), 4722 sizeof(caps)); 4723 4724 /* Modify link according to the VF's configured link state */ 4725 switch (vf_info->link_state) { 4726 case IFLA_VF_LINK_STATE_DISABLE: 4727 link.link_up = false; 4728 break; 4729 case IFLA_VF_LINK_STATE_ENABLE: 4730 link.link_up = true; 4731 /* Set speed according to the maximum supported by HW, 4732 * that is 40G for regular devices and 100G for CMT 4733 * mode devices. 4734 */ 4735 link.speed = (hwfn->cdev->num_hwfns > 1) ? 4736 100000 : 40000; break; 4737 default: 4738 /* In auto mode pass PF link image to VF */ 4739 break; 4740 } 4741 4742 if (link.link_up && vf_info->tx_rate) { 4743 struct qed_ptt *ptt; 4744 int rate; 4745 4746 rate = min_t(int, vf_info->tx_rate, link.speed); 4747 4748 ptt = qed_ptt_acquire(hwfn); 4749 if (!ptt) { 4750 DP_NOTICE(hwfn, "Failed to acquire PTT\n"); 4751 return; 4752 } 4753 4754 if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) { 4755 vf_info->tx_rate = rate; 4756 link.speed = rate; 4757 } 4758 4759 qed_ptt_release(hwfn, ptt); 4760 } 4761 4762 qed_iov_set_link(hwfn, i, &params, &link, &caps); 4763 } 4764 4765 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 4766 } 4767 4768 static int qed_set_vf_link_state(struct qed_dev *cdev, 4769 int vf_id, int link_state) 4770 { 4771 int i; 4772 4773 /* Sanitize request */ 4774 if (IS_VF(cdev)) 4775 return -EINVAL; 4776 4777 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) { 4778 DP_VERBOSE(cdev, QED_MSG_IOV, 4779 "VF index [%d] isn't active\n", vf_id); 4780 return -EINVAL; 4781 } 4782 4783 /* Handle configuration of link state */ 4784 for_each_hwfn(cdev, i) { 4785 struct qed_hwfn *hwfn = &cdev->hwfns[i]; 4786 struct qed_public_vf_info *vf; 4787 4788 vf = qed_iov_get_public_vf_info(hwfn, vf_id, true); 4789 if (!vf) 4790 continue; 4791 4792 if (vf->link_state == link_state) 4793 continue; 4794 4795 vf->link_state = link_state; 4796 qed_inform_vf_link_state(&cdev->hwfns[i]); 4797 } 4798 4799 return 0; 4800 } 4801 4802 static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val) 4803 { 4804 int i, rc = -EINVAL; 4805 4806 for_each_hwfn(cdev, i) { 4807 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 4808 4809 rc = qed_iov_spoofchk_set(p_hwfn, vfid, val); 4810 if (rc) 4811 break; 4812 } 4813 4814 return rc; 4815 } 4816 4817 static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate) 4818 { 4819 int i; 4820
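/* Store the new max rate per-hwfn; qed_inform_vf_link_state()
 * is what actually programs the limit against the link speed.
 */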
4821 for_each_hwfn(cdev, i) { 4822 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 4823 struct qed_public_vf_info *vf; 4824 4825 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { 4826 DP_NOTICE(p_hwfn, 4827 "SR-IOV sanity check failed, can't set tx rate\n"); 4828 return -EINVAL; 4829 } 4830 4831 vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true); 4832 4833 vf->tx_rate = rate; 4834 4835 qed_inform_vf_link_state(p_hwfn); 4836 } 4837 4838 return 0; 4839 } 4840 4841 static int qed_set_vf_rate(struct qed_dev *cdev, 4842 int vfid, u32 min_rate, u32 max_rate) 4843 { 4844 int rc_min = 0, rc_max = 0; 4845 4846 if (max_rate) 4847 rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate); 4848 4849 if (min_rate) 4850 rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate); 4851 4852 if (rc_max | rc_min) 4853 return -EINVAL; 4854 4855 return 0; 4856 } 4857 4858 static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust) 4859 { 4860 int i; 4861 4862 for_each_hwfn(cdev, i) { 4863 struct qed_hwfn *hwfn = &cdev->hwfns[i]; 4864 struct qed_public_vf_info *vf; 4865 4866 if (!qed_iov_pf_sanity_check(hwfn, vfid)) { 4867 DP_NOTICE(hwfn, 4868 "SR-IOV sanity check failed, can't set trust\n"); 4869 return -EINVAL; 4870 } 4871 4872 vf = qed_iov_get_public_vf_info(hwfn, vfid, true); 4873 4874 if (vf->is_trusted_request == trust) 4875 return 0; 4876 vf->is_trusted_request = trust; 4877 4878 qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG); 4879 } 4880 4881 return 0; 4882 } 4883 4884 static void qed_handle_vf_msg(struct qed_hwfn *hwfn) 4885 { 4886 u64 events[QED_VF_ARRAY_LENGTH]; 4887 struct qed_ptt *ptt; 4888 int i; 4889 4890 ptt = qed_ptt_acquire(hwfn); 4891 if (!ptt) { 4892 DP_VERBOSE(hwfn, QED_MSG_IOV, 4893 "Can't acquire PTT; re-scheduling\n"); 4894 qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG); 4895 return; 4896 } 4897 4898 qed_iov_pf_get_pending_events(hwfn, events); 4899 4900 DP_VERBOSE(hwfn, QED_MSG_IOV, 4901 "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n", 4902 events[0], events[1], events[2]); 4903 4904 qed_for_each_vf(hwfn, i) { 4905 /* Skip VFs with no pending messages */ 4906 if (!(events[i / 64] & (1ULL << (i % 64)))) 4907 continue; 4908 4909 DP_VERBOSE(hwfn, QED_MSG_IOV, 4910 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", 4911 i, hwfn->cdev->p_iov_info->first_vf_in_pf + i); 4912 4913 /* Copy VF's message to PF's request buffer for that VF */ 4914 if (qed_iov_copy_vf_msg(hwfn, ptt, i)) 4915 continue; 4916 4917 qed_iov_process_mbx_req(hwfn, ptt, i); 4918 } 4919 4920 qed_ptt_release(hwfn, ptt); 4921 } 4922 4923 static bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn, 4924 u8 *mac, 4925 struct qed_public_vf_info *info) 4926 { 4927 if (info->is_trusted_configured) { 4928 if (is_valid_ether_addr(info->mac) && 4929 (!mac || !ether_addr_equal(mac, info->mac))) 4930 return true; 4931 } else { 4932 if (is_valid_ether_addr(info->forced_mac) && 4933 (!mac || !ether_addr_equal(mac, info->forced_mac))) 4934 return true; 4935 } 4936 4937 return false; 4938 } 4939 4940 static void qed_set_bulletin_mac(struct qed_hwfn *hwfn, 4941 struct qed_public_vf_info *info, 4942 int vfid) 4943 { 4944 if (info->is_trusted_configured) 4945 qed_iov_bulletin_set_mac(hwfn, info->mac, vfid); 4946 else 4947 qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid); 4948 } 4949 4950 static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn) 4951 { 4952 int i; 4953 4954 qed_for_each_vf(hwfn, i) { 4955 struct qed_public_vf_info *info; 4956 bool update = false; 4957 u8 *mac; 4958 4959 info = 
qed_iov_get_public_vf_info(hwfn, i, true); 4960 if (!info) 4961 continue; 4962 4963 /* Update data on bulletin board */ 4964 if (info->is_trusted_configured) 4965 mac = qed_iov_bulletin_get_mac(hwfn, i); 4966 else 4967 mac = qed_iov_bulletin_get_forced_mac(hwfn, i); 4968 4969 if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) { 4970 DP_VERBOSE(hwfn, 4971 QED_MSG_IOV, 4972 "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n", 4973 i, 4974 hwfn->cdev->p_iov_info->first_vf_in_pf + i); 4975 4976 /* Update bulletin board with MAC */ 4977 qed_set_bulletin_mac(hwfn, info, i); 4978 update = true; 4979 } 4980 4981 if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^ 4982 info->forced_vlan) { 4983 DP_VERBOSE(hwfn, 4984 QED_MSG_IOV, 4985 "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n", 4986 info->forced_vlan, 4987 i, 4988 hwfn->cdev->p_iov_info->first_vf_in_pf + i); 4989 qed_iov_bulletin_set_forced_vlan(hwfn, 4990 info->forced_vlan, i); 4991 update = true; 4992 } 4993 4994 if (update) 4995 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 4996 } 4997 } 4998 4999 static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) 5000 { 5001 struct qed_ptt *ptt; 5002 int i; 5003 5004 ptt = qed_ptt_acquire(hwfn); 5005 if (!ptt) { 5006 DP_NOTICE(hwfn, "Failed allocating a ptt entry\n"); 5007 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 5008 return; 5009 } 5010 5011 qed_for_each_vf(hwfn, i) 5012 qed_iov_post_vf_bulletin(hwfn, i, ptt); 5013 5014 qed_ptt_release(hwfn, ptt); 5015 } 5016 5017 static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id) 5018 { 5019 struct qed_public_vf_info *vf_info; 5020 struct qed_vf_info *vf; 5021 u8 *force_mac; 5022 int i; 5023 5024 vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); 5025 vf = qed_iov_get_vf_info(hwfn, vf_id, true); 5026 5027 if (!vf_info || !vf) 5028 return; 5029 5030 /* Force MAC converted to generic MAC in case of VF trust on */ 5031 if (vf_info->is_trusted_configured && 5032 (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) { 5033 force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id); 5034 5035 if (force_mac) { 5036 /* Clear existing shadow copy of MAC to have a clean 5037 * slate. 5038 */ 5039 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { 5040 if (ether_addr_equal(vf->shadow_config.macs[i], 5041 vf_info->mac)) { 5042 eth_zero_addr(vf->shadow_config.macs[i]); 5043 DP_VERBOSE(hwfn, QED_MSG_IOV, 5044 "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n", 5045 vf_info->mac, vf_id); 5046 break; 5047 } 5048 } 5049 5050 ether_addr_copy(vf_info->mac, force_mac); 5051 eth_zero_addr(vf_info->forced_mac); 5052 vf->bulletin.p_virt->valid_bitmap &= 5053 ~BIT(MAC_ADDR_FORCED); 5054 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 5055 } 5056 } 5057 5058 /* Update shadow copy with VF MAC when trust mode is turned off */ 5059 if (!vf_info->is_trusted_configured) { 5060 u8 empty_mac[ETH_ALEN]; 5061 5062 eth_zero_addr(empty_mac); 5063 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { 5064 if (ether_addr_equal(vf->shadow_config.macs[i], 5065 empty_mac)) { 5066 ether_addr_copy(vf->shadow_config.macs[i], 5067 vf_info->mac); 5068 DP_VERBOSE(hwfn, QED_MSG_IOV, 5069 "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n", 5070 vf_info->mac, vf_id); 5071 break; 5072 } 5073 } 5074 /* Clear bulletin when trust mode is turned off, 5075 * to have a clean slate for next (normal) operations. 
5076 */ 5077 qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id); 5078 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 5079 } 5080 } 5081 5082 static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) 5083 { 5084 struct qed_sp_vport_update_params params; 5085 struct qed_filter_accept_flags *flags; 5086 struct qed_public_vf_info *vf_info; 5087 struct qed_vf_info *vf; 5088 u8 mask; 5089 int i; 5090 5091 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; 5092 flags = &params.accept_flags; 5093 5094 qed_for_each_vf(hwfn, i) { 5095 /* Need to make sure current requested configuration didn't 5096 * flip so that we'll end up configuring something that's not 5097 * needed. 5098 */ 5099 vf_info = qed_iov_get_public_vf_info(hwfn, i, true); 5100 if (vf_info->is_trusted_configured == 5101 vf_info->is_trusted_request) 5102 continue; 5103 vf_info->is_trusted_configured = vf_info->is_trusted_request; 5104 5105 /* Handle forced MAC mode */ 5106 qed_update_mac_for_vf_trust_change(hwfn, i); 5107 5108 /* Validate that the VF has a configured vport */ 5109 vf = qed_iov_get_vf_info(hwfn, i, true); 5110 if (!vf->vport_instance) 5111 continue; 5112 5113 memset(&params, 0, sizeof(params)); 5114 params.opaque_fid = vf->opaque_fid; 5115 params.vport_id = vf->vport_id; 5116 5117 params.update_ctl_frame_check = 1; 5118 params.mac_chk_en = !vf_info->is_trusted_configured; 5119 5120 if (vf_info->rx_accept_mode & mask) { 5121 flags->update_rx_mode_config = 1; 5122 flags->rx_accept_filter = vf_info->rx_accept_mode; 5123 } 5124 5125 if (vf_info->tx_accept_mode & mask) { 5126 flags->update_tx_mode_config = 1; 5127 flags->tx_accept_filter = vf_info->tx_accept_mode; 5128 } 5129 5130 /* Remove if needed; Otherwise this would set the mask */ 5131 if (!vf_info->is_trusted_configured) { 5132 flags->rx_accept_filter &= ~mask; 5133 flags->tx_accept_filter &= ~mask; 5134 } 5135 5136 if (flags->update_rx_mode_config || 5137 flags->update_tx_mode_config || 5138 params.update_ctl_frame_check) 5139 qed_sp_vport_update(hwfn, &params, 5140 QED_SPQ_MODE_EBLOCK, NULL); 5141 } 5142 } 5143 5144 static void qed_iov_pf_task(struct work_struct *work) 5145 5146 { 5147 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, 5148 iov_task.work); 5149 int rc; 5150 5151 if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags)) 5152 return; 5153 5154 if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) { 5155 struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 5156 5157 if (!ptt) { 5158 qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); 5159 return; 5160 } 5161 5162 rc = qed_iov_vf_flr_cleanup(hwfn, ptt); 5163 if (rc) 5164 qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); 5165 5166 qed_ptt_release(hwfn, ptt); 5167 } 5168 5169 if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags)) 5170 qed_handle_vf_msg(hwfn); 5171 5172 if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG, 5173 &hwfn->iov_task_flags)) 5174 qed_handle_pf_set_vf_unicast(hwfn); 5175 5176 if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG, 5177 &hwfn->iov_task_flags)) 5178 qed_handle_bulletin_post(hwfn); 5179 5180 if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags)) 5181 qed_iov_handle_trust_change(hwfn); 5182 } 5183 5184 void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) 5185 { 5186 int i; 5187 5188 for_each_hwfn(cdev, i) { 5189 if (!cdev->hwfns[i].iov_wq) 5190 continue; 5191 5192 if (schedule_first) { 5193 qed_schedule_iov(&cdev->hwfns[i], 5194 QED_IOV_WQ_STOP_WQ_FLAG); 5195
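/* The STOP flag makes a queued task bail out early; the
 * synchronous cancel then ensures nothing is still running
 * before the workqueue is flushed and destroyed.
 */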
cancel_delayed_work_sync(&cdev->hwfns[i].iov_task); 5196 } 5197 5198 flush_workqueue(cdev->hwfns[i].iov_wq); 5199 destroy_workqueue(cdev->hwfns[i].iov_wq); 5200 } 5201 } 5202 5203 int qed_iov_wq_start(struct qed_dev *cdev) 5204 { 5205 char name[NAME_SIZE]; 5206 int i; 5207 5208 for_each_hwfn(cdev, i) { 5209 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 5210 5211 /* PFs need a dedicated workqueue only if they support IOV. 5212 * VFs always require one. 5213 */ 5214 if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn)) 5215 continue; 5216 5217 snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x", 5218 cdev->pdev->bus->number, 5219 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id); 5220 5221 p_hwfn->iov_wq = create_singlethread_workqueue(name); 5222 if (!p_hwfn->iov_wq) { 5223 DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n"); 5224 return -ENOMEM; 5225 } 5226 5227 if (IS_PF(cdev)) 5228 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task); 5229 else 5230 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task); 5231 } 5232 5233 return 0; 5234 } 5235 5236 const struct qed_iov_hv_ops qed_iov_ops_pass = { 5237 .configure = &qed_sriov_configure, 5238 .set_mac = &qed_sriov_pf_set_mac, 5239 .set_vlan = &qed_sriov_pf_set_vlan, 5240 .get_config = &qed_get_vf_config, 5241 .set_link_state = &qed_set_vf_link_state, 5242 .set_spoof = &qed_spoof_configure, 5243 .set_rate = &qed_set_vf_rate, 5244 .set_trust = &qed_set_vf_trust, 5245 }; 5246