// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			       u8 opcode,
			       __le16 echo,
			       union event_ring_data *data, u8 fw_return_code);
static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);

static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
{
	u8 legacy = 0;

	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		legacy |= QED_QCID_LEGACY_VF_RX_PROD;

	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		legacy |= QED_QCID_LEGACY_VF_CID;

	return legacy;
}

/* IOV ramrods */
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u8 fp_minor;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_ETH_ROCE:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		qed_sp_destroy_request(p_hwfn, p_ent);
		return -EINVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PF's version\n",
			   p_vf->abs_vf_id,
			   ETH_HSI_VER_MAJOR,
			   fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
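
/* Note: the HSI clamp in qed_sp_vf_start() above means a VF is never
 * started with a fastpath HSI minor newer than the PF's own; e.g. a VF
 * requesting ETH_HSI_VER_MINOR + 1 is simply configured with
 * ETH_HSI_VER_MINOR. The special ETH_HSI_VER_NO_PKT_LEN_TUNN value is
 * passed through untouched so legacy VFs keep their old Rx-producer
 * semantics (see qed_vf_calculate_legacy()).
 */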

static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
			  u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_STOP,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id,
			   bool b_enabled_only, bool b_non_malicious)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
	    b_non_malicious)
		return false;

	return true;
}

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return NULL;
	}

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				  b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}

static struct qed_queue_cid *
qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue)
{
	int i;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx)
			return p_queue->cids[i].p_cid;
	}

	return NULL;
}

enum qed_iov_validate_q_mode {
	QED_IOV_VALIDATE_Q_NA,
	QED_IOV_VALIDATE_Q_ENABLE,
	QED_IOV_VALIDATE_Q_DISABLE,
};

static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf,
					u16 qid,
					enum qed_iov_validate_q_mode mode,
					bool b_is_tx)
{
	int i;

	if (mode == QED_IOV_VALIDATE_Q_NA)
		return true;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		struct qed_vf_queue_cid *p_qcid;

		p_qcid = &p_vf->vf_queues[qid].cids[i];

		if (!p_qcid->p_cid)
			continue;

		if (p_qcid->b_is_tx != b_is_tx)
			continue;

		return mode == QED_IOV_VALIDATE_Q_ENABLE;
	}

	/* In case we haven't found any valid cid, then it's disabled */
	return mode == QED_IOV_VALIDATE_Q_DISABLE;
}

static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 u16 rx_qid,
				 enum qed_iov_validate_q_mode mode)
{
	if (rx_qid >= p_vf->num_rxqs) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
		return false;
	}

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
}

static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 u16 tx_qid,
				 enum qed_iov_validate_q_mode mode)
{
	if (tx_qid >= p_vf->num_txqs) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
		return false;
	}

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
}
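
/* Note on the validation modes above: QED_IOV_VALIDATE_Q_NA skips the cid
 * scan entirely, Q_ENABLE requires that a cid of the requested direction
 * already exists in the queue-zone, and Q_DISABLE requires that none does.
 * E.g. qed_iov_vf_mbx_start_rxq() validates with Q_DISABLE, since the
 * queue must not already be open when the VF asks to start it.
 */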

static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *p_vf, u16 sb_idx)
{
	int i;

	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)
			return true;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

	return false;
}

static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_rxqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						false))
			return true;

	return false;
}

static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_txqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						true))
			return true;

	return false;
}

static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
				    int vfid, struct qed_ptt *p_ptt)
{
	struct qed_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct qed_dmae_params params;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return -EINVAL;

	if (!p_vf->vf_bulletin)
		return -EINVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
				p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	memset(&params, 0, sizeof(params));
	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
	params.dst_vfid = p_vf->abs_vf_id;
	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				  &params);
}
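
/* Note: the CRC above deliberately skips the first sizeof(crc) bytes of
 * the bulletin, i.e. it covers everything from 'version' onward. A VF can
 * validate a snapshot of the board with the same computation - roughly
 * (sketch only, not a helper of this file):
 *
 *	crc = crc32(0, (u8 *)copy + sizeof(copy->crc),
 *		    bulletin_size - sizeof(copy->crc));
 *	ok = (crc == copy->crc) && (copy->version != last_seen_version);
 */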

static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Number of VFs is already set to a non-zero value. Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(cdev,
		   QED_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres,
		   iov->cap,
		   iov->ctrl,
		   iov->total_vfs,
		   iov->initial_vfs,
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
	    iov->total_vfs > NUM_OF_VFS(cdev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(cdev,
			  "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return 0;
}

static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct qed_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "qed_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

		vf->state = VF_STOPPED;
		vf->b_init = false;

		vf->bulletin.phys = idx *
				    sizeof(struct qed_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct qed_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
		vf->vport_id = idx + 1;

		vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
	}
}
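
/* Note: the loop above carves three flat DMA buffers into per-VF slots,
 * all indexed by the relative VF id:
 *
 *	req_virt/req_phys     : union vfpf_tlvs[total_vfs]   (VF -> PF)
 *	reply_virt/reply_phys : union pfvf_tlvs[total_vfs]   (PF -> VF)
 *	bulletin              : struct qed_bulletin_content[total_vfs]
 *
 * so VF 'idx' owns exactly one slot of each, located at e.g.
 * req_p + idx * sizeof(union vfpf_tlvs).
 */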

static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64) p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64) p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

	return 0;
}

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
}

int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return 0;
	}

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
	if (!p_sriov)
		return -ENOMEM;

	p_hwfn->pf_iov_info = p_sriov;

	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
				  qed_sriov_eqe_event);

	return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	qed_iov_setup_vfdb(p_hwfn);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);

	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);
	}
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;
}

int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int pos;
	int rc;

	if (is_kdump_kernel())
		return 0;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
		return 0;
	}

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info)
		return -ENOMEM;

	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;
		return 0;
	}

	/* First VF index based on offset is tricky:
	 * - If ARI is supported [likely], offset - (16 - pf_id) would
	 *   provide the number for eng0. 2nd engine VFs would begin
	 *   after the first engine's VFs.
	 * - If !ARI, VFs would start on next device.
	 *   so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is achieved only by the latter
	 * to differentiate between the two.
	 */

	if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 16;

		cdev->p_iov_info->first_vf_in_pf = first;

		if (QED_PATH_ID(p_hwfn))
			cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
	} else {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 256;

		cdev->p_iov_info->first_vf_in_pf = first;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);

	return 0;
}

static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
				     int vfid, bool b_fail_malicious)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
		return false;

	return true;
}

static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
}

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
				      u16 rel_vf_id, u8 to_disable)
{
	struct qed_vf_info *vf;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
	u16 i;

	if (!IS_QED_SRIOV(cdev))
		return;

	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
		qed_iov_set_vf_to_disable(cdev, i, to_disable);
}

static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 abs_vfid)
{
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
	       1 << (abs_vfid & 0x1f));
}
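
/* Note: the PGLUE "was error" indications are packed 32 VFs per 32-bit
 * register, so the write above selects register (abs_vfid >> 5) and bit
 * (abs_vfid & 0x1f); e.g. abs_vfid 40 clears bit 8 of the register at
 * PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + 4.
 */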

static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, true);
}

static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}

static int
qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs)
{
	u8 current_max = 0;
	int i;

	/* For AH onward, configuration is per-PF. Find maximum of all
	 * the currently enabled child VFs, and set the number to be that.
	 */
	if (!QED_IS_BB(p_hwfn->cdev)) {
		qed_for_each_vf(p_hwfn, i) {
			struct qed_vf_info *p_vf;

			p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true);
			if (!p_vf)
				continue;

			current_max = max_t(u8, current_max, p_vf->num_sbs);
		}
	}

	if (num_sbs > current_max)
		return qed_mcp_config_vf_msix(p_hwfn, p_ptt,
					      abs_vf_id, num_sbs);

	return 0;
}
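
/* Note: on BB the loop above is skipped, current_max stays 0 and any VF
 * with SBs triggers qed_mcp_config_vf_msix() - the setting is per-VF.
 * From AH onward the value is per-PF, so the management FW is only asked
 * to grow it when a VF needs more SBs than any enabled sibling has.
 */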

static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	int rc;

	/* It's possible VF was previously considered malicious -
	 * clear the indication even if we're only going to disable VF.
	 */
	vf->b_malicious = false;

	if (vf->to_disable)
		return 0;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt,
					   vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	vf->state = VF_FREE;

	return rc;
}

/**
 * @brief qed_iov_config_perm_table - configure the permission
 *        zone table.
 *        In E4, queue zone permission table size is 320x9. There
 *        are 320 VF queues for single engine device (256 for dual
 *        engine device), and each entry has the following format:
 *        {Valid, VF[7:0]}
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				&qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
		qed_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}

static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}

static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, u16 num_rx_queues)
{
	struct qed_igu_block *p_block;
	struct cau_sb_entry sb_entry;
	int qid = 0;
	u32 val = 0;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	for (qid = 0; qid < num_rx_queues; qid++) {
		p_block = qed_get_igu_free_sb(p_hwfn, false);
		vf->igu_sbs[qid] = p_block->igu_sb_id;
		p_block->status &= ~QED_IGU_STATUS_FREE;
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

		qed_wr(p_hwfn, p_ptt,
		       IGU_REG_MAPPING_MEMORY +
		       sizeof(u32) * p_block->igu_sb_id, val);

		/* Configure igu sb in CAU which were marked valid */
		qed_init_cau_sb_entry(p_hwfn, &sb_entry,
				      p_hwfn->rel_pf_id, vf->abs_vf_id, 1);

		qed_dmae_host2grc(p_hwfn, p_ptt,
				  (u64)(uintptr_t)&sb_entry,
				  CAU_REG_SB_VAR_MEMORY +
				  p_block->igu_sb_id * sizeof(u64), 2, NULL);
	}

	vf->num_sbs = (u8) num_rx_queues;

	return vf->num_sbs;
}

static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		qed_wr(p_hwfn, p_ptt, addr, val);

		p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
		p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
	}

	vf->num_sbs = 0;
}
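
/* Note: qed_iov_set_link() below only stages link parameters/state in the
 * PF-side copy of the VF's bulletin board. Nothing reaches the VF until
 * qed_iov_post_vf_bulletin() bumps the version, recomputes the CRC and
 * DMAs the board into the VF's own memory.
 */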

static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}

static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_iov_vf_init_params *p_params)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params link_params;
	struct qed_mcp_link_state link_state;
	u8 num_of_vf_available_chains = 0;
	struct qed_vf_info *vf = NULL;
	u16 qid, num_irqs;
	int rc = 0;
	u32 cids;
	u8 i;

	vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
			  p_params->rel_vf_id);
		return -EINVAL;
	}

	/* Perform sanity checking on the requested queue_id */
	for (i = 0; i < p_params->num_queues; i++) {
		u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
		u16 max_vf_qzone = min_vf_qzone +
				   FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;

		qid = p_params->req_rx_queue[i];
		if (qid < min_vf_qzone || qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
				  qid,
				  p_params->rel_vf_id,
				  min_vf_qzone, max_vf_qzone);
			return -EINVAL;
		}

		qid = p_params->req_tx_queue[i];
		if (qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
				  qid, p_params->rel_vf_id, max_vf_qzone);
			return -EINVAL;
		}

		/* If client *really* wants, Tx qid can be shared with PF */
		if (qid < min_vf_qzone)
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
				   p_params->rel_vf_id, qid, i);
	}

	/* Limit number of queues according to number of CIDs */
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));

	num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
							      p_ptt,
							      vf, num_irqs);
	if (!num_of_vf_available_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return -ENOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		struct qed_vf_queue *p_queue = &vf->vf_queues[i];

		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
		p_queue->fw_tx_qid = p_params->req_tx_queue[i];

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i],
			   p_queue->fw_rx_qid, p_queue->fw_tx_qid);
	}

	/* Update the link configuration in bulletin */
	memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
	       sizeof(link_params));
	memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
	memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
	       sizeof(link_caps));
	qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
			 &link_params, &link_state, &link_caps);

	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (!rc) {
		vf->b_init = true;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs++;
	}

	return rc;
}

static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->bulletin.p_virt)
		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	memset(&vf->acquire, 0, sizeof(vf->acquire));

	/* disabling interrupts and resetting permission table was done during
	 * vf-close, however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_rxqs = 0;
	vf->num_txqs = 0;
	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs--;
	}

	return 0;
}

static bool qed_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}
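
/* Note: every PF<->VF channel message is a flat chain of TLVs inside a
 * single mailbox slot:
 *
 *	[ first_tlv | tlv | ... | CHANNEL_TLV_LIST_END ]
 *
 * Each TLV begins with a {type, length} header, and qed_add_tlv() bumps
 * the running offset by 'length' so the next call appends right after.
 */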

static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf,
				  u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct qed_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

	eng_vf_id = p_vf->abs_vf_id;

	memset(&params, 0, sizeof(params));
	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
	params.dst_vfid = eng_vf_id;

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   sizeof(u64),
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			   &params);

	/* Once PF copies the rc to the VF, the latter can continue
	 * and send an additional message. So we have to make sure the
	 * channel would be re-set to ready prior to that.
	 */
	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			   mbx->req_virt->first_tlv.reply_address,
			   sizeof(u64) / 4, &params);
}

static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
				enum qed_iov_vport_update_flag flag)
{
	switch (flag) {
	case QED_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case QED_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case QED_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case QED_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case QED_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case QED_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}

static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf,
					    struct qed_iov_vf_mbx *p_mbx,
					    u8 status,
					    u16 tlvs_mask, u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & BIT(i)))
			continue;

		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
				   qed_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & BIT(i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

		total_len += size;
	}

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	return total_len;
}

static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *vf_info,
				 u16 type, u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

static struct
qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return NULL;

	return &vf->p_vf_info;
}

static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
	struct qed_public_vf_info *vf_info;

	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);

	if (!vf_info)
		return;

	/* Clear the VF mac */
	eth_zero_addr(vf_info->mac);

	vf_info->rx_accept_mode = 0;
	vf_info->tx_accept_mode = 0;
}

static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf)
{
	u32 i, j;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
		struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];

		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
			if (!p_queue->cids[j].p_cid)
				continue;

			qed_eth_queue_cid_release(p_hwfn,
						  p_queue->cids[j].p_cid);
			p_queue->cids[j].p_cid = NULL;
		}
	}

	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}

/* Returns either 0, or log(size) */
static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);

	if (val)
		return val + 11;
	return 0;
}

static void
qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *p_vf,
				 struct vf_pf_resc_request *p_req,
				 struct pf_vf_resc *p_resp)
{
	u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
	u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) -
		     qed_db_addr_vf(0, DQ_DEMS_LEGACY);
	u32 bar_size;

	p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons);

	/* If VF didn't bother asking for QIDs then don't bother limiting
	 * number of CIDs. The VF doesn't care about the number, and this
	 * has the likely result of causing an additional acquisition.
	 */
	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		return;

	/* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
	 * that would make sure doorbells for all CIDs fall within the bar.
	 * If it doesn't, make sure regview window is sufficient.
	 */
	if (p_vf->acquire.vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
		bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
		if (bar_size)
			bar_size = 1 << bar_size;

		if (p_hwfn->cdev->num_hwfns > 1)
			bar_size /= 2;
	} else {
		bar_size = PXP_VF_BAR0_DQ_LENGTH;
	}

	if (bar_size / db_size < 256)
		p_resp->num_cids = min_t(u8, p_resp->num_cids,
					 (u8)(bar_size / db_size));
}
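
/* Note: qed_iov_vf_db_bar_size() returns a log2 (the +11 suggests the
 * register encodes the size in 2^11-byte granularity), hence the caller's
 * 'bar_size = 1 << bar_size'. db_size is the distance between two
 * consecutive VF doorbell addresses, so bar_size / db_size is how many
 * CIDs have their doorbell inside the mapped window; at 256 or more the
 * u8 num_cids can't be limited any further.
 */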

static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *p_vf,
				      struct vf_pf_resc_request *p_req,
				      struct pf_vf_resc *p_resp)
{
	u8 i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				(u16 *)&p_resp->hw_qid[i]);
		p_resp->cid[i] = i;
	}

	/* Filter related information */
	p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
					p_req->num_mac_filters);
	p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
					 p_req->num_vlan_filters);

	qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = QED_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters ||
	    p_resp->num_cids < p_req->num_cids) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs,
			   p_resp->num_rxqs,
			   p_req->num_txqs,
			   p_resp->num_txqs,
			   p_req->num_sbs,
			   p_resp->num_sbs,
			   p_req->num_mac_filters,
			   p_resp->num_mac_filters,
			   p_req->num_vlan_filters,
			   p_resp->num_vlan_filters,
			   p_req->num_mc_filters,
			   p_resp->num_mc_filters,
			   p_req->num_cids, p_resp->num_cids);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}

static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
					 struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  offsetof(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  offsetof(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  offsetof(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}
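
/* Note: the ACQUIRE flow below is the first message a VF sends: validate
 * the VF's fastpath HSI against the PF's, store the request, negotiate
 * resources (each field is min(requested, available)), start the VF in FW
 * via qed_sp_vf_start(), and post the first bulletin. A shortage normally
 * fails with PFVF_STATUS_NO_RESOURCE, after which the VF may retry with a
 * smaller request (see qed_iov_vf_mbx_acquire_resc()).
 */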

static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	int rc;

	memset(resp, 0, sizeof(*resp));

	/* Write the PF version so that VF would know which version
	 * is supported - might be later overridden. This guarantees that
	 * VF could recognize legacy PF based on lack of versions in reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);
		goto out;
	}

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
				   vf->abs_vf_id);
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
		} else {
			DP_INFO(p_hwfn,
				"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
				vf->abs_vf_id,
				req->vfdev_info.eth_fp_hsi_major,
				req->vfdev_info.eth_fp_hsi_minor,
				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

			goto out;
		}
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->cdev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support 100g\n",
			vf->abs_vf_id);
		goto out;
	}

	/* Store the acquire message */
	memcpy(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
	pfdev_info->db_size = 0;
	pfdev_info->indices_per_sb = PIS_PER_SB_E4;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->cdev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	/* Share our ability to use multiple queue-ids only with VFs
	 * that request it.
	 */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;

	/* Share the sizes of the bars with VF */
	resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);

	qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);

	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 * this field.
	 */
	pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
					 req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

	pfdev_info->dev_type = p_hwfn->cdev->type;
	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						  &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)
		goto out;

	/* Start the VF in FW */
	rc = qed_sp_vf_start(p_hwfn, vf);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response */
	resp->bulletin_size = vf->bulletin.size;
	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
		   vf->abs_vf_id,
		   resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size,
		   resp->pfdev_info.indices_per_sb,
		   resp->pfdev_info.capabilities,
		   resc->num_rxqs,
		   resc->num_txqs,
		   resc->num_sbs,
		   resc->num_mac_filters,
		   resc->num_vlan_filters);
	vf->state = VF_ACQUIRED;

	/* Prepare Response */
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
}

static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
				  struct qed_vf_info *p_vf, bool val)
{
	struct qed_sp_vport_update_params params;
	int rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return 0;
	}

	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
	if (!rc) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}

static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf)
{
	struct qed_filter_ucast filter;
	int rc = 0;
	int i;

	memset(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = QED_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = QED_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}
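
/* Note: shadow_config tracks the unicast VLAN filters the VF configured
 * on its own. Once a forced VLAN is removed, the helper above replays the
 * shadowed filters so the VF ends up with exactly the VLAN set it had
 * requested (see qed_iov_reconfigure_unicast_shadow() below).
 */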

static int
qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
				   struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;

	if ((events & BIT(VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}

static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
					  struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;
	struct qed_filter_ucast filter;

	if (!p_vf->vport_instance)
		return -EINVAL;

	if ((events & BIT(MAC_ADDR_FORCED)) ||
	    p_vf->p_vf_info.is_trusted_configured) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_MAC;
		filter.opcode = QED_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);

		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure MAC for VF\n");
			return rc;
		}
		if (p_vf->p_vf_info.is_trusted_configured)
			p_vf->configured_features |=
				BIT(VFPF_BULLETIN_MAC_ADDR);
		else
			p_vf->configured_features |=
				BIT(MAC_ADDR_FORCED);
	}

	if (events & BIT(VLAN_ADDR_FORCED)) {
		struct qed_sp_vport_update_params vport_update;
		u8 removal;
		int i;

		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
					      QED_FILTER_FLUSH;

		/* Send the ramrod */
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VLAN for VF\n");
			return rc;
		}

		/* Update the default-vlan & silent vlan stripping */
		memset(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ? 1
				      : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = qed_sp_vport_update(p_hwfn,
					 &vport_update,
					 QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VF vport for vlan\n");
			return rc;
		}

		/* Update all the Rx queues */
		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
			struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
			struct qed_queue_cid *p_cid = NULL;

			/* There can be at most 1 Rx queue on qzone. Find it */
			p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
			if (!p_cid)
				continue;

			rc = qed_sp_eth_rx_queues_update(p_hwfn,
							 (void **)&p_cid,
							 1, 0, 1,
							 QED_SPQ_MODE_EBLOCK,
							 NULL);
			if (rc) {
				DP_NOTICE(p_hwfn,
					  "Failed to send Rx update for queue[0x%04x]\n",
					  p_cid->rel.queue_id);
				return rc;
			}
		}

		if (filter.vlan)
			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		else
			p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
	}

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	if (events)
		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

	return rc;
}
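
/* Note: 'events' mirrors the bulletin's valid_bitmap; bits such as
 * MAC_ADDR_FORCED and VLAN_ADDR_FORCED tell the PF which forced settings
 * to (re)apply on the vport. A set VLAN bit with a zero pvid expresses
 * "remove the forced VLAN", which is why the filter opcode above flips
 * between QED_FILTER_REPLACE and QED_FILTER_FLUSH.
 */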
1931 p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
1932 if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
1933 u8 vf_req = start->only_untagged;
1934
1935 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
1936 *p_bitmap |= BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT);
1937 }
1938
1939 params.tpa_mode = start->tpa_mode;
1940 params.remove_inner_vlan = start->inner_vlan_removal;
1941 params.tx_switching = true;
1942
1943 params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
1944 params.drop_ttl0 = false;
1945 params.concrete_fid = vf->concrete_fid;
1946 params.opaque_fid = vf->opaque_fid;
1947 params.vport_id = vf->vport_id;
1948 params.max_buffers_per_cqe = start->max_buffers_per_cqe;
1949 params.mtu = vf->mtu;
1950
1951 /* Non-trusted VFs should enable control frame filtering */
1952 params.check_mac = !vf->p_vf_info.is_trusted_configured;
1953
1954 rc = qed_sp_eth_vport_start(p_hwfn, &params);
1955 if (rc) {
1956 DP_ERR(p_hwfn,
1957 "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
1958 status = PFVF_STATUS_FAILURE;
1959 } else {
1960 vf->vport_instance++;
1961
1962 /* Force configuration if needed on the newly opened vport */
1963 qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
1964
1965 __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
1966 }
1967 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
1968 sizeof(struct pfvf_def_resp_tlv), status);
1969 }
1970
1971 static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
1972 struct qed_ptt *p_ptt,
1973 struct qed_vf_info *vf)
1974 {
1975 u8 status = PFVF_STATUS_SUCCESS;
1976 int rc;
1977
1978 vf->vport_instance--;
1979 vf->spoof_chk = false;
1980
1981 if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
1982 (qed_iov_validate_active_txq(p_hwfn, vf))) {
1983 vf->b_malicious = true;
1984 DP_NOTICE(p_hwfn,
1985 "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
1986 vf->abs_vf_id);
1987 status = PFVF_STATUS_MALICIOUS;
1988 goto out;
1989 }
1990
1991 rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
1992 if (rc) {
1993 DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
1994 rc);
1995 status = PFVF_STATUS_FAILURE;
1996 }
1997
1998 /* Forget the configuration on the vport */
1999 vf->configured_features = 0;
2000 memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2001
2002 out:
2003 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2004 sizeof(struct pfvf_def_resp_tlv), status);
2005 }
2006
2007 static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
2008 struct qed_ptt *p_ptt,
2009 struct qed_vf_info *vf,
2010 u8 status, bool b_legacy)
2011 {
2012 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2013 struct pfvf_start_queue_resp_tlv *p_tlv;
2014 struct vfpf_start_rxq_tlv *req;
2015 u16 length;
2016
2017 mbx->offset = (u8 *)mbx->reply_virt;
2018
2019 /* Taking a bigger struct instead of adding a TLV to list was a
2020 * mistake, but one which we're now stuck with, as some older
2021 * clients assume the size of the previous response.
2022 */
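/* A sketch of the size selection this comment leads to (the same pattern
 * is repeated for the Tx response later in this file):
 *
 *	length = b_legacy ? sizeof(struct pfvf_def_resp_tlv)
 *			  : sizeof(struct pfvf_start_queue_resp_tlv);
 *
 * Legacy clients are answered with the older, shorter layout so they
 * never read past the response they expect.
 */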
2023 if (!b_legacy)
2024 length = sizeof(*p_tlv);
2025 else
2026 length = sizeof(struct pfvf_def_resp_tlv);
2027
2028 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
2029 length);
2030 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2031 sizeof(struct channel_list_end_tlv));
2032
2033 /* Update the TLV with the response */
2034 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2035 req = &mbx->req_virt->start_rxq;
2036 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
2037 offsetof(struct mstorm_vf_zone,
2038 non_trigger.eth_rx_queue_producers) +
2039 sizeof(struct eth_rx_prod_data) * req->rx_qid;
2040 }
2041
2042 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2043 }
2044
2045 static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn,
2046 struct qed_vf_info *p_vf, bool b_is_tx)
2047 {
2048 struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
2049 struct vfpf_qid_tlv *p_qid_tlv;
2050
2051 /* Search for the qid if the VF published it's going to provide it */
2052 if (!(p_vf->acquire.vfdev_info.capabilities &
2053 VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
2054 if (b_is_tx)
2055 return QED_IOV_LEGACY_QID_TX;
2056 else
2057 return QED_IOV_LEGACY_QID_RX;
2058 }
2059
2060 p_qid_tlv = (struct vfpf_qid_tlv *)
2061 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2062 CHANNEL_TLV_QID);
2063 if (!p_qid_tlv) {
2064 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2065 "VF[%2x]: Failed to provide qid\n",
2066 p_vf->relative_vf_id);
2067
2068 return QED_IOV_QID_INVALID;
2069 }
2070
2071 if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
2072 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2073 "VF[%02x]: Provided qid out-of-bounds %02x\n",
2074 p_vf->relative_vf_id, p_qid_tlv->qid);
2075 return QED_IOV_QID_INVALID;
2076 }
2077
2078 return p_qid_tlv->qid;
2079 }
2080
2081 static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
2082 struct qed_ptt *p_ptt,
2083 struct qed_vf_info *vf)
2084 {
2085 struct qed_queue_start_common_params params;
2086 struct qed_queue_cid_vf_params vf_params;
2087 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2088 u8 status = PFVF_STATUS_NO_RESOURCE;
2089 u8 qid_usage_idx, vf_legacy = 0;
2090 struct vfpf_start_rxq_tlv *req;
2091 struct qed_vf_queue *p_queue;
2092 struct qed_queue_cid *p_cid;
2093 struct qed_sb_info sb_dummy;
2094 int rc;
2095
2096 req = &mbx->req_virt->start_rxq;
2097
2098 if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
2099 QED_IOV_VALIDATE_Q_DISABLE) ||
2100 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2101 goto out;
2102
2103 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2104 if (qid_usage_idx == QED_IOV_QID_INVALID)
2105 goto out;
2106
2107 p_queue = &vf->vf_queues[req->rx_qid];
2108 if (p_queue->cids[qid_usage_idx].p_cid)
2109 goto out;
2110
2111 vf_legacy = qed_vf_calculate_legacy(vf);
2112
2113 /* Acquire a new queue-cid */
2114 memset(&params, 0, sizeof(params));
2115 params.queue_id = p_queue->fw_rx_qid;
2116 params.vport_id = vf->vport_id;
2117 params.stats_id = vf->abs_vf_id + 0x10;
2118 /* Since IGU index is passed via sb_info, construct a dummy one */
2119 memset(&sb_dummy, 0, sizeof(sb_dummy));
2120 sb_dummy.igu_sb_id = req->hw_sb;
2121 params.p_sb = &sb_dummy;
2122 params.sb_idx = req->sb_index;
2123
2124 memset(&vf_params, 0, sizeof(vf_params));
2125 vf_params.vfid = vf->relative_vf_id;
2126 vf_params.vf_qid = (u8)req->rx_qid;
2127 vf_params.vf_legacy = vf_legacy;
2128 vf_params.qid_usage_idx = qid_usage_idx;
2129 p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2130 &params, true, &vf_params);
2131 if (!p_cid)
2132 goto out;
2133
2134 /* Legacy VFs have their Producers in a different location, which they
2135 * calculate on their own and clean the producer prior to this.
2136 */
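/* For contrast, a non-legacy VF is handed its Rx producer in the
 * START_RXQ response (see qed_iov_vf_mbx_start_rxq_resp() above):
 *
 *	PXP_VF_BAR0_START_MSDM_ZONE_B +
 *	offsetof(struct mstorm_vf_zone,
 *		 non_trigger.eth_rx_queue_producers) +
 *	sizeof(struct eth_rx_prod_data) * req->rx_qid
 *
 * A legacy VF derives its own producer address instead, which is why
 * only the non-legacy path below has the PF clean the producer in
 * MSDM RAM.
 */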
2137 if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD))
2138 REG_WR(p_hwfn,
2139 GTT_BAR0_MAP_REG_MSDM_RAM +
2140 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2141 0);
2142
2143 rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
2144 req->bd_max_bytes,
2145 req->rxq_addr,
2146 req->cqe_pbl_addr, req->cqe_pbl_size);
2147 if (rc) {
2148 status = PFVF_STATUS_FAILURE;
2149 qed_eth_queue_cid_release(p_hwfn, p_cid);
2150 } else {
2151 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2152 p_queue->cids[qid_usage_idx].b_is_tx = false;
2153 status = PFVF_STATUS_SUCCESS;
2154 vf->num_active_rxqs++;
2155 }
2156
2157 out:
2158 qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2159 !!(vf_legacy &
2160 QED_QCID_LEGACY_VF_RX_PROD));
2161 }
2162
2163 static void
2164 qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2165 struct qed_tunnel_info *p_tun,
2166 u16 tunn_feature_mask)
2167 {
2168 p_resp->tunn_feature_mask = tunn_feature_mask;
2169 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2170 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2171 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2172 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2173 p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
2174 p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2175 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2176 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2177 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2178 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2179 p_resp->geneve_udp_port = p_tun->geneve_port.port;
2180 p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2181 }
2182
2183 static void
2184 __qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2185 struct qed_tunn_update_type *p_tun,
2186 enum qed_tunn_mode mask, u8 tun_cls)
2187 {
2188 if (p_req->tun_mode_update_mask & BIT(mask)) {
2189 p_tun->b_update_mode = true;
2190
2191 if (p_req->tunn_mode & BIT(mask))
2192 p_tun->b_mode_enabled = true;
2193 }
2194
2195 p_tun->tun_cls = tun_cls;
2196 }
2197
2198 static void
2199 qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2200 struct qed_tunn_update_type *p_tun,
2201 struct qed_tunn_update_udp_port *p_port,
2202 enum qed_tunn_mode mask,
2203 u8 tun_cls, u8 update_port, u16 port)
2204 {
2205 if (update_port) {
2206 p_port->b_update_port = true;
2207 p_port->port = port;
2208 }
2209
2210 __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2211 }
2212
2213 static bool
2214 qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2215 {
2216 bool b_update_requested = false;
2217
2218 if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2219 p_req->update_geneve_port || p_req->update_vxlan_port)
2220 b_update_requested = true;
2221
2222 return b_update_requested;
2223 }
2224
2225 static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc)
2226 {
2227 if (tun->b_update_mode && !tun->b_mode_enabled) {
2228 tun->b_update_mode = false;
2229 *rc = -EINVAL;
2230 }
2231 }
2232
2233 static int
2234 qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn,
2235 u16 *tun_features, bool *update,
2236 struct qed_tunnel_info *tun_src)
2237 {
2238 struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth;
2239 struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel;
2240 u16 bultn_vxlan_port, bultn_geneve_port;
2241 void *cookie = p_hwfn->cdev->ops_cookie;
2242 int i, rc = 0; 2243 2244 *tun_features = p_hwfn->cdev->tunn_feature_mask; 2245 bultn_vxlan_port = tun->vxlan_port.port; 2246 bultn_geneve_port = tun->geneve_port.port; 2247 qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc); 2248 qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc); 2249 qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc); 2250 qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc); 2251 qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc); 2252 2253 if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) && 2254 (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2255 tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2256 tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2257 tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2258 tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) { 2259 tun_src->b_update_rx_cls = false; 2260 tun_src->b_update_tx_cls = false; 2261 rc = -EINVAL; 2262 } 2263 2264 if (tun_src->vxlan_port.b_update_port) { 2265 if (tun_src->vxlan_port.port == tun->vxlan_port.port) { 2266 tun_src->vxlan_port.b_update_port = false; 2267 } else { 2268 *update = true; 2269 bultn_vxlan_port = tun_src->vxlan_port.port; 2270 } 2271 } 2272 2273 if (tun_src->geneve_port.b_update_port) { 2274 if (tun_src->geneve_port.port == tun->geneve_port.port) { 2275 tun_src->geneve_port.b_update_port = false; 2276 } else { 2277 *update = true; 2278 bultn_geneve_port = tun_src->geneve_port.port; 2279 } 2280 } 2281 2282 qed_for_each_vf(p_hwfn, i) { 2283 qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port, 2284 bultn_geneve_port); 2285 } 2286 2287 qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 2288 ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port); 2289 2290 return rc; 2291 } 2292 2293 static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn, 2294 struct qed_ptt *p_ptt, 2295 struct qed_vf_info *p_vf) 2296 { 2297 struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; 2298 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; 2299 struct pfvf_update_tunn_param_tlv *p_resp; 2300 struct vfpf_update_tunn_param_tlv *p_req; 2301 u8 status = PFVF_STATUS_SUCCESS; 2302 bool b_update_required = false; 2303 struct qed_tunnel_info tunn; 2304 u16 tunn_feature_mask = 0; 2305 int i, rc = 0; 2306 2307 mbx->offset = (u8 *)mbx->reply_virt; 2308 2309 memset(&tunn, 0, sizeof(tunn)); 2310 p_req = &mbx->req_virt->tunn_param_update; 2311 2312 if (!qed_iov_pf_validate_tunn_param(p_req)) { 2313 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2314 "No tunnel update requested by VF\n"); 2315 status = PFVF_STATUS_FAILURE; 2316 goto send_resp; 2317 } 2318 2319 tunn.b_update_rx_cls = p_req->update_tun_cls; 2320 tunn.b_update_tx_cls = p_req->update_tun_cls; 2321 2322 qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port, 2323 QED_MODE_VXLAN_TUNN, p_req->vxlan_clss, 2324 p_req->update_vxlan_port, 2325 p_req->vxlan_port); 2326 qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port, 2327 QED_MODE_L2GENEVE_TUNN, 2328 p_req->l2geneve_clss, 2329 p_req->update_geneve_port, 2330 p_req->geneve_port); 2331 __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve, 2332 QED_MODE_IPGENEVE_TUNN, 2333 p_req->ipgeneve_clss); 2334 __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre, 2335 QED_MODE_L2GRE_TUNN, p_req->l2gre_clss); 2336 __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre, 2337 QED_MODE_IPGRE_TUNN, p_req->ipgre_clss); 2338 2339 /* If PF modifies VF's req then it should 2340 * still return an error in case of partial configuration 2341 * or modified configuration as opposed to 
requested one.
2342 */
2343 rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask,
2344 &b_update_required, &tunn);
2345
2346 if (rc)
2347 status = PFVF_STATUS_FAILURE;
2348
2349 /* Check whether the QED client wants to update anything */
2350 if (b_update_required) {
2351 u16 geneve_port;
2352
2353 rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2354 QED_SPQ_MODE_EBLOCK, NULL);
2355 if (rc)
2356 status = PFVF_STATUS_FAILURE;
2357
2358 geneve_port = p_tun->geneve_port.port;
2359 qed_for_each_vf(p_hwfn, i) {
2360 qed_iov_bulletin_set_udp_ports(p_hwfn, i,
2361 p_tun->vxlan_port.port,
2362 geneve_port);
2363 }
2364 }
2365
2366 send_resp:
2367 p_resp = qed_add_tlv(p_hwfn, &mbx->offset,
2368 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2369
2370 qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2371 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2372 sizeof(struct channel_list_end_tlv));
2373
2374 qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2375 }
2376
2377 static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
2378 struct qed_ptt *p_ptt,
2379 struct qed_vf_info *p_vf,
2380 u32 cid, u8 status)
2381 {
2382 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2383 struct pfvf_start_queue_resp_tlv *p_tlv;
2384 bool b_legacy = false;
2385 u16 length;
2386
2387 mbx->offset = (u8 *)mbx->reply_virt;
2388
2389 /* Taking a bigger struct instead of adding a TLV to list was a
2390 * mistake, but one which we're now stuck with, as some older
2391 * clients assume the size of the previous response.
2392 */
2393 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2394 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2395 b_legacy = true;
2396
2397 if (!b_legacy)
2398 length = sizeof(*p_tlv);
2399 else
2400 length = sizeof(struct pfvf_def_resp_tlv);
2401
2402 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
2403 length);
2404 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2405 sizeof(struct channel_list_end_tlv));
2406
2407 /* Update the TLV with the response */
2408 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2409 p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
2410
2411 qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
2412 }
2413
2414 static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
2415 struct qed_ptt *p_ptt,
2416 struct qed_vf_info *vf)
2417 {
2418 struct qed_queue_start_common_params params;
2419 struct qed_queue_cid_vf_params vf_params;
2420 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2421 u8 status = PFVF_STATUS_NO_RESOURCE;
2422 struct vfpf_start_txq_tlv *req;
2423 struct qed_vf_queue *p_queue;
2424 struct qed_queue_cid *p_cid;
2425 struct qed_sb_info sb_dummy;
2426 u8 qid_usage_idx, vf_legacy;
2427 u32 cid = 0;
2428 int rc;
2429 u16 pq;
2430
2431 memset(&params, 0, sizeof(params));
2432 req = &mbx->req_virt->start_txq;
2433
2434 if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2435 QED_IOV_VALIDATE_Q_NA) ||
2436 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2437 goto out;
2438
2439 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
2440 if (qid_usage_idx == QED_IOV_QID_INVALID)
2441 goto out;
2442
2443 p_queue = &vf->vf_queues[req->tx_qid];
2444 if (p_queue->cids[qid_usage_idx].p_cid)
2445 goto out;
2446
2447 vf_legacy = qed_vf_calculate_legacy(vf);
2448
2449 /* Acquire a new queue-cid */
2450 params.queue_id = p_queue->fw_tx_qid;
2451 params.vport_id = vf->vport_id;
2452 params.stats_id = vf->abs_vf_id + 0x10;
2453
2454 /* Since IGU index is passed via sb_info, construct a dummy one */
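/* (Same dummy-SB construction as the Rx path above; the assumption in
 * both places is that qed_eth_queue_to_cid() only consumes the igu_sb_id
 * of the qed_sb_info it is handed.)
 */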
2455 memset(&sb_dummy, 0, sizeof(sb_dummy));
2456 sb_dummy.igu_sb_id = req->hw_sb;
2457 params.p_sb = &sb_dummy;
2458 params.sb_idx = req->sb_index;
2459
2460 memset(&vf_params, 0, sizeof(vf_params));
2461 vf_params.vfid = vf->relative_vf_id;
2462 vf_params.vf_qid = (u8)req->tx_qid;
2463 vf_params.vf_legacy = vf_legacy;
2464 vf_params.qid_usage_idx = qid_usage_idx;
2465
2466 p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2467 &params, false, &vf_params);
2468 if (!p_cid)
2469 goto out;
2470
2471 pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
2472 rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
2473 req->pbl_addr, req->pbl_size, pq);
2474 if (rc) {
2475 status = PFVF_STATUS_FAILURE;
2476 qed_eth_queue_cid_release(p_hwfn, p_cid);
2477 } else {
2478 status = PFVF_STATUS_SUCCESS;
2479 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2480 p_queue->cids[qid_usage_idx].b_is_tx = true;
2481 cid = p_cid->cid;
2482 }
2483
2484 out:
2485 qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status);
2486 }
2487
2488 static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
2489 struct qed_vf_info *vf,
2490 u16 rxq_id,
2491 u8 qid_usage_idx, bool cqe_completion)
2492 {
2493 struct qed_vf_queue *p_queue;
2494 int rc = 0;
2495
2496 if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) {
2497 DP_VERBOSE(p_hwfn,
2498 QED_MSG_IOV,
2499 "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2500 vf->relative_vf_id, rxq_id, qid_usage_idx);
2501 return -EINVAL;
2502 }
2503
2504 p_queue = &vf->vf_queues[rxq_id];
2505
2506 /* We've validated the index and the existence of the active RXQ -
2507 * now we need to make sure that it's using the correct qid.
2508 */
2509 if (!p_queue->cids[qid_usage_idx].p_cid ||
2510 p_queue->cids[qid_usage_idx].b_is_tx) {
2511 struct qed_queue_cid *p_cid;
2512
2513 p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
2514 DP_VERBOSE(p_hwfn,
2515 QED_MSG_IOV,
2516 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2517 vf->relative_vf_id,
2518 rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx);
2519 return -EINVAL;
2520 }
2521
2522 /* Now that we know we have a valid Rx-queue - close it */
2523 rc = qed_eth_rx_queue_stop(p_hwfn,
2524 p_queue->cids[qid_usage_idx].p_cid,
2525 false, cqe_completion);
2526 if (rc)
2527 return rc;
2528
2529 p_queue->cids[qid_usage_idx].p_cid = NULL;
2530 vf->num_active_rxqs--;
2531
2532 return 0;
2533 }
2534
2535 static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
2536 struct qed_vf_info *vf,
2537 u16 txq_id, u8 qid_usage_idx)
2538 {
2539 struct qed_vf_queue *p_queue;
2540 int rc = 0;
2541
2542 if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA))
2543 return -EINVAL;
2544
2545 p_queue = &vf->vf_queues[txq_id];
2546 if (!p_queue->cids[qid_usage_idx].p_cid ||
2547 !p_queue->cids[qid_usage_idx].b_is_tx)
2548 return -EINVAL;
2549
2550 rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid);
2551 if (rc)
2552 return rc;
2553
2554 p_queue->cids[qid_usage_idx].p_cid = NULL;
2555 return 0;
2556 }
2557
2558 static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
2559 struct qed_ptt *p_ptt,
2560 struct qed_vf_info *vf)
2561 {
2562 u16 length = sizeof(struct pfvf_def_resp_tlv);
2563 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2564 u8 status = PFVF_STATUS_FAILURE;
2565 struct vfpf_stop_rxqs_tlv *req;
2566 u8 qid_usage_idx;
2567 int rc;
2568
2569 /* There has never been an official driver that used this interface
2570 * for stopping multiple queues, and it is now considered deprecated.
2571 * Validate this isn't used here. 2572 */ 2573 req = &mbx->req_virt->stop_rxqs; 2574 if (req->num_rxqs != 1) { 2575 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2576 "Odd; VF[%d] tried stopping multiple Rx queues\n", 2577 vf->relative_vf_id); 2578 status = PFVF_STATUS_NOT_SUPPORTED; 2579 goto out; 2580 } 2581 2582 /* Find which qid-index is associated with the queue */ 2583 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); 2584 if (qid_usage_idx == QED_IOV_QID_INVALID) 2585 goto out; 2586 2587 rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid, 2588 qid_usage_idx, req->cqe_completion); 2589 if (!rc) 2590 status = PFVF_STATUS_SUCCESS; 2591 out: 2592 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS, 2593 length, status); 2594 } 2595 2596 static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn, 2597 struct qed_ptt *p_ptt, 2598 struct qed_vf_info *vf) 2599 { 2600 u16 length = sizeof(struct pfvf_def_resp_tlv); 2601 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 2602 u8 status = PFVF_STATUS_FAILURE; 2603 struct vfpf_stop_txqs_tlv *req; 2604 u8 qid_usage_idx; 2605 int rc; 2606 2607 /* There has never been an official driver that used this interface 2608 * for stopping multiple queues, and it is now considered deprecated. 2609 * Validate this isn't used here. 2610 */ 2611 req = &mbx->req_virt->stop_txqs; 2612 if (req->num_txqs != 1) { 2613 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2614 "Odd; VF[%d] tried stopping multiple Tx queues\n", 2615 vf->relative_vf_id); 2616 status = PFVF_STATUS_NOT_SUPPORTED; 2617 goto out; 2618 } 2619 2620 /* Find which qid-index is associated with the queue */ 2621 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true); 2622 if (qid_usage_idx == QED_IOV_QID_INVALID) 2623 goto out; 2624 2625 rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx); 2626 if (!rc) 2627 status = PFVF_STATUS_SUCCESS; 2628 2629 out: 2630 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS, 2631 length, status); 2632 } 2633 2634 static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, 2635 struct qed_ptt *p_ptt, 2636 struct qed_vf_info *vf) 2637 { 2638 struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF]; 2639 u16 length = sizeof(struct pfvf_def_resp_tlv); 2640 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 2641 struct vfpf_update_rxq_tlv *req; 2642 u8 status = PFVF_STATUS_FAILURE; 2643 u8 complete_event_flg; 2644 u8 complete_cqe_flg; 2645 u8 qid_usage_idx; 2646 int rc; 2647 u8 i; 2648 2649 req = &mbx->req_virt->update_rxq; 2650 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG); 2651 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); 2652 2653 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); 2654 if (qid_usage_idx == QED_IOV_QID_INVALID) 2655 goto out; 2656 2657 /* There shouldn't exist a VF that uses queue-qids yet uses this 2658 * API with multiple Rx queues. Validate this. 2659 */ 2660 if ((vf->acquire.vfdev_info.capabilities & 2661 VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) { 2662 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2663 "VF[%d] supports QIDs but sends multiple queues\n", 2664 vf->relative_vf_id); 2665 goto out; 2666 } 2667 2668 /* Validate inputs - for the legacy case this is still true since 2669 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX. 
2670 */
2671 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2672 if (!qed_iov_validate_rxq(p_hwfn, vf, i,
2673 QED_IOV_VALIDATE_Q_NA) ||
2674 !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
2675 vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
2676 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2677 "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2678 vf->relative_vf_id, req->rx_qid,
2679 req->num_rxqs);
2680 goto out;
2681 }
2682 }
2683
2684 /* Prepare the handlers */
2685 for (i = 0; i < req->num_rxqs; i++) {
2686 u16 qid = req->rx_qid + i;
2687
2688 handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
2689 }
2690
2691 rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2692 req->num_rxqs,
2693 complete_cqe_flg,
2694 complete_event_flg,
2695 QED_SPQ_MODE_EBLOCK, NULL);
2696 if (rc)
2697 goto out;
2698
2699 status = PFVF_STATUS_SUCCESS;
2700 out:
2701 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2702 length, status);
2703 }
2704
2705 void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
2706 void *p_tlvs_list, u16 req_type)
2707 {
2708 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2709 int len = 0;
2710
2711 do {
2712 if (!p_tlv->length) {
2713 DP_NOTICE(p_hwfn, "Zero length TLV found\n");
2714 return NULL;
2715 }
2716
2717 if (p_tlv->type == req_type) {
2718 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2719 "Extended tlv type %d, length %d found\n",
2720 p_tlv->type, p_tlv->length);
2721 return p_tlv;
2722 }
2723
2724 len += p_tlv->length;
2725 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2726
2727 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2728 DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
2729 return NULL;
2730 }
2731 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
2732
2733 return NULL;
2734 }
2735
2736 static void
2737 qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
2738 struct qed_sp_vport_update_params *p_data,
2739 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2740 {
2741 struct vfpf_vport_update_activate_tlv *p_act_tlv;
2742 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2743
2744 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2745 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2746 if (!p_act_tlv)
2747 return;
2748
2749 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2750 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2751 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2752 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2753 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
2754 }
2755
2756 static void
2757 qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
2758 struct qed_sp_vport_update_params *p_data,
2759 struct qed_vf_info *p_vf,
2760 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2761 {
2762 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2763 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2764
2765 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2766 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2767 if (!p_vlan_tlv)
2768 return;
2769
2770 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2771
2772 /* Ignore the VF request if we're forcing a vlan */
2773 if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
2774 p_data->update_inner_vlan_removal_flg = 1;
2775 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2776 }
2777
2778 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
2779 }
2780
2781 static void
2782 qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
2783 struct qed_sp_vport_update_params *p_data,
2784 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2785 { 2786 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; 2787 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; 2788 2789 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *) 2790 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 2791 tlv); 2792 if (!p_tx_switch_tlv) 2793 return; 2794 2795 p_data->update_tx_switching_flg = 1; 2796 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching; 2797 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH; 2798 } 2799 2800 static void 2801 qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn, 2802 struct qed_sp_vport_update_params *p_data, 2803 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2804 { 2805 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; 2806 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST; 2807 2808 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *) 2809 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2810 if (!p_mcast_tlv) 2811 return; 2812 2813 p_data->update_approx_mcast_flg = 1; 2814 memcpy(p_data->bins, p_mcast_tlv->bins, 2815 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); 2816 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; 2817 } 2818 2819 static void 2820 qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn, 2821 struct qed_sp_vport_update_params *p_data, 2822 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2823 { 2824 struct qed_filter_accept_flags *p_flags = &p_data->accept_flags; 2825 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; 2826 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; 2827 2828 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *) 2829 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2830 if (!p_accept_tlv) 2831 return; 2832 2833 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode; 2834 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter; 2835 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode; 2836 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter; 2837 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM; 2838 } 2839 2840 static void 2841 qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn, 2842 struct qed_sp_vport_update_params *p_data, 2843 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2844 { 2845 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan; 2846 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; 2847 2848 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *) 2849 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 2850 tlv); 2851 if (!p_accept_any_vlan) 2852 return; 2853 2854 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan; 2855 p_data->update_accept_any_vlan_flg = 2856 p_accept_any_vlan->update_accept_any_vlan_flg; 2857 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN; 2858 } 2859 2860 static void 2861 qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, 2862 struct qed_vf_info *vf, 2863 struct qed_sp_vport_update_params *p_data, 2864 struct qed_rss_params *p_rss, 2865 struct qed_iov_vf_mbx *p_mbx, 2866 u16 *tlvs_mask, u16 *tlvs_accepted) 2867 { 2868 struct vfpf_vport_update_rss_tlv *p_rss_tlv; 2869 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS; 2870 bool b_reject = false; 2871 u16 table_size; 2872 u16 i, q_idx; 2873 2874 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *) 2875 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2876 if (!p_rss_tlv) { 2877 p_data->rss_params = NULL; 2878 return; 2879 } 2880 2881 memset(p_rss, 0, sizeof(struct qed_rss_params)); 2882 2883 p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags 
& 2884 VFPF_UPDATE_RSS_CONFIG_FLAG); 2885 p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags & 2886 VFPF_UPDATE_RSS_CAPS_FLAG); 2887 p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags & 2888 VFPF_UPDATE_RSS_IND_TABLE_FLAG); 2889 p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags & 2890 VFPF_UPDATE_RSS_KEY_FLAG); 2891 2892 p_rss->rss_enable = p_rss_tlv->rss_enable; 2893 p_rss->rss_eng_id = vf->relative_vf_id + 1; 2894 p_rss->rss_caps = p_rss_tlv->rss_caps; 2895 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log; 2896 memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key)); 2897 2898 table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table), 2899 (1 << p_rss_tlv->rss_table_size_log)); 2900 2901 for (i = 0; i < table_size; i++) { 2902 struct qed_queue_cid *p_cid; 2903 2904 q_idx = p_rss_tlv->rss_ind_table[i]; 2905 if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx, 2906 QED_IOV_VALIDATE_Q_ENABLE)) { 2907 DP_VERBOSE(p_hwfn, 2908 QED_MSG_IOV, 2909 "VF[%d]: Omitting RSS due to wrong queue %04x\n", 2910 vf->relative_vf_id, q_idx); 2911 b_reject = true; 2912 goto out; 2913 } 2914 2915 p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]); 2916 p_rss->rss_ind_table[i] = p_cid; 2917 } 2918 2919 p_data->rss_params = p_rss; 2920 out: 2921 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS; 2922 if (!b_reject) 2923 *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS; 2924 } 2925 2926 static void 2927 qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn, 2928 struct qed_vf_info *vf, 2929 struct qed_sp_vport_update_params *p_data, 2930 struct qed_sge_tpa_params *p_sge_tpa, 2931 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) 2932 { 2933 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv; 2934 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; 2935 2936 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *) 2937 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); 2938 2939 if (!p_sge_tpa_tlv) { 2940 p_data->sge_tpa_params = NULL; 2941 return; 2942 } 2943 2944 memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params)); 2945 2946 p_sge_tpa->update_tpa_en_flg = 2947 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG); 2948 p_sge_tpa->update_tpa_param_flg = 2949 !!(p_sge_tpa_tlv->update_sge_tpa_flags & 2950 VFPF_UPDATE_TPA_PARAM_FLAG); 2951 2952 p_sge_tpa->tpa_ipv4_en_flg = 2953 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG); 2954 p_sge_tpa->tpa_ipv6_en_flg = 2955 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG); 2956 p_sge_tpa->tpa_pkt_split_flg = 2957 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG); 2958 p_sge_tpa->tpa_hdr_data_split_flg = 2959 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG); 2960 p_sge_tpa->tpa_gro_consistent_flg = 2961 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG); 2962 2963 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num; 2964 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size; 2965 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start; 2966 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont; 2967 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe; 2968 2969 p_data->sge_tpa_params = p_sge_tpa; 2970 2971 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA; 2972 } 2973 2974 static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn, 2975 u8 vfid, 2976 struct qed_sp_vport_update_params *params, 2977 u16 *tlvs) 2978 { 2979 u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; 2980 struct qed_filter_accept_flags *flags = 
&params->accept_flags;
2981 struct qed_public_vf_info *vf_info;
2982
2983 /* Untrusted VFs can't even be trusted to know that fact.
2984 * Simply indicate everything is configured fine, and trace
2985 * configuration 'behind their back'.
2986 */
2987 if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
2988 return 0;
2989
2990 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
2991
2992 if (flags->update_rx_mode_config) {
2993 vf_info->rx_accept_mode = flags->rx_accept_filter;
2994 if (!vf_info->is_trusted_configured)
2995 flags->rx_accept_filter &= ~mask;
2996 }
2997
2998 if (flags->update_tx_mode_config) {
2999 vf_info->tx_accept_mode = flags->tx_accept_filter;
3000 if (!vf_info->is_trusted_configured)
3001 flags->tx_accept_filter &= ~mask;
3002 }
3003
3004 return 0;
3005 }
3006
3007 static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
3008 struct qed_ptt *p_ptt,
3009 struct qed_vf_info *vf)
3010 {
3011 struct qed_rss_params *p_rss_params = NULL;
3012 struct qed_sp_vport_update_params params;
3013 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3014 struct qed_sge_tpa_params sge_tpa_params;
3015 u16 tlvs_mask = 0, tlvs_accepted = 0;
3016 u8 status = PFVF_STATUS_SUCCESS;
3017 u16 length;
3018 int rc;
3019
3020 /* Validate PF can send such a request */
3021 if (!vf->vport_instance) {
3022 DP_VERBOSE(p_hwfn,
3023 QED_MSG_IOV,
3024 "No VPORT instance available for VF[%d], failing vport update\n",
3025 vf->abs_vf_id);
3026 status = PFVF_STATUS_FAILURE;
3027 goto out;
3028 }
3029 p_rss_params = vzalloc(sizeof(*p_rss_params));
3030 if (!p_rss_params) {
3031 status = PFVF_STATUS_FAILURE;
3032 goto out;
3033 }
3034
3035 memset(&params, 0, sizeof(params));
3036 params.opaque_fid = vf->opaque_fid;
3037 params.vport_id = vf->vport_id;
3038 params.rss_params = NULL;
3039
3040 /* Search for extended tlvs list and update values
3041 * from VF in struct qed_sp_vport_update_params.
3042 */
3043 qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
3044 qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
3045 qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
3046 qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
3047 qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
3048 qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
3049 qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
3050 &sge_tpa_params, mbx, &tlvs_mask);
3051
3052 tlvs_accepted = tlvs_mask;
3053
3054 /* Some of the extended TLVs need to be validated first; In that case,
3055 * they can update the mask without updating the accepted [so that
3056 * the PF can communicate to the VF that it has rejected the request].
3057 */
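/* The two masks used from here on follow the pattern of the helpers
 * above: tlvs_mask collects every extended TLV that was recognized,
 * while tlvs_accepted starts as a copy and keeps only the TLVs that also
 * passed validation. The RSS helper below, for instance, ends with:
 *
 *	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
 *	if (!b_reject)
 *		*tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
 *
 * so the response can tell the VF which requests were seen and which
 * were actually applied.
 */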
3058 qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
3059 mbx, &tlvs_mask, &tlvs_accepted);
3060
3061 if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
3062 &params, &tlvs_accepted)) {
3063 tlvs_accepted = 0;
3064 status = PFVF_STATUS_NOT_SUPPORTED;
3065 goto out;
3066 }
3067
3068 if (!tlvs_accepted) {
3069 if (tlvs_mask)
3070 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3071 "Upper-layer prevents VF vport configuration\n");
3072 else
3073 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3074 "No feature tlvs found for vport update\n");
3075 status = PFVF_STATUS_NOT_SUPPORTED;
3076 goto out;
3077 }
3078
3079 rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
3080
3081 if (rc)
3082 status = PFVF_STATUS_FAILURE;
3083
3084 out:
3085 vfree(p_rss_params);
3086 length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3087 tlvs_mask, tlvs_accepted);
3088 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
3089 }
3090
3091 static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
3092 struct qed_vf_info *p_vf,
3093 struct qed_filter_ucast *p_params)
3094 {
3095 int i;
3096
3097 /* First remove entries and then add new ones */
3098 if (p_params->opcode == QED_FILTER_REMOVE) {
3099 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3100 if (p_vf->shadow_config.vlans[i].used &&
3101 p_vf->shadow_config.vlans[i].vid ==
3102 p_params->vlan) {
3103 p_vf->shadow_config.vlans[i].used = false;
3104 break;
3105 }
3106 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
3107 DP_VERBOSE(p_hwfn,
3108 QED_MSG_IOV,
3109 "VF [%d] - Tries to remove a non-existing vlan\n",
3110 p_vf->relative_vf_id);
3111 return -EINVAL;
3112 }
3113 } else if (p_params->opcode == QED_FILTER_REPLACE ||
3114 p_params->opcode == QED_FILTER_FLUSH) {
3115 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3116 p_vf->shadow_config.vlans[i].used = false;
3117 }
3118
3119 /* In forced mode, we're willing to remove entries - but we don't add
3120 * new ones.
3121 */
3122 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
3123 return 0;
3124
3125 if (p_params->opcode == QED_FILTER_ADD ||
3126 p_params->opcode == QED_FILTER_REPLACE) {
3127 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3128 if (p_vf->shadow_config.vlans[i].used)
3129 continue;
3130
3131 p_vf->shadow_config.vlans[i].used = true;
3132 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3133 break;
3134 }
3135
3136 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
3137 DP_VERBOSE(p_hwfn,
3138 QED_MSG_IOV,
3139 "VF [%d] - Tries to configure more than %d vlan filters\n",
3140 p_vf->relative_vf_id,
3141 QED_ETH_VF_NUM_VLAN_FILTERS + 1);
3142 return -EINVAL;
3143 }
3144 }
3145
3146 return 0;
3147 }
3148
3149 static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
3150 struct qed_vf_info *p_vf,
3151 struct qed_filter_ucast *p_params)
3152 {
3153 int i;
3154
3155 /* If we're in forced-mode, we don't allow any change */
3156 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
3157 return 0;
3158
3159 /* Don't keep track of shadow copy since we don't intend to restore. */
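/* (For a trusted-configured VF the bulletin MAC is authoritative and is
 * posted directly by qed_iov_chk_ucast() below, so there is nothing to
 * replay from the shadow copy once forcing ends; skipping the
 * bookkeeping here is assumed safe for that reason.)
 */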
3160 if (p_vf->p_vf_info.is_trusted_configured)
3161 return 0;
3162
3163 /* First remove entries and then add new ones */
3164 if (p_params->opcode == QED_FILTER_REMOVE) {
3165 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
3166 if (ether_addr_equal(p_vf->shadow_config.macs[i],
3167 p_params->mac)) {
3168 eth_zero_addr(p_vf->shadow_config.macs[i]);
3169 break;
3170 }
3171 }
3172
3173 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
3174 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3175 "MAC isn't configured\n");
3176 return -EINVAL;
3177 }
3178 } else if (p_params->opcode == QED_FILTER_REPLACE ||
3179 p_params->opcode == QED_FILTER_FLUSH) {
3180 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
3181 eth_zero_addr(p_vf->shadow_config.macs[i]);
3182 }
3183
3184 /* List the new MAC address */
3185 if (p_params->opcode != QED_FILTER_ADD &&
3186 p_params->opcode != QED_FILTER_REPLACE)
3187 return 0;
3188
3189 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
3190 if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
3191 ether_addr_copy(p_vf->shadow_config.macs[i],
3192 p_params->mac);
3193 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3194 "Added MAC at entry %d in shadow\n", i);
3195 break;
3196 }
3197 }
3198
3199 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
3200 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
3201 return -EINVAL;
3202 }
3203
3204 return 0;
3205 }
3206
3207 static int
3208 qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
3209 struct qed_vf_info *p_vf,
3210 struct qed_filter_ucast *p_params)
3211 {
3212 int rc = 0;
3213
3214 if (p_params->type == QED_FILTER_MAC) {
3215 rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3216 if (rc)
3217 return rc;
3218 }
3219
3220 if (p_params->type == QED_FILTER_VLAN)
3221 rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3222
3223 return rc;
3224 }
3225
3226 static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
3227 int vfid, struct qed_filter_ucast *params)
3228 {
3229 struct qed_public_vf_info *vf;
3230
3231 vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
3232 if (!vf)
3233 return -EINVAL;
3234
3235 /* No real decision to make; Store the configured MAC */
3236 if (params->type == QED_FILTER_MAC ||
3237 params->type == QED_FILTER_MAC_VLAN) {
3238 ether_addr_copy(vf->mac, params->mac);
3239
3240 if (vf->is_trusted_configured) {
3241 qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid);
3242
3243 /* Update and post bulletin again */
3244 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
3245 }
3246 }
3247
3248 return 0;
3249 }
3250
3251 static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
3252 struct qed_ptt *p_ptt,
3253 struct qed_vf_info *vf)
3254 {
3255 struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3256 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3257 struct vfpf_ucast_filter_tlv *req;
3258 u8 status = PFVF_STATUS_SUCCESS;
3259 struct qed_filter_ucast params;
3260 int rc;
3261
3262 /* Prepare the unicast filter params */
3263 memset(&params, 0, sizeof(struct qed_filter_ucast));
3264 req = &mbx->req_virt->ucast_filter;
3265 params.opcode = (enum qed_filter_opcode)req->opcode;
3266 params.type = (enum qed_filter_ucast_type)req->type;
3267
3268 params.is_rx_filter = 1;
3269 params.is_tx_filter = 1;
3270 params.vport_to_remove_from = vf->vport_id;
3271 params.vport_to_add_to = vf->vport_id;
3272 memcpy(params.mac, req->mac, ETH_ALEN);
3273 params.vlan = req->vlan;
3274
3275 DP_VERBOSE(p_hwfn,
3276 QED_MSG_IOV,
3277 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x,
vlan 0x%04x\n",
3278 vf->abs_vf_id, params.opcode, params.type,
3279 params.is_rx_filter ? "RX" : "",
3280 params.is_tx_filter ? "TX" : "",
3281 params.vport_to_add_to,
3282 params.mac[0], params.mac[1],
3283 params.mac[2], params.mac[3],
3284 params.mac[4], params.mac[5], params.vlan);
3285
3286 if (!vf->vport_instance) {
3287 DP_VERBOSE(p_hwfn,
3288 QED_MSG_IOV,
3289 "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
3290 vf->abs_vf_id);
3291 status = PFVF_STATUS_FAILURE;
3292 goto out;
3293 }
3294
3295 /* Update shadow copy of the VF configuration */
3296 if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
3297 status = PFVF_STATUS_FAILURE;
3298 goto out;
3299 }
3300
3301 /* Determine if the unicast filtering is acceptable to the PF */
3302 if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
3303 (params.type == QED_FILTER_VLAN ||
3304 params.type == QED_FILTER_MAC_VLAN)) {
3305 /* Once VLAN is forced or PVID is set, do not allow
3306 * to add/replace any further VLANs.
3307 */
3308 if (params.opcode == QED_FILTER_ADD ||
3309 params.opcode == QED_FILTER_REPLACE)
3310 status = PFVF_STATUS_FORCED;
3311 goto out;
3312 }
3313
3314 if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
3315 (params.type == QED_FILTER_MAC ||
3316 params.type == QED_FILTER_MAC_VLAN)) {
3317 if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
3318 (params.opcode != QED_FILTER_ADD &&
3319 params.opcode != QED_FILTER_REPLACE))
3320 status = PFVF_STATUS_FORCED;
3321 goto out;
3322 }
3323
3324 rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
3325 if (rc) {
3326 status = PFVF_STATUS_FAILURE;
3327 goto out;
3328 }
3329
3330 rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3331 QED_SPQ_MODE_CB, NULL);
3332 if (rc)
3333 status = PFVF_STATUS_FAILURE;
3334
3335 out:
3336 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3337 sizeof(struct pfvf_def_resp_tlv), status);
3338 }
3339
3340 static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
3341 struct qed_ptt *p_ptt,
3342 struct qed_vf_info *vf)
3343 {
3344 int i;
3345
3346 /* Reset the SBs */
3347 for (i = 0; i < vf->num_sbs; i++)
3348 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3349 vf->igu_sbs[i],
3350 vf->opaque_fid, false);
3351
3352 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3353 sizeof(struct pfvf_def_resp_tlv),
3354 PFVF_STATUS_SUCCESS);
3355 }
3356
3357 static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
3358 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
3359 {
3360 u16 length = sizeof(struct pfvf_def_resp_tlv);
3361 u8 status = PFVF_STATUS_SUCCESS;
3362
3363 /* Disable Interrupts for VF */
3364 qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3365
3366 /* Reset Permission table */
3367 qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3368
3369 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3370 length, status);
3371 }
3372
3373 static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
3374 struct qed_ptt *p_ptt,
3375 struct qed_vf_info *p_vf)
3376 {
3377 u16 length = sizeof(struct pfvf_def_resp_tlv);
3378 u8 status = PFVF_STATUS_SUCCESS;
3379 int rc = 0;
3380
3381 qed_iov_vf_cleanup(p_hwfn, p_vf);
3382
3383 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3384 /* Stopping the VF */
3385 rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3386 p_vf->opaque_fid);
3387
3388 if (rc) {
3389 DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
3390 rc);
3391 status = PFVF_STATUS_FAILURE;
3392 }
3393
3394 p_vf->state = VF_STOPPED;
3395 }
3396
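/* Note that the RELEASE response is sent even when stopping the VF
 * failed, so the requesting VF always receives a reply on the channel.
 */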
3397 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE, 3398 length, status); 3399 } 3400 3401 static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, 3402 struct qed_ptt *p_ptt, 3403 struct qed_vf_info *p_vf) 3404 { 3405 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; 3406 struct pfvf_read_coal_resp_tlv *p_resp; 3407 struct vfpf_read_coal_req_tlv *req; 3408 u8 status = PFVF_STATUS_FAILURE; 3409 struct qed_vf_queue *p_queue; 3410 struct qed_queue_cid *p_cid; 3411 u16 coal = 0, qid, i; 3412 bool b_is_rx; 3413 int rc = 0; 3414 3415 mbx->offset = (u8 *)mbx->reply_virt; 3416 req = &mbx->req_virt->read_coal_req; 3417 3418 qid = req->qid; 3419 b_is_rx = req->is_rx ? true : false; 3420 3421 if (b_is_rx) { 3422 if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid, 3423 QED_IOV_VALIDATE_Q_ENABLE)) { 3424 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3425 "VF[%d]: Invalid Rx queue_id = %d\n", 3426 p_vf->abs_vf_id, qid); 3427 goto send_resp; 3428 } 3429 3430 p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]); 3431 rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal); 3432 if (rc) 3433 goto send_resp; 3434 } else { 3435 if (!qed_iov_validate_txq(p_hwfn, p_vf, qid, 3436 QED_IOV_VALIDATE_Q_ENABLE)) { 3437 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3438 "VF[%d]: Invalid Tx queue_id = %d\n", 3439 p_vf->abs_vf_id, qid); 3440 goto send_resp; 3441 } 3442 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { 3443 p_queue = &p_vf->vf_queues[qid]; 3444 if ((!p_queue->cids[i].p_cid) || 3445 (!p_queue->cids[i].b_is_tx)) 3446 continue; 3447 3448 p_cid = p_queue->cids[i].p_cid; 3449 3450 rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal); 3451 if (rc) 3452 goto send_resp; 3453 break; 3454 } 3455 } 3456 3457 status = PFVF_STATUS_SUCCESS; 3458 3459 send_resp: 3460 p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ, 3461 sizeof(*p_resp)); 3462 p_resp->coal = coal; 3463 3464 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 3465 sizeof(struct channel_list_end_tlv)); 3466 3467 qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); 3468 } 3469 3470 static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, 3471 struct qed_ptt *p_ptt, 3472 struct qed_vf_info *vf) 3473 { 3474 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 3475 struct vfpf_update_coalesce *req; 3476 u8 status = PFVF_STATUS_FAILURE; 3477 struct qed_queue_cid *p_cid; 3478 u16 rx_coal, tx_coal; 3479 int rc = 0, i; 3480 u16 qid; 3481 3482 req = &mbx->req_virt->update_coalesce; 3483 3484 rx_coal = req->rx_coal; 3485 tx_coal = req->tx_coal; 3486 qid = req->qid; 3487 3488 if (!qed_iov_validate_rxq(p_hwfn, vf, qid, 3489 QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) { 3490 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3491 "VF[%d]: Invalid Rx queue_id = %d\n", 3492 vf->abs_vf_id, qid); 3493 goto out; 3494 } 3495 3496 if (!qed_iov_validate_txq(p_hwfn, vf, qid, 3497 QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) { 3498 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3499 "VF[%d]: Invalid Tx queue_id = %d\n", 3500 vf->abs_vf_id, qid); 3501 goto out; 3502 } 3503 3504 DP_VERBOSE(p_hwfn, 3505 QED_MSG_IOV, 3506 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n", 3507 vf->abs_vf_id, rx_coal, tx_coal, qid); 3508 3509 if (rx_coal) { 3510 p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]); 3511 3512 rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); 3513 if (rc) { 3514 DP_VERBOSE(p_hwfn, 3515 QED_MSG_IOV, 3516 "VF[%d]: Unable to set rx queue = %d coalesce\n", 3517 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid); 3518 goto out; 3519 } 3520 vf->rx_coal = rx_coal; 
3521 } 3522 3523 if (tx_coal) { 3524 struct qed_vf_queue *p_queue = &vf->vf_queues[qid]; 3525 3526 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { 3527 if (!p_queue->cids[i].p_cid) 3528 continue; 3529 3530 if (!p_queue->cids[i].b_is_tx) 3531 continue; 3532 3533 rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, 3534 p_queue->cids[i].p_cid); 3535 3536 if (rc) { 3537 DP_VERBOSE(p_hwfn, 3538 QED_MSG_IOV, 3539 "VF[%d]: Unable to set tx queue coalesce\n", 3540 vf->abs_vf_id); 3541 goto out; 3542 } 3543 } 3544 vf->tx_coal = tx_coal; 3545 } 3546 3547 status = PFVF_STATUS_SUCCESS; 3548 out: 3549 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE, 3550 sizeof(struct pfvf_def_resp_tlv), status); 3551 } 3552 static int 3553 qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, 3554 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 3555 { 3556 int cnt; 3557 u32 val; 3558 3559 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid); 3560 3561 for (cnt = 0; cnt < 50; cnt++) { 3562 val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); 3563 if (!val) 3564 break; 3565 msleep(20); 3566 } 3567 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 3568 3569 if (cnt == 50) { 3570 DP_ERR(p_hwfn, 3571 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n", 3572 p_vf->abs_vf_id, val); 3573 return -EBUSY; 3574 } 3575 3576 return 0; 3577 } 3578 3579 static int 3580 qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, 3581 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 3582 { 3583 u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4]; 3584 int i, cnt; 3585 3586 /* Read initial consumers & producers */ 3587 for (i = 0; i < MAX_NUM_VOQS_E4; i++) { 3588 u32 prod; 3589 3590 cons[i] = qed_rd(p_hwfn, p_ptt, 3591 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + 3592 i * 0x40); 3593 prod = qed_rd(p_hwfn, p_ptt, 3594 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 + 3595 i * 0x40); 3596 distance[i] = prod - cons[i]; 3597 } 3598 3599 /* Wait for consumers to pass the producers */ 3600 i = 0; 3601 for (cnt = 0; cnt < 50; cnt++) { 3602 for (; i < MAX_NUM_VOQS_E4; i++) { 3603 u32 tmp; 3604 3605 tmp = qed_rd(p_hwfn, p_ptt, 3606 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + 3607 i * 0x40); 3608 if (distance[i] > tmp - cons[i]) 3609 break; 3610 } 3611 3612 if (i == MAX_NUM_VOQS_E4) 3613 break; 3614 3615 msleep(20); 3616 } 3617 3618 if (cnt == 50) { 3619 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n", 3620 p_vf->abs_vf_id, i); 3621 return -EBUSY; 3622 } 3623 3624 return 0; 3625 } 3626 3627 static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn, 3628 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 3629 { 3630 int rc; 3631 3632 rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt); 3633 if (rc) 3634 return rc; 3635 3636 rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt); 3637 if (rc) 3638 return rc; 3639 3640 return 0; 3641 } 3642 3643 static int 3644 qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn, 3645 struct qed_ptt *p_ptt, 3646 u16 rel_vf_id, u32 *ack_vfs) 3647 { 3648 struct qed_vf_info *p_vf; 3649 int rc = 0; 3650 3651 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); 3652 if (!p_vf) 3653 return 0; 3654 3655 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & 3656 (1ULL << (rel_vf_id % 64))) { 3657 u16 vfid = p_vf->abs_vf_id; 3658 3659 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3660 "VF[%d] - Handling FLR\n", vfid); 3661 3662 qed_iov_vf_cleanup(p_hwfn, p_vf); 3663 3664 /* If VF isn't active, no need for anything but SW */ 3665 if (!p_vf->b_init) 3666 goto cleanup; 3667 3668 rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt); 3669 if 
(rc)
3670 goto cleanup;
3671
3672 rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
3673 if (rc) {
3674 DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
3675 return rc;
3676 }
3677
3678 /* Workaround to make VF-PF channel ready, as FW
3679 * doesn't do that as a part of FLR.
3680 */
3681 REG_WR(p_hwfn,
3682 GTT_BAR0_MAP_REG_USDM_RAM +
3683 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3684
3685 /* VF_STOPPED has to be set only after final cleanup
3686 * but prior to re-enabling the VF.
3687 */
3688 p_vf->state = VF_STOPPED;
3689
3690 rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3691 if (rc) {
3692 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
3693 vfid);
3694 return rc;
3695 }
3696 cleanup:
3697 /* Mark VF for ack and clean pending state */
3698 if (p_vf->state == VF_RESET)
3699 p_vf->state = VF_STOPPED;
3700 ack_vfs[vfid / 32] |= BIT((vfid % 32));
3701 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3702 ~(1ULL << (rel_vf_id % 64));
3703 p_vf->vf_mbx.b_pending_msg = false;
3704 }
3705
3706 return rc;
3707 }
3708
3709 static int
3710 qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3711 {
3712 u32 ack_vfs[VF_MAX_STATIC / 32];
3713 int rc = 0;
3714 u16 i;
3715
3716 memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3717
3718 /* Since BRB <-> PRS interface can't be tested as part of the flr
3719 * polling due to HW limitations, simply sleep a bit. And since
3720 * there's no need to wait per-vf, do it before looping.
3721 */
3722 msleep(100);
3723
3724 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
3725 qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3726
3727 rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3728 return rc;
3729 }
3730
3731 bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
3732 {
3733 bool found = false;
3734 u16 i;
3735
3736 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
3737 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3738 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3739 "[%08x,...,%08x]: %08x\n",
3740 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3741
3742 if (!p_hwfn->cdev->p_iov_info) {
3743 DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
3744 return false;
3745 }
3746
3747 /* Mark VFs */
3748 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
3749 struct qed_vf_info *p_vf;
3750 u8 vfid;
3751
3752 p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
3753 if (!p_vf)
3754 continue;
3755
3756 vfid = p_vf->abs_vf_id;
3757 if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
3758 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3759 u16 rel_vf_id = p_vf->relative_vf_id;
3760
3761 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3762 "VF[%d] [rel %d] got FLR-ed\n",
3763 vfid, rel_vf_id);
3764
3765 p_vf->state = VF_RESET;
3766
3767 /* No need to lock here, since pending_flr should
3768 * only change here and before ACKing MFW. Since
3769 * MFW will not trigger an additional attention for
3770 * VF flr until ACKs, we're safe.
3771 */ 3772 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64); 3773 found = true; 3774 } 3775 } 3776 3777 return found; 3778 } 3779 3780 static void qed_iov_get_link(struct qed_hwfn *p_hwfn, 3781 u16 vfid, 3782 struct qed_mcp_link_params *p_params, 3783 struct qed_mcp_link_state *p_link, 3784 struct qed_mcp_link_capabilities *p_caps) 3785 { 3786 struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, 3787 vfid, 3788 false); 3789 struct qed_bulletin_content *p_bulletin; 3790 3791 if (!p_vf) 3792 return; 3793 3794 p_bulletin = p_vf->bulletin.p_virt; 3795 3796 if (p_params) 3797 __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin); 3798 if (p_link) 3799 __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin); 3800 if (p_caps) 3801 __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); 3802 } 3803 3804 static int 3805 qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, 3806 struct qed_ptt *p_ptt, 3807 struct qed_vf_info *p_vf) 3808 { 3809 struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt; 3810 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; 3811 struct vfpf_bulletin_update_mac_tlv *p_req; 3812 u8 status = PFVF_STATUS_SUCCESS; 3813 int rc = 0; 3814 3815 if (!p_vf->p_vf_info.is_trusted_configured) { 3816 DP_VERBOSE(p_hwfn, 3817 QED_MSG_IOV, 3818 "Blocking bulletin update request from untrusted VF[%d]\n", 3819 p_vf->abs_vf_id); 3820 status = PFVF_STATUS_NOT_SUPPORTED; 3821 rc = -EINVAL; 3822 goto send_status; 3823 } 3824 3825 p_req = &mbx->req_virt->bulletin_update_mac; 3826 ether_addr_copy(p_bulletin->mac, p_req->mac); 3827 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3828 "Updated bulletin of VF[%d] with requested MAC[%pM]\n", 3829 p_vf->abs_vf_id, p_req->mac); 3830 3831 send_status: 3832 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, 3833 CHANNEL_TLV_BULLETIN_UPDATE_MAC, 3834 sizeof(struct pfvf_def_resp_tlv), status); 3835 return rc; 3836 } 3837 3838 static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, 3839 struct qed_ptt *p_ptt, int vfid) 3840 { 3841 struct qed_iov_vf_mbx *mbx; 3842 struct qed_vf_info *p_vf; 3843 3844 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 3845 if (!p_vf) 3846 return; 3847 3848 mbx = &p_vf->vf_mbx; 3849 3850 /* qed_iov_process_mbx_request */ 3851 if (!mbx->b_pending_msg) { 3852 DP_NOTICE(p_hwfn, 3853 "VF[%02x]: Trying to process mailbox message when none is pending\n", 3854 p_vf->abs_vf_id); 3855 return; 3856 } 3857 mbx->b_pending_msg = false; 3858 3859 mbx->first_tlv = mbx->req_virt->first_tlv; 3860 3861 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3862 "VF[%02x]: Processing mailbox message [type %04x]\n", 3863 p_vf->abs_vf_id, mbx->first_tlv.tl.type); 3864 3865 /* check if tlv type is known */ 3866 if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) && 3867 !p_vf->b_malicious) { 3868 switch (mbx->first_tlv.tl.type) { 3869 case CHANNEL_TLV_ACQUIRE: 3870 qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); 3871 break; 3872 case CHANNEL_TLV_VPORT_START: 3873 qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf); 3874 break; 3875 case CHANNEL_TLV_VPORT_TEARDOWN: 3876 qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf); 3877 break; 3878 case CHANNEL_TLV_START_RXQ: 3879 qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf); 3880 break; 3881 case CHANNEL_TLV_START_TXQ: 3882 qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf); 3883 break; 3884 case CHANNEL_TLV_STOP_RXQS: 3885 qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf); 3886 break; 3887 case CHANNEL_TLV_STOP_TXQS: 3888 qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf); 3889 break; 3890 case CHANNEL_TLV_UPDATE_RXQ: 3891 qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, 
p_vf); 3892 break; 3893 case CHANNEL_TLV_VPORT_UPDATE: 3894 qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf); 3895 break; 3896 case CHANNEL_TLV_UCAST_FILTER: 3897 qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf); 3898 break; 3899 case CHANNEL_TLV_CLOSE: 3900 qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf); 3901 break; 3902 case CHANNEL_TLV_INT_CLEANUP: 3903 qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf); 3904 break; 3905 case CHANNEL_TLV_RELEASE: 3906 qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); 3907 break; 3908 case CHANNEL_TLV_UPDATE_TUNN_PARAM: 3909 qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf); 3910 break; 3911 case CHANNEL_TLV_COALESCE_UPDATE: 3912 qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf); 3913 break; 3914 case CHANNEL_TLV_COALESCE_READ: 3915 qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf); 3916 break; 3917 case CHANNEL_TLV_BULLETIN_UPDATE_MAC: 3918 qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf); 3919 break; 3920 } 3921 } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { 3922 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3923 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n", 3924 p_vf->abs_vf_id, mbx->first_tlv.tl.type); 3925 3926 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, 3927 mbx->first_tlv.tl.type, 3928 sizeof(struct pfvf_def_resp_tlv), 3929 PFVF_STATUS_MALICIOUS); 3930 } else { 3931 /* unknown TLV - this may belong to a VF driver from the future 3932 * - a version written after this PF driver was written, which 3933 * supports features unknown as of yet. Too bad since we don't 3934 * support them. Or this may be because someone wrote a crappy 3935 * VF driver and is sending garbage over the channel. 3936 */ 3937 DP_NOTICE(p_hwfn, 3938 "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n", 3939 p_vf->abs_vf_id, 3940 mbx->first_tlv.tl.type, 3941 mbx->first_tlv.tl.length, 3942 mbx->first_tlv.padding, mbx->first_tlv.reply_address); 3943 3944 /* Try replying in case reply address matches the acquisition's 3945 * posted address. 
		 */
		if (p_vf->acquire.first_tlv.reply_address &&
		    (mbx->first_tlv.reply_address ==
		     p_vf->acquire.first_tlv.reply_address)) {
			qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
					     mbx->first_tlv.tl.type,
					     sizeof(struct pfvf_def_resp_tlv),
					     PFVF_STATUS_NOT_SUPPORTED);
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF[%02x]: Can't respond to TLV - no valid reply address\n",
				   p_vf->abs_vf_id);
		}
	}
}

static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
{
	int i;

	memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);

	qed_for_each_vf(p_hwfn, i) {
		struct qed_vf_info *p_vf;

		p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
		if (p_vf->vf_mbx.b_pending_msg)
			events[i / 64] |= 1ULL << (i % 64);
	}
}

static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
						       u16 abs_vfid)
{
	u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;

	if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
			   abs_vfid);
		return NULL;
	}

	return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
}

static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
			      u16 abs_vfid, struct regpair *vf_msg)
{
	struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
							       abs_vfid);

	if (!p_vf)
		return 0;

	/* Record the physical address of the request so that the handler
	 * can later copy the message from it.
	 */
	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

	/* Mark the event and schedule the workqueue */
	p_vf->vf_mbx.b_pending_msg = true;
	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);

	return 0;
}

static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
				     struct malicious_vf_eqe_data *p_data)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);

	if (!p_vf)
		return;

	if (!p_vf->b_malicious) {
		DP_NOTICE(p_hwfn,
			  "VF [%d] - Malicious behavior [%02x]\n",
			  p_vf->abs_vf_id, p_data->err_id);

		p_vf->b_malicious = true;
	} else {
		DP_INFO(p_hwfn,
			"VF [%d] - Malicious behavior [%02x]\n",
			p_vf->abs_vf_id, p_data->err_id);
	}
}

static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			       u8 opcode,
			       __le16 echo,
			       union event_ring_data *data, u8 fw_return_code)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
					  &data->vf_pf_channel.msg_addr);
	case COMMON_EVENT_MALICIOUS_VF:
		qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
		return 0;
	default:
		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return -EINVAL;
	}
}

u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	u16 i;

	if (!p_iov)
		goto out;

	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
		if (qed_iov_is_valid_vfid(p_hwfn, i, true, false))
			return i;

out:
	return MAX_NUM_VFS;
}

static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
			       int vfid)
{
	struct qed_dmae_params params;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return -EINVAL;

	memset(&params, 0, sizeof(params));
	SET_FIELD(params.flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1);
	SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 0x1);
	params.src_vfid = vf_info->abs_vf_id;

	if (qed_dmae_host2host(p_hwfn, ptt,
			       vf_info->vf_mbx.pending_req,
			       vf_info->vf_mbx.req_phys,
			       sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);

		return -EIO;
	}

	return 0;
}

static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
					    u8 *mac, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
		return;
	}

	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can't set forced MAC to malicious VF [%d]\n", vfid);
		return;
	}

	if (vf_info->p_vf_info.is_trusted_configured) {
		feature = BIT(VFPF_BULLETIN_MAC_ADDR);
		/* Trust mode will disable Forced MAC */
		vf_info->bulletin.p_virt->valid_bitmap &=
			~BIT(MAC_ADDR_FORCED);
	} else {
		feature = BIT(MAC_ADDR_FORCED);
		/* Forced MAC will disable MAC_ADDR */
		vf_info->bulletin.p_virt->valid_bitmap &=
			~BIT(VFPF_BULLETIN_MAC_ADDR);
	}

	memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n",
			  vfid);
		return -EINVAL;
	}

	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n",
			  vfid);
		return -EINVAL;
	}

	if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Can not set MAC, Forced MAC is configured\n");
		return -EINVAL;
	}

	feature = BIT(VFPF_BULLETIN_MAC_ADDR);
	ether_addr_copy(vf_info->bulletin.p_virt->mac, mac);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	if (vf_info->p_vf_info.is_trusted_configured)
		qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);

	return 0;
}

static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
					     u16 pvid, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can not set forced VLAN, invalid vfid [%d]\n", vfid);
		return;
	}

	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can't set forced VLAN to malicious VF [%d]\n", vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
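
	/* Editorial note (added): pvid == 0 is the "remove" encoding - the
	 * feature bit is retracted from valid_bitmap rather than published -
	 * and the forced-vport reconfiguration that follows runs in both
	 * cases, so the HW filters track the bulletin either way.
	 */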
4199 4200 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); 4201 } 4202 4203 void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, 4204 int vfid, u16 vxlan_port, u16 geneve_port) 4205 { 4206 struct qed_vf_info *vf_info; 4207 4208 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4209 if (!vf_info) { 4210 DP_NOTICE(p_hwfn->cdev, 4211 "Can not set udp ports, invalid vfid [%d]\n", vfid); 4212 return; 4213 } 4214 4215 if (vf_info->b_malicious) { 4216 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 4217 "Can not set udp ports to malicious VF [%d]\n", 4218 vfid); 4219 return; 4220 } 4221 4222 vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port; 4223 vf_info->bulletin.p_virt->geneve_udp_port = geneve_port; 4224 } 4225 4226 static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid) 4227 { 4228 struct qed_vf_info *p_vf_info; 4229 4230 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 4231 if (!p_vf_info) 4232 return false; 4233 4234 return !!p_vf_info->vport_instance; 4235 } 4236 4237 static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) 4238 { 4239 struct qed_vf_info *p_vf_info; 4240 4241 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 4242 if (!p_vf_info) 4243 return true; 4244 4245 return p_vf_info->state == VF_STOPPED; 4246 } 4247 4248 static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid) 4249 { 4250 struct qed_vf_info *vf_info; 4251 4252 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 4253 if (!vf_info) 4254 return false; 4255 4256 return vf_info->spoof_chk; 4257 } 4258 4259 static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val) 4260 { 4261 struct qed_vf_info *vf; 4262 int rc = -EINVAL; 4263 4264 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { 4265 DP_NOTICE(p_hwfn, 4266 "SR-IOV sanity check failed, can't set spoofchk\n"); 4267 goto out; 4268 } 4269 4270 vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 4271 if (!vf) 4272 goto out; 4273 4274 if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) { 4275 /* After VF VPORT start PF will configure spoof check */ 4276 vf->req_spoofchk_val = val; 4277 rc = 0; 4278 goto out; 4279 } 4280 4281 rc = __qed_iov_spoofchk_set(p_hwfn, vf, val); 4282 4283 out: 4284 return rc; 4285 } 4286 4287 static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id) 4288 { 4289 struct qed_vf_info *p_vf; 4290 4291 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4292 if (!p_vf || !p_vf->bulletin.p_virt) 4293 return NULL; 4294 4295 if (!(p_vf->bulletin.p_virt->valid_bitmap & 4296 BIT(VFPF_BULLETIN_MAC_ADDR))) 4297 return NULL; 4298 4299 return p_vf->bulletin.p_virt->mac; 4300 } 4301 4302 static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn, 4303 u16 rel_vf_id) 4304 { 4305 struct qed_vf_info *p_vf; 4306 4307 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4308 if (!p_vf || !p_vf->bulletin.p_virt) 4309 return NULL; 4310 4311 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) 4312 return NULL; 4313 4314 return p_vf->bulletin.p_virt->mac; 4315 } 4316 4317 static u16 4318 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id) 4319 { 4320 struct qed_vf_info *p_vf; 4321 4322 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4323 if (!p_vf || !p_vf->bulletin.p_virt) 4324 return 0; 4325 4326 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))) 4327 return 0; 4328 4329 return p_vf->bulletin.p_virt->pvid; 4330 } 4331 4332 static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, 4333 struct 
qed_ptt *p_ptt, int vfid, int val)
{
	struct qed_vf_info *vf;
	u8 abs_vp_id = 0;
	u16 rl_id;
	int rc;

	vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return -EINVAL;

	rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc)
		return rc;

	rl_id = abs_vp_id;	/* The "rl_id" is set as the "vport_id" */
	return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
}

static int
qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
{
	struct qed_vf_info *vf;
	u8 vport_id;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set min rate\n");
			return -EINVAL;
		}
	}

	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
	vport_id = vf->vport_id;

	return qed_configure_vport_wfq(cdev, vport_id, rate);
}

static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_wfq_data *vf_vp_wfq;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;
	else
		return 0;
}

/**
 * qed_schedule_iov - schedules IOV task for VF and PF
 * @hwfn: hardware function pointer
 * @flag: IOV flag for VF/PF
 */
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
	smp_mb__before_atomic();
	set_bit(flag, &hwfn->iov_task_flags);
	smp_mb__after_atomic();
	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
}

void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
		queue_delayed_work(cdev->hwfns[i].iov_wq,
				   &cdev->hwfns[i].iov_task, 0);
}

int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	int i, j;

	for_each_hwfn(cdev, i)
		if (cdev->hwfns[i].iov_wq)
			flush_workqueue(cdev->hwfns[i].iov_wq);

	/* Mark VFs for disablement */
	qed_iov_set_vfs_to_disable(cdev, true);

	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
		pci_disable_sriov(cdev->pdev);

	if (cdev->recov_in_prog) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Skip SRIOV disable operations in the device since a recovery is in progress\n");
		goto out;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		/* Failure to acquire the ptt in 100g creates an odd error
		 * where the first engine has already released IOV.
		 */
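		/* Editorial note (added): on a CMT (100g) device both engines
		 * run this loop; per the comment above, engine 0 may already
		 * have released IOV, so a failed PTT acquisition on a later
		 * engine is treated as fatal here rather than retried.
		 */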
		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			return -EBUSY;
		}

		/* Clean WFQ db and configure equal weight for all vports */
		qed_clean_wfq_db(hwfn, ptt);

		qed_for_each_vf(hwfn, j) {
			int k;

			if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
				continue;

			/* Wait until VF is disabled before releasing */
			for (k = 0; k < 100; k++) {
				if (!qed_iov_is_vf_stopped(hwfn, j))
					msleep(20);
				else
					break;
			}

			if (k < 100)
				qed_iov_release_hw_for_vf(&cdev->hwfns[i],
							  ptt, j);
			else
				DP_ERR(hwfn,
				       "Timeout waiting for VF's FLR to end\n");
		}

		qed_ptt_release(hwfn, ptt);
	}
out:
	qed_iov_set_vfs_to_disable(cdev, false);

	return 0;
}

static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
					u16 vfid,
					struct qed_iov_vf_init_params *params)
{
	u16 base, i;

	/* Since we have an equal resource distribution per-VF, and we assume
	 * PF has acquired the QED_PF_L2_QUE first queues, we start setting
	 * sequentially from there.
	 */
	base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;

	params->rel_vf_id = vfid;
	for (i = 0; i < params->num_queues; i++) {
		params->req_rx_queue[i] = base + i;
		params->req_tx_queue[i] = base + i;
	}
}

static int qed_sriov_enable(struct qed_dev *cdev, int num)
{
	struct qed_iov_vf_init_params params;
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, j, rc;

	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
		DP_NOTICE(cdev, "Can start at most %d VFs\n",
			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
		return -EINVAL;
	}

	memset(&params, 0, sizeof(params));

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		hwfn = &cdev->hwfns[j];
		ptt = qed_ptt_acquire(hwfn);

		/* Make sure not to use more than 16 queues per VF */
		params.num_queues = min_t(int,
					  FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
					  16);

		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			rc = -EBUSY;
			goto err;
		}

		for (i = 0; i < num; i++) {
			if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
				continue;

			qed_sriov_enable_qid_config(hwfn, i, &params);
			rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
			if (rc) {
				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
				qed_ptt_release(hwfn, ptt);
				goto err;
			}
		}

		qed_ptt_release(hwfn, ptt);
	}

	/* Enable SRIOV PCIe functions */
	rc = pci_enable_sriov(cdev->pdev, num);
	if (rc) {
		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
		goto err;
	}

	hwfn = QED_LEADING_HWFN(cdev);
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_ERR(hwfn, "Failed to acquire ptt\n");
		rc = -EBUSY;
		goto err;
	}

	rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
	if (rc)
		DP_INFO(cdev, "Failed to update eswitch mode\n");
	qed_ptt_release(hwfn, ptt);

	return num;

err:
	qed_sriov_disable(cdev, false);
	return rc;
}

static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
{
	if (!IS_QED_SRIOV(cdev)) {
		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
		return -EOPNOTSUPP;
	}

	if (num_vfs_param)
		return qed_sriov_enable(cdev, num_vfs_param);
	else
		return qed_sriov_disable(cdev, true);
}
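
/* Editorial note (added): qed_sriov_configure() above is the entry point
 * reached - indirectly, through the protocol driver's .sriov_configure
 * callback - when userspace writes the PCI sysfs knob, e.g.:
 *
 *	echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs	(enable 4 VFs)
 *	echo 0 > /sys/bus/pci/devices/<BDF>/sriov_numvfs	(disable)
 *
 * A zero count maps to qed_sriov_disable(cdev, true), so the PCIe VFs are
 * torn down before the per-engine HW cleanup runs.
 */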
static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF MAC; SR-IOV is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the MAC, and schedule the IOV task */
		if (vf_info->is_trusted_configured)
			ether_addr_copy(vf_info->mac, mac);
		else
			ether_addr_copy(vf_info->forced_mac, mac);

		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}

static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF VLAN; SR-IOV is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] VLAN (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced vlan, and schedule the IOV task */
		vf_info->forced_vlan = vid;
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}

static int qed_get_vf_config(struct qed_dev *cdev,
			     int vf_id, struct ifla_vf_info *ivi)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_public_vf_info *vf_info;
	struct qed_mcp_link_state link;
	u32 tx_rate;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);

	qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);

	/* Fill information about VF */
	ivi->vf = vf_id;

	if (is_valid_ether_addr(vf_info->forced_mac))
		ether_addr_copy(ivi->mac, vf_info->forced_mac);
	else
		ether_addr_copy(ivi->mac, vf_info->mac);

	ivi->vlan = vf_info->forced_vlan;
	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
	ivi->linkstate = vf_info->link_state;
	tx_rate = vf_info->tx_rate;
	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
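	/* Editorial note (added): a zero tx_rate means no cap was ever
	 * configured, in which case the current link speed is reported as
	 * the effective maximum.
	 */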
	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);

	return 0;
}

void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
	struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	int i;

	if (!hwfn->pf_iov_info)
		return;

	/* Update bulletin of all future possible VFs with link configuration */
	for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
		if (!vf_info)
			continue;

		/* Only hwfn0 is actually interested in the link speed.
		 * But since only it would receive an MFW indication of link,
		 * need to take configuration from it - otherwise things like
		 * rate limiting for hwfn1 VF would not work.
		 */
		memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
		       sizeof(params));
		memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
		memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
		       sizeof(caps));

		/* Modify link according to the VF's configured link state */
		switch (vf_info->link_state) {
		case IFLA_VF_LINK_STATE_DISABLE:
			link.link_up = false;
			break;
		case IFLA_VF_LINK_STATE_ENABLE:
			link.link_up = true;
			/* Set speed according to the maximum supported by HW,
			 * that is, 40G for regular devices and 100G for CMT
			 * mode devices.
			 */
			link.speed = (hwfn->cdev->num_hwfns > 1) ?
				     100000 : 40000;
			break;
		default:
			/* In auto mode pass PF link image to VF */
			break;
		}

		if (link.link_up && vf_info->tx_rate) {
			struct qed_ptt *ptt;
			int rate;

			rate = min_t(int, vf_info->tx_rate, link.speed);

			ptt = qed_ptt_acquire(hwfn);
			if (!ptt) {
				DP_NOTICE(hwfn, "Failed to acquire PTT\n");
				return;
			}

			if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
				vf_info->tx_rate = rate;
				link.speed = rate;
			}

			qed_ptt_release(hwfn, ptt);
		}

		qed_iov_set_link(hwfn, i, &params, &link, &caps);
	}

	qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
}

static int qed_set_vf_link_state(struct qed_dev *cdev,
				 int vf_id, int link_state)
{
	int i;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	/* Handle configuration of link state */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
		if (!vf)
			continue;

		if (vf->link_state == link_state)
			continue;

		vf->link_state = link_state;
		qed_inform_vf_link_state(&cdev->hwfns[i]);
	}

	return 0;
}

static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
{
	int i, rc = -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
		if (rc)
			break;
	}

	return rc;
}

static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
{
	int i;
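
	/* Editorial note (added): the cap is applied per engine - each hwfn
	 * holds its own public VF info and publishes its own link image -
	 * so a CMT device needs the loop below rather than a single call on
	 * the leading hwfn.
	 */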
4822 for_each_hwfn(cdev, i) { 4823 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 4824 struct qed_public_vf_info *vf; 4825 4826 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { 4827 DP_NOTICE(p_hwfn, 4828 "SR-IOV sanity check failed, can't set tx rate\n"); 4829 return -EINVAL; 4830 } 4831 4832 vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true); 4833 4834 vf->tx_rate = rate; 4835 4836 qed_inform_vf_link_state(p_hwfn); 4837 } 4838 4839 return 0; 4840 } 4841 4842 static int qed_set_vf_rate(struct qed_dev *cdev, 4843 int vfid, u32 min_rate, u32 max_rate) 4844 { 4845 int rc_min = 0, rc_max = 0; 4846 4847 if (max_rate) 4848 rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate); 4849 4850 if (min_rate) 4851 rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate); 4852 4853 if (rc_max | rc_min) 4854 return -EINVAL; 4855 4856 return 0; 4857 } 4858 4859 static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust) 4860 { 4861 int i; 4862 4863 for_each_hwfn(cdev, i) { 4864 struct qed_hwfn *hwfn = &cdev->hwfns[i]; 4865 struct qed_public_vf_info *vf; 4866 4867 if (!qed_iov_pf_sanity_check(hwfn, vfid)) { 4868 DP_NOTICE(hwfn, 4869 "SR-IOV sanity check failed, can't set trust\n"); 4870 return -EINVAL; 4871 } 4872 4873 vf = qed_iov_get_public_vf_info(hwfn, vfid, true); 4874 4875 if (vf->is_trusted_request == trust) 4876 return 0; 4877 vf->is_trusted_request = trust; 4878 4879 qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG); 4880 } 4881 4882 return 0; 4883 } 4884 4885 static void qed_handle_vf_msg(struct qed_hwfn *hwfn) 4886 { 4887 u64 events[QED_VF_ARRAY_LENGTH]; 4888 struct qed_ptt *ptt; 4889 int i; 4890 4891 ptt = qed_ptt_acquire(hwfn); 4892 if (!ptt) { 4893 DP_VERBOSE(hwfn, QED_MSG_IOV, 4894 "Can't acquire PTT; re-scheduling\n"); 4895 qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG); 4896 return; 4897 } 4898 4899 qed_iov_pf_get_pending_events(hwfn, events); 4900 4901 DP_VERBOSE(hwfn, QED_MSG_IOV, 4902 "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n", 4903 events[0], events[1], events[2]); 4904 4905 qed_for_each_vf(hwfn, i) { 4906 /* Skip VFs with no pending messages */ 4907 if (!(events[i / 64] & (1ULL << (i % 64)))) 4908 continue; 4909 4910 DP_VERBOSE(hwfn, QED_MSG_IOV, 4911 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", 4912 i, hwfn->cdev->p_iov_info->first_vf_in_pf + i); 4913 4914 /* Copy VF's message to PF's request buffer for that VF */ 4915 if (qed_iov_copy_vf_msg(hwfn, ptt, i)) 4916 continue; 4917 4918 qed_iov_process_mbx_req(hwfn, ptt, i); 4919 } 4920 4921 qed_ptt_release(hwfn, ptt); 4922 } 4923 4924 static bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn, 4925 u8 *mac, 4926 struct qed_public_vf_info *info) 4927 { 4928 if (info->is_trusted_configured) { 4929 if (is_valid_ether_addr(info->mac) && 4930 (!mac || !ether_addr_equal(mac, info->mac))) 4931 return true; 4932 } else { 4933 if (is_valid_ether_addr(info->forced_mac) && 4934 (!mac || !ether_addr_equal(mac, info->forced_mac))) 4935 return true; 4936 } 4937 4938 return false; 4939 } 4940 4941 static void qed_set_bulletin_mac(struct qed_hwfn *hwfn, 4942 struct qed_public_vf_info *info, 4943 int vfid) 4944 { 4945 if (info->is_trusted_configured) 4946 qed_iov_bulletin_set_mac(hwfn, info->mac, vfid); 4947 else 4948 qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid); 4949 } 4950 4951 static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn) 4952 { 4953 int i; 4954 4955 qed_for_each_vf(hwfn, i) { 4956 struct qed_public_vf_info *info; 4957 bool update = false; 4958 u8 *mac; 4959 4960 info = 
qed_iov_get_public_vf_info(hwfn, i, true); 4961 if (!info) 4962 continue; 4963 4964 /* Update data on bulletin board */ 4965 if (info->is_trusted_configured) 4966 mac = qed_iov_bulletin_get_mac(hwfn, i); 4967 else 4968 mac = qed_iov_bulletin_get_forced_mac(hwfn, i); 4969 4970 if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) { 4971 DP_VERBOSE(hwfn, 4972 QED_MSG_IOV, 4973 "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n", 4974 i, 4975 hwfn->cdev->p_iov_info->first_vf_in_pf + i); 4976 4977 /* Update bulletin board with MAC */ 4978 qed_set_bulletin_mac(hwfn, info, i); 4979 update = true; 4980 } 4981 4982 if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^ 4983 info->forced_vlan) { 4984 DP_VERBOSE(hwfn, 4985 QED_MSG_IOV, 4986 "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n", 4987 info->forced_vlan, 4988 i, 4989 hwfn->cdev->p_iov_info->first_vf_in_pf + i); 4990 qed_iov_bulletin_set_forced_vlan(hwfn, 4991 info->forced_vlan, i); 4992 update = true; 4993 } 4994 4995 if (update) 4996 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 4997 } 4998 } 4999 5000 static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) 5001 { 5002 struct qed_ptt *ptt; 5003 int i; 5004 5005 ptt = qed_ptt_acquire(hwfn); 5006 if (!ptt) { 5007 DP_NOTICE(hwfn, "Failed allocating a ptt entry\n"); 5008 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 5009 return; 5010 } 5011 5012 qed_for_each_vf(hwfn, i) 5013 qed_iov_post_vf_bulletin(hwfn, i, ptt); 5014 5015 qed_ptt_release(hwfn, ptt); 5016 } 5017 5018 static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id) 5019 { 5020 struct qed_public_vf_info *vf_info; 5021 struct qed_vf_info *vf; 5022 u8 *force_mac; 5023 int i; 5024 5025 vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); 5026 vf = qed_iov_get_vf_info(hwfn, vf_id, true); 5027 5028 if (!vf_info || !vf) 5029 return; 5030 5031 /* Force MAC converted to generic MAC in case of VF trust on */ 5032 if (vf_info->is_trusted_configured && 5033 (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) { 5034 force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id); 5035 5036 if (force_mac) { 5037 /* Clear existing shadow copy of MAC to have a clean 5038 * slate. 5039 */ 5040 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { 5041 if (ether_addr_equal(vf->shadow_config.macs[i], 5042 vf_info->mac)) { 5043 memset(vf->shadow_config.macs[i], 0, 5044 ETH_ALEN); 5045 DP_VERBOSE(hwfn, QED_MSG_IOV, 5046 "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n", 5047 vf_info->mac, vf_id); 5048 break; 5049 } 5050 } 5051 5052 ether_addr_copy(vf_info->mac, force_mac); 5053 memset(vf_info->forced_mac, 0, ETH_ALEN); 5054 vf->bulletin.p_virt->valid_bitmap &= 5055 ~BIT(MAC_ADDR_FORCED); 5056 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 5057 } 5058 } 5059 5060 /* Update shadow copy with VF MAC when trust mode is turned off */ 5061 if (!vf_info->is_trusted_configured) { 5062 u8 empty_mac[ETH_ALEN]; 5063 5064 memset(empty_mac, 0, ETH_ALEN); 5065 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { 5066 if (ether_addr_equal(vf->shadow_config.macs[i], 5067 empty_mac)) { 5068 ether_addr_copy(vf->shadow_config.macs[i], 5069 vf_info->mac); 5070 DP_VERBOSE(hwfn, QED_MSG_IOV, 5071 "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n", 5072 vf_info->mac, vf_id); 5073 break; 5074 } 5075 } 5076 /* Clear bulletin when trust mode is turned off, 5077 * to have a clean slate for next (normal) operations. 
		 */
		qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id);
		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	}
}

static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
{
	struct qed_sp_vport_update_params params;
	struct qed_filter_accept_flags *flags;
	struct qed_public_vf_info *vf_info;
	struct qed_vf_info *vf;
	u8 mask;
	int i;

	mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
	flags = &params.accept_flags;

	qed_for_each_vf(hwfn, i) {
		/* Need to make sure current requested configuration didn't
		 * flip so that we'll end up configuring something that's not
		 * needed.
		 */
		vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
		if (vf_info->is_trusted_configured ==
		    vf_info->is_trusted_request)
			continue;
		vf_info->is_trusted_configured = vf_info->is_trusted_request;

		/* Handle forced MAC mode */
		qed_update_mac_for_vf_trust_change(hwfn, i);

		/* Validate that the VF has a configured vport */
		vf = qed_iov_get_vf_info(hwfn, i, true);
		if (!vf->vport_instance)
			continue;

		memset(&params, 0, sizeof(params));
		params.opaque_fid = vf->opaque_fid;
		params.vport_id = vf->vport_id;

		params.update_ctl_frame_check = 1;
		params.mac_chk_en = !vf_info->is_trusted_configured;

		if (vf_info->rx_accept_mode & mask) {
			flags->update_rx_mode_config = 1;
			flags->rx_accept_filter = vf_info->rx_accept_mode;
		}

		if (vf_info->tx_accept_mode & mask) {
			flags->update_tx_mode_config = 1;
			flags->tx_accept_filter = vf_info->tx_accept_mode;
		}

		/* Remove if needed; Otherwise this would set the mask */
		if (!vf_info->is_trusted_configured) {
			flags->rx_accept_filter &= ~mask;
			flags->tx_accept_filter &= ~mask;
		}

		if (flags->update_rx_mode_config ||
		    flags->update_tx_mode_config ||
		    params.update_ctl_frame_check)
			qed_sp_vport_update(hwfn, &params,
					    QED_SPQ_MODE_EBLOCK, NULL);
	}
}

static void qed_iov_pf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	int rc;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		if (!ptt) {
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
			return;
		}

		rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
		if (rc)
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);

		qed_ptt_release(hwfn, ptt);
	}

	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
		qed_handle_vf_msg(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_pf_set_vf_unicast(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_bulletin_post(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
		qed_iov_handle_trust_change(hwfn);
}

void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
	int i;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].iov_wq)
			continue;

		if (schedule_first) {
			qed_schedule_iov(&cdev->hwfns[i],
					 QED_IOV_WQ_STOP_WQ_FLAG);
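			/* Editorial note (added): raising the STOP flag first
			 * lets an already-running task bail out early;
			 * cancel_delayed_work_sync() then waits for any
			 * in-flight invocation to finish.
			 */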
cancel_delayed_work_sync(&cdev->hwfns[i].iov_task); 5198 } 5199 5200 flush_workqueue(cdev->hwfns[i].iov_wq); 5201 destroy_workqueue(cdev->hwfns[i].iov_wq); 5202 } 5203 } 5204 5205 int qed_iov_wq_start(struct qed_dev *cdev) 5206 { 5207 char name[NAME_SIZE]; 5208 int i; 5209 5210 for_each_hwfn(cdev, i) { 5211 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 5212 5213 /* PFs needs a dedicated workqueue only if they support IOV. 5214 * VFs always require one. 5215 */ 5216 if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn)) 5217 continue; 5218 5219 snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x", 5220 cdev->pdev->bus->number, 5221 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id); 5222 5223 p_hwfn->iov_wq = create_singlethread_workqueue(name); 5224 if (!p_hwfn->iov_wq) { 5225 DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n"); 5226 return -ENOMEM; 5227 } 5228 5229 if (IS_PF(cdev)) 5230 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task); 5231 else 5232 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task); 5233 } 5234 5235 return 0; 5236 } 5237 5238 const struct qed_iov_hv_ops qed_iov_ops_pass = { 5239 .configure = &qed_sriov_configure, 5240 .set_mac = &qed_sriov_pf_set_mac, 5241 .set_vlan = &qed_sriov_pf_set_vlan, 5242 .get_config = &qed_get_vf_config, 5243 .set_link_state = &qed_set_vf_link_state, 5244 .set_spoof = &qed_spoof_configure, 5245 .set_rate = &qed_set_vf_rate, 5246 .set_trust = &qed_set_vf_trust, 5247 }; 5248
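
/* Editorial sketch (added, not part of the driver): a protocol driver that
 * receives this ops table via the qed probe path could wire an ndo callback
 * roughly as below; "my_edev" and its "iov_ops"/"cdev" members are
 * illustrative names only, not confirmed against qede.
 *
 *	static int set_vf_mac_sketch(struct net_device *ndev,
 *				     int vfid, u8 *mac)
 *	{
 *		struct my_edev *edev = netdev_priv(ndev);
 *
 *		return edev->iov_ops->set_mac(edev->cdev, mac, vfid);
 *	}
 */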