/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_vf.h"

static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released in qed_vf_pf_req_end(), after the PF's
	 * response has been processed. So qed_vf_pf_prep() and
	 * qed_vf_pf_req_end() must always be called in pairs.
	 */
	mutex_lock(&p_iov->mutex);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "preparing to send 0x%04x tlv over vf pf channel\n",
		   type);

	/* Reset request offset */
	p_iov->offset = (u8 *)p_iov->vf2pf_request;

	/* Clear mailbox - both request and reply */
	memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
	    (u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}

static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
{
	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF request status = 0x%x, PF reply status = 0x%x\n",
		   req_status, resp->default_resp.hdr.status);

	mutex_unlock(&p_hwfn->vf_iov_info->mutex);
}

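/* Every VF -> PF exchange in this file follows the same pattern; an
 * illustrative sketch (CHANNEL_TLV_FOO stands in for any request type):
 *
 *	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_FOO, sizeof(*req));
 *	... fill the request TLV ...
 *	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 *	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
 *	... check rc and resp->hdr.status ...
 *	qed_vf_pf_req_end(p_hwfn, rc);	(releases the channel mutex)
 */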
static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	int rc = 0, time = 100;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	qed_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

	/* Send TLVs over HW channel */
	memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent the trigger
	 * from being observed before the data is written.
	 */
	wmb();

	REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));

	/* When the PF is done with the response, it writes back to the
	 * `done' address. Poll until then.
	 */
	while ((!*done) && time) {
		msleep(25);
		time--;
	}

	if (!*done) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF <-- PF Timeout [Type %d]\n",
			   p_req->first_tlv.tl.type);
		rc = -EBUSY;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "PF response: %d [Type %d]\n",
			   *done, p_req->first_tlv.tl.type);
	}

	return rc;
}

#define VF_ACQUIRE_THRESH 3
static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
					  struct vf_pf_resc_request *p_req,
					  struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
		   p_req->num_rxqs,
		   p_resp->num_rxqs,
		   p_req->num_txqs,
		   p_resp->num_txqs,
		   p_req->num_sbs,
		   p_resp->num_sbs,
		   p_req->num_mac_filters,
		   p_resp->num_mac_filters,
		   p_req->num_vlan_filters,
		   p_resp->num_vlan_filters,
		   p_req->num_mc_filters, p_resp->num_mc_filters);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
}

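/* ACQUIRE is the first message sent over the channel. The VF asks for the
 * maximum resources it can use; if the PF answers NO_RESOURCE, the request
 * is shrunk to the PF-recommended amounts and re-sent, up to
 * VF_ACQUIRE_THRESH attempts. A NOT_SUPPORTED answer starts the
 * fastpath-HSI compatibility negotiation with older PF drivers seen below.
 */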
static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vf_pf_resc_request *p_resc;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int rc = 0, attempts = 0;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* start filling the request */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;

	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV, "attempting to acquire resources\n");

		/* Clear response buffer, as this might be a re-send */
		memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

		/* send acquire request */
		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
		if (rc)
			goto exit;

		/* copy acquire response from buffer to p_hwfn */
		memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				/* It's possible a legacy PF mistakenly
				 * accepted; we don't care - simply mark it
				 * as legacy and continue.
				 */
				req->vfdev_info.capabilities |=
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
			}
			DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
			resources_acquired = true;
		} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
						      &resp->resc);
		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
			if (pfdev_info->major_fp_hsi &&
			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
				DP_NOTICE(p_hwfn,
					  "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
					  pfdev_info->major_fp_hsi,
					  pfdev_info->minor_fp_hsi,
					  ETH_HSI_VER_MAJOR,
					  ETH_HSI_VER_MINOR,
					  pfdev_info->major_fp_hsi);
				rc = -EINVAL;
				goto exit;
			}

			if (!pfdev_info->major_fp_hsi) {
				if (req->vfdev_info.capabilities &
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
					DP_NOTICE(p_hwfn,
						  "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
					rc = -EINVAL;
					goto exit;
				} else {
					DP_INFO(p_hwfn,
						"PF is old - try re-acquire to see if it supports FW-version override\n");
					req->vfdev_info.capabilities |=
					    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
					continue;
				}
			}

			/* If PF/VF are using the same Major, the PF must have
			 * had its reasons. Simply fail.
			 */
			DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
			rc = -EINVAL;
			goto exit;
		} else {
			DP_ERR(p_hwfn,
			       "PF returned error %d to VF acquisition request\n",
			       resp->hdr.status);
			rc = -EAGAIN;
			goto exit;
		}
	}

	/* Mark the PF as legacy, if needed */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
		p_iov->b_pre_fp_hsi = true;

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->cdev->type = resp->pfdev_info.dev_type;
	p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;

	p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_NOTICE(p_hwfn, "100g VF\n");
			p_hwfn->cdev->num_hwfns = 2;
		}
	}

	if (!p_iov->b_pre_fp_hsi &&
	    ETH_HSI_VER_MINOR &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI; %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

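/* qed_vf_hw_prepare() is the VF's early-init entry point: it derives the
 * doorbell BAR from the already-mapped regview, reads the opaque/concrete
 * FIDs from the ME registers, allocates the DMA-coherent request/reply
 * mailboxes and the bulletin board, and finally runs the ACQUIRE exchange
 * above.
 */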
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov;
	u32 reg;

	/* Set number of hwfns - might be overridden once the leading hwfn
	 * learns the actual configuration from the PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->cdev->num_hwfns = 1;

	/* Set the doorbell bar. Assumption: regview is set */
	p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
			    PXP_VF_BAR0_START_DQ;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
	if (!p_iov)
		return -ENOMEM;

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  sizeof(union vfpf_tlvs),
						  &p_iov->vf2pf_request_phys,
						  GFP_KERNEL);
	if (!p_iov->vf2pf_request)
		goto free_p_iov;

	p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(union pfvf_tlvs),
						&p_iov->pf2vf_reply_phys,
						GFP_KERNEL);
	if (!p_iov->pf2vf_reply)
		goto free_vf2pf_request;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
		   p_iov->vf2pf_request,
		   (u64)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
	p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						    p_iov->bulletin.size,
						    &p_iov->bulletin.phys,
						    GFP_KERNEL);
	if (!p_iov->bulletin.p_virt)
		goto free_pf2vf_reply;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt,
		   (u64)p_iov->bulletin.phys, p_iov->bulletin.size);

	mutex_init(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = QED_PCI_ETH;

	return qed_vf_pf_acquire(p_hwfn);

free_pf2vf_reply:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union pfvf_tlvs),
			  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
free_vf2pf_request:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union vfpf_tlvs),
			  p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
free_p_iov:
	kfree(p_iov);

	return -ENOMEM;
}

#define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev)	(TSTORM_QZONE_START +	\
				 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))

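/* For a legacy (pre-FP-HSI-override) PF, qed_vf_pf_rxq_start() below cannot
 * learn the Rx producer address from the response, so it computes it from
 * the queue-zone layout itself:
 *
 *	producer = regview + MSTORM_QZONE_START(cdev)
 *		   + hw_qid * MSTORM_QZONE_SIZE
 *
 * and clears it before the queue is started.
 */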
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
			u8 rx_qid,
			u16 sb,
			u8 sb_index,
			u16 bd_max_bytes,
			dma_addr_t bd_chain_phys_addr,
			dma_addr_t cqe_pbl_addr,
			u16 cqe_pbl_size, void __iomem **pp_prod)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = sb;
	req->sb_index = sb_index;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1;

	/* If PF is legacy, we'll need to calculate producers ourselves
	 * as well as clean them.
	 */
	if (pp_prod && p_iov->b_pre_fp_hsi) {
		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)p_hwfn->regview +
			   MSTORM_QZONE_START(p_hwfn->cdev) +
			   hw_qid * MSTORM_QZONE_SIZE;

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)(&init_prod_val));
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Learn the address of the producer from the response */
	if (pp_prod && !p_iov->b_pre_fp_hsi) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)&init_prod_val);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = rx_qid;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

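/* Tx doorbells mirror the Rx producer split: a modern PF returns the actual
 * doorbell offset in the queue-start response, while for a legacy PF the VF
 * derives it from the queue's CID via qed_db_addr_vf(cid, DQ_DEMS_LEGACY).
 */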
int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
			u16 tx_queue_id,
			u16 sb,
			u8 sb_index,
			dma_addr_t pbl_addr,
			u16 pbl_size, void __iomem **pp_doorbell)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = tx_queue_id;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = sb;
	req->sb_index = sb_index;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	if (pp_doorbell) {
		/* Modern PFs provide the actual offsets, while legacy
		 * provided only the queue id.
		 */
		if (!p_iov->b_pre_fp_hsi) {
			*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
				       resp->offset;
		} else {
			u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
			u32 db_addr;

			db_addr = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
			*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
				       db_addr;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
			   tx_queue_id, *pp_doorbell, resp->offset);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = tx_qid;
	req->num_txqs = 1;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
		if (p_hwfn->sbs_info[i])
			req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

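/* VPORT_UPDATE is the one compound message in this file: each feature the
 * caller wants to touch (activate, tx-switching, multicast, accept flags,
 * RSS, ...) is appended as its own extended TLV, and the PF answers with
 * one default-response TLV per extended TLV.
 * qed_vf_handle_vp_update_is_needed() decides which responses to look for,
 * and resp_size in qed_vf_pf_vport_update() grows accordingly.
 */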
static bool
qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
		return !!p_data->update_tx_switching_flg;
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n", tlv);
		return false;
	}
}

static void
qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
		if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
			 qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
						  tlv);
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "TLV[%d] Configuration %s\n",
				   tlv,
				   (p_resp->hdr.status == PFVF_STATUS_SUCCESS)
				   ? "succeeded" : "failed");
	}
}

int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u32 resp_size = 0;
	u16 size, tlv;
	int rc;

	resp = &p_iov->pf2vf_reply->default_resp;
	resp_size = sizeof(*resp);

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

	if (p_params->update_tx_switching_flg) {
		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
		p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					      tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
	}

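	/* Multicast is configured approximately: each MAC is hashed into one
	 * of ETH_MULTICAST_MAC_BINS_IN_REGS bins (see the use of
	 * qed_mcast_bin_from_mac() in qed_vf_pf_filter_mcast() below), and
	 * the whole bin map is handed to the PF.
	 */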
	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		memcpy(p_mcast_tlv->bins, p_params->bins,
		       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

	update_rx = p_params->accept_flags.update_rx_mode_config;
	update_tx = p_params->accept_flags.update_tx_mode_config;

	if (update_rx || update_tx) {
		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
		p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_accept_tlv->update_rx_mode = update_rx;
			p_accept_tlv->rx_accept_filter =
			    p_params->accept_flags.rx_accept_filter;
		}

		if (update_tx) {
			p_accept_tlv->update_tx_mode = update_tx;
			p_accept_tlv->tx_accept_filter =
			    p_params->accept_flags.tx_accept_filter;
		}
	}

	if (p_params->rss_params) {
		struct qed_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = qed_add_tlv(p_hwfn,
					&p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
		memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
		       sizeof(rss_params->rss_ind_table));
		memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
		       sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);

		resp_size += sizeof(struct pfvf_def_resp_tlv);
		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
		    p_params->update_accept_any_vlan_flg;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

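/* CHANNEL_TLV_CLOSE asks the PF to quiesce this VF; on success the VF marks
 * its interrupts as disabled (b_int_enabled = 0).
 */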
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

	p_hwfn->b_int_enabled = 0;

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EAGAIN;

	qed_vf_pf_req_end(p_hwfn, rc);

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union vfpf_tlvs),
				  p_iov->vf2pf_request,
				  p_iov->vf2pf_request_phys);
	if (p_iov->pf2vf_reply)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union pfvf_tlvs),
				  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct qed_bulletin_content);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  size,
				  p_iov->bulletin.p_virt, p_iov->bulletin.phys);
	}

	kfree(p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = NULL;

	return rc;
}

void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd)
{
	struct qed_sp_vport_update_params sp_params;
	int i;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, sp_params.bins);
		}
	}

	qed_vf_pf_vport_update(p_hwfn, &sp_params);
}

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_ucast)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	memcpy(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

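/* CHANNEL_TLV_INT_CLEANUP asks the PF to perform the final interrupt and
 * status-block cleanup on the VF's behalf, since the VF cannot touch those
 * resources directly.
 */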
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}

int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct qed_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return 0;

	/* Verify the bulletin we see is valid */
	crc = crc32(0, (u8 *)&shadow + crc_size,
		    p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return -EAGAIN;

	/* Set the shadow bulletin and process it */
	memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return 0;
}

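/* The bulletin is validated in two steps: the version field is compared
 * against the last shadow copy to detect a change, and then a CRC32 over
 * the content (excluding the crc field itself) guards against reading a
 * half-written board; -EAGAIN simply means "retry on the next poll".
 * The getters below all operate on the validated shadow copy.
 */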
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin)
{
	memset(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}

void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params)
{
	__qed_vf_get_link_params(p_hwfn, params,
				 &p_hwfn->vf_iov_info->bulletin_shadow);
}

void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin)
{
	memset(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link)
{
	__qed_vf_get_link_state(p_hwfn, link,
				&p_hwfn->vf_iov_info->bulletin_shadow);
}

void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin)
{
	memset(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps)
{
	__qed_vf_get_link_caps(p_hwfn, p_link_caps,
			       &p_hwfn->vf_iov_info->bulletin_shadow);
}

void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
	memcpy(port_mac,
	       p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
}

void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
{
	struct qed_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	return false;
}

static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
					   u8 *dst_mac, u8 *p_is_forced)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	ether_addr_copy(dst_mac, bulletin->mac);

	return true;
}

void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}

static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
{
	struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
	u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
	void *cookie = hwfn->cdev->ops_cookie;

	is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
						      &is_mac_forced);
	if (is_mac_exist && is_mac_forced && cookie)
		ops->force_mac(cookie, mac);

	/* Always update link configuration according to bulletin */
	qed_link_update(hwfn);
}

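/* The VF has no interrupt to tell it the bulletin board changed, so
 * qed_iov_vf_task() polls: it re-queues itself once a second (HZ) until
 * QED_IOV_WQ_STOP_WQ_FLAG is raised during teardown.
 */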
void qed_iov_vf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	u8 change = 0;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	/* Handle bulletin board changes */
	qed_vf_read_bulletin(hwfn, &change);
	if (change)
		qed_handle_bulletin_change(hwfn);

	/* As VF is polling bulletin board, need to constantly re-schedule */
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
}