/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_vf.h"

static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released when we receive PF's response
	 * in qed_send_msg2pf().
	 * So, qed_vf_pf_prep() and qed_send_msg2pf()
	 * must come in sequence.
	 */
	mutex_lock(&(p_iov->mutex));

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "preparing to send 0x%04x tlv over vf pf channel\n",
		   type);

	/* Reset request offset */
	p_iov->offset = (u8 *)p_iov->vf2pf_request;

	/* Clear mailbox - both request and reply */
	memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
	    (u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}

static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
{
	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF request status = 0x%x, PF reply status = 0x%x\n",
		   req_status, resp->default_resp.hdr.status);

	mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
}
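
/* Every VF -> PF exchange follows the same sequence: qed_vf_pf_prep()
 * builds the request in the DMA mailbox while taking the channel mutex,
 * qed_send_msg2pf() rings the HW channel and polls for the PF's reply,
 * and qed_vf_pf_req_end() logs the outcome and releases the mutex.
 */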

static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	int rc = 0, time = 100;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	qed_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

	/* Send TLVs over HW channel */
	memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent trigger before
	 * data is written.
	 */
	wmb();

	REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));

	/* When the PF is done with the response, it writes back to the
	 * `done' address. Poll until then.
	 */
	while ((!*done) && time) {
		msleep(25);
		time--;
	}

	if (!*done) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF <-- PF Timeout [Type %d]\n",
			   p_req->first_tlv.tl.type);
		rc = -EBUSY;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "PF response: %d [Type %d]\n",
			   *done, p_req->first_tlv.tl.type);
	}

	return rc;
}

#define VF_ACQUIRE_THRESH 3
static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
					  struct vf_pf_resc_request *p_req,
					  struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
		   p_req->num_rxqs,
		   p_resp->num_rxqs,
		   p_req->num_txqs,
		   p_resp->num_txqs,
		   p_req->num_sbs,
		   p_resp->num_sbs,
		   p_req->num_mac_filters,
		   p_resp->num_mac_filters,
		   p_req->num_vlan_filters,
		   p_resp->num_vlan_filters,
		   p_req->num_mc_filters, p_resp->num_mc_filters);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
}
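
/* ACQUIRE is a negotiation: the VF first requests the maximum resources it
 * can support and, on PFVF_STATUS_NO_RESOURCE, retries (up to
 * VF_ACQUIRE_THRESH attempts) with the amounts the PF recommended.
 * PFVF_STATUS_NOT_SUPPORTED is used to detect fastpath HSI mismatches
 * against older PF drivers.
 */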

static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vf_pf_resc_request *p_resc;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int rc = 0, attempts = 0;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* start filling the request */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;

	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV, "attempting to acquire resources\n");

		/* Clear response buffer, as this might be a re-send */
		memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

		/* send acquire request */
		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
		if (rc)
			return rc;

		/* copy acquire response from buffer to p_hwfn */
		memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				/* It's possible legacy PF mistakenly accepted;
				 * but we don't care - simply mark it as
				 * legacy and continue.
				 */
				req->vfdev_info.capabilities |=
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
			}
			DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
			resources_acquired = true;
		} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
						      &resp->resc);
		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
			if (pfdev_info->major_fp_hsi &&
			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
				DP_NOTICE(p_hwfn,
					  "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
					  pfdev_info->major_fp_hsi,
					  pfdev_info->minor_fp_hsi,
					  ETH_HSI_VER_MAJOR,
					  ETH_HSI_VER_MINOR,
					  pfdev_info->major_fp_hsi);
				rc = -EINVAL;
				goto exit;
			}

			if (!pfdev_info->major_fp_hsi) {
				if (req->vfdev_info.capabilities &
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
					DP_NOTICE(p_hwfn,
						  "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
					rc = -EINVAL;
					goto exit;
				} else {
					DP_INFO(p_hwfn,
						"PF is old - try re-acquire to see if it supports FW-version override\n");
					req->vfdev_info.capabilities |=
					    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
					continue;
				}
			}

			/* If PF/VF are using same Major, PF must have had
			 * its reasons. Simply fail.
			 */
			DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
			rc = -EINVAL;
			goto exit;
		} else {
			DP_ERR(p_hwfn,
			       "PF returned error %d to VF acquisition request\n",
			       resp->hdr.status);
			rc = -EAGAIN;
			goto exit;
		}
	}

	/* Mark the PF as legacy, if needed */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
		p_iov->b_pre_fp_hsi = true;

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->cdev->type = resp->pfdev_info.dev_type;
	p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;

	p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_NOTICE(p_hwfn, "100g VF\n");
			p_hwfn->cdev->num_hwfns = 2;
		}
	}

	if (!p_iov->b_pre_fp_hsi &&
	    ETH_HSI_VER_MINOR &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI; %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
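
/* qed_vf_hw_prepare() runs before the channel exists, so it reads the
 * opaque/concrete FIDs straight from BAR0 and DMA-maps the two mailbox
 * buffers and the bulletin board before issuing the first ACQUIRE.
 */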

int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov;
	u32 reg;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->cdev->num_hwfns = 1;

	/* Set the doorbell bar. Assumption: regview is set */
	p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
					  PXP_VF_BAR0_START_DQ;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
	if (!p_iov)
		return -ENOMEM;

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  sizeof(union vfpf_tlvs),
						  &p_iov->vf2pf_request_phys,
						  GFP_KERNEL);
	if (!p_iov->vf2pf_request)
		goto free_p_iov;

	p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(union pfvf_tlvs),
						&p_iov->pf2vf_reply_phys,
						GFP_KERNEL);
	if (!p_iov->pf2vf_reply)
		goto free_vf2pf_request;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
		   p_iov->vf2pf_request,
		   (u64)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
	p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						    p_iov->bulletin.size,
						    &p_iov->bulletin.phys,
						    GFP_KERNEL);
	if (!p_iov->bulletin.p_virt)
		goto free_pf2vf_reply;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt,
		   (u64)p_iov->bulletin.phys, p_iov->bulletin.size);

	mutex_init(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = QED_PCI_ETH;

	return qed_vf_pf_acquire(p_hwfn);

free_pf2vf_reply:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union pfvf_tlvs),
			  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
free_vf2pf_request:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union vfpf_tlvs),
			  p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
free_p_iov:
	kfree(p_iov);

	return -ENOMEM;
}

#define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev)	(TSTORM_QZONE_START + \
				 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
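
/* For Rx queues, a modern PF returns the producer's location within the
 * BAR in its response; a legacy (pre-fp-hsi) PF does not, so the VF must
 * derive the producer address from the MSTORM queue zone and initialize
 * it on its own.
 */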

int
qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    u16 bd_max_bytes,
		    dma_addr_t bd_chain_phys_addr,
		    dma_addr_t cqe_pbl_addr,
		    u16 cqe_pbl_size, void __iomem **pp_prod)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	u8 rx_qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = p_cid->rel.sb;
	req->sb_index = p_cid->rel.sb_idx;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1;

	/* If PF is legacy, we'll need to calculate producers ourselves
	 * as well as clean them.
	 */
	if (p_iov->b_pre_fp_hsi) {
		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)
		    p_hwfn->regview +
		    MSTORM_QZONE_START(p_hwfn->cdev) +
		    hw_qid * MSTORM_QZONE_SIZE;

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)(&init_prod_val));
	}
	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Learn the address of the producer from the response */
	if (!p_iov->b_pre_fp_hsi) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)&init_prod_val);
	}
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       struct qed_queue_cid *p_cid, bool cqe_completion)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = p_cid->rel.queue_id;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
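
/* Tx queues follow the same split: a modern PF replies with the actual
 * doorbell offset, whereas a legacy PF provides only the queue's CID and
 * the VF derives a DEMS doorbell address from it.
 */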

int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	u16 qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = qid;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = p_cid->rel.sb;
	req->sb_index = p_cid->rel.sb_idx;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Modern PFs provide the actual offsets, while legacy
	 * provided only the queue id.
	 */
	if (!p_iov->b_pre_fp_hsi) {
		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
	} else {
		u8 cid = p_iov->acquire_resp.resc.cid[qid];

		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
					     qed_db_addr_vf(cid,
							    DQ_DEMS_LEGACY);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
		   qid, *pp_doorbell, resp->offset);
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = p_cid->rel.queue_id;
	req->num_txqs = 1;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
		if (p_hwfn->sbs_info[i])
			req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
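
/* VPORT_UPDATE is the one compound request on the channel: the VF emits an
 * extended TLV per attribute it wants changed, and the PF answers with a
 * status TLV for each. The helpers below decide which TLVs a given update
 * needs and locate the matching replies in the response buffer.
 */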

static bool
qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
		return !!p_data->update_tx_switching_flg;
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n", tlv);
		return false;
	}
}

static void
qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
		if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
			 qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
						  tlv);
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "TLV[%d] Configuration %s\n",
				   tlv,
				   (p_resp->hdr.status == PFVF_STATUS_SUCCESS) ?
				   "succeeded" : "failed");
	}
}

int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u32 resp_size = 0;
	u16 size, tlv;
	int rc;

	resp = &p_iov->pf2vf_reply->default_resp;
	resp_size = sizeof(*resp);

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

	if (p_params->update_tx_switching_flg) {
		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
		p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					      tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
	}

	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		memcpy(p_mcast_tlv->bins, p_params->bins,
		       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

	update_rx = p_params->accept_flags.update_rx_mode_config;
	update_tx = p_params->accept_flags.update_tx_mode_config;

	if (update_rx || update_tx) {
		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
		p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_accept_tlv->update_rx_mode = update_rx;
			p_accept_tlv->rx_accept_filter =
			    p_params->accept_flags.rx_accept_filter;
		}

		if (update_tx) {
			p_accept_tlv->update_tx_mode = update_tx;
			p_accept_tlv->tx_accept_filter =
			    p_params->accept_flags.tx_accept_filter;
		}
	}

	if (p_params->rss_params) {
		struct qed_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = qed_add_tlv(p_hwfn,
					&p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
		memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
		       sizeof(rss_params->rss_ind_table));
		memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
		       sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);

		resp_size += sizeof(struct pfvf_def_resp_tlv);
		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
		    p_params->update_accept_any_vlan_flg;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
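
/* Teardown comes in two strengths: qed_vf_pf_reset() sends CHANNEL_TLV_CLOSE
 * and leaves the channel usable, while qed_vf_pf_release() also frees the
 * mailboxes and bulletin board that qed_vf_hw_prepare() allocated.
 */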

int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

	p_hwfn->b_int_enabled = 0;

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EAGAIN;

	qed_vf_pf_req_end(p_hwfn, rc);

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union vfpf_tlvs),
				  p_iov->vf2pf_request,
				  p_iov->vf2pf_request_phys);
	if (p_iov->pf2vf_reply)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union pfvf_tlvs),
				  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct qed_bulletin_content);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  size,
				  p_iov->bulletin.p_virt, p_iov->bulletin.phys);
	}

	kfree(p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = NULL;

	return rc;
}
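
/* There is no dedicated multicast message on the channel; multicast
 * configuration is expressed as a vport-update carrying the approximate
 * mcast bins.
 */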

void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd)
{
	struct qed_sp_vport_update_params sp_params;
	int i;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, sp_params.bins);
		}
	}

	qed_vf_pf_vport_update(p_hwfn, &sp_params);
}

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_ucast)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	memcpy(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}
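
/* The PF writes the bulletin board asynchronously and the VF only polls it,
 * so a local snapshot is taken first and then validated: an unchanged
 * version means nothing to do, and a CRC over everything past the crc field
 * guards against reading a half-written board.
 */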

int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct qed_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return 0;

	/* Verify the bulletin we see is valid */
	crc = crc32(0, (u8 *)&shadow + crc_size,
		    p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return -EAGAIN;

	/* Set the shadow bulletin and process it */
	memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return 0;
}

void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin)
{
	memset(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}

void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params)
{
	__qed_vf_get_link_params(p_hwfn, params,
				 &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin)
{
	memset(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link)
{
	__qed_vf_get_link_state(p_hwfn, link,
				&(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin)
{
	memset(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps)
{
	__qed_vf_get_link_caps(p_hwfn, p_link_caps,
			       &(p_hwfn->vf_iov_info->bulletin_shadow));
}
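
/* The getters below simply expose fields cached from the acquire response
 * and the bulletin shadow; they generate no channel traffic.
 */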

void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
	memcpy(port_mac,
	       p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
}

void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
{
	struct qed_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters)
{
	struct qed_vf_iov *p_vf = p_hwfn->vf_iov_info;

	*num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
}

bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	if (ether_addr_equal(bulletin->mac, mac))
		return false;

	return false;
}

static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
					   u8 *dst_mac, u8 *p_is_forced)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	ether_addr_copy(dst_mac, bulletin->mac);

	return true;
}

void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}

static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
{
	struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
	u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
	void *cookie = hwfn->cdev->ops_cookie;

	is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
						      &is_mac_forced);
	if (is_mac_exist && cookie)
		ops->force_mac(cookie, mac, !!is_mac_forced);

	/* Always update link configuration according to bulletin */
	qed_link_update(hwfn);
}

void qed_iov_vf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	u8 change = 0;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	/* Handle bulletin board changes */
	qed_vf_read_bulletin(hwfn, &change);
	if (change)
		qed_handle_bulletin_change(hwfn);

	/* As VF is polling bulletin board, need to constantly re-schedule */
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
}