/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_vf.h"
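
/* The VF communicates with the PF over a simple mailbox channel: a
 * DMA-coherent request buffer (vf2pf_request) is filled with a list of
 * TLVs, its physical address is written into the VF's USDM zone, and a
 * trigger doorbell tells PF firmware a message is pending. The PF writes
 * its reply into pf2vf_reply and sets the response status, which the VF
 * polls for. The typical flow, sketched here with a hypothetical
 * CHANNEL_TLV_FOO request (p_iov/resp as in the helpers below), is:
 *
 *	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_FOO, sizeof(*req));
 *	... fill request fields ...
 *	qed_add_tlv(p_hwfn, &p_iov->offset,
 *		    CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 *	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
 *	qed_vf_pf_req_end(p_hwfn, rc);
 */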
static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released in qed_vf_pf_req_end(), after the PF's
	 * response has been processed. So qed_vf_pf_prep() and
	 * qed_vf_pf_req_end() must come in sequence.
	 */
	mutex_lock(&(p_iov->mutex));

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "preparing to send 0x%04x tlv over vf pf channel\n",
		   type);

	/* Reset Request offset */
	p_iov->offset = (u8 *)p_iov->vf2pf_request;

	/* Clear mailbox - both request and reply */
	memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
	    (u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}

static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
{
	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF request status = 0x%x, PF reply status = 0x%x\n",
		   req_status, resp->default_resp.hdr.status);

	mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
}

static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	int rc = 0, time = 100;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	qed_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

	/* Send TLVs over HW channel */
	memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent trigger before
	 * data is written.
	 */
	wmb();

	REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));

	/* When the PF is done with the response, it writes back to the
	 * `done' address. Poll until then.
	 */
	while ((!*done) && time) {
		msleep(25);
		time--;
	}

	if (!*done) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF <-- PF Timeout [Type %d]\n",
			   p_req->first_tlv.tl.type);
		rc = -EBUSY;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "PF response: %d [Type %d]\n",
			   *done, p_req->first_tlv.tl.type);
	}

	return rc;
}
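
/* The PF may grant fewer resources than the VF asked for; in that case the
 * ACQUIRE request is retried with the PF's suggested amounts, up to this
 * many attempts.
 */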
#define VF_ACQUIRE_THRESH 3
static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
					  struct vf_pf_resc_request *p_req,
					  struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
		   p_req->num_rxqs,
		   p_resp->num_rxqs,
		   p_req->num_txqs,
		   p_resp->num_txqs,
		   p_req->num_sbs,
		   p_resp->num_sbs,
		   p_req->num_mac_filters,
		   p_resp->num_mac_filters,
		   p_req->num_vlan_filters,
		   p_resp->num_vlan_filters,
		   p_req->num_mc_filters, p_resp->num_mc_filters);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
}

static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vf_pf_resc_request *p_resc;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int rc = 0, attempts = 0;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* start filling the request */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;

	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
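
	/* Negotiate with the PF: a SUCCESS reply completes the acquisition,
	 * NO_RESOURCE triggers a retry with reduced amounts, and
	 * NOT_SUPPORTED indicates a fastpath HSI mismatch that may still be
	 * worked around by re-acquiring in pre-FP-HSI (legacy) mode.
	 */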
	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV, "attempting to acquire resources\n");

		/* Clear response buffer, as this might be a re-send */
		memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

		/* send acquire request */
		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
		if (rc)
			goto exit;

		/* copy acquire response from buffer to p_hwfn */
		memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				/* It's possible legacy PF mistakenly accepted;
				 * but we don't care - simply mark it as
				 * legacy and continue.
				 */
				req->vfdev_info.capabilities |=
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
			}
			DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
			resources_acquired = true;
		} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
						      &resp->resc);
		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
			if (pfdev_info->major_fp_hsi &&
			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
				DP_NOTICE(p_hwfn,
					  "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
					  pfdev_info->major_fp_hsi,
					  pfdev_info->minor_fp_hsi,
					  ETH_HSI_VER_MAJOR,
					  ETH_HSI_VER_MINOR,
					  pfdev_info->major_fp_hsi);
				rc = -EINVAL;
				goto exit;
			}

			if (!pfdev_info->major_fp_hsi) {
				if (req->vfdev_info.capabilities &
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
					DP_NOTICE(p_hwfn,
						  "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
					rc = -EINVAL;
					goto exit;
				} else {
					DP_INFO(p_hwfn,
						"PF is old - try re-acquire to see if it supports FW-version override\n");
					req->vfdev_info.capabilities |=
					    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
					continue;
				}
			}

			/* If PF/VF are using the same Major, PF must have had
			 * its reasons. Simply fail.
			 */
			DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
			rc = -EINVAL;
			goto exit;
		} else {
			DP_ERR(p_hwfn,
			       "PF returned error %d to VF acquisition request\n",
			       resp->hdr.status);
			rc = -EAGAIN;
			goto exit;
		}
	}

	/* Mark the PF as legacy, if needed */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
		p_iov->b_pre_fp_hsi = true;

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->cdev->type = resp->pfdev_info.dev_type;
	p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;

	p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_NOTICE(p_hwfn, "100g VF\n");
			p_hwfn->cdev->num_hwfns = 2;
		}
	}

	if (!p_iov->b_pre_fp_hsi &&
	    ETH_HSI_VER_MINOR &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI; %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
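
/* First step in a VF's initialization: map the doorbell BAR, learn the
 * function IDs from the "ME" registers, allocate the DMA buffers used by
 * the channel and the bulletin board, and then issue ACQUIRE to the PF.
 */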
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov;
	u32 reg;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->cdev->num_hwfns = 1;

	/* Set the doorbell bar. Assumption: regview is set */
	p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
					  PXP_VF_BAR0_START_DQ;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
	if (!p_iov)
		return -ENOMEM;

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  sizeof(union vfpf_tlvs),
						  &p_iov->vf2pf_request_phys,
						  GFP_KERNEL);
	if (!p_iov->vf2pf_request)
		goto free_p_iov;

	p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(union pfvf_tlvs),
						&p_iov->pf2vf_reply_phys,
						GFP_KERNEL);
	if (!p_iov->pf2vf_reply)
		goto free_vf2pf_request;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
		   p_iov->vf2pf_request,
		   (u64)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
	p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						    p_iov->bulletin.size,
						    &p_iov->bulletin.phys,
						    GFP_KERNEL);
	if (!p_iov->bulletin.p_virt)
		goto free_pf2vf_reply;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt,
		   (u64)p_iov->bulletin.phys, p_iov->bulletin.size);

	mutex_init(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = QED_PCI_ETH;

	return qed_vf_pf_acquire(p_hwfn);

free_pf2vf_reply:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union pfvf_tlvs),
			  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
free_vf2pf_request:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union vfpf_tlvs),
			  p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
free_p_iov:
	kfree(p_iov);

	return -ENOMEM;
}
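
/* Rx producers live in internal RAM: the Tstorm queue zones start at the
 * beginning of SDM zone A and the Mstorm zones follow them, so a queue's
 * producer address can be derived from its HW queue id and the zone size.
 */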
#define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev)	(TSTORM_QZONE_START +	\
				 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))

int
qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    u16 bd_max_bytes,
		    dma_addr_t bd_chain_phys_addr,
		    dma_addr_t cqe_pbl_addr,
		    u16 cqe_pbl_size, void __iomem **pp_prod)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	u8 rx_qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = p_cid->rel.sb;
	req->sb_index = p_cid->rel.sb_idx;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1;

	/* If PF is legacy, we'll need to calculate producers ourselves
	 * as well as clean them.
	 */
	if (p_iov->b_pre_fp_hsi) {
		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)
		    p_hwfn->regview +
		    MSTORM_QZONE_START(p_hwfn->cdev) +
		    hw_qid * MSTORM_QZONE_SIZE;

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)(&init_prod_val));
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Learn the address of the producer from the response */
	if (!p_iov->b_pre_fp_hsi) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)&init_prod_val);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       struct qed_queue_cid *p_cid, bool cqe_completion)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = p_cid->rel.queue_id;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
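
/* Tx queues are driven through doorbells in the DQ BAR. A modern PF
 * returns the exact doorbell offset in the queue-start response, while a
 * legacy PF returns only the CID, from which the VF derives the legacy
 * DEMS doorbell address itself.
 */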
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	u16 qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = qid;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = p_cid->rel.sb;
	req->sb_index = p_cid->rel.sb_idx;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Modern PFs provide the actual offsets, while legacy PFs provided
	 * only the queue id.
	 */
	if (!p_iov->b_pre_fp_hsi) {
		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
	} else {
		u8 cid = p_iov->acquire_resp.resc.cid[qid];

		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
					     qed_db_addr_vf(cid,
							    DQ_DEMS_LEGACY);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
		   qid, *pp_doorbell, resp->offset);
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = p_cid->rel.queue_id;
	req->num_txqs = 1;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
		if (p_hwfn->sbs_info[i])
			req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
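
/* A vport-update request aggregates several optional extended TLVs; each
 * TLV the VF sends is answered by its own default response TLV in the
 * reply. The helpers below determine which TLVs a given request carries
 * and match them against the PF's per-TLV status codes.
 */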
static bool
qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
		return !!p_data->update_tx_switching_flg;
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n", tlv);
		return false;
	}
}

static void
qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
		if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
			 qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
						  tlv);
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "TLV[%d] Configuration %s\n",
				   tlv,
				   (p_resp->hdr.status ==
				    PFVF_STATUS_SUCCESS) ? "succeeded"
							 : "failed");
	}
}
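
/* Build the aggregated vport-update request. Every extended TLV added to
 * the request grows the expected reply by one default response TLV, so
 * resp_size is accumulated alongside the request itself.
 */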
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u32 resp_size = 0;
	u16 size, tlv;
	int rc;

	resp = &p_iov->pf2vf_reply->default_resp;
	resp_size = sizeof(*resp);

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

	if (p_params->update_tx_switching_flg) {
		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
		p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					      tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
	}

	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		memcpy(p_mcast_tlv->bins, p_params->bins,
		       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

	update_rx = p_params->accept_flags.update_rx_mode_config;
	update_tx = p_params->accept_flags.update_tx_mode_config;

	if (update_rx || update_tx) {
		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
		p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_accept_tlv->update_rx_mode = update_rx;
			p_accept_tlv->rx_accept_filter =
			    p_params->accept_flags.rx_accept_filter;
		}

		if (update_tx) {
			p_accept_tlv->update_tx_mode = update_tx;
			p_accept_tlv->tx_accept_filter =
			    p_params->accept_flags.tx_accept_filter;
		}
	}

	if (p_params->rss_params) {
		struct qed_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;
		int i, table_size;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = qed_add_tlv(p_hwfn,
					&p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;

		table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE,
				   1 << p_rss_tlv->rss_table_size_log);
		for (i = 0; i < table_size; i++) {
			struct qed_queue_cid *p_queue;

			p_queue = rss_params->rss_ind_table[i];
			p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
		}
		memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
		       sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);

		resp_size += sizeof(struct pfvf_def_resp_tlv);
		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
		    p_params->update_accept_any_vlan_flg;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
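
/* Despite its name, this sends CHANNEL_TLV_CLOSE, asking the PF to close
 * the VF; on success the VF stops considering its interrupts enabled.
 */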
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

	p_hwfn->b_int_enabled = 0;

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EAGAIN;

	qed_vf_pf_req_end(p_hwfn, rc);

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union vfpf_tlvs),
				  p_iov->vf2pf_request,
				  p_iov->vf2pf_request_phys);
	if (p_iov->pf2vf_reply)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union pfvf_tlvs),
				  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct qed_bulletin_content);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  size,
				  p_iov->bulletin.p_virt, p_iov->bulletin.phys);
	}

	kfree(p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = NULL;

	return rc;
}
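
/* Multicast filtering on the VF is approximate: each MAC is hashed into
 * one of the multicast bins (qed_mcast_bin_from_mac()) and the resulting
 * bitmap is sent in a vport-update MCAST TLV. A non-ADD opcode simply
 * clears all bins.
 */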
void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd)
{
	struct qed_sp_vport_update_params sp_params;
	int i;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, sp_params.bins);
		}
	}

	qed_vf_pf_vport_update(p_hwfn, &sp_params);
}

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_ucast)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	memcpy(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}
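
/* The PF publishes link and filter state through the bulletin board, a
 * DMA buffer it updates asynchronously. The VF snapshots it and accepts
 * the copy only if the version advanced and the CRC over the contents
 * (excluding the CRC field itself) matches.
 */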
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct qed_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return 0;

	/* Verify the bulletin we see is valid */
	crc = crc32(0, (u8 *)&shadow + crc_size,
		    p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return -EAGAIN;

	/* Set the shadow bulletin and process it */
	memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return 0;
}

void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin)
{
	memset(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}

void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params)
{
	__qed_vf_get_link_params(p_hwfn, params,
				 &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin)
{
	memset(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link)
{
	__qed_vf_get_link_state(p_hwfn, link,
				&(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin)
{
	memset(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps)
{
	__qed_vf_get_link_caps(p_hwfn, p_link_caps,
			       &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
	memcpy(port_mac,
	       p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
}

void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
{
	struct qed_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters)
{
	struct qed_vf_iov *p_vf = p_hwfn->vf_iov_info;

	*num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
}

bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF, but allow it to
	 * keep using that same MAC.
	 */
	if (ether_addr_equal(bulletin->mac, mac))
		return true;

	return false;
}

static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
					   u8 *dst_mac, u8 *p_is_forced)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	ether_addr_copy(dst_mac, bulletin->mac);

	return true;
}

void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}
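
/* Propagate bulletin updates into the protocol driver: push a (possibly
 * forced) MAC through the registered callbacks and refresh the link state.
 */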
static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
{
	struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
	u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
	void *cookie = hwfn->cdev->ops_cookie;

	is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
						      &is_mac_forced);
	if (is_mac_exist && cookie)
		ops->force_mac(cookie, mac, !!is_mac_forced);

	/* Always update link configuration according to bulletin */
	qed_link_update(hwfn);
}

void qed_iov_vf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	u8 change = 0;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	/* Handle bulletin board changes */
	qed_vf_read_bulletin(hwfn, &change);
	if (test_and_clear_bit(QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
			       &hwfn->iov_task_flags))
		change = 1;
	if (change)
		qed_handle_bulletin_change(hwfn);

	/* As VF is polling bulletin board, need to constantly re-schedule */
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
}