/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_vf.h"

static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released when we receive PF's response
	 * in qed_send_msg2pf().
	 * So, qed_vf_pf_prep() and qed_send_msg2pf()
	 * must come in sequence.
	 */
	mutex_lock(&(p_iov->mutex));

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "preparing to send 0x%04x tlv over vf pf channel\n",
		   type);

	/* Reset Request offset */
	p_iov->offset = (u8 *)p_iov->vf2pf_request;

	/* Clear mailbox - both request and reply */
	memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
	    (u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}
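
/* Every VF -> PF exchange in this file follows the same sequence; an
 * illustrative sketch, where <TYPE> stands for one of the CHANNEL_TLV_*
 * request types:
 *
 *	req = qed_vf_pf_prep(p_hwfn, <TYPE>, sizeof(*req));   <- locks channel
 *	... fill request fields ...
 *	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 *	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
 *	qed_vf_pf_req_end(p_hwfn, rc);                        <- unlocks channel
 */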

static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
{
	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF request status = 0x%x, PF reply status = 0x%x\n",
		   req_status, resp->default_resp.hdr.status);

	mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
}

static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	int rc = 0, time = 100;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	qed_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

	/* Send TLVs over HW channel */
	memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent trigger before
	 * data is written.
	 */
	wmb();

	REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));

	/* When the PF is done with the response, it writes back to the
	 * `done' address. Poll until then; 100 iterations of 25ms give
	 * the PF ~2.5 seconds to answer.
	 */
	while ((!*done) && time) {
		msleep(25);
		time--;
	}

	if (!*done) {
		DP_NOTICE(p_hwfn,
			  "VF <-- PF Timeout [Type %d]\n",
			  p_req->first_tlv.tl.type);
		rc = -EBUSY;
	} else {
		if ((*done != PFVF_STATUS_SUCCESS) &&
		    (*done != PFVF_STATUS_NO_RESOURCE))
			DP_NOTICE(p_hwfn,
				  "PF response: %d [Type %d]\n",
				  *done, p_req->first_tlv.tl.type);
		else
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "PF response: %d [Type %d]\n",
				   *done, p_req->first_tlv.tl.type);
	}

	return rc;
}
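
/* If the PF advertised PFVF_ACQUIRE_CAP_QUEUE_QIDS during acquire, every
 * queue-related request also carries a CHANNEL_TLV_QID TLV with the
 * queue's qid_usage_idx.
 */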
static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn,
			      struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_qid_tlv *p_qid_tlv;

	/* Only add QIDs for the queue if it was negotiated with PF */
	if (!(p_iov->acquire_resp.pfdev_info.capabilities &
	      PFVF_ACQUIRE_CAP_QUEUE_QIDS))
		return;

	p_qid_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
				CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
	p_qid_tlv->qid = p_cid->qid_usage_idx;
}

static int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EAGAIN;

	qed_vf_pf_req_end(p_hwfn, rc);
	if (!b_final)
		return rc;

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union vfpf_tlvs),
				  p_iov->vf2pf_request,
				  p_iov->vf2pf_request_phys);
	if (p_iov->pf2vf_reply)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union pfvf_tlvs),
				  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct qed_bulletin_content);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  size,
				  p_iov->bulletin.p_virt, p_iov->bulletin.phys);
	}

	kfree(p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = NULL;

	return rc;
}

int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	return _qed_vf_pf_release(p_hwfn, true);
}
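
/* A VF retries ACQUIRE at most this many times, humbling its resource
 * request to the PF-recommended amounts in between attempts; the same
 * threshold also bounds retries after a hw-channel timeout.
 */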
#define VF_ACQUIRE_THRESH 3
static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
					  struct vf_pf_resc_request *p_req,
					  struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
		   p_req->num_rxqs,
		   p_resp->num_rxqs,
		   p_req->num_txqs,
		   p_resp->num_txqs,
		   p_req->num_sbs,
		   p_resp->num_sbs,
		   p_req->num_mac_filters,
		   p_resp->num_mac_filters,
		   p_req->num_vlan_filters,
		   p_resp->num_vlan_filters,
		   p_req->num_mc_filters,
		   p_resp->num_mc_filters, p_req->num_cids, p_resp->num_cids);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
	p_req->num_cids = p_resp->num_cids;
}

static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vf_pf_resc_request *p_resc;
	u8 retry_cnt = VF_ACQUIRE_THRESH;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int rc = 0, attempts = 0;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* start filling the request */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
	p_resc->num_cids = QED_ETH_VF_DEFAULT_NUM_CIDS;

	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* If we've mapped the doorbell bar, try using queue qids */
	if (p_iov->b_doorbell_bar) {
		req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
						VFPF_ACQUIRE_CAP_QUEUE_QIDS;
		p_resc->num_cids = QED_ETH_VF_MAX_NUM_CIDS;
	}

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
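
	/* Negotiation loop: each iteration ends in one of four ways - the PF
	 * accepts (possibly getting marked as legacy), reports NO_RESOURCE so
	 * we retry with its recommended amounts, reports NOT_SUPPORTED due to
	 * a fastpath-HSI mismatch, or rejects the VF outright.
	 */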
	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV, "attempting to acquire resources\n");

		/* Clear response buffer, as this might be a re-send */
		memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

		/* send acquire request */
		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

		/* Re-try acquire in case of vf-pf hw channel timeout */
		if (retry_cnt && rc == -EBUSY) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF retrying to acquire due to VPC timeout\n");
			retry_cnt--;
			continue;
		}

		if (rc)
			goto exit;

		/* copy acquire response from buffer to p_hwfn */
		memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				/* It's possible legacy PF mistakenly accepted;
				 * but we don't care - simply mark it as
				 * legacy and continue.
				 */
				req->vfdev_info.capabilities |=
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
			}
			DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
			resources_acquired = true;
		} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
						      &resp->resc);
		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
			if (pfdev_info->major_fp_hsi &&
			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
				DP_NOTICE(p_hwfn,
					  "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
					  pfdev_info->major_fp_hsi,
					  pfdev_info->minor_fp_hsi,
					  ETH_HSI_VER_MAJOR,
					  ETH_HSI_VER_MINOR,
					  pfdev_info->major_fp_hsi);
				rc = -EINVAL;
				goto exit;
			}

			if (!pfdev_info->major_fp_hsi) {
				if (req->vfdev_info.capabilities &
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
					DP_NOTICE(p_hwfn,
						  "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
					rc = -EINVAL;
					goto exit;
				} else {
					DP_INFO(p_hwfn,
						"PF is old - try re-acquire to see if it supports FW-version override\n");
					req->vfdev_info.capabilities |=
					    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
					continue;
				}
			}

			/* If PF/VF are using same Major, PF must have had
			 * its reasons. Simply fail.
			 */
			DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
			rc = -EINVAL;
			goto exit;
		} else {
			DP_ERR(p_hwfn,
			       "PF returned error %d to VF acquisition request\n",
			       resp->hdr.status);
			rc = -EAGAIN;
			goto exit;
		}
	}

	/* Mark the PF as legacy, if needed */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
		p_iov->b_pre_fp_hsi = true;

	/* In case PF doesn't support multi-queue Tx, update the number of
	 * CIDs to reflect the number of queues [older PFs didn't fill that
	 * field].
	 */
	if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS))
		resp->resc.num_cids = resp->resc.num_rxqs + resp->resc.num_txqs;

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->cdev->type = resp->pfdev_info.dev_type;
	p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;

	p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_NOTICE(p_hwfn, "100g VF\n");
			p_hwfn->cdev->num_hwfns = 2;
		}
	}

	if (!p_iov->b_pre_fp_hsi &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI; %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
{
	u32 bar_size;

	/* Regview size is fixed */
	if (bar_id == BAR_ID_0)
		return 1 << 17;

	/* Doorbell is received from PF */
	bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
	if (bar_size)
		return 1 << bar_size;
	return 0;
}
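
/* First step of VF init: read our opaque/concrete FIDs from BAR0, decide
 * where doorbells live, allocate the request/reply mailboxes and the
 * bulletin board, then ACQUIRE resources from the PF.
 */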
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	struct qed_hwfn *p_lead = QED_LEADING_HWFN(p_hwfn->cdev);
	struct qed_vf_iov *p_iov;
	u32 reg;
	int rc;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->cdev->num_hwfns = 1;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
	if (!p_iov)
		return -ENOMEM;

	/* Doorbells are tricky; the upper-layer has already set the hwfn
	 * doorbell value, but there are several incompatibility scenarios
	 * where that would be incorrect and we'd need to override it.
	 */
	if (!p_hwfn->doorbells) {
		p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
						  PXP_VF_BAR0_START_DQ;
	} else if (p_hwfn == p_lead) {
		/* For leading hw-function, value is always correct, but need
		 * to handle scenario where legacy PF would not support 100g
		 * mapped bars later.
		 */
		p_iov->b_doorbell_bar = true;
	} else {
		/* here, value would be correct ONLY if the leading hwfn
		 * received indication that mapped-bars are supported.
		 */
		if (p_lead->vf_iov_info->b_doorbell_bar)
			p_iov->b_doorbell_bar = true;
		else
			p_hwfn->doorbells = (u8 __iomem *)
			    p_hwfn->regview + PXP_VF_BAR0_START_DQ;
	}

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  sizeof(union vfpf_tlvs),
						  &p_iov->vf2pf_request_phys,
						  GFP_KERNEL);
	if (!p_iov->vf2pf_request)
		goto free_p_iov;

	p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(union pfvf_tlvs),
						&p_iov->pf2vf_reply_phys,
						GFP_KERNEL);
	if (!p_iov->pf2vf_reply)
		goto free_vf2pf_request;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
		   p_iov->vf2pf_request,
		   (u64)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
	p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						    p_iov->bulletin.size,
						    &p_iov->bulletin.phys,
						    GFP_KERNEL);
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt,
		   (u64)p_iov->bulletin.phys, p_iov->bulletin.size);

	mutex_init(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = QED_PCI_ETH;

	rc = qed_vf_pf_acquire(p_hwfn);

	/* If VF is 100g using a mapped bar and PF is too old to support that,
	 * acquisition would succeed - but the VF would have no way of knowing
	 * the size of the doorbell bar configured in HW and thus will not
	 * know how to split it for the 2nd hw-function.
	 * In this case we re-try without the indication of the mapped
	 * doorbell.
	 */
	if (!rc && p_iov->b_doorbell_bar &&
	    !qed_vf_hw_bar_size(p_hwfn, BAR_ID_1) &&
	    (p_hwfn->cdev->num_hwfns > 1)) {
		rc = _qed_vf_pf_release(p_hwfn, false);
		if (rc)
			return rc;

		p_iov->b_doorbell_bar = false;
		p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
						  PXP_VF_BAR0_START_DQ;
		rc = qed_vf_pf_acquire(p_hwfn);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n",
		   p_hwfn->regview, p_hwfn->doorbells, p_hwfn->cdev->doorbells);

	return rc;

free_vf2pf_request:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union vfpf_tlvs),
			  p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
free_p_iov:
	kfree(p_iov);

	return -ENOMEM;
}

#define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev)	(TSTORM_QZONE_START + \
				 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))

static void
__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			   struct qed_tunn_update_type *p_src,
			   enum qed_tunn_mode mask, u8 *p_cls)
{
	if (p_src->b_update_mode) {
		p_req->tun_mode_update_mask |= BIT(mask);

		if (p_src->b_mode_enabled)
			p_req->tunn_mode |= BIT(mask);
	}

	*p_cls = p_src->tun_cls;
}

static void
qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			 struct qed_tunn_update_type *p_src,
			 enum qed_tunn_mode mask,
			 u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
			 u8 *p_update_port, u16 *p_udp_port)
{
	if (p_port->b_update_port) {
		*p_update_port = 1;
		*p_udp_port = p_port->port;
	}

	__qed_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
}

void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
{
	if (p_tun->vxlan.b_mode_enabled)
		p_tun->vxlan.b_update_mode = true;
	if (p_tun->l2_geneve.b_mode_enabled)
		p_tun->l2_geneve.b_update_mode = true;
	if (p_tun->ip_geneve.b_mode_enabled)
		p_tun->ip_geneve.b_update_mode = true;
	if (p_tun->l2_gre.b_mode_enabled)
		p_tun->l2_gre.b_update_mode = true;
	if (p_tun->ip_gre.b_mode_enabled)
		p_tun->ip_gre.b_update_mode = true;

	p_tun->b_update_rx_cls = true;
	p_tun->b_update_tx_cls = true;
}
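
/* The PF reports which tunnel modes survived the update in
 * tunn_feature_mask; any mode missing from the mask is marked disabled
 * locally, while the rest adopt the mode/class values the PF returned.
 */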
static void
__qed_vf_update_tunn_param(struct qed_tunn_update_type *p_tun,
			   u16 feature_mask, u8 tunn_mode,
			   u8 tunn_cls, enum qed_tunn_mode val)
{
	if (feature_mask & BIT(val)) {
		p_tun->b_mode_enabled = tunn_mode;
		p_tun->tun_cls = tunn_cls;
	} else {
		p_tun->b_mode_enabled = false;
	}
}

static void qed_vf_update_tunn_param(struct qed_hwfn *p_hwfn,
				     struct qed_tunnel_info *p_tun,
				     struct pfvf_update_tunn_param_tlv *p_resp)
{
	/* Update mode and classes provided by PF */
	u16 feat_mask = p_resp->tunn_feature_mask;

	__qed_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
				   p_resp->vxlan_mode, p_resp->vxlan_clss,
				   QED_MODE_VXLAN_TUNN);
	__qed_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
				   p_resp->l2geneve_mode,
				   p_resp->l2geneve_clss,
				   QED_MODE_L2GENEVE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
				   p_resp->ipgeneve_mode,
				   p_resp->ipgeneve_clss,
				   QED_MODE_IPGENEVE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
				   p_resp->l2gre_mode, p_resp->l2gre_clss,
				   QED_MODE_L2GRE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
				   p_resp->ipgre_mode, p_resp->ipgre_clss,
				   QED_MODE_IPGRE_TUNN);
	p_tun->geneve_port.port = p_resp->geneve_udp_port;
	p_tun->vxlan_port.port = p_resp->vxlan_udp_port;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x\n",
		   p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
		   p_tun->ip_geneve.b_mode_enabled,
		   p_tun->l2_gre.b_mode_enabled, p_tun->ip_gre.b_mode_enabled);
}

int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
				  struct qed_tunnel_info *p_src)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_update_tunn_param_tlv *p_resp;
	struct vfpf_update_tunn_param_tlv *p_req;
	int rc;

	p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
			       sizeof(*p_req));

	if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
		p_req->update_tun_cls = 1;

	qed_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, QED_MODE_VXLAN_TUNN,
				 &p_req->vxlan_clss, &p_src->vxlan_port,
				 &p_req->update_vxlan_port,
				 &p_req->vxlan_port);
	qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
				 QED_MODE_L2GENEVE_TUNN,
				 &p_req->l2geneve_clss, &p_src->geneve_port,
				 &p_req->update_geneve_port,
				 &p_req->geneve_port);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
				   QED_MODE_IPGENEVE_TUNN,
				   &p_req->ipgeneve_clss);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
				   QED_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
				   QED_MODE_IPGRE_TUNN, &p_req->ipgre_clss);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
	rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));

	if (rc)
		goto exit;

	if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Failed to update tunnel parameters\n");
		rc = -EINVAL;
	}

	qed_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}
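
/* Rx-queue start: a modern PF returns the producer offset in its response;
 * a legacy (pre-fastpath-HSI) PF only knows the queue id, so the VF
 * computes the producer address itself from the MSTORM queue-zone layout
 * and must also zero the producers on its own.
 */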
int
qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    u16 bd_max_bytes,
		    dma_addr_t bd_chain_phys_addr,
		    dma_addr_t cqe_pbl_addr,
		    u16 cqe_pbl_size, void __iomem **pp_prod)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	u8 rx_qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = p_cid->sb_igu_id;
	req->sb_index = p_cid->sb_idx;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1;

	/* If PF is legacy, we'll need to calculate producers ourselves
	 * as well as clean them.
	 */
	if (p_iov->b_pre_fp_hsi) {
		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)
		    p_hwfn->regview +
		    MSTORM_QZONE_START(p_hwfn->cdev) +
		    hw_qid * MSTORM_QZONE_SIZE;

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)(&init_prod_val));
	}

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Learn the address of the producer from the response */
	if (!p_iov->b_pre_fp_hsi) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)&init_prod_val);
	}
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       struct qed_queue_cid *p_cid, bool cqe_completion)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = p_cid->rel.queue_id;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
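
/* Tx-queue start: a modern PF returns the doorbell offset in its response;
 * a legacy PF only returns the queue id, so the VF derives the doorbell
 * address from the queue's CID via qed_db_addr_vf().
 */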
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	u16 qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = qid;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = p_cid->sb_igu_id;
	req->sb_index = p_cid->sb_idx;

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Modern PFs provide the actual offset, while legacy ones provided
	 * only the queue id.
	 */
	if (!p_iov->b_pre_fp_hsi) {
		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
	} else {
		u8 cid = p_iov->acquire_resp.resc.cid[qid];

		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
			       qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Txq[0x%02x.%02x]: doorbell at %p [offset 0x%08x]\n",
		   qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset);
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = p_cid->rel.queue_id;
	req->num_txqs = 1;

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
		struct qed_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];

		if (p_sb)
			req->sb_addr[i] = p_sb->sb_phys;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
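
/* VPORT_UPDATE is a compound request: qed_vf_pf_vport_update() appends one
 * extended TLV per changed attribute, and the PF answers with a matching
 * list of default-response TLVs. The helpers below decide which TLVs a
 * given parameter set requires and pick each TLV's status out of the reply.
 */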
static bool
qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
		return !!p_data->update_tx_switching_flg;
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n", tlv);
		return false;
	}
}

static void
qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
		if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
			 qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
						  tlv);
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "TLV[%d] Configuration %s\n", tlv,
				   (p_resp->hdr.status ==
				    PFVF_STATUS_SUCCESS) ? "succeeded"
							 : "failed");
	}
}
"succeeded" 1072 : "failed"); 1073 } 1074 } 1075 1076 int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, 1077 struct qed_sp_vport_update_params *p_params) 1078 { 1079 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; 1080 struct vfpf_vport_update_tlv *req; 1081 struct pfvf_def_resp_tlv *resp; 1082 u8 update_rx, update_tx; 1083 u32 resp_size = 0; 1084 u16 size, tlv; 1085 int rc; 1086 1087 resp = &p_iov->pf2vf_reply->default_resp; 1088 resp_size = sizeof(*resp); 1089 1090 update_rx = p_params->update_vport_active_rx_flg; 1091 update_tx = p_params->update_vport_active_tx_flg; 1092 1093 /* clear mailbox and prep header tlv */ 1094 qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req)); 1095 1096 /* Prepare extended tlvs */ 1097 if (update_rx || update_tx) { 1098 struct vfpf_vport_update_activate_tlv *p_act_tlv; 1099 1100 size = sizeof(struct vfpf_vport_update_activate_tlv); 1101 p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, 1102 CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, 1103 size); 1104 resp_size += sizeof(struct pfvf_def_resp_tlv); 1105 1106 if (update_rx) { 1107 p_act_tlv->update_rx = update_rx; 1108 p_act_tlv->active_rx = p_params->vport_active_rx_flg; 1109 } 1110 1111 if (update_tx) { 1112 p_act_tlv->update_tx = update_tx; 1113 p_act_tlv->active_tx = p_params->vport_active_tx_flg; 1114 } 1115 } 1116 1117 if (p_params->update_tx_switching_flg) { 1118 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; 1119 1120 size = sizeof(struct vfpf_vport_update_tx_switch_tlv); 1121 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; 1122 p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, 1123 tlv, size); 1124 resp_size += sizeof(struct pfvf_def_resp_tlv); 1125 1126 p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg; 1127 } 1128 1129 if (p_params->update_approx_mcast_flg) { 1130 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; 1131 1132 size = sizeof(struct vfpf_vport_update_mcast_bin_tlv); 1133 p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, 1134 CHANNEL_TLV_VPORT_UPDATE_MCAST, size); 1135 resp_size += sizeof(struct pfvf_def_resp_tlv); 1136 1137 memcpy(p_mcast_tlv->bins, p_params->bins, 1138 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); 1139 } 1140 1141 update_rx = p_params->accept_flags.update_rx_mode_config; 1142 update_tx = p_params->accept_flags.update_tx_mode_config; 1143 1144 if (update_rx || update_tx) { 1145 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; 1146 1147 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; 1148 size = sizeof(struct vfpf_vport_update_accept_param_tlv); 1149 p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size); 1150 resp_size += sizeof(struct pfvf_def_resp_tlv); 1151 1152 if (update_rx) { 1153 p_accept_tlv->update_rx_mode = update_rx; 1154 p_accept_tlv->rx_accept_filter = 1155 p_params->accept_flags.rx_accept_filter; 1156 } 1157 1158 if (update_tx) { 1159 p_accept_tlv->update_tx_mode = update_tx; 1160 p_accept_tlv->tx_accept_filter = 1161 p_params->accept_flags.tx_accept_filter; 1162 } 1163 } 1164 1165 if (p_params->rss_params) { 1166 struct qed_rss_params *rss_params = p_params->rss_params; 1167 struct vfpf_vport_update_rss_tlv *p_rss_tlv; 1168 int i, table_size; 1169 1170 size = sizeof(struct vfpf_vport_update_rss_tlv); 1171 p_rss_tlv = qed_add_tlv(p_hwfn, 1172 &p_iov->offset, 1173 CHANNEL_TLV_VPORT_UPDATE_RSS, size); 1174 resp_size += sizeof(struct pfvf_def_resp_tlv); 1175 1176 if (rss_params->update_rss_config) 1177 p_rss_tlv->update_rss_flags |= 1178 VFPF_UPDATE_RSS_CONFIG_FLAG; 1179 if (rss_params->update_rss_capabilities) 1180 
	if (p_params->rss_params) {
		struct qed_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;
		int i, table_size;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = qed_add_tlv(p_hwfn,
					&p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;

		table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE,
				   1 << p_rss_tlv->rss_table_size_log);
		for (i = 0; i < table_size; i++) {
			struct qed_queue_cid *p_queue;

			p_queue = rss_params->rss_ind_table[i];
			p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
		}
		memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
		       sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);

		resp_size += sizeof(struct pfvf_def_resp_tlv);
		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
		    p_params->update_accept_any_vlan_flg;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

	p_hwfn->b_int_enabled = 0;

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
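
/* Multicast filtering is approximate: each MAC is hashed to a bin by
 * qed_mcast_bin_from_mac(), and the whole bin bitmap is shipped to the
 * PF through a vport-update request.
 */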
void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd)
{
	struct qed_sp_vport_update_params sp_params;
	int i;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			sp_params.bins[bit / 32] |= 1 << (bit % 32);
		}
	}

	qed_vf_pf_vport_update(p_hwfn, &sp_params);
}

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_ucast)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	memcpy(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
			   u16 *p_coal, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_read_coal_resp_tlv *resp;
	struct vfpf_read_coal_req_tlv *req;
	int rc;

	/* clear mailbox and prep header tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, sizeof(*req));
	req->qid = p_cid->rel.queue_id;
	req->is_rx = p_cid->b_is_rx ? 1 : 0;

	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));
	resp = &p_iov->pf2vf_reply->read_coal_resp;

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		goto exit;

	*p_coal = resp->coal;
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int
qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
			      u8 *p_mac)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_bulletin_update_mac_tlv *p_req;
	struct pfvf_def_resp_tlv *p_resp;
	int rc;

	if (!p_mac)
		return -EINVAL;

	/* clear mailbox and prep header tlv */
	p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_BULLETIN_UPDATE_MAC,
			       sizeof(*p_req));
	ether_addr_copy(p_req->mac, p_mac);
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Requesting bulletin update for MAC[%pM]\n", p_mac);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	p_resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

int
qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
		       u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_update_coalesce *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep header tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE, sizeof(*req));

	req->rx_coal = rx_coal;
	req->tx_coal = tx_coal;
	req->qid = p_cid->rel.queue_id;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n",
		   rx_coal, tx_coal, req->qid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		goto exit;

	if (rx_coal)
		p_hwfn->cdev->rx_coalesce_usecs = rx_coal;

	if (tx_coal)
		p_hwfn->cdev->tx_coalesce_usecs = tx_coal;

exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}

void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
			u16 sb_id, struct qed_sb_info *p_sb)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return;
	}

	if (sb_id >= PFVF_MAX_SBS_PER_VF) {
		DP_NOTICE(p_hwfn, "Can't configure SB %04x\n", sb_id);
		return;
	}

	p_iov->sbs_info[sb_id] = p_sb;
}
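
/* The bulletin board is written by the PF and only polled by the VF. To
 * read it safely, the whole board is copied into a local shadow first and
 * validated via the CRC32 computed over everything past the crc field; a
 * mismatch means the PF was mid-update, and the read is retried later.
 */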
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct qed_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return 0;

	/* Verify the bulletin we see is valid */
	crc = crc32(0, (u8 *)&shadow + crc_size,
		    p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return -EAGAIN;

	/* Set the shadow bulletin and process it */
	memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return 0;
}

void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin)
{
	memset(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}

void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params)
{
	__qed_vf_get_link_params(p_hwfn, params,
				 &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin)
{
	memset(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link)
{
	__qed_vf_get_link_state(p_hwfn, link,
				&(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin)
{
	memset(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps)
{
	__qed_vf_get_link_caps(p_hwfn, p_link_caps,
			       &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
{
	*num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
}

void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
{
	*num_cids = p_hwfn->vf_iov_info->acquire_resp.resc.num_cids;
}

void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
	memcpy(port_mac,
	       p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
}

void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
{
	struct qed_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters)
{
	struct qed_vf_iov *p_vf = p_hwfn->vf_iov_info;

	*num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
}

bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	return false;
}

static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
					   u8 *dst_mac, u8 *p_is_forced)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	ether_addr_copy(dst_mac, bulletin->mac);

	return true;
}

static void
qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn,
			      u16 *p_vxlan_port, u16 *p_geneve_port)
{
	struct qed_bulletin_content *p_bulletin;

	p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;

	*p_vxlan_port = p_bulletin->vxlan_udp_port;
	*p_geneve_port = p_bulletin->geneve_udp_port;
}

void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}

static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
{
	struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
	u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
	void *cookie = hwfn->cdev->ops_cookie;
	u16 vxlan_port, geneve_port;

	qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port);
	is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
						      &is_mac_forced);
	if (is_mac_exist && cookie)
		ops->force_mac(cookie, mac, !!is_mac_forced);

	ops->ports_update(cookie, vxlan_port, geneve_port);

	/* Always update link configuration according to bulletin */
	qed_link_update(hwfn, NULL);
}
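
/* Delayed-work handler: the VF gets no interrupt for bulletin updates, so
 * this task polls the board once a second (HZ), pushes any change up via
 * qed_handle_bulletin_change(), and re-arms itself until
 * QED_IOV_WQ_STOP_WQ_FLAG is set.
 */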
void qed_iov_vf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	u8 change = 0;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	/* Handle bulletin board changes */
	qed_vf_read_bulletin(hwfn, &change);
	if (test_and_clear_bit(QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
			       &hwfn->iov_task_flags))
		change = 1;
	if (change)
		qed_handle_bulletin_change(hwfn);

	/* As VF is polling bulletin board, need to constantly re-schedule */
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
}