/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_vf.h"

static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released in qed_vf_pf_req_end(), once the PF's
	 * response has been processed.
	 * So qed_vf_pf_prep() and qed_vf_pf_req_end() must always come
	 * in pairs.
	 */
	mutex_lock(&(p_iov->mutex));

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "preparing to send 0x%04x tlv over vf pf channel\n",
		   type);

	/* Reset Request offset */
	p_iov->offset = (u8 *)p_iov->vf2pf_request;

	/* Clear mailbox - both request and reply */
	memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
	    (u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}

static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
{
	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF request status = 0x%x, PF reply status = 0x%x\n",
		   req_status, resp->default_resp.hdr.status);

	mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
}

static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	int rc = 0, time = 100;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	qed_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

	/* Send TLVs over HW channel */
	memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent the trigger
	 * from being observed before the data is written.
	 */
	wmb();

	REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));

	/* When the PF is done with the response, it writes back to the
	 * `done' address. Poll until then (up to 100 * 25ms = 2.5s).
	 */
	while ((!*done) && time) {
		msleep(25);
		time--;
	}

	if (!*done) {
		DP_NOTICE(p_hwfn,
			  "VF <-- PF Timeout [Type %d]\n",
			  p_req->first_tlv.tl.type);
		rc = -EBUSY;
	} else {
		if ((*done != PFVF_STATUS_SUCCESS) &&
		    (*done != PFVF_STATUS_NO_RESOURCE))
			DP_NOTICE(p_hwfn,
				  "PF response: %d [Type %d]\n",
				  *done, p_req->first_tlv.tl.type);
		else
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "PF response: %d [Type %d]\n",
				   *done, p_req->first_tlv.tl.type);
	}

	return rc;
}
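
/* Every VF -> PF request in this file follows the same shape; a minimal
 * sketch of that pattern (using the default response TLV, as the simpler
 * requests below do; the request type is a placeholder):
 *
 *	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
 *	struct pfvf_def_resp_tlv *resp;
 *	struct vfpf_first_tlv *req;
 *	int rc;
 *
 *	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_..., sizeof(*req));
 *	... fill request fields ...
 *	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 *	resp = &p_iov->pf2vf_reply->default_resp;
 *	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
 *	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
 *		rc = -EINVAL;
 *	qed_vf_pf_req_end(p_hwfn, rc);
 */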

#define VF_ACQUIRE_THRESH 3
static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
					  struct vf_pf_resc_request *p_req,
					  struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
		   p_req->num_rxqs,
		   p_resp->num_rxqs,
		   p_req->num_txqs,
		   p_resp->num_txqs,
		   p_req->num_sbs,
		   p_resp->num_sbs,
		   p_req->num_mac_filters,
		   p_resp->num_mac_filters,
		   p_req->num_vlan_filters,
		   p_resp->num_vlan_filters,
		   p_req->num_mc_filters, p_resp->num_mc_filters);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
}

static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vf_pf_resc_request *p_resc;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int rc = 0, attempts = 0;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* start filling the request */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;

	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV, "attempting to acquire resources\n");

		/* Clear response buffer, as this might be a re-send */
		memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

		/* send acquire request */
		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
		if (rc)
			goto exit;

		/* copy acquire response from buffer to p_hwfn */
		memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				/* It's possible legacy PF mistakenly accepted;
				 * but we don't care - simply mark it as
				 * legacy and continue.
				 */
				req->vfdev_info.capabilities |=
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
			}
			DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
			resources_acquired = true;
		} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
						      &resp->resc);
		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
			if (pfdev_info->major_fp_hsi &&
			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
				DP_NOTICE(p_hwfn,
					  "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
					  pfdev_info->major_fp_hsi,
					  pfdev_info->minor_fp_hsi,
					  ETH_HSI_VER_MAJOR,
					  ETH_HSI_VER_MINOR,
					  pfdev_info->major_fp_hsi);
				rc = -EINVAL;
				goto exit;
			}

			if (!pfdev_info->major_fp_hsi) {
				if (req->vfdev_info.capabilities &
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
					DP_NOTICE(p_hwfn,
						  "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
					rc = -EINVAL;
					goto exit;
				} else {
					DP_INFO(p_hwfn,
						"PF is old - try re-acquire to see if it supports FW-version override\n");
					req->vfdev_info.capabilities |=
					    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
					continue;
				}
			}

			/* If PF/VF are using same Major, PF must have had
			 * its reasons. Simply fail.
			 */
			DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
			rc = -EINVAL;
			goto exit;
		} else {
			DP_ERR(p_hwfn,
			       "PF returned error %d to VF acquisition request\n",
			       resp->hdr.status);
			rc = -EAGAIN;
			goto exit;
		}
	}

	/* Mark the PF as legacy, if needed */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
		p_iov->b_pre_fp_hsi = true;

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->cdev->type = resp->pfdev_info.dev_type;
	p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;

	p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_NOTICE(p_hwfn, "100g VF\n");
			p_hwfn->cdev->num_hwfns = 2;
		}
	}

	if (!p_iov->b_pre_fp_hsi &&
	    ETH_HSI_VER_MINOR &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI; %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
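
/* Once acquisition succeeds, the negotiated amounts are cached in
 * p_iov->acquire_resp and can be read back through the small getters
 * defined towards the end of this file; a sketch:
 *
 *	u8 num_rxqs, num_vlan_filters;
 *
 *	qed_vf_get_num_rxqs(p_hwfn, &num_rxqs);
 *	qed_vf_get_num_vlan_filters(p_hwfn, &num_vlan_filters);
 */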

int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov;
	u32 reg;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->cdev->num_hwfns = 1;

	/* Set the doorbell bar. Assumption: regview is set */
	p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
					  PXP_VF_BAR0_START_DQ;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
	if (!p_iov)
		return -ENOMEM;

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  sizeof(union vfpf_tlvs),
						  &p_iov->vf2pf_request_phys,
						  GFP_KERNEL);
	if (!p_iov->vf2pf_request)
		goto free_p_iov;

	p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(union pfvf_tlvs),
						&p_iov->pf2vf_reply_phys,
						GFP_KERNEL);
	if (!p_iov->pf2vf_reply)
		goto free_vf2pf_request;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
		   p_iov->vf2pf_request,
		   (u64)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
	p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						    p_iov->bulletin.size,
						    &p_iov->bulletin.phys,
						    GFP_KERNEL);
	if (!p_iov->bulletin.p_virt)
		goto free_pf2vf_reply;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt,
		   (u64)p_iov->bulletin.phys, p_iov->bulletin.size);

	mutex_init(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = QED_PCI_ETH;

	return qed_vf_pf_acquire(p_hwfn);

free_pf2vf_reply:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union pfvf_tlvs),
			  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
free_vf2pf_request:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union vfpf_tlvs),
			  p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
free_p_iov:
	kfree(p_iov);

	return -ENOMEM;
}
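
/* The three DMA buffers allocated above (request mailbox, reply mailbox
 * and bulletin board) live for the VF's lifetime and are freed in
 * qed_vf_pf_release(). A sketch of the expected pairing, assuming the
 * caller has already mapped regview:
 *
 *	rc = qed_vf_hw_prepare(p_hwfn);
 *	if (rc)
 *		return rc;
 *	...
 *	qed_vf_pf_release(p_hwfn);
 */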

#define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev)	(TSTORM_QZONE_START + \
				 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))

static void
__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			   struct qed_tunn_update_type *p_src,
			   enum qed_tunn_clss mask, u8 *p_cls)
{
	if (p_src->b_update_mode) {
		p_req->tun_mode_update_mask |= BIT(mask);

		if (p_src->b_mode_enabled)
			p_req->tunn_mode |= BIT(mask);
	}

	*p_cls = p_src->tun_cls;
}

static void
qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			 struct qed_tunn_update_type *p_src,
			 enum qed_tunn_clss mask,
			 u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
			 u8 *p_update_port, u16 *p_udp_port)
{
	if (p_port->b_update_port) {
		*p_update_port = 1;
		*p_udp_port = p_port->port;
	}

	__qed_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
}

void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
{
	if (p_tun->vxlan.b_mode_enabled)
		p_tun->vxlan.b_update_mode = true;
	if (p_tun->l2_geneve.b_mode_enabled)
		p_tun->l2_geneve.b_update_mode = true;
	if (p_tun->ip_geneve.b_mode_enabled)
		p_tun->ip_geneve.b_update_mode = true;
	if (p_tun->l2_gre.b_mode_enabled)
		p_tun->l2_gre.b_update_mode = true;
	if (p_tun->ip_gre.b_mode_enabled)
		p_tun->ip_gre.b_update_mode = true;

	p_tun->b_update_rx_cls = true;
	p_tun->b_update_tx_cls = true;
}

static void
__qed_vf_update_tunn_param(struct qed_tunn_update_type *p_tun,
			   u16 feature_mask, u8 tunn_mode,
			   u8 tunn_cls, enum qed_tunn_mode val)
{
	if (feature_mask & BIT(val)) {
		p_tun->b_mode_enabled = tunn_mode;
		p_tun->tun_cls = tunn_cls;
	} else {
		p_tun->b_mode_enabled = false;
	}
}

static void qed_vf_update_tunn_param(struct qed_hwfn *p_hwfn,
				     struct qed_tunnel_info *p_tun,
				     struct pfvf_update_tunn_param_tlv *p_resp)
{
	/* Update mode and classes provided by PF */
	u16 feat_mask = p_resp->tunn_feature_mask;

	__qed_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
				   p_resp->vxlan_mode, p_resp->vxlan_clss,
				   QED_MODE_VXLAN_TUNN);
	__qed_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
				   p_resp->l2geneve_mode,
				   p_resp->l2geneve_clss,
				   QED_MODE_L2GENEVE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
				   p_resp->ipgeneve_mode,
				   p_resp->ipgeneve_clss,
				   QED_MODE_IPGENEVE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
				   p_resp->l2gre_mode, p_resp->l2gre_clss,
				   QED_MODE_L2GRE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
				   p_resp->ipgre_mode, p_resp->ipgre_clss,
				   QED_MODE_IPGRE_TUNN);
	p_tun->geneve_port.port = p_resp->geneve_udp_port;
	p_tun->vxlan_port.port = p_resp->vxlan_udp_port;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x\n",
		   p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
		   p_tun->ip_geneve.b_mode_enabled,
		   p_tun->l2_gre.b_mode_enabled, p_tun->ip_gre.b_mode_enabled);
}

int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
				  struct qed_tunnel_info *p_src)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_update_tunn_param_tlv *p_resp;
	struct vfpf_update_tunn_param_tlv *p_req;
	int rc;

	p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
			       sizeof(*p_req));

	if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
		p_req->update_tun_cls = 1;

	qed_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, QED_MODE_VXLAN_TUNN,
				 &p_req->vxlan_clss, &p_src->vxlan_port,
				 &p_req->update_vxlan_port,
				 &p_req->vxlan_port);
	qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
				 QED_MODE_L2GENEVE_TUNN,
				 &p_req->l2geneve_clss, &p_src->geneve_port,
				 &p_req->update_geneve_port,
				 &p_req->geneve_port);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
				   QED_MODE_IPGENEVE_TUNN,
				   &p_req->ipgeneve_clss);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
				   QED_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
				   QED_MODE_IPGRE_TUNN, &p_req->ipgre_clss);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
	rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));

	if (rc)
		goto exit;

	if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Failed to update tunnel parameters\n");
		rc = -EINVAL;
		goto exit;
	}

	qed_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}
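
/* Usage sketch: to (re)negotiate the currently configured tunnel modes,
 * mark them for update and send the aggregated request. The local copy of
 * qed_tunnel_info here is illustrative:
 *
 *	struct qed_tunnel_info tunn = p_hwfn->cdev->tunnel;
 *	int rc;
 *
 *	qed_vf_set_vf_start_tunn_update_param(&tunn);
 *	rc = qed_vf_pf_tunnel_param_update(p_hwfn, &tunn);
 */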

int
qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    u16 bd_max_bytes,
		    dma_addr_t bd_chain_phys_addr,
		    dma_addr_t cqe_pbl_addr,
		    u16 cqe_pbl_size, void __iomem **pp_prod)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	u8 rx_qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = p_cid->rel.sb;
	req->sb_index = p_cid->rel.sb_idx;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1;

	/* If PF is legacy, we'll need to calculate producers ourselves
	 * as well as clean them.
	 */
	if (p_iov->b_pre_fp_hsi) {
		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)
		    p_hwfn->regview +
		    MSTORM_QZONE_START(p_hwfn->cdev) +
		    hw_qid * MSTORM_QZONE_SIZE;

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)(&init_prod_val));
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Learn the address of the producer from the response */
	if (!p_iov->b_pre_fp_hsi) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)&init_prod_val);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
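
/* Queue lifetime sketch: pp_prod receives the address through which the
 * VF driver later publishes new Rx buffer producers; the queue is torn
 * down with qed_vf_pf_rxq_stop() below. Arguments here are illustrative:
 *
 *	void __iomem *prod;
 *	int rc;
 *
 *	rc = qed_vf_pf_rxq_start(p_hwfn, p_cid, bd_max_bytes,
 *				 bd_chain_phys_addr, cqe_pbl_addr,
 *				 cqe_pbl_size, &prod);
 *	...
 *	rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, true);
 */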

int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       struct qed_queue_cid *p_cid, bool cqe_completion)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = p_cid->rel.queue_id;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	u16 qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = qid;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = p_cid->rel.sb;
	req->sb_index = p_cid->rel.sb_idx;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Modern PFs provide the actual offsets, while legacy
	 * ones provided only the queue id.
	 */
	if (!p_iov->b_pre_fp_hsi) {
		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
	} else {
		u8 cid = p_iov->acquire_resp.resc.cid[qid];

		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
			       qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
		   qid, *pp_doorbell, resp->offset);
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
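
/* Doorbell sketch: a successful Tx-queue start hands back the doorbell
 * address the VF driver rings to advance the Tx producer; the payload
 * format is owned by the L2 layer and is out of scope here:
 *
 *	void __iomem *db;
 *	int rc;
 *
 *	rc = qed_vf_pf_txq_start(p_hwfn, p_cid, pbl_addr, pbl_size, &db);
 *	...
 *	rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);
 */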

int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = p_cid->rel.queue_id;
	req->num_txqs = 1;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
		if (p_hwfn->sbs_info[i])
			req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
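
/* Vport lifecycle sketch (all argument values are the caller's): the
 * vport must be started before any of its queues and stopped only after
 * all of them have been stopped:
 *
 *	rc = qed_vf_pf_vport_start(p_hwfn, vport_id, mtu,
 *				   inner_vlan_removal, tpa_mode,
 *				   max_buffers_per_cqe, only_untagged);
 *	... start/stop rxqs and txqs ...
 *	rc = qed_vf_pf_vport_stop(p_hwfn);
 */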
"succeeded" 900 : "failed"); 901 } 902 } 903 904 int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, 905 struct qed_sp_vport_update_params *p_params) 906 { 907 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; 908 struct vfpf_vport_update_tlv *req; 909 struct pfvf_def_resp_tlv *resp; 910 u8 update_rx, update_tx; 911 u32 resp_size = 0; 912 u16 size, tlv; 913 int rc; 914 915 resp = &p_iov->pf2vf_reply->default_resp; 916 resp_size = sizeof(*resp); 917 918 update_rx = p_params->update_vport_active_rx_flg; 919 update_tx = p_params->update_vport_active_tx_flg; 920 921 /* clear mailbox and prep header tlv */ 922 qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req)); 923 924 /* Prepare extended tlvs */ 925 if (update_rx || update_tx) { 926 struct vfpf_vport_update_activate_tlv *p_act_tlv; 927 928 size = sizeof(struct vfpf_vport_update_activate_tlv); 929 p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, 930 CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, 931 size); 932 resp_size += sizeof(struct pfvf_def_resp_tlv); 933 934 if (update_rx) { 935 p_act_tlv->update_rx = update_rx; 936 p_act_tlv->active_rx = p_params->vport_active_rx_flg; 937 } 938 939 if (update_tx) { 940 p_act_tlv->update_tx = update_tx; 941 p_act_tlv->active_tx = p_params->vport_active_tx_flg; 942 } 943 } 944 945 if (p_params->update_tx_switching_flg) { 946 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; 947 948 size = sizeof(struct vfpf_vport_update_tx_switch_tlv); 949 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; 950 p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, 951 tlv, size); 952 resp_size += sizeof(struct pfvf_def_resp_tlv); 953 954 p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg; 955 } 956 957 if (p_params->update_approx_mcast_flg) { 958 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; 959 960 size = sizeof(struct vfpf_vport_update_mcast_bin_tlv); 961 p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, 962 CHANNEL_TLV_VPORT_UPDATE_MCAST, size); 963 resp_size += sizeof(struct pfvf_def_resp_tlv); 964 965 memcpy(p_mcast_tlv->bins, p_params->bins, 966 sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); 967 } 968 969 update_rx = p_params->accept_flags.update_rx_mode_config; 970 update_tx = p_params->accept_flags.update_tx_mode_config; 971 972 if (update_rx || update_tx) { 973 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; 974 975 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; 976 size = sizeof(struct vfpf_vport_update_accept_param_tlv); 977 p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size); 978 resp_size += sizeof(struct pfvf_def_resp_tlv); 979 980 if (update_rx) { 981 p_accept_tlv->update_rx_mode = update_rx; 982 p_accept_tlv->rx_accept_filter = 983 p_params->accept_flags.rx_accept_filter; 984 } 985 986 if (update_tx) { 987 p_accept_tlv->update_tx_mode = update_tx; 988 p_accept_tlv->tx_accept_filter = 989 p_params->accept_flags.tx_accept_filter; 990 } 991 } 992 993 if (p_params->rss_params) { 994 struct qed_rss_params *rss_params = p_params->rss_params; 995 struct vfpf_vport_update_rss_tlv *p_rss_tlv; 996 int i, table_size; 997 998 size = sizeof(struct vfpf_vport_update_rss_tlv); 999 p_rss_tlv = qed_add_tlv(p_hwfn, 1000 &p_iov->offset, 1001 CHANNEL_TLV_VPORT_UPDATE_RSS, size); 1002 resp_size += sizeof(struct pfvf_def_resp_tlv); 1003 1004 if (rss_params->update_rss_config) 1005 p_rss_tlv->update_rss_flags |= 1006 VFPF_UPDATE_RSS_CONFIG_FLAG; 1007 if (rss_params->update_rss_capabilities) 1008 p_rss_tlv->update_rss_flags |= 1009 VFPF_UPDATE_RSS_CAPS_FLAG; 1010 if 

int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u32 resp_size = 0;
	u16 size, tlv;
	int rc;

	resp = &p_iov->pf2vf_reply->default_resp;
	resp_size = sizeof(*resp);

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

	if (p_params->update_tx_switching_flg) {
		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
		p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					      tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
	}

	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		memcpy(p_mcast_tlv->bins, p_params->bins,
		       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

	update_rx = p_params->accept_flags.update_rx_mode_config;
	update_tx = p_params->accept_flags.update_tx_mode_config;

	if (update_rx || update_tx) {
		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
		p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_accept_tlv->update_rx_mode = update_rx;
			p_accept_tlv->rx_accept_filter =
			    p_params->accept_flags.rx_accept_filter;
		}

		if (update_tx) {
			p_accept_tlv->update_tx_mode = update_tx;
			p_accept_tlv->tx_accept_filter =
			    p_params->accept_flags.tx_accept_filter;
		}
	}

	if (p_params->rss_params) {
		struct qed_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;
		int i, table_size;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = qed_add_tlv(p_hwfn,
					&p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;

		table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE,
				   1 << p_rss_tlv->rss_table_size_log);
		for (i = 0; i < table_size; i++) {
			struct qed_queue_cid *p_queue;

			p_queue = rss_params->rss_ind_table[i];
			p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
		}
		memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
		       sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);

		resp_size += sizeof(struct pfvf_def_resp_tlv);
		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
		    p_params->update_accept_any_vlan_flg;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
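
/* Usage sketch: qed_vf_pf_filter_mcast() below is an in-file example of
 * driving this function; activating the vport looks much the same:
 *
 *	struct qed_sp_vport_update_params params;
 *	int rc;
 *
 *	memset(&params, 0, sizeof(params));
 *	params.update_vport_active_rx_flg = 1;
 *	params.vport_active_rx_flg = 1;
 *	params.update_vport_active_tx_flg = 1;
 *	params.vport_active_tx_flg = 1;
 *	rc = qed_vf_pf_vport_update(p_hwfn, &params);
 */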

int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

	p_hwfn->b_int_enabled = 0;

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EAGAIN;

	qed_vf_pf_req_end(p_hwfn, rc);

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union vfpf_tlvs),
				  p_iov->vf2pf_request,
				  p_iov->vf2pf_request_phys);
	if (p_iov->pf2vf_reply)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union pfvf_tlvs),
				  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct qed_bulletin_content);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  size,
				  p_iov->bulletin.p_virt, p_iov->bulletin.phys);
	}

	kfree(p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = NULL;

	return rc;
}

void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd)
{
	struct qed_sp_vport_update_params sp_params;
	int i;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, sp_params.bins);
		}
	}

	qed_vf_pf_vport_update(p_hwfn, &sp_params);
}

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_ucast)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	memcpy(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
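
/* Usage sketch (hypothetical 'mac' buffer): adding a unicast MAC filter.
 * The opcode/type enumerations are the same ones consumed above:
 *
 *	struct qed_filter_ucast ucast;
 *	int rc;
 *
 *	memset(&ucast, 0, sizeof(ucast));
 *	ucast.opcode = QED_FILTER_ADD;
 *	ucast.type = QED_FILTER_MAC;
 *	ether_addr_copy(ucast.mac, mac);
 *	rc = qed_vf_pf_filter_ucast(p_hwfn, &ucast);
 */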

int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}

int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct qed_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return 0;

	/* Verify the bulletin we see is valid */
	crc = crc32(0, (u8 *)&shadow + crc_size,
		    p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return -EAGAIN;

	/* Set the shadow bulletin and process it */
	memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return 0;
}

void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin)
{
	memset(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}

void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params)
{
	__qed_vf_get_link_params(p_hwfn, params,
				 &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin)
{
	memset(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link)
{
	__qed_vf_get_link_state(p_hwfn, link,
				&(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin)
{
	memset(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps)
{
	__qed_vf_get_link_caps(p_hwfn, p_link_caps,
			       &(p_hwfn->vf_iov_info->bulletin_shadow));
}
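
/* Consumption sketch: once qed_vf_read_bulletin() reports a change, the
 * cached shadow can be queried locally, without another trip to the PF:
 *
 *	struct qed_mcp_link_state link;
 *	u8 change = 0;
 *
 *	if (!qed_vf_read_bulletin(p_hwfn, &change) && change)
 *		qed_vf_get_link_state(p_hwfn, &link);
 */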

void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
	memcpy(port_mac,
	       p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
}

void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
{
	struct qed_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters)
{
	struct qed_vf_iov *p_vf = p_hwfn->vf_iov_info;

	*num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
}

bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	if (ether_addr_equal(bulletin->mac, mac))
		return false;

	return false;
}

static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
					   u8 *dst_mac, u8 *p_is_forced)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	ether_addr_copy(dst_mac, bulletin->mac);

	return true;
}

static void
qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn,
			      u16 *p_vxlan_port, u16 *p_geneve_port)
{
	struct qed_bulletin_content *p_bulletin;

	p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;

	*p_vxlan_port = p_bulletin->vxlan_udp_port;
	*p_geneve_port = p_bulletin->geneve_udp_port;
}

void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}

static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
{
	struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
	u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
	void *cookie = hwfn->cdev->ops_cookie;
	u16 vxlan_port, geneve_port;

	qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port);
	is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
						      &is_mac_forced);
	if (is_mac_exist && cookie)
		ops->force_mac(cookie, mac, !!is_mac_forced);

	ops->ports_update(cookie, vxlan_port, geneve_port);

	/* Always update link configuration according to bulletin */
	qed_link_update(hwfn);
}

void qed_iov_vf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	u8 change = 0;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	/* Handle bulletin board changes */
	qed_vf_read_bulletin(hwfn, &change);
	if (test_and_clear_bit(QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
			       &hwfn->iov_task_flags))
		change = 1;
	if (change)
		qed_handle_bulletin_change(hwfn);

	/* As VF is polling bulletin board, need to constantly re-schedule */
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
}