// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
                            struct qed_spq_entry *p_ent)
{
        /* qed_spq_get_entry() can either get an entry from the free_pool,
         * or, if no entries are left, allocate a new entry and add it to
         * the unlimited_pending list.
         */
        if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
                kfree(p_ent);
        else
                qed_spq_return_entry(p_hwfn, p_ent);
}

int qed_sp_init_request(struct qed_hwfn *p_hwfn,
                        struct qed_spq_entry **pp_ent,
                        u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
{
        u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
        struct qed_spq_entry *p_ent = NULL;
        int rc;

        if (!pp_ent)
                return -ENOMEM;

        rc = qed_spq_get_entry(p_hwfn, pp_ent);

        if (rc)
                return rc;

        p_ent = *pp_ent;

        p_ent->elem.hdr.cid = cpu_to_le32(opaque_cid);
        p_ent->elem.hdr.cmd_id = cmd;
        p_ent->elem.hdr.protocol_id = protocol;

        p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
        p_ent->comp_mode = p_data->comp_mode;
        p_ent->comp_done.done = 0;

        switch (p_ent->comp_mode) {
        case QED_SPQ_MODE_EBLOCK:
                p_ent->comp_cb.cookie = &p_ent->comp_done;
                break;

        case QED_SPQ_MODE_BLOCK:
                if (!p_data->p_comp_data)
                        goto err;

                p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
                break;

        case QED_SPQ_MODE_CB:
                if (!p_data->p_comp_data)
                        p_ent->comp_cb.function = NULL;
                else
                        p_ent->comp_cb = *p_data->p_comp_data;
                break;

        default:
                DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                goto err;
        }

        DP_VERBOSE(p_hwfn,
                   QED_MSG_SPQ,
                   "Initialized: CID %08x %s:[%02x] %s:%02x data_addr %llx comp_mode [%s]\n",
                   opaque_cid, qed_get_ramrod_cmd_id_str(protocol, cmd),
                   cmd, qed_get_protocol_type_str(protocol), protocol,
                   (unsigned long long)(uintptr_t)&p_ent->ramrod,
                   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
                           QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

        return 0;

err:
        qed_sp_destroy_request(p_hwfn, p_ent);

        return -EINVAL;
}

static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
{
        switch (type) {
        case QED_TUNN_CLSS_MAC_VLAN:
                return TUNNEL_CLSS_MAC_VLAN;
        case QED_TUNN_CLSS_MAC_VNI:
                return TUNNEL_CLSS_MAC_VNI;
        case QED_TUNN_CLSS_INNER_MAC_VLAN:
                return TUNNEL_CLSS_INNER_MAC_VLAN;
        case QED_TUNN_CLSS_INNER_MAC_VNI:
                return TUNNEL_CLSS_INNER_MAC_VNI;
        case QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
                return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
        default:
                return TUNNEL_CLSS_MAC_VLAN;
        }
}

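/* Copy the tunnel mode-enable flags from the request into the cached
 * per-device tunnel state. On PF start every mode is taken as-is; on a
 * PF update only the modes flagged with b_update_mode are copied.
 */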
static void
qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
                            struct qed_tunnel_info *p_src, bool b_pf_start)
{
        if (p_src->vxlan.b_update_mode || b_pf_start)
                p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;

        if (p_src->l2_gre.b_update_mode || b_pf_start)
                p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;

        if (p_src->ip_gre.b_update_mode || b_pf_start)
                p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;

        if (p_src->l2_geneve.b_update_mode || b_pf_start)
                p_tun->l2_geneve.b_mode_enabled =
                    p_src->l2_geneve.b_mode_enabled;

        if (p_src->ip_geneve.b_update_mode || b_pf_start)
                p_tun->ip_geneve.b_mode_enabled =
                    p_src->ip_geneve.b_mode_enabled;
}

static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
                                  struct qed_tunnel_info *p_src)
{
        int type;

        p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
        p_tun->b_update_tx_cls = p_src->b_update_tx_cls;

        type = qed_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
        p_tun->vxlan.tun_cls = type;
        type = qed_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
        p_tun->l2_gre.tun_cls = type;
        type = qed_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
        p_tun->ip_gre.tun_cls = type;
        type = qed_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
        p_tun->l2_geneve.tun_cls = type;
        type = qed_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
        p_tun->ip_geneve.tun_cls = type;
}

static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun,
                               struct qed_tunnel_info *p_src)
{
        p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
        p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;

        if (p_src->geneve_port.b_update_port)
                p_tun->geneve_port.port = p_src->geneve_port.port;

        if (p_src->vxlan_port.b_update_port)
                p_tun->vxlan_port.port = p_src->vxlan_port.port;
}

static void
__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
                              struct qed_tunn_update_type *tun_type)
{
        *p_tunn_cls = tun_type->tun_cls;
}

static void
qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
                            struct qed_tunn_update_type *tun_type,
                            u8 *p_update_port,
                            __le16 *p_port,
                            struct qed_tunn_update_udp_port *p_udp_port)
{
        __qed_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
        if (p_udp_port->b_update_port) {
                *p_update_port = 1;
                *p_port = cpu_to_le16(p_udp_port->port);
        }
}

static void
qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
                              struct qed_tunnel_info *p_src,
                              struct pf_update_tunnel_config *p_tunn_cfg)
{
        struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

        qed_set_pf_update_tunn_mode(p_tun, p_src, false);
        qed_set_tunn_cls_info(p_tun, p_src);
        qed_set_tunn_ports(p_tun, p_src);

        qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
                                    &p_tun->vxlan,
                                    &p_tunn_cfg->set_vxlan_udp_port_flg,
                                    &p_tunn_cfg->vxlan_udp_port,
                                    &p_tun->vxlan_port);

        qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
                                    &p_tun->l2_geneve,
                                    &p_tunn_cfg->set_geneve_udp_port_flg,
                                    &p_tunn_cfg->geneve_udp_port,
                                    &p_tun->geneve_port);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
                                      &p_tun->ip_geneve);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
                                      &p_tun->l2_gre);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
                                      &p_tun->ip_gre);

        p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
}

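/* Apply the currently enabled tunnel modes (L2/IP GRE, VXLAN and
 * L2/IP GENEVE) to the hardware through the supplied PTT window.
 */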
static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 struct qed_tunnel_info *p_tun)
{
        qed_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
                           p_tun->ip_gre.b_mode_enabled);
        qed_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);

        qed_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
                              p_tun->ip_geneve.b_mode_enabled);
}

static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_tunnel_info *p_tunn)
{
        if (p_tunn->vxlan_port.b_update_port)
                qed_set_vxlan_dest_port(p_hwfn, p_ptt,
                                        p_tunn->vxlan_port.port);

        if (p_tunn->geneve_port.b_update_port)
                qed_set_geneve_dest_port(p_hwfn, p_ptt,
                                         p_tunn->geneve_port.port);

        qed_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}

static void
qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
                             struct qed_tunnel_info *p_src,
                             struct pf_start_tunnel_config *p_tunn_cfg)
{
        struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

        if (!p_src)
                return;

        qed_set_pf_update_tunn_mode(p_tun, p_src, true);
        qed_set_tunn_cls_info(p_tun, p_src);
        qed_set_tunn_ports(p_tun, p_src);

        qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
                                    &p_tun->vxlan,
                                    &p_tunn_cfg->set_vxlan_udp_port_flg,
                                    &p_tunn_cfg->vxlan_udp_port,
                                    &p_tun->vxlan_port);

        qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
                                    &p_tun->l2_geneve,
                                    &p_tunn_cfg->set_geneve_udp_port_flg,
                                    &p_tunn_cfg->geneve_udp_port,
                                    &p_tun->geneve_port);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
                                      &p_tun->ip_geneve);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
                                      &p_tun->l2_gre);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
                                      &p_tun->ip_gre);
}

int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
                    struct qed_ptt *p_ptt,
                    struct qed_tunnel_info *p_tunn,
                    bool allow_npar_tx_switch)
{
        struct outer_tag_config_struct *outer_tag_config;
        struct pf_start_ramrod_data *p_ramrod = NULL;
        u16 sb = qed_int_get_sp_sb_id(p_hwfn);
        u8 sb_index = p_hwfn->p_eq->eq_sb_index;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        u8 page_cnt, i;
        int rc;

        /* update initial eq producer */
        qed_eq_prod_update(p_hwfn,
                           qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_START,
                                 PROTOCOLID_COMMON, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.pf_start;

        p_ramrod->event_ring_sb_id = cpu_to_le16(sb);
        p_ramrod->event_ring_sb_index = sb_index;
        p_ramrod->path_id = QED_PATH_ID(p_hwfn);
        p_ramrod->dont_log_ramrods = 0;
        p_ramrod->log_type_mask = cpu_to_le16(0xf);

        if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
                p_ramrod->mf_mode = MF_OVLAN;
        else
                p_ramrod->mf_mode = MF_NPAR;

        outer_tag_config = &p_ramrod->outer_tag_config;
        outer_tag_config->outer_tag.tci = cpu_to_le16(p_hwfn->hw_info.ovlan);

        if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) {
                outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021Q);
        } else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) {
                outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021AD);
                outer_tag_config->enable_stag_pri_change = 1;
        }

        outer_tag_config->pri_map_valid = 1;
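        /* Default to a 1:1 inner-to-outer priority mapping for all PFC
         * priorities.
         */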
        for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
                outer_tag_config->inner_to_outer_pri_map[i] = i;

        /* enable_stag_pri_change should be set if port is in BD mode or
         * UFP with Host Control mode.
         */
        if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) {
                if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
                        outer_tag_config->enable_stag_pri_change = 1;
                else
                        outer_tag_config->enable_stag_pri_change = 0;

                outer_tag_config->outer_tag.tci |=
                        cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
        }

        /* Place EQ address in RAMROD */
        DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
                       qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain));
        page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
        p_ramrod->event_ring_num_pages = page_cnt;

        /* Place consolidation queue address in ramrod */
        DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_base_addr,
                       qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain));
        page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_consq->chain);
        p_ramrod->consolid_q_num_pages = page_cnt;

        qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);

        if (test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits))
                p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

        switch (p_hwfn->hw_info.personality) {
        case QED_PCI_ETH:
                p_ramrod->personality = PERSONALITY_ETH;
                break;
        case QED_PCI_FCOE:
                p_ramrod->personality = PERSONALITY_FCOE;
                break;
        case QED_PCI_ISCSI:
        case QED_PCI_NVMETCP:
                p_ramrod->personality = PERSONALITY_TCP_ULP;
                break;
        case QED_PCI_ETH_ROCE:
        case QED_PCI_ETH_IWARP:
                p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown personality %d\n",
                          p_hwfn->hw_info.personality);
                p_ramrod->personality = PERSONALITY_ETH;
        }

        if (p_hwfn->cdev->p_iov_info) {
                struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

                p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
                p_ramrod->num_vfs = (u8)p_iov->total_vfs;
        }
        p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
        p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
                   sb, sb_index, outer_tag_config->outer_tag.tci);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        if (p_tunn)
                qed_set_hw_tunn_mode_port(p_hwfn, p_ptt,
                                          &p_hwfn->cdev->tunnel);

        return rc;
}

int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_CB;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
                                      &p_ent->ramrod.pf_update);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) {
                DP_INFO(p_hwfn, "Invalid priority type %d\n",
                        p_hwfn->ufp_info.pri_type);
                return -EINVAL;
        }

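        /* Post a PF_UPDATE ramrod telling the firmware whether the host OS
         * controls the S-tag priority (QED_UFP_PRI_OS) for this function.
         */
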
        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_CB;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
        if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
                p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
        else
                p_ent->ramrod.pf_update.enable_stag_pri_change = 0;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

/* Set pf update ramrod command params */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt,
                              struct qed_tunnel_info *p_tunn,
                              enum spq_mode comp_mode,
                              struct qed_spq_comp_cb *p_comp_data)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        if (IS_VF(p_hwfn->cdev))
                return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn);

        if (!p_tunn)
                return -EINVAL;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
                                      &p_ent->ramrod.pf_update.tunnel_config);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                return rc;

        qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel);

        return rc;
}

int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_CB;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
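        /* The outer (S-tag) VLAN ID is taken from hw_info.ovlan; in UFP mode
         * the default TC is also carried in the PCP bits (15:13) of the tag.
         */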
        p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan);
        if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
                p_ent->ramrod.pf_update.mf_vlan |=
                        cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));

        return qed_spq_post(p_hwfn, p_ent, NULL);
}