1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) 2 /* QLogic qed NIC Driver 3 * Copyright (c) 2015-2017 QLogic Corporation 4 * Copyright (c) 2019-2020 Marvell International Ltd. 5 */ 6 7 #include <linux/types.h> 8 #include <asm/byteorder.h> 9 #include <linux/bitops.h> 10 #include <linux/delay.h> 11 #include <linux/dma-mapping.h> 12 #include <linux/errno.h> 13 #include <linux/io.h> 14 #include <linux/kernel.h> 15 #include <linux/list.h> 16 #include <linux/module.h> 17 #include <linux/mutex.h> 18 #include <linux/pci.h> 19 #include <linux/slab.h> 20 #include <linux/spinlock.h> 21 #include <linux/string.h> 22 #include <linux/if_vlan.h> 23 #include "qed.h" 24 #include "qed_cxt.h" 25 #include "qed_dcbx.h" 26 #include "qed_hsi.h" 27 #include "qed_hw.h" 28 #include "qed_init_ops.h" 29 #include "qed_int.h" 30 #include "qed_ll2.h" 31 #include "qed_mcp.h" 32 #include "qed_reg_addr.h" 33 #include <linux/qed/qed_rdma_if.h> 34 #include "qed_rdma.h" 35 #include "qed_roce.h" 36 #include "qed_sp.h" 37 38 static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid); 39 40 static int qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, 41 u16 echo, union event_ring_data *data, 42 u8 fw_return_code) 43 { 44 struct qed_rdma_events events = p_hwfn->p_rdma_info->events; 45 union rdma_eqe_data *rdata = &data->rdma_data; 46 47 if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) { 48 u16 icid = (u16)le32_to_cpu(rdata->rdma_destroy_qp_data.cid); 49 50 /* icid release in this async event can occur only if the icid 51 * was offloaded to the FW. In case it wasn't offloaded this is 52 * handled in qed_roce_sp_destroy_qp. 53 */ 54 qed_roce_free_real_icid(p_hwfn, icid); 55 } else if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY || 56 fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) { 57 u16 srq_id = (u16)rdata->async_handle.lo; 58 59 events.affiliated_event(events.context, fw_event_code, 60 &srq_id); 61 } else { 62 events.affiliated_event(events.context, fw_event_code, 63 (void *)&rdata->async_handle); 64 } 65 66 return 0; 67 } 68 69 void qed_roce_stop(struct qed_hwfn *p_hwfn) 70 { 71 struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map; 72 int wait_count = 0; 73 74 /* when destroying a_RoCE QP the control is returned to the user after 75 * the synchronous part. The asynchronous part may take a little longer. 76 * We delay for a short while if an async destroy QP is still expected. 77 * Beyond the added delay we clear the bitmap anyway. 78 */ 79 while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) { 80 msleep(100); 81 if (wait_count++ > 20) { 82 DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n"); 83 break; 84 } 85 } 86 } 87 88 static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid, 89 __le32 *dst_gid) 90 { 91 u32 i; 92 93 if (qp->roce_mode == ROCE_V2_IPV4) { 94 /* The IPv4 addresses shall be aligned to the highest word. 95 * The lower words must be zero. 96 */ 97 memset(src_gid, 0, sizeof(union qed_gid)); 98 memset(dst_gid, 0, sizeof(union qed_gid)); 99 src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr); 100 dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr); 101 } else { 102 /* GIDs and IPv6 addresses coincide in location and size */ 103 for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) { 104 src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]); 105 dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]); 106 } 107 } 108 } 109 110 static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode) 111 { 112 switch (roce_mode) { 113 case ROCE_V1: 114 return PLAIN_ROCE; 115 case ROCE_V2_IPV4: 116 return RROCE_IPV4; 117 case ROCE_V2_IPV6: 118 return RROCE_IPV6; 119 default: 120 return MAX_ROCE_FLAVOR; 121 } 122 } 123 124 static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid) 125 { 126 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 127 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); 128 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1); 129 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 130 } 131 132 int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid) 133 { 134 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; 135 u32 responder_icid; 136 u32 requester_icid; 137 int rc; 138 139 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 140 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map, 141 &responder_icid); 142 if (rc) { 143 spin_unlock_bh(&p_rdma_info->lock); 144 return rc; 145 } 146 147 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map, 148 &requester_icid); 149 150 spin_unlock_bh(&p_rdma_info->lock); 151 if (rc) 152 goto err; 153 154 /* the two icid's should be adjacent */ 155 if ((requester_icid - responder_icid) != 1) { 156 DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's'\n"); 157 rc = -EINVAL; 158 goto err; 159 } 160 161 responder_icid += qed_cxt_get_proto_cid_start(p_hwfn, 162 p_rdma_info->proto); 163 requester_icid += qed_cxt_get_proto_cid_start(p_hwfn, 164 p_rdma_info->proto); 165 166 /* If these icids require a new ILT line allocate DMA-able context for 167 * an ILT page 168 */ 169 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid); 170 if (rc) 171 goto err; 172 173 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid); 174 if (rc) 175 goto err; 176 177 *cid = (u16)responder_icid; 178 return rc; 179 180 err: 181 spin_lock_bh(&p_rdma_info->lock); 182 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid); 183 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid); 184 185 spin_unlock_bh(&p_rdma_info->lock); 186 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 187 "Allocate CID - failed, rc = %d\n", rc); 188 return rc; 189 } 190 191 static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid) 192 { 193 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 194 qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid); 195 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 196 } 197 198 static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) 199 { 200 u8 pri, tc = 0; 201 202 if (qp->vlan_id) { 203 pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 204 tc = qed_dcbx_get_priority_tc(p_hwfn, pri); 205 } 206 207 DP_VERBOSE(p_hwfn, QED_MSG_SP, 208 "qp icid %u tc: %u (vlan priority %s)\n", 209 qp->icid, tc, qp->vlan_id ? "enabled" : "disabled"); 210 211 return tc; 212 } 213 214 static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, 215 struct qed_rdma_qp *qp) 216 { 217 struct roce_create_qp_resp_ramrod_data *p_ramrod; 218 u16 regular_latency_queue, low_latency_queue; 219 struct qed_sp_init_data init_data; 220 enum roce_flavor roce_flavor; 221 struct qed_spq_entry *p_ent; 222 enum protocol_type proto; 223 int rc; 224 u8 tc; 225 226 if (!qp->has_resp) 227 return 0; 228 229 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 230 231 /* Allocate DMA-able memory for IRQ */ 232 qp->irq_num_pages = 1; 233 qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 234 RDMA_RING_PAGE_SIZE, 235 &qp->irq_phys_addr, GFP_KERNEL); 236 if (!qp->irq) { 237 rc = -ENOMEM; 238 DP_NOTICE(p_hwfn, 239 "qed create responder failed: cannot allocate memory (irq). rc = %d\n", 240 rc); 241 return rc; 242 } 243 244 /* Get SPQ entry */ 245 memset(&init_data, 0, sizeof(init_data)); 246 init_data.cid = qp->icid; 247 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 248 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 249 250 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP, 251 PROTOCOLID_ROCE, &init_data); 252 if (rc) 253 goto err; 254 255 p_ramrod = &p_ent->ramrod.roce_create_qp_resp; 256 257 p_ramrod->flags = 0; 258 259 roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode); 260 SET_FIELD(p_ramrod->flags, 261 ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor); 262 263 SET_FIELD(p_ramrod->flags, 264 ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN, 265 qp->incoming_rdma_read_en); 266 267 SET_FIELD(p_ramrod->flags, 268 ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN, 269 qp->incoming_rdma_write_en); 270 271 SET_FIELD(p_ramrod->flags, 272 ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN, 273 qp->incoming_atomic_en); 274 275 SET_FIELD(p_ramrod->flags, 276 ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN, 277 qp->e2e_flow_control_en); 278 279 SET_FIELD(p_ramrod->flags, 280 ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq); 281 282 SET_FIELD(p_ramrod->flags, 283 ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN, 284 qp->fmr_and_reserved_lkey); 285 286 SET_FIELD(p_ramrod->flags, 287 ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER, 288 qp->min_rnr_nak_timer); 289 290 SET_FIELD(p_ramrod->flags, 291 ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG, 292 qed_rdma_is_xrc_qp(qp)); 293 294 p_ramrod->max_ird = qp->max_rd_atomic_resp; 295 p_ramrod->traffic_class = qp->traffic_class_tos; 296 p_ramrod->hop_limit = qp->hop_limit_ttl; 297 p_ramrod->irq_num_pages = qp->irq_num_pages; 298 p_ramrod->p_key = cpu_to_le16(qp->pkey); 299 p_ramrod->flow_label = cpu_to_le32(qp->flow_label); 300 p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp); 301 p_ramrod->mtu = cpu_to_le16(qp->mtu); 302 p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn); 303 p_ramrod->pd = cpu_to_le16(qp->pd); 304 p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages); 305 DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr); 306 DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr); 307 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); 308 p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi); 309 p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo); 310 p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); 311 p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); 312 p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | 313 qp->rq_cq_id); 314 p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id); 315 316 tc = qed_roce_get_qp_tc(p_hwfn, qp); 317 regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc); 318 low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc); 319 DP_VERBOSE(p_hwfn, QED_MSG_SP, 320 "qp icid %u pqs: regular_latency %u low_latency %u\n", 321 qp->icid, regular_latency_queue - CM_TX_PQ_BASE, 322 low_latency_queue - CM_TX_PQ_BASE); 323 p_ramrod->regular_latency_phy_queue = 324 cpu_to_le16(regular_latency_queue); 325 p_ramrod->low_latency_phy_queue = 326 cpu_to_le16(low_latency_queue); 327 328 p_ramrod->dpi = cpu_to_le16(qp->dpi); 329 330 qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr); 331 qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr); 332 333 p_ramrod->udp_src_port = qp->udp_src_port; 334 p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id); 335 p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id); 336 p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid); 337 338 p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + 339 qp->stats_queue; 340 341 rc = qed_spq_post(p_hwfn, p_ent, NULL); 342 if (rc) 343 goto err; 344 345 qp->resp_offloaded = true; 346 qp->cq_prod = 0; 347 348 proto = p_hwfn->p_rdma_info->proto; 349 qed_roce_set_real_cid(p_hwfn, qp->icid - 350 qed_cxt_get_proto_cid_start(p_hwfn, proto)); 351 352 return rc; 353 354 err: 355 DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc); 356 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 357 qp->irq_num_pages * RDMA_RING_PAGE_SIZE, 358 qp->irq, qp->irq_phys_addr); 359 360 return rc; 361 } 362 363 static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, 364 struct qed_rdma_qp *qp) 365 { 366 struct roce_create_qp_req_ramrod_data *p_ramrod; 367 u16 regular_latency_queue, low_latency_queue; 368 struct qed_sp_init_data init_data; 369 enum roce_flavor roce_flavor; 370 struct qed_spq_entry *p_ent; 371 enum protocol_type proto; 372 int rc; 373 u8 tc; 374 375 if (!qp->has_req) 376 return 0; 377 378 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 379 380 /* Allocate DMA-able memory for ORQ */ 381 qp->orq_num_pages = 1; 382 qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 383 RDMA_RING_PAGE_SIZE, 384 &qp->orq_phys_addr, GFP_KERNEL); 385 if (!qp->orq) { 386 rc = -ENOMEM; 387 DP_NOTICE(p_hwfn, 388 "qed create requester failed: cannot allocate memory (orq). rc = %d\n", 389 rc); 390 return rc; 391 } 392 393 /* Get SPQ entry */ 394 memset(&init_data, 0, sizeof(init_data)); 395 init_data.cid = qp->icid + 1; 396 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 397 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 398 399 rc = qed_sp_init_request(p_hwfn, &p_ent, 400 ROCE_RAMROD_CREATE_QP, 401 PROTOCOLID_ROCE, &init_data); 402 if (rc) 403 goto err; 404 405 p_ramrod = &p_ent->ramrod.roce_create_qp_req; 406 407 p_ramrod->flags = 0; 408 409 roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode); 410 SET_FIELD(p_ramrod->flags, 411 ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor); 412 413 SET_FIELD(p_ramrod->flags, 414 ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN, 415 qp->fmr_and_reserved_lkey); 416 417 SET_FIELD(p_ramrod->flags, 418 ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all); 419 420 SET_FIELD(p_ramrod->flags, 421 ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt); 422 423 SET_FIELD(p_ramrod->flags, 424 ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, 425 qp->rnr_retry_cnt); 426 427 SET_FIELD(p_ramrod->flags, 428 ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG, 429 qed_rdma_is_xrc_qp(qp)); 430 431 SET_FIELD(p_ramrod->flags2, 432 ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE, qp->edpm_mode); 433 434 p_ramrod->max_ord = qp->max_rd_atomic_req; 435 p_ramrod->traffic_class = qp->traffic_class_tos; 436 p_ramrod->hop_limit = qp->hop_limit_ttl; 437 p_ramrod->orq_num_pages = qp->orq_num_pages; 438 p_ramrod->p_key = cpu_to_le16(qp->pkey); 439 p_ramrod->flow_label = cpu_to_le32(qp->flow_label); 440 p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp); 441 p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout); 442 p_ramrod->mtu = cpu_to_le16(qp->mtu); 443 p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn); 444 p_ramrod->pd = cpu_to_le16(qp->pd); 445 p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages); 446 DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr); 447 DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr); 448 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); 449 p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi); 450 p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo); 451 p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); 452 p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); 453 p_ramrod->cq_cid = 454 cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id); 455 456 tc = qed_roce_get_qp_tc(p_hwfn, qp); 457 regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc); 458 low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc); 459 DP_VERBOSE(p_hwfn, QED_MSG_SP, 460 "qp icid %u pqs: regular_latency %u low_latency %u\n", 461 qp->icid, regular_latency_queue - CM_TX_PQ_BASE, 462 low_latency_queue - CM_TX_PQ_BASE); 463 p_ramrod->regular_latency_phy_queue = 464 cpu_to_le16(regular_latency_queue); 465 p_ramrod->low_latency_phy_queue = 466 cpu_to_le16(low_latency_queue); 467 468 p_ramrod->dpi = cpu_to_le16(qp->dpi); 469 470 qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr); 471 qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr); 472 473 p_ramrod->udp_src_port = qp->udp_src_port; 474 p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id); 475 p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + 476 qp->stats_queue; 477 478 rc = qed_spq_post(p_hwfn, p_ent, NULL); 479 if (rc) 480 goto err; 481 482 qp->req_offloaded = true; 483 proto = p_hwfn->p_rdma_info->proto; 484 qed_roce_set_real_cid(p_hwfn, 485 qp->icid + 1 - 486 qed_cxt_get_proto_cid_start(p_hwfn, proto)); 487 488 return rc; 489 490 err: 491 DP_NOTICE(p_hwfn, "Create requested - failed, rc = %d\n", rc); 492 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 493 qp->orq_num_pages * RDMA_RING_PAGE_SIZE, 494 qp->orq, qp->orq_phys_addr); 495 return rc; 496 } 497 498 static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn, 499 struct qed_rdma_qp *qp, 500 bool move_to_err, u32 modify_flags) 501 { 502 struct roce_modify_qp_resp_ramrod_data *p_ramrod; 503 struct qed_sp_init_data init_data; 504 struct qed_spq_entry *p_ent; 505 int rc; 506 507 if (!qp->has_resp) 508 return 0; 509 510 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 511 512 if (move_to_err && !qp->resp_offloaded) 513 return 0; 514 515 /* Get SPQ entry */ 516 memset(&init_data, 0, sizeof(init_data)); 517 init_data.cid = qp->icid; 518 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 519 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 520 521 rc = qed_sp_init_request(p_hwfn, &p_ent, 522 ROCE_EVENT_MODIFY_QP, 523 PROTOCOLID_ROCE, &init_data); 524 if (rc) { 525 DP_NOTICE(p_hwfn, "rc = %d\n", rc); 526 return rc; 527 } 528 529 p_ramrod = &p_ent->ramrod.roce_modify_qp_resp; 530 531 p_ramrod->flags = 0; 532 533 SET_FIELD(p_ramrod->flags, 534 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err); 535 536 SET_FIELD(p_ramrod->flags, 537 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN, 538 qp->incoming_rdma_read_en); 539 540 SET_FIELD(p_ramrod->flags, 541 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN, 542 qp->incoming_rdma_write_en); 543 544 SET_FIELD(p_ramrod->flags, 545 ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN, 546 qp->incoming_atomic_en); 547 548 SET_FIELD(p_ramrod->flags, 549 ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN, 550 qp->e2e_flow_control_en); 551 552 SET_FIELD(p_ramrod->flags, 553 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG, 554 GET_FIELD(modify_flags, 555 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)); 556 557 SET_FIELD(p_ramrod->flags, 558 ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG, 559 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)); 560 561 SET_FIELD(p_ramrod->flags, 562 ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG, 563 GET_FIELD(modify_flags, 564 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)); 565 566 SET_FIELD(p_ramrod->flags, 567 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG, 568 GET_FIELD(modify_flags, 569 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP)); 570 571 SET_FIELD(p_ramrod->flags, 572 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG, 573 GET_FIELD(modify_flags, 574 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER)); 575 576 p_ramrod->fields = 0; 577 SET_FIELD(p_ramrod->fields, 578 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER, 579 qp->min_rnr_nak_timer); 580 581 p_ramrod->max_ird = qp->max_rd_atomic_resp; 582 p_ramrod->traffic_class = qp->traffic_class_tos; 583 p_ramrod->hop_limit = qp->hop_limit_ttl; 584 p_ramrod->p_key = cpu_to_le16(qp->pkey); 585 p_ramrod->flow_label = cpu_to_le32(qp->flow_label); 586 p_ramrod->mtu = cpu_to_le16(qp->mtu); 587 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); 588 rc = qed_spq_post(p_hwfn, p_ent, NULL); 589 590 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc); 591 return rc; 592 } 593 594 static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn, 595 struct qed_rdma_qp *qp, 596 bool move_to_sqd, 597 bool move_to_err, u32 modify_flags) 598 { 599 struct roce_modify_qp_req_ramrod_data *p_ramrod; 600 struct qed_sp_init_data init_data; 601 struct qed_spq_entry *p_ent; 602 int rc; 603 604 if (!qp->has_req) 605 return 0; 606 607 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 608 609 if (move_to_err && !(qp->req_offloaded)) 610 return 0; 611 612 /* Get SPQ entry */ 613 memset(&init_data, 0, sizeof(init_data)); 614 init_data.cid = qp->icid + 1; 615 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 616 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 617 618 rc = qed_sp_init_request(p_hwfn, &p_ent, 619 ROCE_EVENT_MODIFY_QP, 620 PROTOCOLID_ROCE, &init_data); 621 if (rc) { 622 DP_NOTICE(p_hwfn, "rc = %d\n", rc); 623 return rc; 624 } 625 626 p_ramrod = &p_ent->ramrod.roce_modify_qp_req; 627 628 p_ramrod->flags = 0; 629 630 SET_FIELD(p_ramrod->flags, 631 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err); 632 633 SET_FIELD(p_ramrod->flags, 634 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd); 635 636 SET_FIELD(p_ramrod->flags, 637 ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY, 638 qp->sqd_async); 639 640 SET_FIELD(p_ramrod->flags, 641 ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG, 642 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)); 643 644 SET_FIELD(p_ramrod->flags, 645 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG, 646 GET_FIELD(modify_flags, 647 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)); 648 649 SET_FIELD(p_ramrod->flags, 650 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG, 651 GET_FIELD(modify_flags, 652 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ)); 653 654 SET_FIELD(p_ramrod->flags, 655 ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG, 656 GET_FIELD(modify_flags, 657 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT)); 658 659 SET_FIELD(p_ramrod->flags, 660 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG, 661 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT)); 662 663 SET_FIELD(p_ramrod->flags, 664 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG, 665 GET_FIELD(modify_flags, 666 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT)); 667 668 p_ramrod->fields = 0; 669 SET_FIELD(p_ramrod->fields, 670 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt); 671 672 SET_FIELD(p_ramrod->fields, 673 ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, 674 qp->rnr_retry_cnt); 675 676 p_ramrod->max_ord = qp->max_rd_atomic_req; 677 p_ramrod->traffic_class = qp->traffic_class_tos; 678 p_ramrod->hop_limit = qp->hop_limit_ttl; 679 p_ramrod->p_key = cpu_to_le16(qp->pkey); 680 p_ramrod->flow_label = cpu_to_le32(qp->flow_label); 681 p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout); 682 p_ramrod->mtu = cpu_to_le16(qp->mtu); 683 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); 684 rc = qed_spq_post(p_hwfn, p_ent, NULL); 685 686 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc); 687 return rc; 688 } 689 690 static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, 691 struct qed_rdma_qp *qp, 692 u32 *cq_prod) 693 { 694 struct roce_destroy_qp_resp_output_params *p_ramrod_res; 695 struct roce_destroy_qp_resp_ramrod_data *p_ramrod; 696 struct qed_sp_init_data init_data; 697 struct qed_spq_entry *p_ent; 698 dma_addr_t ramrod_res_phys; 699 int rc; 700 701 if (!qp->has_resp) { 702 *cq_prod = 0; 703 return 0; 704 } 705 706 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 707 *cq_prod = qp->cq_prod; 708 709 if (!qp->resp_offloaded) { 710 /* If a responder was never offload, we need to free the cids 711 * allocated in create_qp as a FW async event will never arrive 712 */ 713 u32 cid; 714 715 cid = qp->icid - 716 qed_cxt_get_proto_cid_start(p_hwfn, 717 p_hwfn->p_rdma_info->proto); 718 qed_roce_free_cid_pair(p_hwfn, (u16)cid); 719 720 return 0; 721 } 722 723 /* Get SPQ entry */ 724 memset(&init_data, 0, sizeof(init_data)); 725 init_data.cid = qp->icid; 726 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 727 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 728 729 rc = qed_sp_init_request(p_hwfn, &p_ent, 730 ROCE_RAMROD_DESTROY_QP, 731 PROTOCOLID_ROCE, &init_data); 732 if (rc) 733 return rc; 734 735 p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp; 736 737 p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 738 sizeof(*p_ramrod_res), 739 &ramrod_res_phys, GFP_KERNEL); 740 741 if (!p_ramrod_res) { 742 rc = -ENOMEM; 743 DP_NOTICE(p_hwfn, 744 "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n", 745 rc); 746 qed_sp_destroy_request(p_hwfn, p_ent); 747 return rc; 748 } 749 750 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys); 751 752 rc = qed_spq_post(p_hwfn, p_ent, NULL); 753 if (rc) 754 goto err; 755 756 *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod); 757 qp->cq_prod = *cq_prod; 758 759 /* Free IRQ - only if ramrod succeeded, in case FW is still using it */ 760 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 761 qp->irq_num_pages * RDMA_RING_PAGE_SIZE, 762 qp->irq, qp->irq_phys_addr); 763 764 qp->resp_offloaded = false; 765 766 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc); 767 768 err: 769 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 770 sizeof(struct roce_destroy_qp_resp_output_params), 771 p_ramrod_res, ramrod_res_phys); 772 773 return rc; 774 } 775 776 static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn, 777 struct qed_rdma_qp *qp) 778 { 779 struct roce_destroy_qp_req_output_params *p_ramrod_res; 780 struct roce_destroy_qp_req_ramrod_data *p_ramrod; 781 struct qed_sp_init_data init_data; 782 struct qed_spq_entry *p_ent; 783 dma_addr_t ramrod_res_phys; 784 int rc = -ENOMEM; 785 786 if (!qp->has_req) 787 return 0; 788 789 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 790 791 if (!qp->req_offloaded) 792 return 0; 793 794 p_ramrod_res = (struct roce_destroy_qp_req_output_params *) 795 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 796 sizeof(*p_ramrod_res), 797 &ramrod_res_phys, GFP_KERNEL); 798 if (!p_ramrod_res) { 799 DP_NOTICE(p_hwfn, 800 "qed destroy requester failed: cannot allocate memory (ramrod)\n"); 801 return rc; 802 } 803 804 /* Get SPQ entry */ 805 memset(&init_data, 0, sizeof(init_data)); 806 init_data.cid = qp->icid + 1; 807 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 808 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 809 810 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP, 811 PROTOCOLID_ROCE, &init_data); 812 if (rc) 813 goto err; 814 815 p_ramrod = &p_ent->ramrod.roce_destroy_qp_req; 816 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys); 817 818 rc = qed_spq_post(p_hwfn, p_ent, NULL); 819 if (rc) 820 goto err; 821 822 823 /* Free ORQ - only if ramrod succeeded, in case FW is still using it */ 824 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 825 qp->orq_num_pages * RDMA_RING_PAGE_SIZE, 826 qp->orq, qp->orq_phys_addr); 827 828 qp->req_offloaded = false; 829 830 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc); 831 832 err: 833 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res), 834 p_ramrod_res, ramrod_res_phys); 835 836 return rc; 837 } 838 839 int qed_roce_query_qp(struct qed_hwfn *p_hwfn, 840 struct qed_rdma_qp *qp, 841 struct qed_rdma_query_qp_out_params *out_params) 842 { 843 struct roce_query_qp_resp_output_params *p_resp_ramrod_res; 844 struct roce_query_qp_req_output_params *p_req_ramrod_res; 845 struct roce_query_qp_resp_ramrod_data *p_resp_ramrod; 846 struct roce_query_qp_req_ramrod_data *p_req_ramrod; 847 struct qed_sp_init_data init_data; 848 dma_addr_t resp_ramrod_res_phys; 849 dma_addr_t req_ramrod_res_phys; 850 struct qed_spq_entry *p_ent; 851 bool rq_err_state; 852 bool sq_err_state; 853 bool sq_draining; 854 int rc = -ENOMEM; 855 856 if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) { 857 /* We can't send ramrod to the fw since this qp wasn't offloaded 858 * to the fw yet 859 */ 860 out_params->draining = false; 861 out_params->rq_psn = qp->rq_psn; 862 out_params->sq_psn = qp->sq_psn; 863 out_params->state = qp->cur_state; 864 865 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n"); 866 return 0; 867 } 868 869 if (!(qp->resp_offloaded)) { 870 DP_NOTICE(p_hwfn, 871 "The responder's qp should be offloaded before requester's\n"); 872 return -EINVAL; 873 } 874 875 /* Send a query responder ramrod to FW to get RQ-PSN and state */ 876 p_resp_ramrod_res = 877 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 878 sizeof(*p_resp_ramrod_res), 879 &resp_ramrod_res_phys, GFP_KERNEL); 880 if (!p_resp_ramrod_res) { 881 DP_NOTICE(p_hwfn, 882 "qed query qp failed: cannot allocate memory (ramrod)\n"); 883 return rc; 884 } 885 886 /* Get SPQ entry */ 887 memset(&init_data, 0, sizeof(init_data)); 888 init_data.cid = qp->icid; 889 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 890 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 891 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP, 892 PROTOCOLID_ROCE, &init_data); 893 if (rc) 894 goto err_resp; 895 896 p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp; 897 DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys); 898 899 rc = qed_spq_post(p_hwfn, p_ent, NULL); 900 if (rc) 901 goto err_resp; 902 903 out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn); 904 rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags), 905 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG); 906 907 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res), 908 p_resp_ramrod_res, resp_ramrod_res_phys); 909 910 if (!(qp->req_offloaded)) { 911 /* Don't send query qp for the requester */ 912 out_params->sq_psn = qp->sq_psn; 913 out_params->draining = false; 914 915 if (rq_err_state) 916 qp->cur_state = QED_ROCE_QP_STATE_ERR; 917 918 out_params->state = qp->cur_state; 919 920 return 0; 921 } 922 923 /* Send a query requester ramrod to FW to get SQ-PSN and state */ 924 p_req_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 925 sizeof(*p_req_ramrod_res), 926 &req_ramrod_res_phys, 927 GFP_KERNEL); 928 if (!p_req_ramrod_res) { 929 rc = -ENOMEM; 930 DP_NOTICE(p_hwfn, 931 "qed query qp failed: cannot allocate memory (ramrod)\n"); 932 return rc; 933 } 934 935 /* Get SPQ entry */ 936 init_data.cid = qp->icid + 1; 937 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP, 938 PROTOCOLID_ROCE, &init_data); 939 if (rc) 940 goto err_req; 941 942 p_req_ramrod = &p_ent->ramrod.roce_query_qp_req; 943 DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys); 944 945 rc = qed_spq_post(p_hwfn, p_ent, NULL); 946 if (rc) 947 goto err_req; 948 949 out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn); 950 sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags), 951 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG); 952 sq_draining = 953 GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags), 954 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG); 955 956 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res), 957 p_req_ramrod_res, req_ramrod_res_phys); 958 959 out_params->draining = false; 960 961 if (rq_err_state || sq_err_state) 962 qp->cur_state = QED_ROCE_QP_STATE_ERR; 963 else if (sq_draining) 964 out_params->draining = true; 965 out_params->state = qp->cur_state; 966 967 return 0; 968 969 err_req: 970 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res), 971 p_req_ramrod_res, req_ramrod_res_phys); 972 return rc; 973 err_resp: 974 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res), 975 p_resp_ramrod_res, resp_ramrod_res_phys); 976 return rc; 977 } 978 979 int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) 980 { 981 u32 cq_prod; 982 int rc; 983 984 /* Destroys the specified QP */ 985 if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) && 986 (qp->cur_state != QED_ROCE_QP_STATE_ERR) && 987 (qp->cur_state != QED_ROCE_QP_STATE_INIT)) { 988 DP_NOTICE(p_hwfn, 989 "QP must be in error, reset or init state before destroying it\n"); 990 return -EINVAL; 991 } 992 993 if (qp->cur_state != QED_ROCE_QP_STATE_RESET) { 994 rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, 995 &cq_prod); 996 if (rc) 997 return rc; 998 999 /* Send destroy requester ramrod */ 1000 rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp); 1001 if (rc) 1002 return rc; 1003 } 1004 1005 return 0; 1006 } 1007 1008 int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, 1009 struct qed_rdma_qp *qp, 1010 enum qed_roce_qp_state prev_state, 1011 struct qed_rdma_modify_qp_in_params *params) 1012 { 1013 int rc = 0; 1014 1015 /* Perform additional operations according to the current state and the 1016 * next state 1017 */ 1018 if (((prev_state == QED_ROCE_QP_STATE_INIT) || 1019 (prev_state == QED_ROCE_QP_STATE_RESET)) && 1020 (qp->cur_state == QED_ROCE_QP_STATE_RTR)) { 1021 /* Init->RTR or Reset->RTR */ 1022 rc = qed_roce_sp_create_responder(p_hwfn, qp); 1023 return rc; 1024 } else if ((prev_state == QED_ROCE_QP_STATE_RTR) && 1025 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) { 1026 /* RTR-> RTS */ 1027 rc = qed_roce_sp_create_requester(p_hwfn, qp); 1028 if (rc) 1029 return rc; 1030 1031 /* Send modify responder ramrod */ 1032 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, 1033 params->modify_flags); 1034 return rc; 1035 } else if ((prev_state == QED_ROCE_QP_STATE_RTS) && 1036 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) { 1037 /* RTS->RTS */ 1038 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, 1039 params->modify_flags); 1040 if (rc) 1041 return rc; 1042 1043 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false, 1044 params->modify_flags); 1045 return rc; 1046 } else if ((prev_state == QED_ROCE_QP_STATE_RTS) && 1047 (qp->cur_state == QED_ROCE_QP_STATE_SQD)) { 1048 /* RTS->SQD */ 1049 rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false, 1050 params->modify_flags); 1051 return rc; 1052 } else if ((prev_state == QED_ROCE_QP_STATE_SQD) && 1053 (qp->cur_state == QED_ROCE_QP_STATE_SQD)) { 1054 /* SQD->SQD */ 1055 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, 1056 params->modify_flags); 1057 if (rc) 1058 return rc; 1059 1060 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false, 1061 params->modify_flags); 1062 return rc; 1063 } else if ((prev_state == QED_ROCE_QP_STATE_SQD) && 1064 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) { 1065 /* SQD->RTS */ 1066 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, 1067 params->modify_flags); 1068 if (rc) 1069 return rc; 1070 1071 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false, 1072 params->modify_flags); 1073 1074 return rc; 1075 } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) { 1076 /* ->ERR */ 1077 rc = qed_roce_sp_modify_responder(p_hwfn, qp, true, 1078 params->modify_flags); 1079 if (rc) 1080 return rc; 1081 1082 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true, 1083 params->modify_flags); 1084 return rc; 1085 } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) { 1086 /* Any state -> RESET */ 1087 u32 cq_prod; 1088 1089 /* Send destroy responder ramrod */ 1090 rc = qed_roce_sp_destroy_qp_responder(p_hwfn, 1091 qp, 1092 &cq_prod); 1093 1094 if (rc) 1095 return rc; 1096 1097 qp->cq_prod = cq_prod; 1098 1099 rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp); 1100 } else { 1101 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n"); 1102 } 1103 1104 return rc; 1105 } 1106 1107 static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid) 1108 { 1109 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; 1110 u32 start_cid, cid, xcid; 1111 1112 /* an even icid belongs to a responder while an odd icid belongs to a 1113 * requester. The 'cid' received as an input can be either. We calculate 1114 * the "partner" icid and call it xcid. Only if both are free then the 1115 * "cid" map can be cleared. 1116 */ 1117 start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto); 1118 cid = icid - start_cid; 1119 xcid = cid ^ 1; 1120 1121 spin_lock_bh(&p_rdma_info->lock); 1122 1123 qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid); 1124 if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) { 1125 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid); 1126 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid); 1127 } 1128 1129 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 1130 } 1131 1132 void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1133 { 1134 u8 val; 1135 1136 /* if any QPs are already active, we want to disable DPM, since their 1137 * context information contains information from before the latest DCBx 1138 * update. Otherwise enable it. 1139 */ 1140 val = qed_rdma_allocated_qps(p_hwfn) ? true : false; 1141 p_hwfn->dcbx_no_edpm = (u8)val; 1142 1143 qed_rdma_dpm_conf(p_hwfn, p_ptt); 1144 } 1145 1146 int qed_roce_setup(struct qed_hwfn *p_hwfn) 1147 { 1148 return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE, 1149 qed_roce_async_event); 1150 } 1151 1152 int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1153 { 1154 u32 ll2_ethertype_en; 1155 1156 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); 1157 1158 p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE; 1159 1160 ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); 1161 qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN, 1162 (ll2_ethertype_en | 0x01)); 1163 1164 if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) { 1165 DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n"); 1166 return -EINVAL; 1167 } 1168 1169 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n"); 1170 return 0; 1171 } 1172