1 /* QLogic qed NIC Driver 2 * Copyright (c) 2015-2017 QLogic Corporation 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and /or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 #include <linux/types.h> 33 #include <asm/byteorder.h> 34 #include <linux/bitops.h> 35 #include <linux/delay.h> 36 #include <linux/dma-mapping.h> 37 #include <linux/errno.h> 38 #include <linux/io.h> 39 #include <linux/kernel.h> 40 #include <linux/list.h> 41 #include <linux/module.h> 42 #include <linux/mutex.h> 43 #include <linux/pci.h> 44 #include <linux/slab.h> 45 #include <linux/spinlock.h> 46 #include <linux/string.h> 47 #include <linux/if_vlan.h> 48 #include "qed.h" 49 #include "qed_cxt.h" 50 #include "qed_dcbx.h" 51 #include "qed_hsi.h" 52 #include "qed_hw.h" 53 #include "qed_init_ops.h" 54 #include "qed_int.h" 55 #include "qed_ll2.h" 56 #include "qed_mcp.h" 57 #include "qed_reg_addr.h" 58 #include <linux/qed/qed_rdma_if.h> 59 #include "qed_rdma.h" 60 #include "qed_roce.h" 61 #include "qed_sp.h" 62 63 static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid); 64 65 static int 66 qed_roce_async_event(struct qed_hwfn *p_hwfn, 67 u8 fw_event_code, 68 u16 echo, union event_ring_data *data, u8 fw_return_code) 69 { 70 struct qed_rdma_events events = p_hwfn->p_rdma_info->events; 71 72 if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) { 73 u16 icid = 74 (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid); 75 76 /* icid release in this async event can occur only if the icid 77 * was offloaded to the FW. In case it wasn't offloaded this is 78 * handled in qed_roce_sp_destroy_qp. 79 */ 80 qed_roce_free_real_icid(p_hwfn, icid); 81 } else { 82 if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY || 83 fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) { 84 u16 srq_id = (u16)data->rdma_data.async_handle.lo; 85 86 events.affiliated_event(events.context, fw_event_code, 87 &srq_id); 88 } else { 89 union rdma_eqe_data rdata = data->rdma_data; 90 91 events.affiliated_event(events.context, fw_event_code, 92 (void *)&rdata.async_handle); 93 } 94 } 95 96 return 0; 97 } 98 99 void qed_roce_stop(struct qed_hwfn *p_hwfn) 100 { 101 struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map; 102 int wait_count = 0; 103 104 /* when destroying a_RoCE QP the control is returned to the user after 105 * the synchronous part. The asynchronous part may take a little longer. 106 * We delay for a short while if an async destroy QP is still expected. 107 * Beyond the added delay we clear the bitmap anyway. 108 */ 109 while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) { 110 msleep(100); 111 if (wait_count++ > 20) { 112 DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n"); 113 break; 114 } 115 } 116 qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE); 117 } 118 119 static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid, 120 __le32 *dst_gid) 121 { 122 u32 i; 123 124 if (qp->roce_mode == ROCE_V2_IPV4) { 125 /* The IPv4 addresses shall be aligned to the highest word. 126 * The lower words must be zero. 127 */ 128 memset(src_gid, 0, sizeof(union qed_gid)); 129 memset(dst_gid, 0, sizeof(union qed_gid)); 130 src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr); 131 dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr); 132 } else { 133 /* GIDs and IPv6 addresses coincide in location and size */ 134 for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) { 135 src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]); 136 dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]); 137 } 138 } 139 } 140 141 static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode) 142 { 143 switch (roce_mode) { 144 case ROCE_V1: 145 return PLAIN_ROCE; 146 case ROCE_V2_IPV4: 147 return RROCE_IPV4; 148 case ROCE_V2_IPV6: 149 return RROCE_IPV6; 150 default: 151 return MAX_ROCE_FLAVOR; 152 } 153 } 154 155 static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid) 156 { 157 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 158 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); 159 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1); 160 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 161 } 162 163 int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid) 164 { 165 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; 166 u32 responder_icid; 167 u32 requester_icid; 168 int rc; 169 170 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 171 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map, 172 &responder_icid); 173 if (rc) { 174 spin_unlock_bh(&p_rdma_info->lock); 175 return rc; 176 } 177 178 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map, 179 &requester_icid); 180 181 spin_unlock_bh(&p_rdma_info->lock); 182 if (rc) 183 goto err; 184 185 /* the two icid's should be adjacent */ 186 if ((requester_icid - responder_icid) != 1) { 187 DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's'\n"); 188 rc = -EINVAL; 189 goto err; 190 } 191 192 responder_icid += qed_cxt_get_proto_cid_start(p_hwfn, 193 p_rdma_info->proto); 194 requester_icid += qed_cxt_get_proto_cid_start(p_hwfn, 195 p_rdma_info->proto); 196 197 /* If these icids require a new ILT line allocate DMA-able context for 198 * an ILT page 199 */ 200 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid); 201 if (rc) 202 goto err; 203 204 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid); 205 if (rc) 206 goto err; 207 208 *cid = (u16)responder_icid; 209 return rc; 210 211 err: 212 spin_lock_bh(&p_rdma_info->lock); 213 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid); 214 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid); 215 216 spin_unlock_bh(&p_rdma_info->lock); 217 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 218 "Allocate CID - failed, rc = %d\n", rc); 219 return rc; 220 } 221 222 static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid) 223 { 224 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 225 qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid); 226 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 227 } 228 229 static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) 230 { 231 u8 pri, tc = 0; 232 233 if (qp->vlan_id) { 234 pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 235 tc = qed_dcbx_get_priority_tc(p_hwfn, pri); 236 } 237 238 DP_VERBOSE(p_hwfn, QED_MSG_SP, 239 "qp icid %u tc: %u (vlan priority %s)\n", 240 qp->icid, tc, qp->vlan_id ? "enabled" : "disabled"); 241 242 return tc; 243 } 244 245 static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, 246 struct qed_rdma_qp *qp) 247 { 248 struct roce_create_qp_resp_ramrod_data *p_ramrod; 249 u16 regular_latency_queue, low_latency_queue; 250 struct qed_sp_init_data init_data; 251 enum roce_flavor roce_flavor; 252 struct qed_spq_entry *p_ent; 253 enum protocol_type proto; 254 int rc; 255 u8 tc; 256 257 if (!qp->has_resp) 258 return 0; 259 260 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 261 262 /* Allocate DMA-able memory for IRQ */ 263 qp->irq_num_pages = 1; 264 qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 265 RDMA_RING_PAGE_SIZE, 266 &qp->irq_phys_addr, GFP_KERNEL); 267 if (!qp->irq) { 268 rc = -ENOMEM; 269 DP_NOTICE(p_hwfn, 270 "qed create responder failed: cannot allocate memory (irq). rc = %d\n", 271 rc); 272 return rc; 273 } 274 275 /* Get SPQ entry */ 276 memset(&init_data, 0, sizeof(init_data)); 277 init_data.cid = qp->icid; 278 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 279 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 280 281 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP, 282 PROTOCOLID_ROCE, &init_data); 283 if (rc) 284 goto err; 285 286 p_ramrod = &p_ent->ramrod.roce_create_qp_resp; 287 288 p_ramrod->flags = 0; 289 290 roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode); 291 SET_FIELD(p_ramrod->flags, 292 ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor); 293 294 SET_FIELD(p_ramrod->flags, 295 ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN, 296 qp->incoming_rdma_read_en); 297 298 SET_FIELD(p_ramrod->flags, 299 ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN, 300 qp->incoming_rdma_write_en); 301 302 SET_FIELD(p_ramrod->flags, 303 ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN, 304 qp->incoming_atomic_en); 305 306 SET_FIELD(p_ramrod->flags, 307 ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN, 308 qp->e2e_flow_control_en); 309 310 SET_FIELD(p_ramrod->flags, 311 ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq); 312 313 SET_FIELD(p_ramrod->flags, 314 ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN, 315 qp->fmr_and_reserved_lkey); 316 317 SET_FIELD(p_ramrod->flags, 318 ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER, 319 qp->min_rnr_nak_timer); 320 321 SET_FIELD(p_ramrod->flags, 322 ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG, 323 qed_rdma_is_xrc_qp(qp)); 324 325 p_ramrod->max_ird = qp->max_rd_atomic_resp; 326 p_ramrod->traffic_class = qp->traffic_class_tos; 327 p_ramrod->hop_limit = qp->hop_limit_ttl; 328 p_ramrod->irq_num_pages = qp->irq_num_pages; 329 p_ramrod->p_key = cpu_to_le16(qp->pkey); 330 p_ramrod->flow_label = cpu_to_le32(qp->flow_label); 331 p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp); 332 p_ramrod->mtu = cpu_to_le16(qp->mtu); 333 p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn); 334 p_ramrod->pd = cpu_to_le16(qp->pd); 335 p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages); 336 DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr); 337 DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr); 338 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); 339 p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi); 340 p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo); 341 p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); 342 p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); 343 p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | 344 qp->rq_cq_id); 345 p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id); 346 347 tc = qed_roce_get_qp_tc(p_hwfn, qp); 348 regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc); 349 low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc); 350 DP_VERBOSE(p_hwfn, QED_MSG_SP, 351 "qp icid %u pqs: regular_latency %u low_latency %u\n", 352 qp->icid, regular_latency_queue - CM_TX_PQ_BASE, 353 low_latency_queue - CM_TX_PQ_BASE); 354 p_ramrod->regular_latency_phy_queue = 355 cpu_to_le16(regular_latency_queue); 356 p_ramrod->low_latency_phy_queue = 357 cpu_to_le16(low_latency_queue); 358 359 p_ramrod->dpi = cpu_to_le16(qp->dpi); 360 361 qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr); 362 qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr); 363 364 p_ramrod->udp_src_port = qp->udp_src_port; 365 p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id); 366 p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id); 367 p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid); 368 369 p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + 370 qp->stats_queue; 371 372 rc = qed_spq_post(p_hwfn, p_ent, NULL); 373 if (rc) 374 goto err; 375 376 qp->resp_offloaded = true; 377 qp->cq_prod = 0; 378 379 proto = p_hwfn->p_rdma_info->proto; 380 qed_roce_set_real_cid(p_hwfn, qp->icid - 381 qed_cxt_get_proto_cid_start(p_hwfn, proto)); 382 383 return rc; 384 385 err: 386 DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc); 387 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 388 qp->irq_num_pages * RDMA_RING_PAGE_SIZE, 389 qp->irq, qp->irq_phys_addr); 390 391 return rc; 392 } 393 394 static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, 395 struct qed_rdma_qp *qp) 396 { 397 struct roce_create_qp_req_ramrod_data *p_ramrod; 398 u16 regular_latency_queue, low_latency_queue; 399 struct qed_sp_init_data init_data; 400 enum roce_flavor roce_flavor; 401 struct qed_spq_entry *p_ent; 402 enum protocol_type proto; 403 int rc; 404 u8 tc; 405 406 if (!qp->has_req) 407 return 0; 408 409 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 410 411 /* Allocate DMA-able memory for ORQ */ 412 qp->orq_num_pages = 1; 413 qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 414 RDMA_RING_PAGE_SIZE, 415 &qp->orq_phys_addr, GFP_KERNEL); 416 if (!qp->orq) { 417 rc = -ENOMEM; 418 DP_NOTICE(p_hwfn, 419 "qed create requester failed: cannot allocate memory (orq). rc = %d\n", 420 rc); 421 return rc; 422 } 423 424 /* Get SPQ entry */ 425 memset(&init_data, 0, sizeof(init_data)); 426 init_data.cid = qp->icid + 1; 427 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 428 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 429 430 rc = qed_sp_init_request(p_hwfn, &p_ent, 431 ROCE_RAMROD_CREATE_QP, 432 PROTOCOLID_ROCE, &init_data); 433 if (rc) 434 goto err; 435 436 p_ramrod = &p_ent->ramrod.roce_create_qp_req; 437 438 p_ramrod->flags = 0; 439 440 roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode); 441 SET_FIELD(p_ramrod->flags, 442 ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor); 443 444 SET_FIELD(p_ramrod->flags, 445 ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN, 446 qp->fmr_and_reserved_lkey); 447 448 SET_FIELD(p_ramrod->flags, 449 ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all); 450 451 SET_FIELD(p_ramrod->flags, 452 ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt); 453 454 SET_FIELD(p_ramrod->flags, 455 ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, 456 qp->rnr_retry_cnt); 457 458 SET_FIELD(p_ramrod->flags, 459 ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG, 460 qed_rdma_is_xrc_qp(qp)); 461 462 SET_FIELD(p_ramrod->flags2, 463 ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE, qp->edpm_mode); 464 465 p_ramrod->max_ord = qp->max_rd_atomic_req; 466 p_ramrod->traffic_class = qp->traffic_class_tos; 467 p_ramrod->hop_limit = qp->hop_limit_ttl; 468 p_ramrod->orq_num_pages = qp->orq_num_pages; 469 p_ramrod->p_key = cpu_to_le16(qp->pkey); 470 p_ramrod->flow_label = cpu_to_le32(qp->flow_label); 471 p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp); 472 p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout); 473 p_ramrod->mtu = cpu_to_le16(qp->mtu); 474 p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn); 475 p_ramrod->pd = cpu_to_le16(qp->pd); 476 p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages); 477 DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr); 478 DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr); 479 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); 480 p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi); 481 p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo); 482 p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); 483 p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); 484 p_ramrod->cq_cid = 485 cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id); 486 487 tc = qed_roce_get_qp_tc(p_hwfn, qp); 488 regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc); 489 low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc); 490 DP_VERBOSE(p_hwfn, QED_MSG_SP, 491 "qp icid %u pqs: regular_latency %u low_latency %u\n", 492 qp->icid, regular_latency_queue - CM_TX_PQ_BASE, 493 low_latency_queue - CM_TX_PQ_BASE); 494 p_ramrod->regular_latency_phy_queue = 495 cpu_to_le16(regular_latency_queue); 496 p_ramrod->low_latency_phy_queue = 497 cpu_to_le16(low_latency_queue); 498 499 p_ramrod->dpi = cpu_to_le16(qp->dpi); 500 501 qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr); 502 qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr); 503 504 p_ramrod->udp_src_port = qp->udp_src_port; 505 p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id); 506 p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + 507 qp->stats_queue; 508 509 rc = qed_spq_post(p_hwfn, p_ent, NULL); 510 if (rc) 511 goto err; 512 513 qp->req_offloaded = true; 514 proto = p_hwfn->p_rdma_info->proto; 515 qed_roce_set_real_cid(p_hwfn, 516 qp->icid + 1 - 517 qed_cxt_get_proto_cid_start(p_hwfn, proto)); 518 519 return rc; 520 521 err: 522 DP_NOTICE(p_hwfn, "Create requested - failed, rc = %d\n", rc); 523 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 524 qp->orq_num_pages * RDMA_RING_PAGE_SIZE, 525 qp->orq, qp->orq_phys_addr); 526 return rc; 527 } 528 529 static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn, 530 struct qed_rdma_qp *qp, 531 bool move_to_err, u32 modify_flags) 532 { 533 struct roce_modify_qp_resp_ramrod_data *p_ramrod; 534 struct qed_sp_init_data init_data; 535 struct qed_spq_entry *p_ent; 536 int rc; 537 538 if (!qp->has_resp) 539 return 0; 540 541 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 542 543 if (move_to_err && !qp->resp_offloaded) 544 return 0; 545 546 /* Get SPQ entry */ 547 memset(&init_data, 0, sizeof(init_data)); 548 init_data.cid = qp->icid; 549 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 550 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 551 552 rc = qed_sp_init_request(p_hwfn, &p_ent, 553 ROCE_EVENT_MODIFY_QP, 554 PROTOCOLID_ROCE, &init_data); 555 if (rc) { 556 DP_NOTICE(p_hwfn, "rc = %d\n", rc); 557 return rc; 558 } 559 560 p_ramrod = &p_ent->ramrod.roce_modify_qp_resp; 561 562 p_ramrod->flags = 0; 563 564 SET_FIELD(p_ramrod->flags, 565 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err); 566 567 SET_FIELD(p_ramrod->flags, 568 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN, 569 qp->incoming_rdma_read_en); 570 571 SET_FIELD(p_ramrod->flags, 572 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN, 573 qp->incoming_rdma_write_en); 574 575 SET_FIELD(p_ramrod->flags, 576 ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN, 577 qp->incoming_atomic_en); 578 579 SET_FIELD(p_ramrod->flags, 580 ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN, 581 qp->e2e_flow_control_en); 582 583 SET_FIELD(p_ramrod->flags, 584 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG, 585 GET_FIELD(modify_flags, 586 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)); 587 588 SET_FIELD(p_ramrod->flags, 589 ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG, 590 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)); 591 592 SET_FIELD(p_ramrod->flags, 593 ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG, 594 GET_FIELD(modify_flags, 595 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)); 596 597 SET_FIELD(p_ramrod->flags, 598 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG, 599 GET_FIELD(modify_flags, 600 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP)); 601 602 SET_FIELD(p_ramrod->flags, 603 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG, 604 GET_FIELD(modify_flags, 605 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER)); 606 607 p_ramrod->fields = 0; 608 SET_FIELD(p_ramrod->fields, 609 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER, 610 qp->min_rnr_nak_timer); 611 612 p_ramrod->max_ird = qp->max_rd_atomic_resp; 613 p_ramrod->traffic_class = qp->traffic_class_tos; 614 p_ramrod->hop_limit = qp->hop_limit_ttl; 615 p_ramrod->p_key = cpu_to_le16(qp->pkey); 616 p_ramrod->flow_label = cpu_to_le32(qp->flow_label); 617 p_ramrod->mtu = cpu_to_le16(qp->mtu); 618 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); 619 rc = qed_spq_post(p_hwfn, p_ent, NULL); 620 621 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc); 622 return rc; 623 } 624 625 static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn, 626 struct qed_rdma_qp *qp, 627 bool move_to_sqd, 628 bool move_to_err, u32 modify_flags) 629 { 630 struct roce_modify_qp_req_ramrod_data *p_ramrod; 631 struct qed_sp_init_data init_data; 632 struct qed_spq_entry *p_ent; 633 int rc; 634 635 if (!qp->has_req) 636 return 0; 637 638 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 639 640 if (move_to_err && !(qp->req_offloaded)) 641 return 0; 642 643 /* Get SPQ entry */ 644 memset(&init_data, 0, sizeof(init_data)); 645 init_data.cid = qp->icid + 1; 646 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 647 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 648 649 rc = qed_sp_init_request(p_hwfn, &p_ent, 650 ROCE_EVENT_MODIFY_QP, 651 PROTOCOLID_ROCE, &init_data); 652 if (rc) { 653 DP_NOTICE(p_hwfn, "rc = %d\n", rc); 654 return rc; 655 } 656 657 p_ramrod = &p_ent->ramrod.roce_modify_qp_req; 658 659 p_ramrod->flags = 0; 660 661 SET_FIELD(p_ramrod->flags, 662 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err); 663 664 SET_FIELD(p_ramrod->flags, 665 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd); 666 667 SET_FIELD(p_ramrod->flags, 668 ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY, 669 qp->sqd_async); 670 671 SET_FIELD(p_ramrod->flags, 672 ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG, 673 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)); 674 675 SET_FIELD(p_ramrod->flags, 676 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG, 677 GET_FIELD(modify_flags, 678 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)); 679 680 SET_FIELD(p_ramrod->flags, 681 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG, 682 GET_FIELD(modify_flags, 683 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ)); 684 685 SET_FIELD(p_ramrod->flags, 686 ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG, 687 GET_FIELD(modify_flags, 688 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT)); 689 690 SET_FIELD(p_ramrod->flags, 691 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG, 692 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT)); 693 694 SET_FIELD(p_ramrod->flags, 695 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG, 696 GET_FIELD(modify_flags, 697 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT)); 698 699 p_ramrod->fields = 0; 700 SET_FIELD(p_ramrod->fields, 701 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt); 702 703 SET_FIELD(p_ramrod->fields, 704 ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, 705 qp->rnr_retry_cnt); 706 707 p_ramrod->max_ord = qp->max_rd_atomic_req; 708 p_ramrod->traffic_class = qp->traffic_class_tos; 709 p_ramrod->hop_limit = qp->hop_limit_ttl; 710 p_ramrod->p_key = cpu_to_le16(qp->pkey); 711 p_ramrod->flow_label = cpu_to_le32(qp->flow_label); 712 p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout); 713 p_ramrod->mtu = cpu_to_le16(qp->mtu); 714 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); 715 rc = qed_spq_post(p_hwfn, p_ent, NULL); 716 717 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc); 718 return rc; 719 } 720 721 static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, 722 struct qed_rdma_qp *qp, 723 u32 *cq_prod) 724 { 725 struct roce_destroy_qp_resp_output_params *p_ramrod_res; 726 struct roce_destroy_qp_resp_ramrod_data *p_ramrod; 727 struct qed_sp_init_data init_data; 728 struct qed_spq_entry *p_ent; 729 dma_addr_t ramrod_res_phys; 730 int rc; 731 732 if (!qp->has_resp) { 733 *cq_prod = 0; 734 return 0; 735 } 736 737 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 738 *cq_prod = qp->cq_prod; 739 740 if (!qp->resp_offloaded) { 741 /* If a responder was never offload, we need to free the cids 742 * allocated in create_qp as a FW async event will never arrive 743 */ 744 u32 cid; 745 746 cid = qp->icid - 747 qed_cxt_get_proto_cid_start(p_hwfn, 748 p_hwfn->p_rdma_info->proto); 749 qed_roce_free_cid_pair(p_hwfn, (u16)cid); 750 751 return 0; 752 } 753 754 /* Get SPQ entry */ 755 memset(&init_data, 0, sizeof(init_data)); 756 init_data.cid = qp->icid; 757 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 758 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 759 760 rc = qed_sp_init_request(p_hwfn, &p_ent, 761 ROCE_RAMROD_DESTROY_QP, 762 PROTOCOLID_ROCE, &init_data); 763 if (rc) 764 return rc; 765 766 p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp; 767 768 p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 769 sizeof(*p_ramrod_res), 770 &ramrod_res_phys, GFP_KERNEL); 771 772 if (!p_ramrod_res) { 773 rc = -ENOMEM; 774 DP_NOTICE(p_hwfn, 775 "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n", 776 rc); 777 qed_sp_destroy_request(p_hwfn, p_ent); 778 return rc; 779 } 780 781 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys); 782 783 rc = qed_spq_post(p_hwfn, p_ent, NULL); 784 if (rc) 785 goto err; 786 787 *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod); 788 qp->cq_prod = *cq_prod; 789 790 /* Free IRQ - only if ramrod succeeded, in case FW is still using it */ 791 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 792 qp->irq_num_pages * RDMA_RING_PAGE_SIZE, 793 qp->irq, qp->irq_phys_addr); 794 795 qp->resp_offloaded = false; 796 797 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc); 798 799 err: 800 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 801 sizeof(struct roce_destroy_qp_resp_output_params), 802 p_ramrod_res, ramrod_res_phys); 803 804 return rc; 805 } 806 807 static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn, 808 struct qed_rdma_qp *qp) 809 { 810 struct roce_destroy_qp_req_output_params *p_ramrod_res; 811 struct roce_destroy_qp_req_ramrod_data *p_ramrod; 812 struct qed_sp_init_data init_data; 813 struct qed_spq_entry *p_ent; 814 dma_addr_t ramrod_res_phys; 815 int rc = -ENOMEM; 816 817 if (!qp->has_req) 818 return 0; 819 820 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 821 822 if (!qp->req_offloaded) 823 return 0; 824 825 p_ramrod_res = (struct roce_destroy_qp_req_output_params *) 826 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 827 sizeof(*p_ramrod_res), 828 &ramrod_res_phys, GFP_KERNEL); 829 if (!p_ramrod_res) { 830 DP_NOTICE(p_hwfn, 831 "qed destroy requester failed: cannot allocate memory (ramrod)\n"); 832 return rc; 833 } 834 835 /* Get SPQ entry */ 836 memset(&init_data, 0, sizeof(init_data)); 837 init_data.cid = qp->icid + 1; 838 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 839 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 840 841 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP, 842 PROTOCOLID_ROCE, &init_data); 843 if (rc) 844 goto err; 845 846 p_ramrod = &p_ent->ramrod.roce_destroy_qp_req; 847 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys); 848 849 rc = qed_spq_post(p_hwfn, p_ent, NULL); 850 if (rc) 851 goto err; 852 853 854 /* Free ORQ - only if ramrod succeeded, in case FW is still using it */ 855 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 856 qp->orq_num_pages * RDMA_RING_PAGE_SIZE, 857 qp->orq, qp->orq_phys_addr); 858 859 qp->req_offloaded = false; 860 861 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc); 862 863 err: 864 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res), 865 p_ramrod_res, ramrod_res_phys); 866 867 return rc; 868 } 869 870 int qed_roce_query_qp(struct qed_hwfn *p_hwfn, 871 struct qed_rdma_qp *qp, 872 struct qed_rdma_query_qp_out_params *out_params) 873 { 874 struct roce_query_qp_resp_output_params *p_resp_ramrod_res; 875 struct roce_query_qp_req_output_params *p_req_ramrod_res; 876 struct roce_query_qp_resp_ramrod_data *p_resp_ramrod; 877 struct roce_query_qp_req_ramrod_data *p_req_ramrod; 878 struct qed_sp_init_data init_data; 879 dma_addr_t resp_ramrod_res_phys; 880 dma_addr_t req_ramrod_res_phys; 881 struct qed_spq_entry *p_ent; 882 bool rq_err_state; 883 bool sq_err_state; 884 bool sq_draining; 885 int rc = -ENOMEM; 886 887 if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) { 888 /* We can't send ramrod to the fw since this qp wasn't offloaded 889 * to the fw yet 890 */ 891 out_params->draining = false; 892 out_params->rq_psn = qp->rq_psn; 893 out_params->sq_psn = qp->sq_psn; 894 out_params->state = qp->cur_state; 895 896 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n"); 897 return 0; 898 } 899 900 if (!(qp->resp_offloaded)) { 901 DP_NOTICE(p_hwfn, 902 "The responder's qp should be offloaded before requester's\n"); 903 return -EINVAL; 904 } 905 906 /* Send a query responder ramrod to FW to get RQ-PSN and state */ 907 p_resp_ramrod_res = 908 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 909 sizeof(*p_resp_ramrod_res), 910 &resp_ramrod_res_phys, GFP_KERNEL); 911 if (!p_resp_ramrod_res) { 912 DP_NOTICE(p_hwfn, 913 "qed query qp failed: cannot allocate memory (ramrod)\n"); 914 return rc; 915 } 916 917 /* Get SPQ entry */ 918 memset(&init_data, 0, sizeof(init_data)); 919 init_data.cid = qp->icid; 920 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 921 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 922 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP, 923 PROTOCOLID_ROCE, &init_data); 924 if (rc) 925 goto err_resp; 926 927 p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp; 928 DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys); 929 930 rc = qed_spq_post(p_hwfn, p_ent, NULL); 931 if (rc) 932 goto err_resp; 933 934 out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn); 935 rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags), 936 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG); 937 938 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res), 939 p_resp_ramrod_res, resp_ramrod_res_phys); 940 941 if (!(qp->req_offloaded)) { 942 /* Don't send query qp for the requester */ 943 out_params->sq_psn = qp->sq_psn; 944 out_params->draining = false; 945 946 if (rq_err_state) 947 qp->cur_state = QED_ROCE_QP_STATE_ERR; 948 949 out_params->state = qp->cur_state; 950 951 return 0; 952 } 953 954 /* Send a query requester ramrod to FW to get SQ-PSN and state */ 955 p_req_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 956 sizeof(*p_req_ramrod_res), 957 &req_ramrod_res_phys, 958 GFP_KERNEL); 959 if (!p_req_ramrod_res) { 960 rc = -ENOMEM; 961 DP_NOTICE(p_hwfn, 962 "qed query qp failed: cannot allocate memory (ramrod)\n"); 963 return rc; 964 } 965 966 /* Get SPQ entry */ 967 init_data.cid = qp->icid + 1; 968 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP, 969 PROTOCOLID_ROCE, &init_data); 970 if (rc) 971 goto err_req; 972 973 p_req_ramrod = &p_ent->ramrod.roce_query_qp_req; 974 DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys); 975 976 rc = qed_spq_post(p_hwfn, p_ent, NULL); 977 if (rc) 978 goto err_req; 979 980 out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn); 981 sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags), 982 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG); 983 sq_draining = 984 GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags), 985 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG); 986 987 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res), 988 p_req_ramrod_res, req_ramrod_res_phys); 989 990 out_params->draining = false; 991 992 if (rq_err_state || sq_err_state) 993 qp->cur_state = QED_ROCE_QP_STATE_ERR; 994 else if (sq_draining) 995 out_params->draining = true; 996 out_params->state = qp->cur_state; 997 998 return 0; 999 1000 err_req: 1001 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res), 1002 p_req_ramrod_res, req_ramrod_res_phys); 1003 return rc; 1004 err_resp: 1005 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res), 1006 p_resp_ramrod_res, resp_ramrod_res_phys); 1007 return rc; 1008 } 1009 1010 int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) 1011 { 1012 u32 cq_prod; 1013 int rc; 1014 1015 /* Destroys the specified QP */ 1016 if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) && 1017 (qp->cur_state != QED_ROCE_QP_STATE_ERR) && 1018 (qp->cur_state != QED_ROCE_QP_STATE_INIT)) { 1019 DP_NOTICE(p_hwfn, 1020 "QP must be in error, reset or init state before destroying it\n"); 1021 return -EINVAL; 1022 } 1023 1024 if (qp->cur_state != QED_ROCE_QP_STATE_RESET) { 1025 rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, 1026 &cq_prod); 1027 if (rc) 1028 return rc; 1029 1030 /* Send destroy requester ramrod */ 1031 rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp); 1032 if (rc) 1033 return rc; 1034 } 1035 1036 return 0; 1037 } 1038 1039 int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, 1040 struct qed_rdma_qp *qp, 1041 enum qed_roce_qp_state prev_state, 1042 struct qed_rdma_modify_qp_in_params *params) 1043 { 1044 int rc = 0; 1045 1046 /* Perform additional operations according to the current state and the 1047 * next state 1048 */ 1049 if (((prev_state == QED_ROCE_QP_STATE_INIT) || 1050 (prev_state == QED_ROCE_QP_STATE_RESET)) && 1051 (qp->cur_state == QED_ROCE_QP_STATE_RTR)) { 1052 /* Init->RTR or Reset->RTR */ 1053 rc = qed_roce_sp_create_responder(p_hwfn, qp); 1054 return rc; 1055 } else if ((prev_state == QED_ROCE_QP_STATE_RTR) && 1056 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) { 1057 /* RTR-> RTS */ 1058 rc = qed_roce_sp_create_requester(p_hwfn, qp); 1059 if (rc) 1060 return rc; 1061 1062 /* Send modify responder ramrod */ 1063 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, 1064 params->modify_flags); 1065 return rc; 1066 } else if ((prev_state == QED_ROCE_QP_STATE_RTS) && 1067 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) { 1068 /* RTS->RTS */ 1069 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, 1070 params->modify_flags); 1071 if (rc) 1072 return rc; 1073 1074 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false, 1075 params->modify_flags); 1076 return rc; 1077 } else if ((prev_state == QED_ROCE_QP_STATE_RTS) && 1078 (qp->cur_state == QED_ROCE_QP_STATE_SQD)) { 1079 /* RTS->SQD */ 1080 rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false, 1081 params->modify_flags); 1082 return rc; 1083 } else if ((prev_state == QED_ROCE_QP_STATE_SQD) && 1084 (qp->cur_state == QED_ROCE_QP_STATE_SQD)) { 1085 /* SQD->SQD */ 1086 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, 1087 params->modify_flags); 1088 if (rc) 1089 return rc; 1090 1091 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false, 1092 params->modify_flags); 1093 return rc; 1094 } else if ((prev_state == QED_ROCE_QP_STATE_SQD) && 1095 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) { 1096 /* SQD->RTS */ 1097 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, 1098 params->modify_flags); 1099 if (rc) 1100 return rc; 1101 1102 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false, 1103 params->modify_flags); 1104 1105 return rc; 1106 } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) { 1107 /* ->ERR */ 1108 rc = qed_roce_sp_modify_responder(p_hwfn, qp, true, 1109 params->modify_flags); 1110 if (rc) 1111 return rc; 1112 1113 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true, 1114 params->modify_flags); 1115 return rc; 1116 } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) { 1117 /* Any state -> RESET */ 1118 u32 cq_prod; 1119 1120 /* Send destroy responder ramrod */ 1121 rc = qed_roce_sp_destroy_qp_responder(p_hwfn, 1122 qp, 1123 &cq_prod); 1124 1125 if (rc) 1126 return rc; 1127 1128 qp->cq_prod = cq_prod; 1129 1130 rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp); 1131 } else { 1132 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n"); 1133 } 1134 1135 return rc; 1136 } 1137 1138 static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid) 1139 { 1140 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; 1141 u32 start_cid, cid, xcid; 1142 1143 /* an even icid belongs to a responder while an odd icid belongs to a 1144 * requester. The 'cid' received as an input can be either. We calculate 1145 * the "partner" icid and call it xcid. Only if both are free then the 1146 * "cid" map can be cleared. 1147 */ 1148 start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto); 1149 cid = icid - start_cid; 1150 xcid = cid ^ 1; 1151 1152 spin_lock_bh(&p_rdma_info->lock); 1153 1154 qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid); 1155 if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) { 1156 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid); 1157 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid); 1158 } 1159 1160 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 1161 } 1162 1163 void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1164 { 1165 u8 val; 1166 1167 /* if any QPs are already active, we want to disable DPM, since their 1168 * context information contains information from before the latest DCBx 1169 * update. Otherwise enable it. 1170 */ 1171 val = qed_rdma_allocated_qps(p_hwfn) ? true : false; 1172 p_hwfn->dcbx_no_edpm = (u8)val; 1173 1174 qed_rdma_dpm_conf(p_hwfn, p_ptt); 1175 } 1176 1177 int qed_roce_setup(struct qed_hwfn *p_hwfn) 1178 { 1179 return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE, 1180 qed_roce_async_event); 1181 } 1182 1183 int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1184 { 1185 u32 ll2_ethertype_en; 1186 1187 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); 1188 1189 p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE; 1190 1191 ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); 1192 qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN, 1193 (ll2_ethertype_en | 0x01)); 1194 1195 if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) { 1196 DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n"); 1197 return -EINVAL; 1198 } 1199 1200 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n"); 1201 return 0; 1202 } 1203