/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/if_vlan.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include <linux/qed/qed_rdma_if.h>
#include "qed_rdma.h"
#include "qed_roce.h"
#include "qed_sp.h"

static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);

static int
qed_roce_async_event(struct qed_hwfn *p_hwfn,
		     u8 fw_event_code,
		     u16 echo, union event_ring_data *data, u8 fw_return_code)
{
	struct qed_rdma_events events = p_hwfn->p_rdma_info->events;

	if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
		u16 icid =
		    (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid);

		/* icid release in this async event can occur only if the icid
		 * was offloaded to the FW. In case it wasn't offloaded this is
		 * handled in qed_roce_sp_destroy_qp.
		 */
		qed_roce_free_real_icid(p_hwfn, icid);
	} else {
		if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY ||
		    fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) {
			u16 srq_id = (u16)data->rdma_data.async_handle.lo;

			events.affiliated_event(events.context, fw_event_code,
						&srq_id);
		} else {
			union rdma_eqe_data rdata = data->rdma_data;

			events.affiliated_event(events.context, fw_event_code,
						(void *)&rdata.async_handle);
		}
	}

	return 0;
}

void qed_roce_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
	int wait_count = 0;

	/* when destroying a RoCE QP the control is returned to the user after
	 * the synchronous part. The asynchronous part may take a little longer.
	 * We delay for a short while if an async destroy QP is still expected.
	 * Beyond the added delay we clear the bitmap anyway.
	 */
	while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
		msleep(100);
		if (wait_count++ > 20) {
			DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
			break;
		}
	}
	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
}

static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
			       __le32 *dst_gid)
{
	u32 i;

	if (qp->roce_mode == ROCE_V2_IPV4) {
		/* The IPv4 addresses shall be aligned to the highest word.
		 * The lower words must be zero.
		 */
		memset(src_gid, 0, sizeof(union qed_gid));
		memset(dst_gid, 0, sizeof(union qed_gid));
		src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
		dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
	} else {
		/* GIDs and IPv6 addresses coincide in location and size */
		for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
			src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
			dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
		}
	}
}

static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
{
	enum roce_flavor flavor;

	switch (roce_mode) {
	case ROCE_V1:
		flavor = PLAIN_ROCE;
		break;
	case ROCE_V2_IPV4:
		flavor = RROCE_IPV4;
		break;
	case ROCE_V2_IPV6:
		flavor = RROCE_IPV6;
		break;
	default:
		flavor = MAX_ROCE_FLAVOR;
		break;
	}
	return flavor;
}

static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
{
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 responder_icid;
	u32 requester_icid;
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
				    &responder_icid);
	if (rc) {
		spin_unlock_bh(&p_rdma_info->lock);
		return rc;
	}

	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
				    &requester_icid);

	spin_unlock_bh(&p_rdma_info->lock);
	if (rc)
		goto err;

	/* the two icid's should be adjacent */
	if ((requester_icid - responder_icid) != 1) {
		DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's\n");
		rc = -EINVAL;
		goto err;
	}

	responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
						      p_rdma_info->proto);
	requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
						      p_rdma_info->proto);

	/* If these icids require a new ILT line allocate DMA-able context for
	 * an ILT page
	 */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
	if (rc)
		goto err;

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
	if (rc)
		goto err;

	*cid = (u16)responder_icid;
	return rc;

err:
	spin_lock_bh(&p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);

	spin_unlock_bh(&p_rdma_info->lock);
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "Allocate CID - failed, rc = %d\n", rc);
	return rc;
}

static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	u8 pri, tc = 0;

	if (qp->vlan_id) {
		pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		tc = qed_dcbx_get_priority_tc(p_hwfn, pri);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "qp icid %u tc: %u (vlan priority %s)\n",
		   qp->icid, tc, qp->vlan_id ? "enabled" : "disabled");

	return tc;
}

static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp)
{
	struct roce_create_qp_resp_ramrod_data *p_ramrod;
	u16 regular_latency_queue, low_latency_queue;
	struct qed_sp_init_data init_data;
	enum roce_flavor roce_flavor;
	struct qed_spq_entry *p_ent;
	enum protocol_type proto;
	int rc;
	u8 tc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for IRQ */
	qp->irq_num_pages = 1;
	qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     RDMA_RING_PAGE_SIZE,
				     &qp->irq_phys_addr, GFP_KERNEL);
	if (!qp->irq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_create_qp_resp;

	p_ramrod->flags = 0;

	roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->irq_num_pages = qp->irq_num_pages;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
				       qp->rq_cq_id);

	tc = qed_roce_get_qp_tc(p_hwfn, qp);
	regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
	low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "qp icid %u pqs: regular_latency %u low_latency %u\n",
		   qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
		   low_latency_queue - CM_TX_PQ_BASE);
	p_ramrod->regular_latency_phy_queue =
	    cpu_to_le16(regular_latency_queue);
	p_ramrod->low_latency_phy_queue =
	    cpu_to_le16(low_latency_queue);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = qp->udp_src_port;
	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);

	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	qp->resp_offloaded = true;
	qp->cq_prod = 0;

	proto = p_hwfn->p_rdma_info->proto;
	qed_roce_set_real_cid(p_hwfn, qp->icid -
			      qed_cxt_get_proto_cid_start(p_hwfn, proto));

	return rc;

err:
	DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->irq, qp->irq_phys_addr);

	return rc;
}

static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp)
{
	struct roce_create_qp_req_ramrod_data *p_ramrod;
	u16 regular_latency_queue, low_latency_queue;
	struct qed_sp_init_data init_data;
	enum roce_flavor roce_flavor;
	struct qed_spq_entry *p_ent;
	enum protocol_type proto;
	int rc;
	u8 tc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for ORQ */
	qp->orq_num_pages = 1;
	qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     RDMA_RING_PAGE_SIZE,
				     &qp->orq_phys_addr, GFP_KERNEL);
	if (!qp->orq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_CREATE_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_create_qp_req;

	p_ramrod->flags = 0;

	roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->orq_num_pages = qp->orq_num_pages;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
	p_ramrod->cq_cid =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);

	tc = qed_roce_get_qp_tc(p_hwfn, qp);
	regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
	low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "qp icid %u pqs: regular_latency %u low_latency %u\n",
		   qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
		   low_latency_queue - CM_TX_PQ_BASE);
	p_ramrod->regular_latency_phy_queue =
	    cpu_to_le16(regular_latency_queue);
	p_ramrod->low_latency_phy_queue =
	    cpu_to_le16(low_latency_queue);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = qp->udp_src_port;
	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	qp->req_offloaded = true;
	proto = p_hwfn->p_rdma_info->proto;
	qed_roce_set_real_cid(p_hwfn,
			      qp->icid + 1 -
			      qed_cxt_get_proto_cid_start(p_hwfn, proto));

	return rc;

err:
	DP_NOTICE(p_hwfn, "Create requester - failed, rc = %d\n", rc);
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);
	return rc;
}

static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !qp->resp_offloaded)
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;

	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
	return rc;
}

static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_sqd,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !(qp->req_offloaded))
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_req;

	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
		  qp->sqd_async);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);

	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
	return rc;
}

static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp,
					    u32 *cq_prod)
{
	struct roce_destroy_qp_resp_output_params *p_ramrod_res;
	struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
	*cq_prod = qp->cq_prod;

	if (!qp->resp_offloaded) {
		/* If a responder was never offloaded, we need to free the cids
		 * allocated in create_qp as a FW async event will never arrive
		 */
		u32 cid;

		cid = qp->icid -
		      qed_cxt_get_proto_cid_start(p_hwfn,
						  p_hwfn->p_rdma_info->proto);
		qed_roce_free_cid_pair(p_hwfn, (u16)cid);

		return 0;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;

	p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
			       &ramrod_res_phys, GFP_KERNEL);

	if (!p_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
			  rc);
		return rc;
	}

	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	*cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
	qp->cq_prod = *cq_prod;

	/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->irq, qp->irq_phys_addr);

	qp->resp_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct roce_destroy_qp_resp_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp)
{
	struct roce_destroy_qp_req_output_params *p_ramrod_res;
	struct roce_destroy_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (!qp->req_offloaded)
		return 0;

	p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(*p_ramrod_res),
			       &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy requester failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);

	qp->req_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
		      struct qed_rdma_qp *qp,
		      struct qed_rdma_query_qp_out_params *out_params)
{
	struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
	struct roce_query_qp_req_output_params *p_req_ramrod_res;
	struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
	struct roce_query_qp_req_ramrod_data *p_req_ramrod;
	struct qed_sp_init_data init_data;
	dma_addr_t resp_ramrod_res_phys;
	dma_addr_t req_ramrod_res_phys;
	struct qed_spq_entry *p_ent;
	bool rq_err_state;
	bool sq_err_state;
	bool sq_draining;
	int rc = -ENOMEM;

	if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
		/* We can't send ramrod to the fw since this qp wasn't offloaded
		 * to the fw yet
		 */
		out_params->draining = false;
		out_params->rq_psn = qp->rq_psn;
		out_params->sq_psn = qp->sq_psn;
		out_params->state = qp->cur_state;

		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
		return 0;
	}

	if (!(qp->resp_offloaded)) {
		DP_NOTICE(p_hwfn,
			  "The responder's qp should be offloaded before requester's\n");
		return -EINVAL;
	}

	/* Send a query responder ramrod to FW to get RQ-PSN and state */
	p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(*p_resp_ramrod_res),
			       &resp_ramrod_res_phys, GFP_KERNEL);
	if (!p_resp_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_resp;

	p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
	DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_resp;

	out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
	rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
				 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);

	if (!(qp->req_offloaded)) {
		/* Don't send query qp for the requester */
		out_params->sq_psn = qp->sq_psn;
		out_params->draining = false;

		if (rq_err_state)
			qp->cur_state = QED_ROCE_QP_STATE_ERR;

		out_params->state = qp->cur_state;

		return 0;
	}

	/* Send a query requester ramrod to FW to get SQ-PSN and state */
	p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(*p_req_ramrod_res),
			       &req_ramrod_res_phys,
			       GFP_KERNEL);
	if (!p_req_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	init_data.cid = qp->icid + 1;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_req;

	p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
	DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_req;

	out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
	sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
				 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
	sq_draining =
	    GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
		      ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);

	out_params->draining = false;

	if (rq_err_state || sq_err_state)
		qp->cur_state = QED_ROCE_QP_STATE_ERR;
	else if (sq_draining)
		out_params->draining = true;
	out_params->state = qp->cur_state;

	return 0;

err_req:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);
	return rc;
err_resp:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);
	return rc;
}

int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	u32 cq_prod;
	int rc;

	/* Destroys the specified QP */
	if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
		DP_NOTICE(p_hwfn,
			  "QP must be in error, reset or init state before destroying it\n");
		return -EINVAL;
	}

	if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
						      &cq_prod);
		if (rc)
			return rc;

		/* Send destroy requester ramrod */
		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
		if (rc)
			return rc;
	}

	return 0;
}

int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
		       struct qed_rdma_qp *qp,
		       enum qed_roce_qp_state prev_state,
		       struct qed_rdma_modify_qp_in_params *params)
{
	int rc = 0;

	/* Perform additional operations according to the current state and the
	 * next state
	 */
	if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
	     (prev_state == QED_ROCE_QP_STATE_RESET)) &&
	    (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
		/* Init->RTR or Reset->RTR */
		rc = qed_roce_sp_create_responder(p_hwfn, qp);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTR-> RTS */
		rc = qed_roce_sp_create_requester(p_hwfn, qp);
		if (rc)
			return rc;

		/* Send modify responder ramrod */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTS->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* RTS->SQD */
		rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* SQD->SQD */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* SQD->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);

		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) {
		/* ->ERR */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
						  params->modify_flags);
		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
		/* Any state -> RESET */
		u32 cq_prod;

		/* Send destroy responder ramrod */
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
						      qp,
						      &cq_prod);

		if (rc)
			return rc;

		qp->cq_prod = cq_prod;

		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
	}

	return rc;
}

static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 start_cid, cid, xcid;

	/* an even icid belongs to a responder while an odd icid belongs to a
	 * requester. The 'cid' received as an input can be either. We calculate
	 * the "partner" icid and call it xcid. Only if both are free then the
	 * "cid" map can be cleared.
	 */
	start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
	cid = icid - start_cid;
	xcid = cid ^ 1;

	spin_lock_bh(&p_rdma_info->lock);

	qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
	if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
	}

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u8 val;

	/* if any QPs are already active, we want to disable DPM, since their
	 * context information contains information from before the latest DCBx
	 * update. Otherwise enable it.
	 */
	val = qed_rdma_allocated_qps(p_hwfn) ? true : false;
	p_hwfn->dcbx_no_edpm = (u8)val;

	qed_rdma_dpm_conf(p_hwfn, p_ptt);
}

int qed_roce_setup(struct qed_hwfn *p_hwfn)
{
	return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
					 qed_roce_async_event);
}

int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 ll2_ethertype_en;

	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;

	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en | 0x01));

	if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
		DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
	return 0;
}