1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) 2 /* QLogic qed NIC Driver 3 * Copyright (c) 2015-2017 QLogic Corporation 4 * Copyright (c) 2019-2020 Marvell International Ltd. 5 */ 6 7 #include <linux/types.h> 8 #include <asm/byteorder.h> 9 #include <linux/bitops.h> 10 #include <linux/delay.h> 11 #include <linux/dma-mapping.h> 12 #include <linux/errno.h> 13 #include <linux/io.h> 14 #include <linux/kernel.h> 15 #include <linux/list.h> 16 #include <linux/module.h> 17 #include <linux/mutex.h> 18 #include <linux/pci.h> 19 #include <linux/slab.h> 20 #include <linux/spinlock.h> 21 #include <linux/string.h> 22 #include "qed.h" 23 #include "qed_cxt.h" 24 #include "qed_hsi.h" 25 #include "qed_hw.h" 26 #include "qed_init_ops.h" 27 #include "qed_int.h" 28 #include "qed_ll2.h" 29 #include "qed_mcp.h" 30 #include "qed_reg_addr.h" 31 #include <linux/qed/qed_rdma_if.h> 32 #include "qed_rdma.h" 33 #include "qed_roce.h" 34 #include "qed_sp.h" 35 36 37 int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn, 38 struct qed_bmap *bmap, u32 max_count, char *name) 39 { 40 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count); 41 42 bmap->max_count = max_count; 43 44 bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long), 45 GFP_KERNEL); 46 if (!bmap->bitmap) 47 return -ENOMEM; 48 49 snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name); 50 51 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n"); 52 return 0; 53 } 54 55 int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn, 56 struct qed_bmap *bmap, u32 *id_num) 57 { 58 *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count); 59 if (*id_num >= bmap->max_count) 60 return -EINVAL; 61 62 __set_bit(*id_num, bmap->bitmap); 63 64 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n", 65 bmap->name, *id_num); 66 67 return 0; 68 } 69 70 void qed_bmap_set_id(struct qed_hwfn *p_hwfn, 71 struct qed_bmap *bmap, u32 id_num) 72 { 73 if (id_num >= bmap->max_count) 74 return; 75 76 __set_bit(id_num, bmap->bitmap); 77 } 78 79 void qed_bmap_release_id(struct qed_hwfn *p_hwfn, 80 struct qed_bmap *bmap, u32 id_num) 81 { 82 bool b_acquired; 83 84 if (id_num >= bmap->max_count) 85 return; 86 87 b_acquired = test_and_clear_bit(id_num, bmap->bitmap); 88 if (!b_acquired) { 89 DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n", 90 bmap->name, id_num); 91 return; 92 } 93 94 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n", 95 bmap->name, id_num); 96 } 97 98 int qed_bmap_test_id(struct qed_hwfn *p_hwfn, 99 struct qed_bmap *bmap, u32 id_num) 100 { 101 if (id_num >= bmap->max_count) 102 return -1; 103 104 return test_bit(id_num, bmap->bitmap); 105 } 106 107 static bool qed_bmap_is_empty(struct qed_bmap *bmap) 108 { 109 return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count); 110 } 111 112 static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) 113 { 114 /* First sb id for RoCE is after all the l2 sb */ 115 return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; 116 } 117 118 int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) 119 { 120 struct qed_rdma_info *p_rdma_info; 121 122 p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL); 123 if (!p_rdma_info) 124 return -ENOMEM; 125 126 spin_lock_init(&p_rdma_info->lock); 127 128 p_hwfn->p_rdma_info = p_rdma_info; 129 return 0; 130 } 131 132 void qed_rdma_info_free(struct qed_hwfn *p_hwfn) 133 { 134 kfree(p_hwfn->p_rdma_info); 135 p_hwfn->p_rdma_info = NULL; 136 } 137 138 static int qed_rdma_alloc(struct qed_hwfn *p_hwfn) 139 { 140 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; 141 u32 num_cons, num_tasks; 142 int rc = -ENOMEM; 143 144 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n"); 145 146 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) 147 p_rdma_info->proto = PROTOCOLID_IWARP; 148 else 149 p_rdma_info->proto = PROTOCOLID_ROCE; 150 151 num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 152 NULL); 153 154 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) 155 p_rdma_info->num_qps = num_cons; 156 else 157 p_rdma_info->num_qps = num_cons / 2; /* 2 cids per qp */ 158 159 num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE); 160 161 /* Each MR uses a single task */ 162 p_rdma_info->num_mrs = num_tasks; 163 164 /* Queue zone lines are shared between RoCE and L2 in such a way that 165 * they can be used by each without obstructing the other. 166 */ 167 p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); 168 p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE); 169 170 /* Allocate a struct with device params and fill it */ 171 p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL); 172 if (!p_rdma_info->dev) 173 return rc; 174 175 /* Allocate a struct with port params and fill it */ 176 p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL); 177 if (!p_rdma_info->port) 178 goto free_rdma_dev; 179 180 /* Allocate bit map for pd's */ 181 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS, 182 "PD"); 183 if (rc) { 184 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 185 "Failed to allocate pd_map, rc = %d\n", 186 rc); 187 goto free_rdma_port; 188 } 189 190 /* Allocate bit map for XRC Domains */ 191 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrcd_map, 192 QED_RDMA_MAX_XRCDS, "XRCD"); 193 if (rc) { 194 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 195 "Failed to allocate xrcd_map,rc = %d\n", rc); 196 goto free_pd_map; 197 } 198 199 /* Allocate DPI bitmap */ 200 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map, 201 p_hwfn->dpi_count, "DPI"); 202 if (rc) { 203 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 204 "Failed to allocate DPI bitmap, rc = %d\n", rc); 205 goto free_xrcd_map; 206 } 207 208 /* Allocate bitmap for cq's. The maximum number of CQs is bound to 209 * the number of connections we support. (num_qps in iWARP or 210 * num_qps/2 in RoCE). 211 */ 212 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, num_cons, "CQ"); 213 if (rc) { 214 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 215 "Failed to allocate cq bitmap, rc = %d\n", rc); 216 goto free_dpi_map; 217 } 218 219 /* Allocate bitmap for toggle bit for cq icids 220 * We toggle the bit every time we create or resize cq for a given icid. 221 * Size needs to equal the size of the cq bmap. 222 */ 223 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits, 224 num_cons, "Toggle"); 225 if (rc) { 226 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 227 "Failed to allocate toggle bits, rc = %d\n", rc); 228 goto free_cq_map; 229 } 230 231 /* Allocate bitmap for itids */ 232 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map, 233 p_rdma_info->num_mrs, "MR"); 234 if (rc) { 235 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 236 "Failed to allocate itids bitmaps, rc = %d\n", rc); 237 goto free_toggle_map; 238 } 239 240 /* Allocate bitmap for cids used for qps. */ 241 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons, 242 "CID"); 243 if (rc) { 244 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 245 "Failed to allocate cid bitmap, rc = %d\n", rc); 246 goto free_tid_map; 247 } 248 249 /* Allocate bitmap for cids used for responders/requesters. */ 250 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons, 251 "REAL_CID"); 252 if (rc) { 253 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 254 "Failed to allocate real cid bitmap, rc = %d\n", rc); 255 goto free_cid_map; 256 } 257 258 /* The first SRQ follows the last XRC SRQ. This means that the 259 * SRQ IDs start from an offset equals to max_xrc_srqs. 260 */ 261 p_rdma_info->srq_id_offset = p_hwfn->p_cxt_mngr->xrc_srq_count; 262 rc = qed_rdma_bmap_alloc(p_hwfn, 263 &p_rdma_info->xrc_srq_map, 264 p_hwfn->p_cxt_mngr->xrc_srq_count, "XRC SRQ"); 265 if (rc) { 266 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 267 "Failed to allocate xrc srq bitmap, rc = %d\n", rc); 268 goto free_real_cid_map; 269 } 270 271 /* Allocate bitmap for srqs */ 272 p_rdma_info->num_srqs = p_hwfn->p_cxt_mngr->srq_count; 273 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map, 274 p_rdma_info->num_srqs, "SRQ"); 275 if (rc) { 276 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 277 "Failed to allocate srq bitmap, rc = %d\n", rc); 278 goto free_xrc_srq_map; 279 } 280 281 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) 282 rc = qed_iwarp_alloc(p_hwfn); 283 284 if (rc) 285 goto free_srq_map; 286 287 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n"); 288 return 0; 289 290 free_srq_map: 291 kfree(p_rdma_info->srq_map.bitmap); 292 free_xrc_srq_map: 293 kfree(p_rdma_info->xrc_srq_map.bitmap); 294 free_real_cid_map: 295 kfree(p_rdma_info->real_cid_map.bitmap); 296 free_cid_map: 297 kfree(p_rdma_info->cid_map.bitmap); 298 free_tid_map: 299 kfree(p_rdma_info->tid_map.bitmap); 300 free_toggle_map: 301 kfree(p_rdma_info->toggle_bits.bitmap); 302 free_cq_map: 303 kfree(p_rdma_info->cq_map.bitmap); 304 free_dpi_map: 305 kfree(p_rdma_info->dpi_map.bitmap); 306 free_xrcd_map: 307 kfree(p_rdma_info->xrcd_map.bitmap); 308 free_pd_map: 309 kfree(p_rdma_info->pd_map.bitmap); 310 free_rdma_port: 311 kfree(p_rdma_info->port); 312 free_rdma_dev: 313 kfree(p_rdma_info->dev); 314 315 return rc; 316 } 317 318 void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn, 319 struct qed_bmap *bmap, bool check) 320 { 321 int weight = bitmap_weight(bmap->bitmap, bmap->max_count); 322 int last_line = bmap->max_count / (64 * 8); 323 int last_item = last_line * 8 + 324 DIV_ROUND_UP(bmap->max_count % (64 * 8), 64); 325 u64 *pmap = (u64 *)bmap->bitmap; 326 int line, item, offset; 327 u8 str_last_line[200] = { 0 }; 328 329 if (!weight || !check) 330 goto end; 331 332 DP_NOTICE(p_hwfn, 333 "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n", 334 bmap->name, bmap->max_count, weight); 335 336 /* print aligned non-zero lines, if any */ 337 for (item = 0, line = 0; line < last_line; line++, item += 8) 338 if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8)) 339 DP_NOTICE(p_hwfn, 340 "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", 341 line, 342 pmap[item], 343 pmap[item + 1], 344 pmap[item + 2], 345 pmap[item + 3], 346 pmap[item + 4], 347 pmap[item + 5], 348 pmap[item + 6], pmap[item + 7]); 349 350 /* print last unaligned non-zero line, if any */ 351 if ((bmap->max_count % (64 * 8)) && 352 (bitmap_weight((unsigned long *)&pmap[item], 353 bmap->max_count - item * 64))) { 354 offset = sprintf(str_last_line, "line 0x%04x: ", line); 355 for (; item < last_item; item++) 356 offset += sprintf(str_last_line + offset, 357 "0x%016llx ", pmap[item]); 358 DP_NOTICE(p_hwfn, "%s\n", str_last_line); 359 } 360 361 end: 362 kfree(bmap->bitmap); 363 bmap->bitmap = NULL; 364 } 365 366 static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) 367 { 368 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; 369 370 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) 371 qed_iwarp_resc_free(p_hwfn); 372 373 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1); 374 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1); 375 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1); 376 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1); 377 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0); 378 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1); 379 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1); 380 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1); 381 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1); 382 383 kfree(p_rdma_info->port); 384 kfree(p_rdma_info->dev); 385 } 386 387 static void qed_rdma_free_tid(void *rdma_cxt, u32 itid) 388 { 389 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 390 391 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid); 392 393 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 394 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid); 395 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 396 } 397 398 static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn) 399 { 400 qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey); 401 } 402 403 static void qed_rdma_free(struct qed_hwfn *p_hwfn) 404 { 405 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n"); 406 407 qed_rdma_free_reserved_lkey(p_hwfn); 408 qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto); 409 qed_rdma_resc_free(p_hwfn); 410 } 411 412 static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid) 413 { 414 guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2; 415 guid[1] = p_hwfn->hw_info.hw_mac_addr[1]; 416 guid[2] = p_hwfn->hw_info.hw_mac_addr[2]; 417 guid[3] = 0xff; 418 guid[4] = 0xfe; 419 guid[5] = p_hwfn->hw_info.hw_mac_addr[3]; 420 guid[6] = p_hwfn->hw_info.hw_mac_addr[4]; 421 guid[7] = p_hwfn->hw_info.hw_mac_addr[5]; 422 } 423 424 static void qed_rdma_init_events(struct qed_hwfn *p_hwfn, 425 struct qed_rdma_start_in_params *params) 426 { 427 struct qed_rdma_events *events; 428 429 events = &p_hwfn->p_rdma_info->events; 430 431 events->unaffiliated_event = params->events->unaffiliated_event; 432 events->affiliated_event = params->events->affiliated_event; 433 events->context = params->events->context; 434 } 435 436 static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, 437 struct qed_rdma_start_in_params *params) 438 { 439 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; 440 struct qed_dev *cdev = p_hwfn->cdev; 441 u32 pci_status_control; 442 u32 num_qps; 443 444 /* Vendor specific information */ 445 dev->vendor_id = cdev->vendor_id; 446 dev->vendor_part_id = cdev->device_id; 447 dev->hw_ver = cdev->chip_rev; 448 dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | 449 (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION); 450 451 qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid); 452 dev->node_guid = dev->sys_image_guid; 453 454 dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE, 455 RDMA_MAX_SGE_PER_RQ_WQE); 456 457 if (cdev->rdma_max_sge) 458 dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge); 459 460 dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE; 461 if (p_hwfn->cdev->rdma_max_srq_sge) { 462 dev->max_srq_sge = min_t(u32, 463 p_hwfn->cdev->rdma_max_srq_sge, 464 dev->max_srq_sge); 465 } 466 dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE; 467 468 dev->max_inline = (cdev->rdma_max_inline) ? 469 min_t(u32, cdev->rdma_max_inline, dev->max_inline) : 470 dev->max_inline; 471 472 dev->max_wqe = QED_RDMA_MAX_WQE; 473 dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ); 474 475 /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because 476 * it is up-aligned to 16 and then to ILT page size within qed cxt. 477 * This is OK in terms of ILT but we don't want to configure the FW 478 * above its abilities 479 */ 480 num_qps = ROCE_MAX_QPS; 481 num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps); 482 dev->max_qp = num_qps; 483 484 /* CQs uses the same icids that QPs use hence they are limited by the 485 * number of icids. There are two icids per QP. 486 */ 487 dev->max_cq = num_qps * 2; 488 489 /* The number of mrs is smaller by 1 since the first is reserved */ 490 dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1; 491 dev->max_mr_size = QED_RDMA_MAX_MR_SIZE; 492 493 /* The maximum CQE capacity per CQ supported. 494 * max number of cqes will be in two layer pbl, 495 * 8 is the pointer size in bytes 496 * 32 is the size of cq element in bytes 497 */ 498 if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS) 499 dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT; 500 else 501 dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT; 502 503 dev->max_mw = 0; 504 dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8); 505 dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE; 506 dev->max_pkey = QED_RDMA_MAX_P_KEY; 507 508 dev->max_srq = p_hwfn->p_rdma_info->num_srqs; 509 dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM; 510 dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE / 511 (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2); 512 dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE / 513 RDMA_REQ_RD_ATOMIC_ELM_SIZE; 514 dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc * 515 p_hwfn->p_rdma_info->num_qps; 516 dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS; 517 dev->dev_ack_delay = QED_RDMA_ACK_DELAY; 518 dev->max_pd = RDMA_MAX_PDS; 519 dev->max_ah = p_hwfn->p_rdma_info->num_qps; 520 dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE); 521 522 /* Set capablities */ 523 dev->dev_caps = 0; 524 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1); 525 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1); 526 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1); 527 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1); 528 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1); 529 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1); 530 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1); 531 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1); 532 533 /* Check atomic operations support in PCI configuration space. */ 534 pcie_capability_read_dword(cdev->pdev, PCI_EXP_DEVCTL2, 535 &pci_status_control); 536 537 if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN) 538 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1); 539 540 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) 541 qed_iwarp_init_devinfo(p_hwfn); 542 } 543 544 static void qed_rdma_init_port(struct qed_hwfn *p_hwfn) 545 { 546 struct qed_rdma_port *port = p_hwfn->p_rdma_info->port; 547 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; 548 549 port->port_state = p_hwfn->mcp_info->link_output.link_up ? 550 QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN; 551 552 port->max_msg_size = min_t(u64, 553 (dev->max_mr_mw_fmr_size * 554 p_hwfn->cdev->rdma_max_sge), 555 BIT(31)); 556 557 port->pkey_bad_counter = 0; 558 } 559 560 static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 561 { 562 int rc = 0; 563 564 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n"); 565 p_hwfn->b_rdma_enabled_in_prs = false; 566 567 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) 568 qed_iwarp_init_hw(p_hwfn, p_ptt); 569 else 570 rc = qed_roce_init_hw(p_hwfn, p_ptt); 571 572 return rc; 573 } 574 575 static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, 576 struct qed_rdma_start_in_params *params, 577 struct qed_ptt *p_ptt) 578 { 579 struct rdma_init_func_ramrod_data *p_ramrod; 580 struct qed_rdma_cnq_params *p_cnq_pbl_list; 581 struct rdma_init_func_hdr *p_params_header; 582 struct rdma_cnq_params *p_cnq_params; 583 struct qed_sp_init_data init_data; 584 struct qed_spq_entry *p_ent; 585 u32 cnq_id, sb_id; 586 u16 igu_sb_id; 587 int rc; 588 589 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n"); 590 591 /* Save the number of cnqs for the function close ramrod */ 592 p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq; 593 594 /* Get SPQ entry */ 595 memset(&init_data, 0, sizeof(init_data)); 596 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 597 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 598 599 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT, 600 p_hwfn->p_rdma_info->proto, &init_data); 601 if (rc) 602 return rc; 603 604 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { 605 qed_iwarp_init_fw_ramrod(p_hwfn, 606 &p_ent->ramrod.iwarp_init_func); 607 p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma; 608 } else { 609 p_ramrod = &p_ent->ramrod.roce_init_func.rdma; 610 } 611 612 p_params_header = &p_ramrod->params_header; 613 p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn, 614 QED_RDMA_CNQ_RAM); 615 p_params_header->num_cnqs = params->desired_cnq; 616 p_params_header->first_reg_srq_id = 617 cpu_to_le16(p_hwfn->p_rdma_info->srq_id_offset); 618 p_params_header->reg_srq_base_addr = 619 cpu_to_le32(qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM)); 620 if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS) 621 p_params_header->cq_ring_mode = 1; 622 else 623 p_params_header->cq_ring_mode = 0; 624 625 for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) { 626 sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id); 627 igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); 628 p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id); 629 p_cnq_params = &p_ramrod->cnq_params[cnq_id]; 630 p_cnq_pbl_list = ¶ms->cnq_pbl_list[cnq_id]; 631 632 p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi; 633 p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages; 634 635 DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr, 636 p_cnq_pbl_list->pbl_ptr); 637 638 /* we assume here that cnq_id and qz_offset are the same */ 639 p_cnq_params->queue_zone_num = 640 cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base + 641 cnq_id); 642 } 643 644 return qed_spq_post(p_hwfn, p_ent, NULL); 645 } 646 647 static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid) 648 { 649 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 650 int rc; 651 652 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n"); 653 654 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 655 rc = qed_rdma_bmap_alloc_id(p_hwfn, 656 &p_hwfn->p_rdma_info->tid_map, itid); 657 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 658 if (rc) 659 goto out; 660 661 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid); 662 out: 663 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc); 664 return rc; 665 } 666 667 static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn) 668 { 669 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; 670 671 /* Tid 0 will be used as the key for "reserved MR". 672 * The driver should allocate memory for it so it can be loaded but no 673 * ramrod should be passed on it. 674 */ 675 qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey); 676 if (dev->reserved_lkey != RDMA_RESERVED_LKEY) { 677 DP_NOTICE(p_hwfn, 678 "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n"); 679 return -EINVAL; 680 } 681 682 return 0; 683 } 684 685 static int qed_rdma_setup(struct qed_hwfn *p_hwfn, 686 struct qed_ptt *p_ptt, 687 struct qed_rdma_start_in_params *params) 688 { 689 int rc; 690 691 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n"); 692 693 qed_rdma_init_devinfo(p_hwfn, params); 694 qed_rdma_init_port(p_hwfn); 695 qed_rdma_init_events(p_hwfn, params); 696 697 rc = qed_rdma_reserve_lkey(p_hwfn); 698 if (rc) 699 return rc; 700 701 rc = qed_rdma_init_hw(p_hwfn, p_ptt); 702 if (rc) 703 return rc; 704 705 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { 706 rc = qed_iwarp_setup(p_hwfn, params); 707 if (rc) 708 return rc; 709 } else { 710 rc = qed_roce_setup(p_hwfn); 711 if (rc) 712 return rc; 713 } 714 715 return qed_rdma_start_fw(p_hwfn, params, p_ptt); 716 } 717 718 static int qed_rdma_stop(void *rdma_cxt) 719 { 720 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 721 struct rdma_close_func_ramrod_data *p_ramrod; 722 struct qed_sp_init_data init_data; 723 struct qed_spq_entry *p_ent; 724 struct qed_ptt *p_ptt; 725 u32 ll2_ethertype_en; 726 int rc = -EBUSY; 727 728 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n"); 729 730 p_ptt = qed_ptt_acquire(p_hwfn); 731 if (!p_ptt) { 732 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n"); 733 return rc; 734 } 735 736 /* Disable RoCE search */ 737 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0); 738 p_hwfn->b_rdma_enabled_in_prs = false; 739 p_hwfn->p_rdma_info->active = 0; 740 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); 741 742 ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); 743 744 qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN, 745 (ll2_ethertype_en & 0xFFFE)); 746 747 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { 748 rc = qed_iwarp_stop(p_hwfn); 749 if (rc) { 750 qed_ptt_release(p_hwfn, p_ptt); 751 return rc; 752 } 753 } else { 754 qed_roce_stop(p_hwfn); 755 } 756 757 qed_ptt_release(p_hwfn, p_ptt); 758 759 /* Get SPQ entry */ 760 memset(&init_data, 0, sizeof(init_data)); 761 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 762 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 763 764 /* Stop RoCE */ 765 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE, 766 p_hwfn->p_rdma_info->proto, &init_data); 767 if (rc) 768 goto out; 769 770 p_ramrod = &p_ent->ramrod.rdma_close_func; 771 772 p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs; 773 p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM); 774 775 rc = qed_spq_post(p_hwfn, p_ent, NULL); 776 777 out: 778 qed_rdma_free(p_hwfn); 779 780 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc); 781 return rc; 782 } 783 784 static int qed_rdma_add_user(void *rdma_cxt, 785 struct qed_rdma_add_user_out_params *out_params) 786 { 787 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 788 u32 dpi_start_offset; 789 u32 returned_id = 0; 790 int rc; 791 792 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n"); 793 794 /* Allocate DPI */ 795 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 796 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 797 &returned_id); 798 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 799 800 out_params->dpi = (u16)returned_id; 801 802 /* Calculate the corresponding DPI address */ 803 dpi_start_offset = p_hwfn->dpi_start_offset; 804 805 out_params->dpi_addr = p_hwfn->doorbells + dpi_start_offset + 806 out_params->dpi * p_hwfn->dpi_size; 807 808 out_params->dpi_phys_addr = p_hwfn->db_phys_addr + 809 dpi_start_offset + 810 ((out_params->dpi) * p_hwfn->dpi_size); 811 812 out_params->dpi_size = p_hwfn->dpi_size; 813 out_params->wid_count = p_hwfn->wid_count; 814 815 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc); 816 return rc; 817 } 818 819 static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt) 820 { 821 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 822 struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port; 823 struct qed_mcp_link_state *p_link_output; 824 825 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n"); 826 827 /* The link state is saved only for the leading hwfn */ 828 p_link_output = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output; 829 830 p_port->port_state = p_link_output->link_up ? QED_RDMA_PORT_UP 831 : QED_RDMA_PORT_DOWN; 832 833 p_port->link_speed = p_link_output->speed; 834 835 p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE; 836 837 return p_port; 838 } 839 840 static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) 841 { 842 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 843 844 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n"); 845 846 /* Return struct with device parameters */ 847 return p_hwfn->p_rdma_info->dev; 848 } 849 850 static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) 851 { 852 struct qed_hwfn *p_hwfn; 853 u16 qz_num; 854 u32 addr; 855 856 p_hwfn = (struct qed_hwfn *)rdma_cxt; 857 858 if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) { 859 DP_NOTICE(p_hwfn, 860 "queue zone offset %d is too large (max is %d)\n", 861 qz_offset, p_hwfn->p_rdma_info->max_queue_zones); 862 return; 863 } 864 865 qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset; 866 addr = GTT_BAR0_MAP_REG_USDM_RAM + 867 USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num); 868 869 REG_WR16(p_hwfn, addr, prod); 870 871 /* keep prod updates ordered */ 872 wmb(); 873 } 874 875 static int qed_fill_rdma_dev_info(struct qed_dev *cdev, 876 struct qed_dev_rdma_info *info) 877 { 878 struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev); 879 880 memset(info, 0, sizeof(*info)); 881 882 info->rdma_type = QED_IS_ROCE_PERSONALITY(p_hwfn) ? 883 QED_RDMA_TYPE_ROCE : QED_RDMA_TYPE_IWARP; 884 885 info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0); 886 887 qed_fill_dev_info(cdev, &info->common); 888 889 return 0; 890 } 891 892 static int qed_rdma_get_sb_start(struct qed_dev *cdev) 893 { 894 int feat_num; 895 896 if (cdev->num_hwfns > 1) 897 feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE); 898 else 899 feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE) * 900 cdev->num_hwfns; 901 902 return feat_num; 903 } 904 905 static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev) 906 { 907 int n_cnq = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_RDMA_CNQ); 908 int n_msix = cdev->int_params.rdma_msix_cnt; 909 910 return min_t(int, n_cnq, n_msix); 911 } 912 913 static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt) 914 { 915 int limit = 0; 916 917 /* Mark the fastpath as free/used */ 918 cdev->int_params.fp_initialized = cnt ? true : false; 919 920 if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) { 921 DP_ERR(cdev, 922 "qed roce supports only MSI-X interrupts (detected %d).\n", 923 cdev->int_params.out.int_mode); 924 return -EINVAL; 925 } else if (cdev->int_params.fp_msix_cnt) { 926 limit = cdev->int_params.rdma_msix_cnt; 927 } 928 929 if (!limit) 930 return -ENOMEM; 931 932 return min_t(int, cnt, limit); 933 } 934 935 static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info) 936 { 937 memset(info, 0, sizeof(*info)); 938 939 if (!cdev->int_params.fp_initialized) { 940 DP_INFO(cdev, 941 "Protocol driver requested interrupt information, but its support is not yet configured\n"); 942 return -EINVAL; 943 } 944 945 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 946 int msix_base = cdev->int_params.rdma_msix_base; 947 948 info->msix_cnt = cdev->int_params.rdma_msix_cnt; 949 info->msix = &cdev->int_params.msix_table[msix_base]; 950 951 DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n", 952 info->msix_cnt, msix_base); 953 } 954 955 return 0; 956 } 957 958 static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd) 959 { 960 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 961 u32 returned_id; 962 int rc; 963 964 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n"); 965 966 /* Allocates an unused protection domain */ 967 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 968 rc = qed_rdma_bmap_alloc_id(p_hwfn, 969 &p_hwfn->p_rdma_info->pd_map, &returned_id); 970 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 971 972 *pd = (u16)returned_id; 973 974 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc); 975 return rc; 976 } 977 978 static void qed_rdma_free_pd(void *rdma_cxt, u16 pd) 979 { 980 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 981 982 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd); 983 984 /* Returns a previously allocated protection domain for reuse */ 985 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 986 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd); 987 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 988 } 989 990 static int qed_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id) 991 { 992 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 993 u32 returned_id; 994 int rc; 995 996 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD\n"); 997 998 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 999 rc = qed_rdma_bmap_alloc_id(p_hwfn, 1000 &p_hwfn->p_rdma_info->xrcd_map, 1001 &returned_id); 1002 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 1003 if (rc) { 1004 DP_NOTICE(p_hwfn, "Failed in allocating xrcd id\n"); 1005 return rc; 1006 } 1007 1008 *xrcd_id = (u16)returned_id; 1009 1010 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD - done, rc = %d\n", rc); 1011 return rc; 1012 } 1013 1014 static void qed_rdma_free_xrcd(void *rdma_cxt, u16 xrcd_id) 1015 { 1016 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 1017 1018 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "xrcd_id = %08x\n", xrcd_id); 1019 1020 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 1021 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, xrcd_id); 1022 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 1023 } 1024 1025 static enum qed_rdma_toggle_bit 1026 qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid) 1027 { 1028 struct qed_rdma_info *p_info = p_hwfn->p_rdma_info; 1029 enum qed_rdma_toggle_bit toggle_bit; 1030 u32 bmap_id; 1031 1032 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid); 1033 1034 /* the function toggle the bit that is related to a given icid 1035 * and returns the new toggle bit's value 1036 */ 1037 bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto); 1038 1039 spin_lock_bh(&p_info->lock); 1040 toggle_bit = !test_and_change_bit(bmap_id, 1041 p_info->toggle_bits.bitmap); 1042 spin_unlock_bh(&p_info->lock); 1043 1044 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n", 1045 toggle_bit); 1046 1047 return toggle_bit; 1048 } 1049 1050 static int qed_rdma_create_cq(void *rdma_cxt, 1051 struct qed_rdma_create_cq_in_params *params, 1052 u16 *icid) 1053 { 1054 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 1055 struct qed_rdma_info *p_info = p_hwfn->p_rdma_info; 1056 struct rdma_create_cq_ramrod_data *p_ramrod; 1057 enum qed_rdma_toggle_bit toggle_bit; 1058 struct qed_sp_init_data init_data; 1059 struct qed_spq_entry *p_ent; 1060 u32 returned_id, start_cid; 1061 int rc; 1062 1063 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n", 1064 params->cq_handle_hi, params->cq_handle_lo); 1065 1066 /* Allocate icid */ 1067 spin_lock_bh(&p_info->lock); 1068 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id); 1069 spin_unlock_bh(&p_info->lock); 1070 1071 if (rc) { 1072 DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc); 1073 return rc; 1074 } 1075 1076 start_cid = qed_cxt_get_proto_cid_start(p_hwfn, 1077 p_info->proto); 1078 *icid = returned_id + start_cid; 1079 1080 /* Check if icid requires a page allocation */ 1081 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid); 1082 if (rc) 1083 goto err; 1084 1085 /* Get SPQ entry */ 1086 memset(&init_data, 0, sizeof(init_data)); 1087 init_data.cid = *icid; 1088 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1089 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1090 1091 /* Send create CQ ramrod */ 1092 rc = qed_sp_init_request(p_hwfn, &p_ent, 1093 RDMA_RAMROD_CREATE_CQ, 1094 p_info->proto, &init_data); 1095 if (rc) 1096 goto err; 1097 1098 p_ramrod = &p_ent->ramrod.rdma_create_cq; 1099 1100 p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi); 1101 p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo); 1102 p_ramrod->dpi = cpu_to_le16(params->dpi); 1103 p_ramrod->is_two_level_pbl = params->pbl_two_level; 1104 p_ramrod->max_cqes = cpu_to_le32(params->cq_size); 1105 DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr); 1106 p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages); 1107 p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) + 1108 params->cnq_id; 1109 p_ramrod->int_timeout = cpu_to_le16(params->int_timeout); 1110 1111 /* toggle the bit for every resize or create cq for a given icid */ 1112 toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid); 1113 1114 p_ramrod->toggle_bit = toggle_bit; 1115 1116 rc = qed_spq_post(p_hwfn, p_ent, NULL); 1117 if (rc) { 1118 /* restore toggle bit */ 1119 qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid); 1120 goto err; 1121 } 1122 1123 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc); 1124 return rc; 1125 1126 err: 1127 /* release allocated icid */ 1128 spin_lock_bh(&p_info->lock); 1129 qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id); 1130 spin_unlock_bh(&p_info->lock); 1131 DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc); 1132 1133 return rc; 1134 } 1135 1136 static int 1137 qed_rdma_destroy_cq(void *rdma_cxt, 1138 struct qed_rdma_destroy_cq_in_params *in_params, 1139 struct qed_rdma_destroy_cq_out_params *out_params) 1140 { 1141 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 1142 struct rdma_destroy_cq_output_params *p_ramrod_res; 1143 struct rdma_destroy_cq_ramrod_data *p_ramrod; 1144 struct qed_sp_init_data init_data; 1145 struct qed_spq_entry *p_ent; 1146 dma_addr_t ramrod_res_phys; 1147 enum protocol_type proto; 1148 int rc = -ENOMEM; 1149 1150 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid); 1151 1152 p_ramrod_res = 1153 (struct rdma_destroy_cq_output_params *) 1154 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 1155 sizeof(struct rdma_destroy_cq_output_params), 1156 &ramrod_res_phys, GFP_KERNEL); 1157 if (!p_ramrod_res) { 1158 DP_NOTICE(p_hwfn, 1159 "qed destroy cq failed: cannot allocate memory (ramrod)\n"); 1160 return rc; 1161 } 1162 1163 /* Get SPQ entry */ 1164 memset(&init_data, 0, sizeof(init_data)); 1165 init_data.cid = in_params->icid; 1166 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1167 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1168 proto = p_hwfn->p_rdma_info->proto; 1169 /* Send destroy CQ ramrod */ 1170 rc = qed_sp_init_request(p_hwfn, &p_ent, 1171 RDMA_RAMROD_DESTROY_CQ, 1172 proto, &init_data); 1173 if (rc) 1174 goto err; 1175 1176 p_ramrod = &p_ent->ramrod.rdma_destroy_cq; 1177 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys); 1178 1179 rc = qed_spq_post(p_hwfn, p_ent, NULL); 1180 if (rc) 1181 goto err; 1182 1183 out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num); 1184 1185 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 1186 sizeof(struct rdma_destroy_cq_output_params), 1187 p_ramrod_res, ramrod_res_phys); 1188 1189 /* Free icid */ 1190 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 1191 1192 qed_bmap_release_id(p_hwfn, 1193 &p_hwfn->p_rdma_info->cq_map, 1194 (in_params->icid - 1195 qed_cxt_get_proto_cid_start(p_hwfn, proto))); 1196 1197 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 1198 1199 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc); 1200 return rc; 1201 1202 err: dma_free_coherent(&p_hwfn->cdev->pdev->dev, 1203 sizeof(struct rdma_destroy_cq_output_params), 1204 p_ramrod_res, ramrod_res_phys); 1205 1206 return rc; 1207 } 1208 1209 void qed_rdma_set_fw_mac(__le16 *p_fw_mac, const u8 *p_qed_mac) 1210 { 1211 p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]); 1212 p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]); 1213 p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]); 1214 } 1215 1216 static int qed_rdma_query_qp(void *rdma_cxt, 1217 struct qed_rdma_qp *qp, 1218 struct qed_rdma_query_qp_out_params *out_params) 1219 { 1220 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 1221 int rc = 0; 1222 1223 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 1224 1225 /* The following fields are filled in from qp and not FW as they can't 1226 * be modified by FW 1227 */ 1228 out_params->mtu = qp->mtu; 1229 out_params->dest_qp = qp->dest_qp; 1230 out_params->incoming_atomic_en = qp->incoming_atomic_en; 1231 out_params->e2e_flow_control_en = qp->e2e_flow_control_en; 1232 out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en; 1233 out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en; 1234 out_params->dgid = qp->dgid; 1235 out_params->flow_label = qp->flow_label; 1236 out_params->hop_limit_ttl = qp->hop_limit_ttl; 1237 out_params->traffic_class_tos = qp->traffic_class_tos; 1238 out_params->timeout = qp->ack_timeout; 1239 out_params->rnr_retry = qp->rnr_retry_cnt; 1240 out_params->retry_cnt = qp->retry_cnt; 1241 out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer; 1242 out_params->pkey_index = 0; 1243 out_params->max_rd_atomic = qp->max_rd_atomic_req; 1244 out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp; 1245 out_params->sqd_async = qp->sqd_async; 1246 1247 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) 1248 qed_iwarp_query_qp(qp, out_params); 1249 else 1250 rc = qed_roce_query_qp(p_hwfn, qp, out_params); 1251 1252 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc); 1253 return rc; 1254 } 1255 1256 static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) 1257 { 1258 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 1259 int rc = 0; 1260 1261 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 1262 1263 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) 1264 rc = qed_iwarp_destroy_qp(p_hwfn, qp); 1265 else 1266 rc = qed_roce_destroy_qp(p_hwfn, qp); 1267 1268 /* free qp params struct */ 1269 kfree(qp); 1270 1271 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n"); 1272 return rc; 1273 } 1274 1275 static struct qed_rdma_qp * 1276 qed_rdma_create_qp(void *rdma_cxt, 1277 struct qed_rdma_create_qp_in_params *in_params, 1278 struct qed_rdma_create_qp_out_params *out_params) 1279 { 1280 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 1281 struct qed_rdma_qp *qp; 1282 u8 max_stats_queues; 1283 int rc; 1284 1285 if (!rdma_cxt || !in_params || !out_params || 1286 !p_hwfn->p_rdma_info->active) { 1287 DP_ERR(p_hwfn->cdev, 1288 "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n", 1289 rdma_cxt, in_params, out_params); 1290 return NULL; 1291 } 1292 1293 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1294 "qed rdma create qp called with qp_handle = %08x%08x\n", 1295 in_params->qp_handle_hi, in_params->qp_handle_lo); 1296 1297 /* Some sanity checks... */ 1298 max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues; 1299 if (in_params->stats_queue >= max_stats_queues) { 1300 DP_ERR(p_hwfn->cdev, 1301 "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n", 1302 in_params->stats_queue, max_stats_queues); 1303 return NULL; 1304 } 1305 1306 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { 1307 if (in_params->sq_num_pages * sizeof(struct regpair) > 1308 IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) { 1309 DP_NOTICE(p_hwfn->cdev, 1310 "Sq num pages: %d exceeds maximum\n", 1311 in_params->sq_num_pages); 1312 return NULL; 1313 } 1314 if (in_params->rq_num_pages * sizeof(struct regpair) > 1315 IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) { 1316 DP_NOTICE(p_hwfn->cdev, 1317 "Rq num pages: %d exceeds maximum\n", 1318 in_params->rq_num_pages); 1319 return NULL; 1320 } 1321 } 1322 1323 qp = kzalloc(sizeof(*qp), GFP_KERNEL); 1324 if (!qp) 1325 return NULL; 1326 1327 qp->cur_state = QED_ROCE_QP_STATE_RESET; 1328 qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi); 1329 qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo); 1330 qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi); 1331 qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo); 1332 qp->use_srq = in_params->use_srq; 1333 qp->signal_all = in_params->signal_all; 1334 qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey; 1335 qp->pd = in_params->pd; 1336 qp->dpi = in_params->dpi; 1337 qp->sq_cq_id = in_params->sq_cq_id; 1338 qp->sq_num_pages = in_params->sq_num_pages; 1339 qp->sq_pbl_ptr = in_params->sq_pbl_ptr; 1340 qp->rq_cq_id = in_params->rq_cq_id; 1341 qp->rq_num_pages = in_params->rq_num_pages; 1342 qp->rq_pbl_ptr = in_params->rq_pbl_ptr; 1343 qp->srq_id = in_params->srq_id; 1344 qp->req_offloaded = false; 1345 qp->resp_offloaded = false; 1346 qp->e2e_flow_control_en = qp->use_srq ? false : true; 1347 qp->stats_queue = in_params->stats_queue; 1348 qp->qp_type = in_params->qp_type; 1349 qp->xrcd_id = in_params->xrcd_id; 1350 1351 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { 1352 rc = qed_iwarp_create_qp(p_hwfn, qp, out_params); 1353 qp->qpid = qp->icid; 1354 } else { 1355 qp->edpm_mode = GET_FIELD(in_params->flags, QED_ROCE_EDPM_MODE); 1356 rc = qed_roce_alloc_cid(p_hwfn, &qp->icid); 1357 qp->qpid = ((0xFF << 16) | qp->icid); 1358 } 1359 1360 if (rc) { 1361 kfree(qp); 1362 return NULL; 1363 } 1364 1365 out_params->icid = qp->icid; 1366 out_params->qp_id = qp->qpid; 1367 1368 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc); 1369 return qp; 1370 } 1371 1372 static int qed_rdma_modify_qp(void *rdma_cxt, 1373 struct qed_rdma_qp *qp, 1374 struct qed_rdma_modify_qp_in_params *params) 1375 { 1376 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 1377 enum qed_roce_qp_state prev_state; 1378 int rc = 0; 1379 1380 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n", 1381 qp->icid, params->new_state); 1382 1383 if (rc) { 1384 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); 1385 return rc; 1386 } 1387 1388 if (GET_FIELD(params->modify_flags, 1389 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) { 1390 qp->incoming_rdma_read_en = params->incoming_rdma_read_en; 1391 qp->incoming_rdma_write_en = params->incoming_rdma_write_en; 1392 qp->incoming_atomic_en = params->incoming_atomic_en; 1393 } 1394 1395 /* Update QP structure with the updated values */ 1396 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE)) 1397 qp->roce_mode = params->roce_mode; 1398 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)) 1399 qp->pkey = params->pkey; 1400 if (GET_FIELD(params->modify_flags, 1401 QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN)) 1402 qp->e2e_flow_control_en = params->e2e_flow_control_en; 1403 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP)) 1404 qp->dest_qp = params->dest_qp; 1405 if (GET_FIELD(params->modify_flags, 1406 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) { 1407 /* Indicates that the following parameters have changed: 1408 * Traffic class, flow label, hop limit, source GID, 1409 * destination GID, loopback indicator 1410 */ 1411 qp->traffic_class_tos = params->traffic_class_tos; 1412 qp->flow_label = params->flow_label; 1413 qp->hop_limit_ttl = params->hop_limit_ttl; 1414 1415 qp->sgid = params->sgid; 1416 qp->dgid = params->dgid; 1417 qp->udp_src_port = 0; 1418 qp->vlan_id = params->vlan_id; 1419 qp->mtu = params->mtu; 1420 qp->lb_indication = params->lb_indication; 1421 memcpy((u8 *)&qp->remote_mac_addr[0], 1422 (u8 *)¶ms->remote_mac_addr[0], ETH_ALEN); 1423 if (params->use_local_mac) { 1424 memcpy((u8 *)&qp->local_mac_addr[0], 1425 (u8 *)¶ms->local_mac_addr[0], ETH_ALEN); 1426 } else { 1427 memcpy((u8 *)&qp->local_mac_addr[0], 1428 (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); 1429 } 1430 } 1431 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN)) 1432 qp->rq_psn = params->rq_psn; 1433 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN)) 1434 qp->sq_psn = params->sq_psn; 1435 if (GET_FIELD(params->modify_flags, 1436 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ)) 1437 qp->max_rd_atomic_req = params->max_rd_atomic_req; 1438 if (GET_FIELD(params->modify_flags, 1439 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP)) 1440 qp->max_rd_atomic_resp = params->max_rd_atomic_resp; 1441 if (GET_FIELD(params->modify_flags, 1442 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT)) 1443 qp->ack_timeout = params->ack_timeout; 1444 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT)) 1445 qp->retry_cnt = params->retry_cnt; 1446 if (GET_FIELD(params->modify_flags, 1447 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT)) 1448 qp->rnr_retry_cnt = params->rnr_retry_cnt; 1449 if (GET_FIELD(params->modify_flags, 1450 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER)) 1451 qp->min_rnr_nak_timer = params->min_rnr_nak_timer; 1452 1453 qp->sqd_async = params->sqd_async; 1454 1455 prev_state = qp->cur_state; 1456 if (GET_FIELD(params->modify_flags, 1457 QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) { 1458 qp->cur_state = params->new_state; 1459 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n", 1460 qp->cur_state); 1461 } 1462 1463 switch (qp->qp_type) { 1464 case QED_RDMA_QP_TYPE_XRC_INI: 1465 qp->has_req = 1; 1466 break; 1467 case QED_RDMA_QP_TYPE_XRC_TGT: 1468 qp->has_resp = 1; 1469 break; 1470 default: 1471 qp->has_req = 1; 1472 qp->has_resp = 1; 1473 } 1474 1475 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { 1476 enum qed_iwarp_qp_state new_state = 1477 qed_roce2iwarp_state(qp->cur_state); 1478 1479 rc = qed_iwarp_modify_qp(p_hwfn, qp, new_state, 0); 1480 } else { 1481 rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params); 1482 } 1483 1484 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc); 1485 return rc; 1486 } 1487 1488 static int 1489 qed_rdma_register_tid(void *rdma_cxt, 1490 struct qed_rdma_register_tid_in_params *params) 1491 { 1492 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 1493 struct rdma_register_tid_ramrod_data *p_ramrod; 1494 struct qed_sp_init_data init_data; 1495 struct qed_spq_entry *p_ent; 1496 enum rdma_tid_type tid_type; 1497 u8 fw_return_code; 1498 u16 flags = 0; 1499 int rc; 1500 1501 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid); 1502 1503 /* Get SPQ entry */ 1504 memset(&init_data, 0, sizeof(init_data)); 1505 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1506 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1507 1508 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR, 1509 p_hwfn->p_rdma_info->proto, &init_data); 1510 if (rc) { 1511 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); 1512 return rc; 1513 } 1514 1515 if (p_hwfn->p_rdma_info->last_tid < params->itid) 1516 p_hwfn->p_rdma_info->last_tid = params->itid; 1517 1518 SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL, 1519 params->pbl_two_level); 1520 1521 SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, 1522 params->zbva); 1523 1524 SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr); 1525 1526 /* Don't initialize D/C field, as it may override other bits. */ 1527 if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr)) 1528 SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG, 1529 params->page_size_log - 12); 1530 1531 SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ, 1532 params->remote_read); 1533 1534 SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE, 1535 params->remote_write); 1536 1537 SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC, 1538 params->remote_atomic); 1539 1540 SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE, 1541 params->local_write); 1542 1543 SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, 1544 params->local_read); 1545 1546 SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND, 1547 params->mw_bind); 1548 1549 p_ramrod = &p_ent->ramrod.rdma_register_tid; 1550 p_ramrod->flags = cpu_to_le16(flags); 1551 1552 SET_FIELD(p_ramrod->flags1, 1553 RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG, 1554 params->pbl_page_size_log - 12); 1555 1556 SET_FIELD(p_ramrod->flags2, RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, 1557 params->dma_mr); 1558 1559 switch (params->tid_type) { 1560 case QED_RDMA_TID_REGISTERED_MR: 1561 tid_type = RDMA_TID_REGISTERED_MR; 1562 break; 1563 case QED_RDMA_TID_FMR: 1564 tid_type = RDMA_TID_FMR; 1565 break; 1566 case QED_RDMA_TID_MW: 1567 tid_type = RDMA_TID_MW; 1568 break; 1569 default: 1570 rc = -EINVAL; 1571 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); 1572 qed_sp_destroy_request(p_hwfn, p_ent); 1573 return rc; 1574 } 1575 1576 SET_FIELD(p_ramrod->flags1, RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, 1577 tid_type); 1578 1579 p_ramrod->itid = cpu_to_le32(params->itid); 1580 p_ramrod->key = params->key; 1581 p_ramrod->pd = cpu_to_le16(params->pd); 1582 p_ramrod->length_hi = (u8)(params->length >> 32); 1583 p_ramrod->length_lo = DMA_LO_LE(params->length); 1584 if (params->zbva) { 1585 /* Lower 32 bits of the registered MR address. 1586 * In case of zero based MR, will hold FBO 1587 */ 1588 p_ramrod->va.hi = 0; 1589 p_ramrod->va.lo = cpu_to_le32(params->fbo); 1590 } else { 1591 DMA_REGPAIR_LE(p_ramrod->va, params->vaddr); 1592 } 1593 DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr); 1594 1595 /* DIF */ 1596 if (params->dif_enabled) { 1597 SET_FIELD(p_ramrod->flags2, 1598 RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1); 1599 DMA_REGPAIR_LE(p_ramrod->dif_error_addr, 1600 params->dif_error_addr); 1601 } 1602 1603 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); 1604 if (rc) 1605 return rc; 1606 1607 if (fw_return_code != RDMA_RETURN_OK) { 1608 DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code); 1609 return -EINVAL; 1610 } 1611 1612 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc); 1613 return rc; 1614 } 1615 1616 static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid) 1617 { 1618 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 1619 struct rdma_deregister_tid_ramrod_data *p_ramrod; 1620 struct qed_sp_init_data init_data; 1621 struct qed_spq_entry *p_ent; 1622 struct qed_ptt *p_ptt; 1623 u8 fw_return_code; 1624 int rc; 1625 1626 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid); 1627 1628 /* Get SPQ entry */ 1629 memset(&init_data, 0, sizeof(init_data)); 1630 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1631 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1632 1633 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR, 1634 p_hwfn->p_rdma_info->proto, &init_data); 1635 if (rc) { 1636 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); 1637 return rc; 1638 } 1639 1640 p_ramrod = &p_ent->ramrod.rdma_deregister_tid; 1641 p_ramrod->itid = cpu_to_le32(itid); 1642 1643 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); 1644 if (rc) { 1645 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); 1646 return rc; 1647 } 1648 1649 if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) { 1650 DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code); 1651 return -EINVAL; 1652 } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) { 1653 /* Bit indicating that the TID is in use and a nig drain is 1654 * required before sending the ramrod again 1655 */ 1656 p_ptt = qed_ptt_acquire(p_hwfn); 1657 if (!p_ptt) { 1658 rc = -EBUSY; 1659 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1660 "Failed to acquire PTT\n"); 1661 return rc; 1662 } 1663 1664 rc = qed_mcp_drain(p_hwfn, p_ptt); 1665 if (rc) { 1666 qed_ptt_release(p_hwfn, p_ptt); 1667 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1668 "Drain failed\n"); 1669 return rc; 1670 } 1671 1672 qed_ptt_release(p_hwfn, p_ptt); 1673 1674 /* Resend the ramrod */ 1675 rc = qed_sp_init_request(p_hwfn, &p_ent, 1676 RDMA_RAMROD_DEREGISTER_MR, 1677 p_hwfn->p_rdma_info->proto, 1678 &init_data); 1679 if (rc) { 1680 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1681 "Failed to init sp-element\n"); 1682 return rc; 1683 } 1684 1685 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); 1686 if (rc) { 1687 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1688 "Ramrod failed\n"); 1689 return rc; 1690 } 1691 1692 if (fw_return_code != RDMA_RETURN_OK) { 1693 DP_NOTICE(p_hwfn, "fw_return_code = %d\n", 1694 fw_return_code); 1695 return rc; 1696 } 1697 } 1698 1699 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc); 1700 return rc; 1701 } 1702 1703 static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev) 1704 { 1705 return QED_AFFIN_HWFN(cdev); 1706 } 1707 1708 static struct qed_bmap *qed_rdma_get_srq_bmap(struct qed_hwfn *p_hwfn, 1709 bool is_xrc) 1710 { 1711 if (is_xrc) 1712 return &p_hwfn->p_rdma_info->xrc_srq_map; 1713 1714 return &p_hwfn->p_rdma_info->srq_map; 1715 } 1716 1717 static int qed_rdma_modify_srq(void *rdma_cxt, 1718 struct qed_rdma_modify_srq_in_params *in_params) 1719 { 1720 struct rdma_srq_modify_ramrod_data *p_ramrod; 1721 struct qed_sp_init_data init_data = {}; 1722 struct qed_hwfn *p_hwfn = rdma_cxt; 1723 struct qed_spq_entry *p_ent; 1724 u16 opaque_fid; 1725 int rc; 1726 1727 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1728 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1729 1730 rc = qed_sp_init_request(p_hwfn, &p_ent, 1731 RDMA_RAMROD_MODIFY_SRQ, 1732 p_hwfn->p_rdma_info->proto, &init_data); 1733 if (rc) 1734 return rc; 1735 1736 p_ramrod = &p_ent->ramrod.rdma_modify_srq; 1737 p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); 1738 opaque_fid = p_hwfn->hw_info.opaque_fid; 1739 p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); 1740 p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit); 1741 1742 rc = qed_spq_post(p_hwfn, p_ent, NULL); 1743 if (rc) 1744 return rc; 1745 1746 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x, is_xrc=%u\n", 1747 in_params->srq_id, in_params->is_xrc); 1748 1749 return rc; 1750 } 1751 1752 static int 1753 qed_rdma_destroy_srq(void *rdma_cxt, 1754 struct qed_rdma_destroy_srq_in_params *in_params) 1755 { 1756 struct rdma_srq_destroy_ramrod_data *p_ramrod; 1757 struct qed_sp_init_data init_data = {}; 1758 struct qed_hwfn *p_hwfn = rdma_cxt; 1759 struct qed_spq_entry *p_ent; 1760 struct qed_bmap *bmap; 1761 u16 opaque_fid; 1762 u16 offset; 1763 int rc; 1764 1765 opaque_fid = p_hwfn->hw_info.opaque_fid; 1766 1767 init_data.opaque_fid = opaque_fid; 1768 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1769 1770 rc = qed_sp_init_request(p_hwfn, &p_ent, 1771 RDMA_RAMROD_DESTROY_SRQ, 1772 p_hwfn->p_rdma_info->proto, &init_data); 1773 if (rc) 1774 return rc; 1775 1776 p_ramrod = &p_ent->ramrod.rdma_destroy_srq; 1777 p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); 1778 p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); 1779 1780 rc = qed_spq_post(p_hwfn, p_ent, NULL); 1781 if (rc) 1782 return rc; 1783 1784 bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc); 1785 offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset; 1786 1787 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 1788 qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id - offset); 1789 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 1790 1791 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1792 "XRC/SRQ destroyed Id = %x, is_xrc=%u\n", 1793 in_params->srq_id, in_params->is_xrc); 1794 1795 return rc; 1796 } 1797 1798 static int 1799 qed_rdma_create_srq(void *rdma_cxt, 1800 struct qed_rdma_create_srq_in_params *in_params, 1801 struct qed_rdma_create_srq_out_params *out_params) 1802 { 1803 struct rdma_srq_create_ramrod_data *p_ramrod; 1804 struct qed_sp_init_data init_data = {}; 1805 struct qed_hwfn *p_hwfn = rdma_cxt; 1806 enum qed_cxt_elem_type elem_type; 1807 struct qed_spq_entry *p_ent; 1808 u16 opaque_fid, srq_id; 1809 struct qed_bmap *bmap; 1810 u32 returned_id; 1811 u16 offset; 1812 int rc; 1813 1814 bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc); 1815 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 1816 rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id); 1817 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 1818 1819 if (rc) { 1820 DP_NOTICE(p_hwfn, 1821 "failed to allocate xrc/srq id (is_xrc=%u)\n", 1822 in_params->is_xrc); 1823 return rc; 1824 } 1825 1826 elem_type = (in_params->is_xrc) ? (QED_ELEM_XRC_SRQ) : (QED_ELEM_SRQ); 1827 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id); 1828 if (rc) 1829 goto err; 1830 1831 opaque_fid = p_hwfn->hw_info.opaque_fid; 1832 1833 opaque_fid = p_hwfn->hw_info.opaque_fid; 1834 init_data.opaque_fid = opaque_fid; 1835 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1836 1837 rc = qed_sp_init_request(p_hwfn, &p_ent, 1838 RDMA_RAMROD_CREATE_SRQ, 1839 p_hwfn->p_rdma_info->proto, &init_data); 1840 if (rc) 1841 goto err; 1842 1843 p_ramrod = &p_ent->ramrod.rdma_create_srq; 1844 DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr); 1845 p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages); 1846 p_ramrod->pd_id = cpu_to_le16(in_params->pd_id); 1847 p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); 1848 p_ramrod->page_size = cpu_to_le16(in_params->page_size); 1849 DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr); 1850 offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset; 1851 srq_id = (u16)returned_id + offset; 1852 p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id); 1853 1854 if (in_params->is_xrc) { 1855 SET_FIELD(p_ramrod->flags, 1856 RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG, 1); 1857 SET_FIELD(p_ramrod->flags, 1858 RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN, 1859 in_params->reserved_key_en); 1860 p_ramrod->xrc_srq_cq_cid = 1861 cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | 1862 in_params->cq_cid); 1863 p_ramrod->xrc_domain = cpu_to_le16(in_params->xrcd_id); 1864 } 1865 rc = qed_spq_post(p_hwfn, p_ent, NULL); 1866 if (rc) 1867 goto err; 1868 1869 out_params->srq_id = srq_id; 1870 1871 DP_VERBOSE(p_hwfn, 1872 QED_MSG_RDMA, 1873 "XRC/SRQ created Id = %x (is_xrc=%u)\n", 1874 out_params->srq_id, in_params->is_xrc); 1875 return rc; 1876 1877 err: 1878 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 1879 qed_bmap_release_id(p_hwfn, bmap, returned_id); 1880 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 1881 1882 return rc; 1883 } 1884 1885 bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn) 1886 { 1887 bool result; 1888 1889 /* if rdma wasn't activated yet, naturally there are no qps */ 1890 if (!p_hwfn->p_rdma_info->active) 1891 return false; 1892 1893 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 1894 if (!p_hwfn->p_rdma_info->cid_map.bitmap) 1895 result = false; 1896 else 1897 result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map); 1898 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 1899 return result; 1900 } 1901 1902 void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1903 { 1904 u32 val; 1905 1906 val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1; 1907 1908 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val); 1909 DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA), 1910 "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n", 1911 val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm); 1912 } 1913 1914 1915 void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1916 { 1917 p_hwfn->db_bar_no_edpm = true; 1918 1919 qed_rdma_dpm_conf(p_hwfn, p_ptt); 1920 } 1921 1922 static int qed_rdma_start(void *rdma_cxt, 1923 struct qed_rdma_start_in_params *params) 1924 { 1925 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 1926 struct qed_ptt *p_ptt; 1927 int rc = -EBUSY; 1928 1929 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1930 "desired_cnq = %08x\n", params->desired_cnq); 1931 1932 p_ptt = qed_ptt_acquire(p_hwfn); 1933 if (!p_ptt) 1934 goto err; 1935 1936 rc = qed_rdma_alloc(p_hwfn); 1937 if (rc) 1938 goto err1; 1939 1940 rc = qed_rdma_setup(p_hwfn, p_ptt, params); 1941 if (rc) 1942 goto err2; 1943 1944 qed_ptt_release(p_hwfn, p_ptt); 1945 p_hwfn->p_rdma_info->active = 1; 1946 1947 return rc; 1948 1949 err2: 1950 qed_rdma_free(p_hwfn); 1951 err1: 1952 qed_ptt_release(p_hwfn, p_ptt); 1953 err: 1954 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc); 1955 return rc; 1956 } 1957 1958 static int qed_rdma_init(struct qed_dev *cdev, 1959 struct qed_rdma_start_in_params *params) 1960 { 1961 return qed_rdma_start(QED_AFFIN_HWFN(cdev), params); 1962 } 1963 1964 static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) 1965 { 1966 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 1967 1968 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi); 1969 1970 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 1971 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi); 1972 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 1973 } 1974 1975 static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev, 1976 u8 *old_mac_address, 1977 u8 *new_mac_address) 1978 { 1979 int rc = 0; 1980 1981 if (old_mac_address) 1982 qed_llh_remove_mac_filter(cdev, 0, old_mac_address); 1983 if (new_mac_address) 1984 rc = qed_llh_add_mac_filter(cdev, 0, new_mac_address); 1985 1986 if (rc) 1987 DP_ERR(cdev, 1988 "qed roce ll2 mac filter set: failed to add MAC filter\n"); 1989 1990 return rc; 1991 } 1992 1993 static int qed_iwarp_set_engine_affin(struct qed_dev *cdev, bool b_reset) 1994 { 1995 enum qed_eng eng; 1996 u8 ppfid = 0; 1997 int rc; 1998 1999 /* Make sure iwarp cmt mode is enabled before setting affinity */ 2000 if (!cdev->iwarp_cmt) 2001 return -EINVAL; 2002 2003 if (b_reset) 2004 eng = QED_BOTH_ENG; 2005 else 2006 eng = cdev->l2_affin_hint ? QED_ENG1 : QED_ENG0; 2007 2008 rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng); 2009 if (rc) { 2010 DP_NOTICE(cdev, 2011 "Failed to set the engine affinity of ppfid %d\n", 2012 ppfid); 2013 return rc; 2014 } 2015 2016 DP_VERBOSE(cdev, (QED_MSG_RDMA | QED_MSG_SP), 2017 "LLH: Set the engine affinity of non-RoCE packets as %d\n", 2018 eng); 2019 2020 return 0; 2021 } 2022 2023 static const struct qed_rdma_ops qed_rdma_ops_pass = { 2024 .common = &qed_common_ops_pass, 2025 .fill_dev_info = &qed_fill_rdma_dev_info, 2026 .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx, 2027 .rdma_init = &qed_rdma_init, 2028 .rdma_add_user = &qed_rdma_add_user, 2029 .rdma_remove_user = &qed_rdma_remove_user, 2030 .rdma_stop = &qed_rdma_stop, 2031 .rdma_query_port = &qed_rdma_query_port, 2032 .rdma_query_device = &qed_rdma_query_device, 2033 .rdma_get_start_sb = &qed_rdma_get_sb_start, 2034 .rdma_get_rdma_int = &qed_rdma_get_int, 2035 .rdma_set_rdma_int = &qed_rdma_set_int, 2036 .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix, 2037 .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update, 2038 .rdma_alloc_pd = &qed_rdma_alloc_pd, 2039 .rdma_dealloc_pd = &qed_rdma_free_pd, 2040 .rdma_alloc_xrcd = &qed_rdma_alloc_xrcd, 2041 .rdma_dealloc_xrcd = &qed_rdma_free_xrcd, 2042 .rdma_create_cq = &qed_rdma_create_cq, 2043 .rdma_destroy_cq = &qed_rdma_destroy_cq, 2044 .rdma_create_qp = &qed_rdma_create_qp, 2045 .rdma_modify_qp = &qed_rdma_modify_qp, 2046 .rdma_query_qp = &qed_rdma_query_qp, 2047 .rdma_destroy_qp = &qed_rdma_destroy_qp, 2048 .rdma_alloc_tid = &qed_rdma_alloc_tid, 2049 .rdma_free_tid = &qed_rdma_free_tid, 2050 .rdma_register_tid = &qed_rdma_register_tid, 2051 .rdma_deregister_tid = &qed_rdma_deregister_tid, 2052 .rdma_create_srq = &qed_rdma_create_srq, 2053 .rdma_modify_srq = &qed_rdma_modify_srq, 2054 .rdma_destroy_srq = &qed_rdma_destroy_srq, 2055 .ll2_acquire_connection = &qed_ll2_acquire_connection, 2056 .ll2_establish_connection = &qed_ll2_establish_connection, 2057 .ll2_terminate_connection = &qed_ll2_terminate_connection, 2058 .ll2_release_connection = &qed_ll2_release_connection, 2059 .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer, 2060 .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet, 2061 .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet, 2062 .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter, 2063 .ll2_get_stats = &qed_ll2_get_stats, 2064 .iwarp_set_engine_affin = &qed_iwarp_set_engine_affin, 2065 .iwarp_connect = &qed_iwarp_connect, 2066 .iwarp_create_listen = &qed_iwarp_create_listen, 2067 .iwarp_destroy_listen = &qed_iwarp_destroy_listen, 2068 .iwarp_accept = &qed_iwarp_accept, 2069 .iwarp_reject = &qed_iwarp_reject, 2070 .iwarp_send_rtr = &qed_iwarp_send_rtr, 2071 }; 2072 2073 const struct qed_rdma_ops *qed_get_rdma_ops(void) 2074 { 2075 return &qed_rdma_ops_pass; 2076 } 2077 EXPORT_SYMBOL(qed_get_rdma_ops); 2078