/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include <linux/qed/qed_rdma_if.h>
#include "qed_rdma.h"
#include "qed_roce.h"
#include "qed_sp.h"

int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
			struct qed_bmap *bmap, u32 max_count, char *name)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);

	bmap->max_count = max_count;

	bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long),
			       GFP_KERNEL);
	if (!bmap->bitmap)
		return -ENOMEM;

	snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
	return 0;
}

int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
			   struct qed_bmap *bmap, u32 *id_num)
{
	*id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
	if (*id_num >= bmap->max_count)
		return -EINVAL;

	__set_bit(*id_num, bmap->bitmap);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
		   bmap->name, *id_num);

	return 0;
}
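/* Typical bitmap usage, for reference (illustrative only): allocate the
 * map once at init time, then take and release ids under the rdma_info
 * lock, and free it with a leak check at teardown:
 *
 *	qed_rdma_bmap_alloc(p_hwfn, &bmap, max, "NAME");
 *	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 *	rc = qed_rdma_bmap_alloc_id(p_hwfn, &bmap, &id);
 *	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 *	...
 *	qed_bmap_release_id(p_hwfn, &bmap, id);
 *	qed_rdma_bmap_free(p_hwfn, &bmap, true);
 *
 * The helpers themselves do not lock; callers serialize with
 * p_rdma_info->lock as done throughout this file.
 */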
void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
		     struct qed_bmap *bmap, u32 id_num)
{
	if (id_num >= bmap->max_count)
		return;

	__set_bit(id_num, bmap->bitmap);
}

void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
			 struct qed_bmap *bmap, u32 id_num)
{
	bool b_acquired;

	if (id_num >= bmap->max_count)
		return;

	b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
	if (!b_acquired) {
		DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
			  bmap->name, id_num);
		return;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
		   bmap->name, id_num);
}

int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
		     struct qed_bmap *bmap, u32 id_num)
{
	if (id_num >= bmap->max_count)
		return -1;

	return test_bit(id_num, bmap->bitmap);
}

static bool qed_bmap_is_empty(struct qed_bmap *bmap)
{
	return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
}

static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
	/* First sb id for RoCE is after all the l2 sb */
	return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
}

int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_info *p_rdma_info;

	p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
	if (!p_rdma_info)
		return -ENOMEM;

	spin_lock_init(&p_rdma_info->lock);

	p_hwfn->p_rdma_info = p_rdma_info;
	return 0;
}

void qed_rdma_info_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->p_rdma_info);
	p_hwfn->p_rdma_info = NULL;
}

static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 num_cons, num_tasks;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		p_rdma_info->proto = PROTOCOLID_IWARP;
	else
		p_rdma_info->proto = PROTOCOLID_ROCE;

	num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
					       NULL);

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		p_rdma_info->num_qps = num_cons;
	else
		p_rdma_info->num_qps = num_cons / 2; /* 2 cids per qp */

	num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);

	/* Each MR uses a single task */
	p_rdma_info->num_mrs = num_tasks;

	/* Queue zone lines are shared between RoCE and L2 in such a way that
	 * they can be used by each without obstructing the other.
	 */
	p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
	p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);

	/* Allocate a struct with device params and fill it */
	p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
	if (!p_rdma_info->dev)
		return rc;

	/* Allocate a struct with port params and fill it */
	p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
	if (!p_rdma_info->port)
		goto free_rdma_dev;

	/* Allocate bit map for pd's */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
				 "PD");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate pd_map, rc = %d\n", rc);
		goto free_rdma_port;
	}

	/* Allocate bit map for XRC Domains */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrcd_map,
				 QED_RDMA_MAX_XRCDS, "XRCD");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate xrcd_map, rc = %d\n", rc);
		goto free_pd_map;
	}

	/* Allocate DPI bitmap */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
				 p_hwfn->dpi_count, "DPI");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate DPI bitmap, rc = %d\n", rc);
		goto free_xrcd_map;
	}

	/* Allocate bitmap for cq's. The maximum number of CQs is bound to
	 * the number of connections we support. (num_qps in iWARP or
	 * num_qps/2 in RoCE).
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, num_cons, "CQ");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate cq bitmap, rc = %d\n", rc);
		goto free_dpi_map;
	}

	/* Allocate bitmap for toggle bit for cq icids.
	 * We toggle the bit every time we create or resize cq for a given icid.
	 * Size needs to equal the size of the cq bmap.
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
				 num_cons, "Toggle");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate toggle bits, rc = %d\n", rc);
		goto free_cq_map;
	}

	/* Allocate bitmap for itids */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
				 p_rdma_info->num_mrs, "MR");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate itids bitmaps, rc = %d\n", rc);
		goto free_toggle_map;
	}

	/* Allocate bitmap for cids used for qps. */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
				 "CID");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate cid bitmap, rc = %d\n", rc);
		goto free_tid_map;
	}

	/* Allocate bitmap for cids used for responders/requesters. */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
				 "REAL_CID");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate real cid bitmap, rc = %d\n", rc);
		goto free_cid_map;
	}

	/* The first SRQ follows the last XRC SRQ. This means that the
	 * SRQ IDs start from an offset equal to max_xrc_srqs.
	 */
	p_rdma_info->srq_id_offset = p_hwfn->p_cxt_mngr->xrc_srq_count;
	rc = qed_rdma_bmap_alloc(p_hwfn,
				 &p_rdma_info->xrc_srq_map,
				 p_hwfn->p_cxt_mngr->xrc_srq_count, "XRC SRQ");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate xrc srq bitmap, rc = %d\n", rc);
		goto free_real_cid_map;
	}

	/* Allocate bitmap for srqs */
	p_rdma_info->num_srqs = p_hwfn->p_cxt_mngr->srq_count;
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
				 p_rdma_info->num_srqs, "SRQ");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate srq bitmap, rc = %d\n", rc);
		goto free_xrc_srq_map;
	}

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		rc = qed_iwarp_alloc(p_hwfn);

	if (rc)
		goto free_srq_map;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
	return 0;

free_srq_map:
	kfree(p_rdma_info->srq_map.bitmap);
free_xrc_srq_map:
	kfree(p_rdma_info->xrc_srq_map.bitmap);
free_real_cid_map:
	kfree(p_rdma_info->real_cid_map.bitmap);
free_cid_map:
	kfree(p_rdma_info->cid_map.bitmap);
free_tid_map:
	kfree(p_rdma_info->tid_map.bitmap);
free_toggle_map:
	kfree(p_rdma_info->toggle_bits.bitmap);
free_cq_map:
	kfree(p_rdma_info->cq_map.bitmap);
free_dpi_map:
	kfree(p_rdma_info->dpi_map.bitmap);
free_xrcd_map:
	kfree(p_rdma_info->xrcd_map.bitmap);
free_pd_map:
	kfree(p_rdma_info->pd_map.bitmap);
free_rdma_port:
	kfree(p_rdma_info->port);
free_rdma_dev:
	kfree(p_rdma_info->dev);

	return rc;
}
void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
			struct qed_bmap *bmap, bool check)
{
	int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
	int last_line = bmap->max_count / (64 * 8);
	int last_item = last_line * 8 +
	    DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
	u64 *pmap = (u64 *)bmap->bitmap;
	int line, item, offset;
	char str_last_line[200] = { 0 };

	if (!weight || !check)
		goto end;

	DP_NOTICE(p_hwfn,
		  "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
		  bmap->name, bmap->max_count, weight);

	/* print aligned non-zero lines, if any */
	for (item = 0, line = 0; line < last_line; line++, item += 8)
		if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
			DP_NOTICE(p_hwfn,
				  "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
				  line,
				  pmap[item],
				  pmap[item + 1],
				  pmap[item + 2],
				  pmap[item + 3],
				  pmap[item + 4],
				  pmap[item + 5],
				  pmap[item + 6], pmap[item + 7]);

	/* print last unaligned non-zero line, if any */
	if ((bmap->max_count % (64 * 8)) &&
	    (bitmap_weight((unsigned long *)&pmap[item],
			   bmap->max_count - item * 64))) {
		offset = sprintf(str_last_line, "line 0x%04x: ", line);
		for (; item < last_item; item++)
			offset += sprintf(str_last_line + offset,
					  "0x%016llx ", pmap[item]);
		DP_NOTICE(p_hwfn, "%s\n", str_last_line);
	}

end:
	kfree(bmap->bitmap);
	bmap->bitmap = NULL;
}

static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_resc_free(p_hwfn);

	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1);

	kfree(p_rdma_info->port);
	kfree(p_rdma_info->dev);
}

static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
{
	qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
}

static void qed_rdma_free(struct qed_hwfn *p_hwfn)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");

	qed_rdma_free_reserved_lkey(p_hwfn);
	qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto);
	qed_rdma_resc_free(p_hwfn);
}
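/* Derive a 64-bit node GUID from the port MAC address, EUI-64 style:
 * flip the locally-administered bit of the first octet (hence the "^ 2")
 * and splice 0xff, 0xfe between the OUI and the device-specific bytes.
 * For instance, MAC 00:0e:1e:12:34:56 yields GUID 02:0e:1e:ff:fe:12:34:56
 * (the MAC value is illustrative only).
 */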
static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
{
	guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
	guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
	guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
	guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
	guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
}

static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
				 struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_events *events;

	events = &p_hwfn->p_rdma_info->events;

	events->unaffiliated_event = params->events->unaffiliated_event;
	events->affiliated_event = params->events->affiliated_event;
	events->context = params->events->context;
}

static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
				  struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 pci_status_control;
	u32 num_qps;

	/* Vendor specific information */
	dev->vendor_id = cdev->vendor_id;
	dev->vendor_part_id = cdev->device_id;
	dev->hw_ver = cdev->chip_rev;
	dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
		      (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);

	qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
	dev->node_guid = dev->sys_image_guid;

	dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
			     RDMA_MAX_SGE_PER_RQ_WQE);

	if (cdev->rdma_max_sge)
		dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);

	dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE;
	if (p_hwfn->cdev->rdma_max_srq_sge) {
		dev->max_srq_sge = min_t(u32,
					 p_hwfn->cdev->rdma_max_srq_sge,
					 dev->max_srq_sge);
	}
	dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;

	dev->max_inline = (cdev->rdma_max_inline) ?
			  min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
			  dev->max_inline;

	dev->max_wqe = QED_RDMA_MAX_WQE;
	dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);

	/* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
	 * it is up-aligned to 16 and then to ILT page size within qed cxt.
	 * This is OK in terms of ILT, but we don't want to configure the FW
	 * above its abilities.
	 */
	num_qps = ROCE_MAX_QPS;
	num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
	dev->max_qp = num_qps;

	/* CQs use the same icids that QPs use, hence they are limited by the
	 * number of icids. There are two icids per QP.
	 */
	dev->max_cq = num_qps * 2;

	/* The number of mrs is smaller by 1 since the first is reserved */
	dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
	dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;

	/* The maximum CQE capacity per CQ supported.
	 * max number of cqes will be in two layer pbl,
	 * 8 is the pointer size in bytes
	 * 32 is the size of cq element in bytes
	 */
	if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
		dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
	else
		dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;

	dev->max_mw = 0;
	dev->max_fmr = QED_RDMA_MAX_FMR;
	dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
	dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
	dev->max_pkey = QED_RDMA_MAX_P_KEY;

	dev->max_srq = p_hwfn->p_rdma_info->num_srqs;
	dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM;
	dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					  (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
	dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					 RDMA_REQ_RD_ATOMIC_ELM_SIZE;
	dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
					   p_hwfn->p_rdma_info->num_qps;
	dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
	dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
	dev->max_pd = RDMA_MAX_PDS;
	dev->max_ah = p_hwfn->p_rdma_info->num_qps;
	dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);

	/* Set capabilities */
	dev->dev_caps = 0;
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);

	/* Check atomic operations support in PCI configuration space. */
	pcie_capability_read_dword(cdev->pdev, PCI_EXP_DEVCTL2,
				   &pci_status_control);

	if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
		SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_init_devinfo(p_hwfn);
}

static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	port->port_state = p_hwfn->mcp_info->link_output.link_up ?
			   QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

	port->max_msg_size = min_t(u64,
				   (dev->max_mr_mw_fmr_size *
				    p_hwfn->cdev->rdma_max_sge),
				   BIT(31));

	port->pkey_bad_counter = 0;
}

static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
	p_hwfn->b_rdma_enabled_in_prs = false;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_init_hw(p_hwfn, p_ptt);
	else
		rc = qed_roce_init_hw(p_hwfn, p_ptt);

	return rc;
}

static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
			     struct qed_rdma_start_in_params *params,
			     struct qed_ptt *p_ptt)
{
	struct rdma_init_func_ramrod_data *p_ramrod;
	struct qed_rdma_cnq_params *p_cnq_pbl_list;
	struct rdma_init_func_hdr *p_params_header;
	struct rdma_cnq_params *p_cnq_params;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 cnq_id, sb_id;
	u16 igu_sb_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");

	/* Save the number of cnqs for the function close ramrod */
	p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		qed_iwarp_init_fw_ramrod(p_hwfn,
					 &p_ent->ramrod.iwarp_init_func);
		p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
	} else {
		p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
	}

	p_params_header = &p_ramrod->params_header;
	p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
							   QED_RDMA_CNQ_RAM);
	p_params_header->num_cnqs = params->desired_cnq;
	p_params_header->first_reg_srq_id =
	    cpu_to_le16(p_hwfn->p_rdma_info->srq_id_offset);
	p_params_header->reg_srq_base_addr =
	    cpu_to_le32(qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM));
	if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
		p_params_header->cq_ring_mode = 1;
	else
		p_params_header->cq_ring_mode = 0;

	for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
		sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
		igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
		p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id);
		p_cnq_params = &p_ramrod->cnq_params[cnq_id];
		p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];

		p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
		p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;

		DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
			       p_cnq_pbl_list->pbl_ptr);

		/* we assume here that cnq_id and qz_offset are the same */
		p_cnq_params->queue_zone_num =
			cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
				    cnq_id);
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->tid_map, itid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc)
		goto out;

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
out:
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
	return rc;
}
static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	/* Tid 0 will be used as the key for "reserved MR".
	 * The driver should allocate memory for it so it can be loaded but no
	 * ramrod should be passed on it.
	 */
	qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
	if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
		DP_NOTICE(p_hwfn,
			  "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
		return -EINVAL;
	}

	return 0;
}

static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_rdma_start_in_params *params)
{
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");

	qed_rdma_init_devinfo(p_hwfn, params);
	qed_rdma_init_port(p_hwfn);
	qed_rdma_init_events(p_hwfn, params);

	rc = qed_rdma_reserve_lkey(p_hwfn);
	if (rc)
		return rc;

	rc = qed_rdma_init_hw(p_hwfn, p_ptt);
	if (rc)
		return rc;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		rc = qed_iwarp_setup(p_hwfn, params);
		if (rc)
			return rc;
	} else {
		rc = qed_roce_setup(p_hwfn);
		if (rc)
			return rc;
	}

	return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}

static int qed_rdma_stop(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_close_func_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_ptt *p_ptt;
	u32 ll2_ethertype_en;
	int rc = -EBUSY;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
		return rc;
	}

	/* Disable RoCE search */
	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
	p_hwfn->b_rdma_enabled_in_prs = false;
	p_hwfn->p_rdma_info->active = 0;
	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);

	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en & 0xFFFE));

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		rc = qed_iwarp_stop(p_hwfn);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}
	} else {
		qed_roce_stop(p_hwfn);
	}

	qed_ptt_release(p_hwfn, p_ptt);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Stop RoCE */
	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		goto out;

	p_ramrod = &p_ent->ramrod.rdma_close_func;

	p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
	p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

out:
	qed_rdma_free(p_hwfn);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_add_user(void *rdma_cxt,
			     struct qed_rdma_add_user_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 dpi_start_offset;
	u32 returned_id = 0;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");

	/* Allocate DPI */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
				    &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	out_params->dpi = (u16)returned_id;

	/* Calculate the corresponding DPI address */
	dpi_start_offset = p_hwfn->dpi_start_offset;

	out_params->dpi_addr = p_hwfn->doorbells + dpi_start_offset +
			       out_params->dpi * p_hwfn->dpi_size;

	out_params->dpi_phys_addr = p_hwfn->db_phys_addr +
				    dpi_start_offset +
				    ((out_params->dpi) * p_hwfn->dpi_size);

	out_params->dpi_size = p_hwfn->dpi_size;
	out_params->wid_count = p_hwfn->wid_count;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
	return rc;
}

static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
	struct qed_mcp_link_state *p_link_output;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");

	/* The link state is saved only for the leading hwfn */
	p_link_output = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;

	p_port->port_state = p_link_output->link_up ? QED_RDMA_PORT_UP
						    : QED_RDMA_PORT_DOWN;

	p_port->link_speed = p_link_output->speed;

	p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;

	return p_port;
}

static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");

	/* Return struct with device parameters */
	return p_hwfn->p_rdma_info->dev;
}

static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
	struct qed_hwfn *p_hwfn;
	u16 qz_num;
	u32 addr;

	p_hwfn = (struct qed_hwfn *)rdma_cxt;

	if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
		DP_NOTICE(p_hwfn,
			  "queue zone offset %d is too large (max is %d)\n",
			  qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
		return;
	}

	qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
	addr = GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	wmb();
}

static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
				  struct qed_dev_rdma_info *info)
{
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);

	memset(info, 0, sizeof(*info));

	info->rdma_type = QED_IS_ROCE_PERSONALITY(p_hwfn) ?
	    QED_RDMA_TYPE_ROCE : QED_RDMA_TYPE_IWARP;

	info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);

	qed_fill_dev_info(cdev, &info->common);

	return 0;
}

static int qed_rdma_get_sb_start(struct qed_dev *cdev)
{
	int feat_num;

	if (cdev->num_hwfns > 1)
		feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE);
	else
		feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE) *
			   cdev->num_hwfns;

	return feat_num;
}

static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
{
	int n_cnq = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_RDMA_CNQ);
	int n_msix = cdev->int_params.rdma_msix_cnt;

	return min_t(int, n_cnq, n_msix);
}

static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
		DP_ERR(cdev,
		       "qed roce supports only MSI-X interrupts (detected %d).\n",
		       cdev->int_params.out.int_mode);
		return -EINVAL;
	} else if (cdev->int_params.fp_msix_cnt) {
		limit = cdev->int_params.rdma_msix_cnt;
	}

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(*info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.rdma_msix_base;

		info->msix_cnt = cdev->int_params.rdma_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];

		DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
			   info->msix_cnt, msix_base);
	}

	return 0;
}

static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 returned_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");

	/* Allocates an unused protection domain */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->pd_map, &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	*pd = (u16)returned_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
	return rc;
}

static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);

	/* Returns a previously allocated protection domain for reuse */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
static int qed_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 returned_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD\n");

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->xrcd_map,
				    &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed in allocating xrcd id\n");
		return rc;
	}

	*xrcd_id = (u16)returned_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD - done, rc = %d\n", rc);
	return rc;
}

static void qed_rdma_free_xrcd(void *rdma_cxt, u16 xrcd_id)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "xrcd_id = %08x\n", xrcd_id);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, xrcd_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static enum qed_rdma_toggle_bit
qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
{
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	enum qed_rdma_toggle_bit toggle_bit;
	u32 bmap_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);

	/* the function toggles the bit that is related to a given icid
	 * and returns the new toggle bit's value
	 */
	bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);

	spin_lock_bh(&p_info->lock);
	toggle_bit = !test_and_change_bit(bmap_id,
					  p_info->toggle_bits.bitmap);
	spin_unlock_bh(&p_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
		   toggle_bit);

	return toggle_bit;
}
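/* A CQ icid is taken from the same icid space as QPs (hence cq_map is
 * sized by num_cons). The per-icid toggle bit maintained above flips on
 * every create or resize of that icid and is passed to FW in the
 * create-CQ ramrod, distinguishing successive incarnations of the same
 * icid.
 */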
static int qed_rdma_create_cq(void *rdma_cxt,
			      struct qed_rdma_create_cq_in_params *params,
			      u16 *icid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	struct rdma_create_cq_ramrod_data *p_ramrod;
	enum qed_rdma_toggle_bit toggle_bit;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 returned_id, start_cid;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
		   params->cq_handle_hi, params->cq_handle_lo);

	/* Allocate icid */
	spin_lock_bh(&p_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
	spin_unlock_bh(&p_info->lock);

	if (rc) {
		DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
		return rc;
	}

	start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
						p_info->proto);
	*icid = returned_id + start_cid;

	/* Check if icid requires a page allocation */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
	if (rc)
		goto err;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = *icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Send create CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_CREATE_CQ,
				 p_info->proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_create_cq;

	p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
	p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
	p_ramrod->dpi = cpu_to_le16(params->dpi);
	p_ramrod->is_two_level_pbl = params->pbl_two_level;
	p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
	p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
	p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
			   params->cnq_id;
	p_ramrod->int_timeout = params->int_timeout;

	/* toggle the bit for every resize or create cq for a given icid */
	toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);

	p_ramrod->toggle_bit = toggle_bit;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		/* restore toggle bit */
		qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
		goto err;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
	return rc;

err:
	/* release allocated icid */
	spin_lock_bh(&p_info->lock);
	qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
	spin_unlock_bh(&p_info->lock);
	DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);

	return rc;
}

static int
qed_rdma_destroy_cq(void *rdma_cxt,
		    struct qed_rdma_destroy_cq_in_params *in_params,
		    struct qed_rdma_destroy_cq_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_destroy_cq_output_params *p_ramrod_res;
	struct rdma_destroy_cq_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	enum protocol_type proto;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);

	p_ramrod_res =
	    (struct rdma_destroy_cq_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(struct rdma_destroy_cq_output_params),
			       &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy cq failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = in_params->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	proto = p_hwfn->p_rdma_info->proto;

	/* Send destroy CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_DESTROY_CQ,
				 proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	/* Free icid */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	qed_bmap_release_id(p_hwfn,
			    &p_hwfn->p_rdma_info->cq_map,
			    (in_params->icid -
			     qed_cxt_get_proto_cid_start(p_hwfn, proto)));

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
	return rc;

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}
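/* FW expects the MAC as three big-endian 16-bit words; e.g. (illustrative)
 * aa:bb:cc:dd:ee:ff is passed as { 0xaabb, 0xccdd, 0xeeff }, each word
 * stored in little-endian wire order via cpu_to_le16().
 */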
void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
{
	p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
	p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
	p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
}

static int qed_rdma_query_qp(void *rdma_cxt,
			     struct qed_rdma_qp *qp,
			     struct qed_rdma_query_qp_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* The following fields are filled in from qp and not FW as they can't
	 * be modified by FW
	 */
	out_params->mtu = qp->mtu;
	out_params->dest_qp = qp->dest_qp;
	out_params->incoming_atomic_en = qp->incoming_atomic_en;
	out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
	out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
	out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
	out_params->dgid = qp->dgid;
	out_params->flow_label = qp->flow_label;
	out_params->hop_limit_ttl = qp->hop_limit_ttl;
	out_params->traffic_class_tos = qp->traffic_class_tos;
	out_params->timeout = qp->ack_timeout;
	out_params->rnr_retry = qp->rnr_retry_cnt;
	out_params->retry_cnt = qp->retry_cnt;
	out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
	out_params->pkey_index = 0;
	out_params->max_rd_atomic = qp->max_rd_atomic_req;
	out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
	out_params->sqd_async = qp->sqd_async;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_query_qp(qp, out_params);
	else
		rc = qed_roce_query_qp(p_hwfn, qp, out_params);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		rc = qed_iwarp_destroy_qp(p_hwfn, qp);
	else
		rc = qed_roce_destroy_qp(p_hwfn, qp);

	/* free qp params struct */
	kfree(qp);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
	return rc;
}
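/* RoCE consumes two icids per QP (requester + responder) and encodes the
 * externally visible qp_id as (0xFF << 16) | icid; iWARP uses the icid
 * itself as the qp_id. Both paths are handled below.
 */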
static struct qed_rdma_qp *
qed_rdma_create_qp(void *rdma_cxt,
		   struct qed_rdma_create_qp_in_params *in_params,
		   struct qed_rdma_create_qp_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_qp *qp;
	u8 max_stats_queues;
	int rc;

	if (!rdma_cxt || !in_params || !out_params ||
	    !p_hwfn->p_rdma_info->active) {
		DP_ERR(p_hwfn->cdev,
		       "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p)\n",
		       rdma_cxt, in_params, out_params);
		return NULL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "qed rdma create qp called with qp_handle = %08x%08x\n",
		   in_params->qp_handle_hi, in_params->qp_handle_lo);

	/* Some sanity checks... */
	max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
	if (in_params->stats_queue >= max_stats_queues) {
		DP_ERR(p_hwfn->cdev,
		       "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
		       in_params->stats_queue, max_stats_queues);
		return NULL;
	}

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		if (in_params->sq_num_pages * sizeof(struct regpair) >
		    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) {
			DP_NOTICE(p_hwfn->cdev,
				  "Sq num pages: %d exceeds maximum\n",
				  in_params->sq_num_pages);
			return NULL;
		}
		if (in_params->rq_num_pages * sizeof(struct regpair) >
		    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) {
			DP_NOTICE(p_hwfn->cdev,
				  "Rq num pages: %d exceeds maximum\n",
				  in_params->rq_num_pages);
			return NULL;
		}
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->cur_state = QED_ROCE_QP_STATE_RESET;
	qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
	qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
	qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
	qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
	qp->use_srq = in_params->use_srq;
	qp->signal_all = in_params->signal_all;
	qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
	qp->pd = in_params->pd;
	qp->dpi = in_params->dpi;
	qp->sq_cq_id = in_params->sq_cq_id;
	qp->sq_num_pages = in_params->sq_num_pages;
	qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
	qp->rq_cq_id = in_params->rq_cq_id;
	qp->rq_num_pages = in_params->rq_num_pages;
	qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
	qp->srq_id = in_params->srq_id;
	qp->req_offloaded = false;
	qp->resp_offloaded = false;
	qp->e2e_flow_control_en = qp->use_srq ? false : true;
	qp->stats_queue = in_params->stats_queue;
	qp->qp_type = in_params->qp_type;
	qp->xrcd_id = in_params->xrcd_id;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		rc = qed_iwarp_create_qp(p_hwfn, qp, out_params);
		qp->qpid = qp->icid;
	} else {
		qp->edpm_mode = GET_FIELD(in_params->flags, QED_ROCE_EDPM_MODE);
		rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
		qp->qpid = ((0xFF << 16) | qp->icid);
	}

	if (rc) {
		kfree(qp);
		return NULL;
	}

	out_params->icid = qp->icid;
	out_params->qp_id = qp->qpid;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
	return qp;
}
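/* qed_rdma_modify_qp() first caches the requested attributes in the qp
 * struct (guarded by the VALID flags in params->modify_flags) and only
 * then hands the state transition to the RoCE or iWARP backend.
 */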
static int qed_rdma_modify_qp(void *rdma_cxt,
			      struct qed_rdma_qp *qp,
			      struct qed_rdma_modify_qp_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	enum qed_roce_qp_state prev_state;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
		   qp->icid, params->new_state);

	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
		qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
		qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
		qp->incoming_atomic_en = params->incoming_atomic_en;
	}

	/* Update QP structure with the updated values */
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
		qp->roce_mode = params->roce_mode;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
		qp->pkey = params->pkey;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
		qp->e2e_flow_control_en = params->e2e_flow_control_en;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
		qp->dest_qp = params->dest_qp;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
		/* Indicates that the following parameters have changed:
		 * Traffic class, flow label, hop limit, source GID,
		 * destination GID, loopback indicator
		 */
		qp->traffic_class_tos = params->traffic_class_tos;
		qp->flow_label = params->flow_label;
		qp->hop_limit_ttl = params->hop_limit_ttl;

		qp->sgid = params->sgid;
		qp->dgid = params->dgid;
		qp->udp_src_port = 0;
		qp->vlan_id = params->vlan_id;
		qp->mtu = params->mtu;
		qp->lb_indication = params->lb_indication;
		memcpy((u8 *)&qp->remote_mac_addr[0],
		       (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
		if (params->use_local_mac) {
			memcpy((u8 *)&qp->local_mac_addr[0],
			       (u8 *)&params->local_mac_addr[0], ETH_ALEN);
		} else {
			memcpy((u8 *)&qp->local_mac_addr[0],
			       (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
		}
	}
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
		qp->rq_psn = params->rq_psn;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
		qp->sq_psn = params->sq_psn;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
		qp->max_rd_atomic_req = params->max_rd_atomic_req;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
		qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
		qp->ack_timeout = params->ack_timeout;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
		qp->retry_cnt = params->retry_cnt;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
		qp->rnr_retry_cnt = params->rnr_retry_cnt;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
		qp->min_rnr_nak_timer = params->min_rnr_nak_timer;

	qp->sqd_async = params->sqd_async;

	prev_state = qp->cur_state;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
		qp->cur_state = params->new_state;
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
			   qp->cur_state);
	}

	switch (qp->qp_type) {
	case QED_RDMA_QP_TYPE_XRC_INI:
		qp->has_req = 1;
		break;
	case QED_RDMA_QP_TYPE_XRC_TGT:
		qp->has_resp = 1;
		break;
	default:
		qp->has_req = 1;
		qp->has_resp = 1;
	}

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		enum qed_iwarp_qp_state new_state =
		    qed_roce2iwarp_state(qp->cur_state);

		rc = qed_iwarp_modify_qp(p_hwfn, qp, new_state, 0);
	} else {
		rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
	return rc;
}
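/* MR/TID registration below encodes page sizes for FW as log2 values
 * relative to 4KB: a 2MB page, say, is sent as page_size_log - 12 = 9
 * (the example is arithmetic only; see the SET_FIELD calls).
 */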
static int
qed_rdma_register_tid(void *rdma_cxt,
		      struct qed_rdma_register_tid_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_register_tid_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	enum rdma_tid_type tid_type;
	u8 fw_return_code;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (p_hwfn->p_rdma_info->last_tid < params->itid)
		p_hwfn->p_rdma_info->last_tid = params->itid;

	p_ramrod = &p_ent->ramrod.rdma_register_tid;

	p_ramrod->flags = 0;
	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
		  params->pbl_two_level);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);

	/* Don't initialize D/C field, as it may override other bits. */
	if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
		SET_FIELD(p_ramrod->flags,
			  RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
			  params->page_size_log - 12);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
		  params->remote_read);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
		  params->remote_write);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
		  params->remote_atomic);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
		  params->local_write);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
		  params->mw_bind);

	SET_FIELD(p_ramrod->flags1,
		  RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
		  params->pbl_page_size_log - 12);

	SET_FIELD(p_ramrod->flags2,
		  RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);

	switch (params->tid_type) {
	case QED_RDMA_TID_REGISTERED_MR:
		tid_type = RDMA_TID_REGISTERED_MR;
		break;
	case QED_RDMA_TID_FMR:
		tid_type = RDMA_TID_FMR;
		break;
	case QED_RDMA_TID_MW:
		tid_type = RDMA_TID_MW;
		break;
	default:
		rc = -EINVAL;
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		qed_sp_destroy_request(p_hwfn, p_ent);
		return rc;
	}

	SET_FIELD(p_ramrod->flags1,
		  RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);

	p_ramrod->itid = cpu_to_le32(params->itid);
	p_ramrod->key = params->key;
	p_ramrod->pd = cpu_to_le16(params->pd);
	p_ramrod->length_hi = (u8)(params->length >> 32);
	p_ramrod->length_lo = DMA_LO_LE(params->length);
	if (params->zbva) {
		/* Lower 32 bits of the registered MR address.
		 * In case of zero based MR, will hold FBO
		 */
		p_ramrod->va.hi = 0;
		p_ramrod->va.lo = cpu_to_le32(params->fbo);
	} else {
		DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
	}
	DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);

	/* DIF */
	if (params->dif_enabled) {
		SET_FIELD(p_ramrod->flags2,
			  RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
		DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
			       params->dif_error_addr);
	}

	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc)
		return rc;

	if (fw_return_code != RDMA_RETURN_OK) {
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_deregister_tid_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_ptt *p_ptt;
	u8 fw_return_code;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
	p_ramrod->itid = cpu_to_le32(itid);

	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
		return -EINVAL;
	} else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
		/* Bit indicating that the TID is in use and a nig drain is
		 * required before sending the ramrod again
		 */
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt) {
			rc = -EBUSY;
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to acquire PTT\n");
			return rc;
		}

		rc = qed_mcp_drain(p_hwfn, p_ptt);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Drain failed\n");
			return rc;
		}

		qed_ptt_release(p_hwfn, p_ptt);

		/* Resend the ramrod */
		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 RDMA_RAMROD_DEREGISTER_MR,
					 p_hwfn->p_rdma_info->proto,
					 &init_data);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to init sp-element\n");
			return rc;
		}

		rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Ramrod failed\n");
			return rc;
		}

		if (fw_return_code != RDMA_RETURN_OK) {
			DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
				  fw_return_code);
			return rc;
		}
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
	return rc;
}

static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
{
	return QED_AFFIN_HWFN(cdev);
}

static struct qed_bmap *qed_rdma_get_srq_bmap(struct qed_hwfn *p_hwfn,
					      bool is_xrc)
{
	if (is_xrc)
		return &p_hwfn->p_rdma_info->xrc_srq_map;

	return &p_hwfn->p_rdma_info->srq_map;
}
static int qed_rdma_modify_srq(void *rdma_cxt,
			       struct qed_rdma_modify_srq_in_params *in_params)
{
	struct rdma_srq_modify_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data = {};
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_spq_entry *p_ent;
	u16 opaque_fid;
	int rc;

	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_MODIFY_SRQ,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rdma_modify_srq;
	p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
	opaque_fid = p_hwfn->hw_info.opaque_fid;
	p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
	p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x, is_xrc=%u\n",
		   in_params->srq_id, in_params->is_xrc);

	return rc;
}

static int
qed_rdma_destroy_srq(void *rdma_cxt,
		     struct qed_rdma_destroy_srq_in_params *in_params)
{
	struct rdma_srq_destroy_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data = {};
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_spq_entry *p_ent;
	struct qed_bmap *bmap;
	u16 opaque_fid;
	u16 offset;
	int rc;

	opaque_fid = p_hwfn->hw_info.opaque_fid;

	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_DESTROY_SRQ,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rdma_destroy_srq;
	p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
	p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
	offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id - offset);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "XRC/SRQ destroyed Id = %x, is_xrc=%u\n",
		   in_params->srq_id, in_params->is_xrc);

	return rc;
}
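/* SRQ ids are handed out from two ranges: XRC SRQs occupy
 * [0, xrc_srq_count) and regular SRQs follow at srq_id_offset, so the
 * bitmap-relative id is rebased by the offset on create and destroy.
 */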
static int
qed_rdma_create_srq(void *rdma_cxt,
            struct qed_rdma_create_srq_in_params *in_params,
            struct qed_rdma_create_srq_out_params *out_params)
{
    struct rdma_srq_create_ramrod_data *p_ramrod;
    struct qed_sp_init_data init_data = {};
    struct qed_hwfn *p_hwfn = rdma_cxt;
    enum qed_cxt_elem_type elem_type;
    struct qed_spq_entry *p_ent;
    u16 opaque_fid, srq_id;
    struct qed_bmap *bmap;
    u32 returned_id;
    u16 offset;
    int rc;

    bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
    spin_lock_bh(&p_hwfn->p_rdma_info->lock);
    rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
    spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

    if (rc) {
        DP_NOTICE(p_hwfn,
              "failed to allocate xrc/srq id (is_xrc=%u)\n",
              in_params->is_xrc);
        return rc;
    }

    elem_type = (in_params->is_xrc) ? QED_ELEM_XRC_SRQ : QED_ELEM_SRQ;
    rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
    if (rc)
        goto err;

    opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.opaque_fid = opaque_fid;
    init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

    rc = qed_sp_init_request(p_hwfn, &p_ent,
                 RDMA_RAMROD_CREATE_SRQ,
                 p_hwfn->p_rdma_info->proto, &init_data);
    if (rc)
        goto err;

    p_ramrod = &p_ent->ramrod.rdma_create_srq;
    DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr);
    p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages);
    p_ramrod->pd_id = cpu_to_le16(in_params->pd_id);
    p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
    p_ramrod->page_size = cpu_to_le16(in_params->page_size);
    DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr);
    offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset;
    srq_id = (u16)returned_id + offset;
    p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);

    if (in_params->is_xrc) {
        SET_FIELD(p_ramrod->flags,
              RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG, 1);
        SET_FIELD(p_ramrod->flags,
              RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN,
              in_params->reserved_key_en);
        p_ramrod->xrc_srq_cq_cid =
            cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
                    in_params->cq_cid);
        p_ramrod->xrc_domain = cpu_to_le16(in_params->xrcd_id);
    }

    rc = qed_spq_post(p_hwfn, p_ent, NULL);
    if (rc)
        goto err;

    out_params->srq_id = srq_id;

    DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
           "XRC/SRQ created Id = %x (is_xrc=%u)\n",
           out_params->srq_id, in_params->is_xrc);
    return rc;

err:
    spin_lock_bh(&p_hwfn->p_rdma_info->lock);
    qed_bmap_release_id(p_hwfn, bmap, returned_id);
    spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

    return rc;
}

bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
{
    bool result;

    /* If RDMA wasn't activated yet, naturally there are no QPs */
    if (!p_hwfn->p_rdma_info->active)
        return false;

    spin_lock_bh(&p_hwfn->p_rdma_info->lock);
    if (!p_hwfn->p_rdma_info->cid_map.bitmap)
        result = false;
    else
        result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map);
    spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
    return result;
}

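/* Doorbell Push Mode (DPM/EDPM) lets small WQEs be written straight into
 * the doorbell BAR instead of being fetched from host memory by the
 * device. Either a DCBx configuration that forbids EDPM or a doorbell BAR
 * too small for EDPM mappings clears the enable bit written below.
 */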
void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
    u32 val;

    val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;

    qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
    DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
           "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
           val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
}

void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
    p_hwfn->db_bar_no_edpm = true;

    qed_rdma_dpm_conf(p_hwfn, p_ptt);
}

static int qed_rdma_start(void *rdma_cxt,
              struct qed_rdma_start_in_params *params)
{
    struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
    struct qed_ptt *p_ptt;
    int rc = -EBUSY;

    DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
           "desired_cnq = %08x\n", params->desired_cnq);

    p_ptt = qed_ptt_acquire(p_hwfn);
    if (!p_ptt)
        goto err;

    rc = qed_rdma_alloc(p_hwfn);
    if (rc)
        goto err1;

    rc = qed_rdma_setup(p_hwfn, p_ptt, params);
    if (rc)
        goto err2;

    qed_ptt_release(p_hwfn, p_ptt);
    p_hwfn->p_rdma_info->active = 1;

    return rc;

err2:
    qed_rdma_free(p_hwfn);
err1:
    qed_ptt_release(p_hwfn, p_ptt);
err:
    DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
    return rc;
}

static int qed_rdma_init(struct qed_dev *cdev,
             struct qed_rdma_start_in_params *params)
{
    return qed_rdma_start(QED_AFFIN_HWFN(cdev), params);
}

static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
{
    struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

    DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);

    spin_lock_bh(&p_hwfn->p_rdma_info->lock);
    qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
    spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
                       u8 *old_mac_address,
                       u8 *new_mac_address)
{
    int rc = 0;

    if (old_mac_address)
        qed_llh_remove_mac_filter(cdev, 0, old_mac_address);
    if (new_mac_address)
        rc = qed_llh_add_mac_filter(cdev, 0, new_mac_address);

    if (rc)
        DP_ERR(cdev,
               "qed roce ll2 mac filter set: failed to add MAC filter\n");

    return rc;
}

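/* In iWARP CMT (dual-engine) mode both engines are active, so the LLH must
 * be told where to steer non-RoCE traffic: b_reset restores the default of
 * spreading across both engines, otherwise the L2 affinity hint selects
 * engine 0 or 1. ppfid 0 is used here, presumably the port's default ppfid
 * for this PF.
 */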
static int qed_iwarp_set_engine_affin(struct qed_dev *cdev, bool b_reset)
{
    enum qed_eng eng;
    u8 ppfid = 0;
    int rc;

    /* Make sure iwarp cmt mode is enabled before setting affinity */
    if (!cdev->iwarp_cmt)
        return -EINVAL;

    if (b_reset)
        eng = QED_BOTH_ENG;
    else
        eng = cdev->l2_affin_hint ? QED_ENG1 : QED_ENG0;

    rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng);
    if (rc) {
        DP_NOTICE(cdev,
              "Failed to set the engine affinity of ppfid %d\n",
              ppfid);
        return rc;
    }

    DP_VERBOSE(cdev, (QED_MSG_RDMA | QED_MSG_SP),
           "LLH: Set the engine affinity of non-RoCE packets as %d\n",
           eng);

    return 0;
}

static const struct qed_rdma_ops qed_rdma_ops_pass = {
    .common = &qed_common_ops_pass,
    .fill_dev_info = &qed_fill_rdma_dev_info,
    .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
    .rdma_init = &qed_rdma_init,
    .rdma_add_user = &qed_rdma_add_user,
    .rdma_remove_user = &qed_rdma_remove_user,
    .rdma_stop = &qed_rdma_stop,
    .rdma_query_port = &qed_rdma_query_port,
    .rdma_query_device = &qed_rdma_query_device,
    .rdma_get_start_sb = &qed_rdma_get_sb_start,
    .rdma_get_rdma_int = &qed_rdma_get_int,
    .rdma_set_rdma_int = &qed_rdma_set_int,
    .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
    .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
    .rdma_alloc_pd = &qed_rdma_alloc_pd,
    .rdma_dealloc_pd = &qed_rdma_free_pd,
    .rdma_alloc_xrcd = &qed_rdma_alloc_xrcd,
    .rdma_dealloc_xrcd = &qed_rdma_free_xrcd,
    .rdma_create_cq = &qed_rdma_create_cq,
    .rdma_destroy_cq = &qed_rdma_destroy_cq,
    .rdma_create_qp = &qed_rdma_create_qp,
    .rdma_modify_qp = &qed_rdma_modify_qp,
    .rdma_query_qp = &qed_rdma_query_qp,
    .rdma_destroy_qp = &qed_rdma_destroy_qp,
    .rdma_alloc_tid = &qed_rdma_alloc_tid,
    .rdma_free_tid = &qed_rdma_free_tid,
    .rdma_register_tid = &qed_rdma_register_tid,
    .rdma_deregister_tid = &qed_rdma_deregister_tid,
    .rdma_create_srq = &qed_rdma_create_srq,
    .rdma_modify_srq = &qed_rdma_modify_srq,
    .rdma_destroy_srq = &qed_rdma_destroy_srq,
    .ll2_acquire_connection = &qed_ll2_acquire_connection,
    .ll2_establish_connection = &qed_ll2_establish_connection,
    .ll2_terminate_connection = &qed_ll2_terminate_connection,
    .ll2_release_connection = &qed_ll2_release_connection,
    .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer,
    .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet,
    .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
    .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
    .ll2_get_stats = &qed_ll2_get_stats,
    .iwarp_set_engine_affin = &qed_iwarp_set_engine_affin,
    .iwarp_connect = &qed_iwarp_connect,
    .iwarp_create_listen = &qed_iwarp_create_listen,
    .iwarp_destroy_listen = &qed_iwarp_destroy_listen,
    .iwarp_accept = &qed_iwarp_accept,
    .iwarp_reject = &qed_iwarp_reject,
    .iwarp_send_rtr = &qed_iwarp_send_rtr,
};

const struct qed_rdma_ops *qed_get_rdma_ops(void)
{
    return &qed_rdma_ops_pass;
}
EXPORT_SYMBOL(qed_get_rdma_ops);