/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include <linux/qed/qed_rdma_if.h>
#include "qed_rdma.h"
#include "qed_roce.h"
#include "qed_sp.h"

int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
                        struct qed_bmap *bmap, u32 max_count, char *name)
{
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);

        bmap->max_count = max_count;

        bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long),
                               GFP_KERNEL);
        if (!bmap->bitmap)
                return -ENOMEM;

        snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
        return 0;
}

int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
                           struct qed_bmap *bmap, u32 *id_num)
{
        *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
        if (*id_num >= bmap->max_count)
                return -EINVAL;

        __set_bit(*id_num, bmap->bitmap);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
                   bmap->name, *id_num);

        return 0;
}

void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
                     struct qed_bmap *bmap, u32 id_num)
{
        if (id_num >= bmap->max_count)
                return;

        __set_bit(id_num, bmap->bitmap);
}
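/* The helpers above and below form a small ID allocator. A minimal usage
 * sketch, matching the convention used throughout this file (the caller,
 * not the helper, takes p_rdma_info->lock; "id" is a hypothetical local):
 *
 *      u32 id;
 *
 *      spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 *      rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, &id);
 *      spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 *      ...
 *      spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 *      qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, id);
 *      spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 *
 * qed_bmap_release_id() below warns on double-release, which makes leaked
 * or stale IDs visible in the log.
 */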
void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
                         struct qed_bmap *bmap, u32 id_num)
{
        bool b_acquired;

        if (id_num >= bmap->max_count)
                return;

        b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
        if (!b_acquired) {
                DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
                          bmap->name, id_num);
                return;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
                   bmap->name, id_num);
}

int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
                     struct qed_bmap *bmap, u32 id_num)
{
        if (id_num >= bmap->max_count)
                return -1;

        return test_bit(id_num, bmap->bitmap);
}

static bool qed_bmap_is_empty(struct qed_bmap *bmap)
{
        return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
}

static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
        /* First sb id for RoCE is after all the l2 sb */
        return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
}
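/* Worked example for qed_rdma_get_sb_id(): if the PF owns 16 L2 status
 * blocks (an illustrative FEAT_NUM(QED_PF_L2_QUE) value), CNQ 0 maps to
 * absolute SB 16, CNQ 1 to SB 17, and so on; the RDMA CNQ status blocks
 * simply follow the L2 range.
 */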
static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          struct qed_rdma_start_in_params *params)
{
        struct qed_rdma_info *p_rdma_info;
        u32 num_cons, num_tasks;
        int rc = -ENOMEM;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");

        /* Allocate a struct with current pf rdma info */
        p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
        if (!p_rdma_info)
                return rc;

        p_hwfn->p_rdma_info = p_rdma_info;
        if (QED_IS_IWARP_PERSONALITY(p_hwfn))
                p_rdma_info->proto = PROTOCOLID_IWARP;
        else
                p_rdma_info->proto = PROTOCOLID_ROCE;

        num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
                                               NULL);

        if (QED_IS_IWARP_PERSONALITY(p_hwfn))
                p_rdma_info->num_qps = num_cons;
        else
                p_rdma_info->num_qps = num_cons / 2; /* 2 cids per qp */

        num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);

        /* Each MR uses a single task */
        p_rdma_info->num_mrs = num_tasks;

        /* Queue zone lines are shared between RoCE and L2 in such a way that
         * they can be used by each without obstructing the other.
         */
        p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
        p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);

        /* Allocate a struct with device params and fill it */
        p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
        if (!p_rdma_info->dev)
                goto free_rdma_info;

        /* Allocate a struct with port params and fill it */
        p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
        if (!p_rdma_info->port)
                goto free_rdma_dev;

        /* Allocate bit map for pd's */
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
                                 "PD");
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate pd_map, rc = %d\n", rc);
                goto free_rdma_port;
        }

        /* Allocate DPI bitmap */
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
                                 p_hwfn->dpi_count, "DPI");
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate DPI bitmap, rc = %d\n", rc);
                goto free_pd_map;
        }

        /* Allocate bitmap for cq's. The maximum number of CQs is bound to
         * the number of connections we support. (num_qps in iWARP or
         * num_qps/2 in RoCE).
         */
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, num_cons, "CQ");
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate cq bitmap, rc = %d\n", rc);
                goto free_dpi_map;
        }

        /* Allocate bitmap for toggle bit for cq icids
         * We toggle the bit every time we create or resize cq for a given icid.
         * Size needs to equal the size of the cq bmap.
         */
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
                                 num_cons, "Toggle");
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate toggle bits, rc = %d\n", rc);
                goto free_cq_map;
        }

        /* Allocate bitmap for itids */
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
                                 p_rdma_info->num_mrs, "MR");
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate itids bitmaps, rc = %d\n", rc);
                goto free_toggle_map;
        }

        /* Allocate bitmap for cids used for qps. */
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
                                 "CID");
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate cid bitmap, rc = %d\n", rc);
                goto free_tid_map;
        }

        /* Allocate bitmap for cids used for responders/requesters. */
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
                                 "REAL_CID");
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate real cid bitmap, rc = %d\n", rc);
                goto free_cid_map;
        }

        /* Allocate bitmap for srqs */
        p_rdma_info->num_srqs = qed_cxt_get_srq_count(p_hwfn);
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
                                 p_rdma_info->num_srqs, "SRQ");
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate srq bitmap, rc = %d\n", rc);
                goto free_real_cid_map;
        }

        if (QED_IS_IWARP_PERSONALITY(p_hwfn))
                rc = qed_iwarp_alloc(p_hwfn);

        if (rc)
                goto free_srq_map;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
        return 0;

free_srq_map:
        kfree(p_rdma_info->srq_map.bitmap);
free_real_cid_map:
        kfree(p_rdma_info->real_cid_map.bitmap);
free_cid_map:
        kfree(p_rdma_info->cid_map.bitmap);
free_tid_map:
        kfree(p_rdma_info->tid_map.bitmap);
free_toggle_map:
        kfree(p_rdma_info->toggle_bits.bitmap);
free_cq_map:
        kfree(p_rdma_info->cq_map.bitmap);
free_dpi_map:
        kfree(p_rdma_info->dpi_map.bitmap);
free_pd_map:
        kfree(p_rdma_info->pd_map.bitmap);
free_rdma_port:
        kfree(p_rdma_info->port);
free_rdma_dev:
        kfree(p_rdma_info->dev);
free_rdma_info:
        kfree(p_rdma_info);

        return rc;
}
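/* Illustrative sizing for the bitmaps allocated above (example numbers
 * only): with num_cons == 8192 RoCE cids there are 4096 QPs (two cids,
 * requester and responder, per QP), while cq_map, toggle_bits, cid_map
 * and real_cid_map are each sized for the full 8192 cids. Note that the
 * error labels unwind in exactly the reverse order of allocation.
 */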
void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
                        struct qed_bmap *bmap, bool check)
{
        int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
        int last_line = bmap->max_count / (64 * 8);
        int last_item = last_line * 8 +
            DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
        u64 *pmap = (u64 *)bmap->bitmap;
        int line, item, offset;
        u8 str_last_line[200] = { 0 };

        if (!weight || !check)
                goto end;

        DP_NOTICE(p_hwfn,
                  "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
                  bmap->name, bmap->max_count, weight);

        /* print aligned non-zero lines, if any */
        for (item = 0, line = 0; line < last_line; line++, item += 8)
                if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
                        DP_NOTICE(p_hwfn,
                                  "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
                                  line,
                                  pmap[item],
                                  pmap[item + 1],
                                  pmap[item + 2],
                                  pmap[item + 3],
                                  pmap[item + 4],
                                  pmap[item + 5],
                                  pmap[item + 6], pmap[item + 7]);

        /* print last unaligned non-zero line, if any */
        if ((bmap->max_count % (64 * 8)) &&
            (bitmap_weight((unsigned long *)&pmap[item],
                           bmap->max_count - item * 64))) {
                offset = sprintf(str_last_line, "line 0x%04x: ", line);
                for (; item < last_item; item++)
                        offset += sprintf(str_last_line + offset,
                                          "0x%016llx ", pmap[item]);
                DP_NOTICE(p_hwfn, "%s\n", str_last_line);
        }

end:
        kfree(bmap->bitmap);
        bmap->bitmap = NULL;
}

static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
        struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;

        if (QED_IS_IWARP_PERSONALITY(p_hwfn))
                qed_iwarp_resc_free(p_hwfn);

        qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
        qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
        qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
        qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
        qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
        qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
        qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
        qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1);

        kfree(p_rdma_info->port);
        kfree(p_rdma_info->dev);

        kfree(p_rdma_info);
}

static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
{
        qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
}

static void qed_rdma_free(struct qed_hwfn *p_hwfn)
{
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");

        qed_rdma_free_reserved_lkey(p_hwfn);
        qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto);
        qed_rdma_resc_free(p_hwfn);
}

static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
{
        guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
        guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
        guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
        guid[3] = 0xff;
        guid[4] = 0xfe;
        guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
        guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
        guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
}
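/* qed_rdma_get_guid() is the standard MAC-to-EUI-64 expansion: flip the
 * universal/local bit of the first octet and splice 0xff, 0xfe into the
 * middle. For an illustrative MAC of 00:0e:1e:aa:bb:cc the resulting
 * GUID is 02:0e:1e:ff:fe:aa:bb:cc.
 */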
static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
                                 struct qed_rdma_start_in_params *params)
{
        struct qed_rdma_events *events;

        events = &p_hwfn->p_rdma_info->events;

        events->unaffiliated_event = params->events->unaffiliated_event;
        events->affiliated_event = params->events->affiliated_event;
        events->context = params->events->context;
}

static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
                                  struct qed_rdma_start_in_params *params)
{
        struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
        struct qed_dev *cdev = p_hwfn->cdev;
        u32 pci_status_control;
        u32 num_qps;

        /* Vendor specific information */
        dev->vendor_id = cdev->vendor_id;
        dev->vendor_part_id = cdev->device_id;
        dev->hw_ver = 0;
        dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
                      (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);

        qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
        dev->node_guid = dev->sys_image_guid;

        dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
                             RDMA_MAX_SGE_PER_RQ_WQE);

        if (cdev->rdma_max_sge)
                dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);

        dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE;
        if (p_hwfn->cdev->rdma_max_srq_sge) {
                dev->max_srq_sge = min_t(u32,
                                         p_hwfn->cdev->rdma_max_srq_sge,
                                         dev->max_srq_sge);
        }
        dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;

        dev->max_inline = (cdev->rdma_max_inline) ?
                          min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
                          dev->max_inline;

        dev->max_wqe = QED_RDMA_MAX_WQE;
        dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);

        /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
         * it is up-aligned to 16 and then to ILT page size within qed cxt.
         * This is OK in terms of ILT but we don't want to configure the FW
         * above its abilities
         */
        num_qps = ROCE_MAX_QPS;
        num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
        dev->max_qp = num_qps;

        /* CQs uses the same icids that QPs use hence they are limited by the
         * number of icids. There are two icids per QP.
         */
        dev->max_cq = num_qps * 2;

        /* The number of mrs is smaller by 1 since the first is reserved */
        dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
        dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;

        /* The maximum CQE capacity per CQ supported.
         * max number of cqes will be in two layer pbl,
         * 8 is the pointer size in bytes
         * 32 is the size of cq element in bytes
         */
        if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
                dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
        else
                dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;

        dev->max_mw = 0;
        dev->max_fmr = QED_RDMA_MAX_FMR;
        dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
        dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
        dev->max_pkey = QED_RDMA_MAX_P_KEY;

        dev->max_srq = p_hwfn->p_rdma_info->num_srqs;
        dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM;
        dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
                                          (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
        dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
                                         RDMA_REQ_RD_ATOMIC_ELM_SIZE;
        dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
                                           p_hwfn->p_rdma_info->num_qps;
        dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
        dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
        dev->max_pd = RDMA_MAX_PDS;
        dev->max_ah = p_hwfn->p_rdma_info->num_qps;
        dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);

        /* Set capabilities */
        dev->dev_caps = 0;
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);

        /* Check atomic operations support in PCI configuration space. */
        pci_read_config_dword(cdev->pdev,
                              cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
                              &pci_status_control);

        if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
                SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);

        if (QED_IS_IWARP_PERSONALITY(p_hwfn))
                qed_iwarp_init_devinfo(p_hwfn);
}
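/* Two arithmetic notes on the capabilities above. fw_ver packs one byte
 * per component, e.g. an illustrative firmware 8.33.1.0 yields
 * (8 << 24) | (33 << 16) | (1 << 8) | 0 == 0x08210100. And with 4K pages,
 * max_mr_mw_fmr_pbl == (4096 / 8) * (4096 / 8) == 262144 page pointers in
 * a two-level PBL, i.e. max_mr_mw_fmr_size covers 1 GB per region.
 */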
static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
{
        struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
        struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

        port->port_state = p_hwfn->mcp_info->link_output.link_up ?
                           QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

        port->max_msg_size = min_t(u64,
                                   (dev->max_mr_mw_fmr_size *
                                    p_hwfn->cdev->rdma_max_sge),
                                   BIT(31));

        port->pkey_bad_counter = 0;
}

static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        int rc = 0;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
        p_hwfn->b_rdma_enabled_in_prs = false;

        if (QED_IS_IWARP_PERSONALITY(p_hwfn))
                qed_iwarp_init_hw(p_hwfn, p_ptt);
        else
                rc = qed_roce_init_hw(p_hwfn, p_ptt);

        return rc;
}

static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
                             struct qed_rdma_start_in_params *params,
                             struct qed_ptt *p_ptt)
{
        struct rdma_init_func_ramrod_data *p_ramrod;
        struct qed_rdma_cnq_params *p_cnq_pbl_list;
        struct rdma_init_func_hdr *p_params_header;
        struct rdma_cnq_params *p_cnq_params;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        u32 cnq_id, sb_id;
        u16 igu_sb_id;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");

        /* Save the number of cnqs for the function close ramrod */
        p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                return rc;

        if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
                qed_iwarp_init_fw_ramrod(p_hwfn,
                                         &p_ent->ramrod.iwarp_init_func);
                p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
        } else {
                p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
        }

        p_params_header = &p_ramrod->params_header;
        p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
                                                           QED_RDMA_CNQ_RAM);
        p_params_header->num_cnqs = params->desired_cnq;

        if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
                p_params_header->cq_ring_mode = 1;
        else
                p_params_header->cq_ring_mode = 0;

        for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
                sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
                igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
                p_cnq_params = &p_ramrod->cnq_params[cnq_id];
                p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];

                p_cnq_params->sb_num = cpu_to_le16(igu_sb_id);
                p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
                p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;

                DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
                               p_cnq_pbl_list->pbl_ptr);

                /* we assume here that cnq_id and qz_offset are the same */
                p_cnq_params->queue_zone_num =
                        cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
                                    cnq_id);
        }

        return qed_spq_post(p_hwfn, p_ent, NULL);
}
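/* qed_rdma_start_fw() above follows the slow-path queue pattern used by
 * every ramrod in this file: zero a qed_sp_init_data, fill opaque_fid and
 * QED_SPQ_MODE_EBLOCK (block until firmware completion), acquire an entry
 * via qed_sp_init_request(), fill the protocol-specific ramrod union
 * member and post it with qed_spq_post(). Only the payload differs
 * between the verbs below.
 */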
static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn,
                                    &p_hwfn->p_rdma_info->tid_map, itid);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
        if (rc)
                goto out;

        rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
out:
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
        return rc;
}

static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
        struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

        /* Tid 0 will be used as the key for "reserved MR".
         * The driver should allocate memory for it so it can be loaded but no
         * ramrod should be passed on it.
         */
        qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
        if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
                DP_NOTICE(p_hwfn,
                          "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
                return -EINVAL;
        }

        return 0;
}

static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          struct qed_rdma_start_in_params *params)
{
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");

        spin_lock_init(&p_hwfn->p_rdma_info->lock);

        qed_rdma_init_devinfo(p_hwfn, params);
        qed_rdma_init_port(p_hwfn);
        qed_rdma_init_events(p_hwfn, params);

        rc = qed_rdma_reserve_lkey(p_hwfn);
        if (rc)
                return rc;

        rc = qed_rdma_init_hw(p_hwfn, p_ptt);
        if (rc)
                return rc;

        if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
                rc = qed_iwarp_setup(p_hwfn, p_ptt, params);
                if (rc)
                        return rc;
        } else {
                rc = qed_roce_setup(p_hwfn);
                if (rc)
                        return rc;
        }

        return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}
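/* The ordering in qed_rdma_setup() is deliberate: device/port info and
 * event callbacks are published first, lkey 0 is reserved, the parser and
 * doorbell hardware are programmed, then the RoCE- or iWARP-specific
 * setup runs, and only after all of that does qed_rdma_start_fw() send
 * FUNC_INIT, so the firmware starts from a fully configured function.
 */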
static int qed_rdma_stop(void *rdma_cxt)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        struct rdma_close_func_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        struct qed_ptt *p_ptt;
        u32 ll2_ethertype_en;
        int rc = -EBUSY;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");

        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
                return rc;
        }

        /* Disable RoCE search */
        qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
        p_hwfn->b_rdma_enabled_in_prs = false;

        qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

        ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);

        qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
               (ll2_ethertype_en & 0xFFFE));

        if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
                rc = qed_iwarp_stop(p_hwfn, p_ptt);
                if (rc) {
                        qed_ptt_release(p_hwfn, p_ptt);
                        return rc;
                }
        } else {
                qed_roce_stop(p_hwfn);
        }

        qed_ptt_release(p_hwfn, p_ptt);

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        /* Stop RoCE */
        rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                goto out;

        p_ramrod = &p_ent->ramrod.rdma_close_func;

        p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
        p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

out:
        qed_rdma_free(p_hwfn);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
        return rc;
}

static int qed_rdma_add_user(void *rdma_cxt,
                             struct qed_rdma_add_user_out_params *out_params)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        u32 dpi_start_offset;
        u32 returned_id = 0;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");

        /* Allocate DPI */
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
                                    &returned_id);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

        out_params->dpi = (u16)returned_id;

        /* Calculate the corresponding DPI address */
        dpi_start_offset = p_hwfn->dpi_start_offset;

        out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
                                     dpi_start_offset +
                                     ((out_params->dpi) * p_hwfn->dpi_size));

        out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
                                    dpi_start_offset +
                                    ((out_params->dpi) * p_hwfn->dpi_size);

        out_params->dpi_size = p_hwfn->dpi_size;
        out_params->wid_count = p_hwfn->wid_count;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
        return rc;
}
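/* Doorbell arithmetic above, worked through with illustrative values:
 * with dpi_start_offset == 0x1000 and dpi_size == 4096, DPI 3 lives at
 * offset 0x1000 + 3 * 4096 from the doorbell BAR start, reported both as
 * a kernel mapping (dpi_addr) and as a bus address (dpi_phys_addr) so a
 * user process can map its own doorbell page.
 */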
static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");

        /* Link may have changed */
        p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
                             QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

        p_port->link_speed = p_hwfn->mcp_info->link_output.speed;

        p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;

        return p_port;
}

static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");

        /* Return struct with device parameters */
        return p_hwfn->p_rdma_info->dev;
}

static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
        struct qed_hwfn *p_hwfn;
        u16 qz_num;
        u32 addr;

        p_hwfn = (struct qed_hwfn *)rdma_cxt;

        if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
                DP_NOTICE(p_hwfn,
                          "queue zone offset %d is too large (max is %d)\n",
                          qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
                return;
        }

        qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
        addr = GTT_BAR0_MAP_REG_USDM_RAM +
               USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        wmb();
}

static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
                                  struct qed_dev_rdma_info *info)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);

        memset(info, 0, sizeof(*info));

        info->rdma_type = QED_IS_ROCE_PERSONALITY(p_hwfn) ?
            QED_RDMA_TYPE_ROCE : QED_RDMA_TYPE_IWARP;

        info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);

        qed_fill_dev_info(cdev, &info->common);

        return 0;
}

static int qed_rdma_get_sb_start(struct qed_dev *cdev)
{
        int feat_num;

        if (cdev->num_hwfns > 1)
                feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
        else
                feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
                           cdev->num_hwfns;

        return feat_num;
}

static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
{
        int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
        int n_msix = cdev->int_params.rdma_msix_cnt;

        return min_t(int, n_cnq, n_msix);
}

static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
{
        int limit = 0;

        /* Mark the fastpath as free/used */
        cdev->int_params.fp_initialized = cnt ? true : false;

        if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
                DP_ERR(cdev,
                       "qed roce supports only MSI-X interrupts (detected %d).\n",
                       cdev->int_params.out.int_mode);
                return -EINVAL;
        } else if (cdev->int_params.fp_msix_cnt) {
                limit = cdev->int_params.rdma_msix_cnt;
        }

        if (!limit)
                return -ENOMEM;

        return min_t(int, cnt, limit);
}

static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
{
        memset(info, 0, sizeof(*info));

        if (!cdev->int_params.fp_initialized) {
                DP_INFO(cdev,
                        "Protocol driver requested interrupt information, but its support is not yet configured\n");
                return -EINVAL;
        }

        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                int msix_base = cdev->int_params.rdma_msix_base;

                info->msix_cnt = cdev->int_params.rdma_msix_cnt;
                info->msix = &cdev->int_params.msix_table[msix_base];

                DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
                           info->msix_cnt, msix_base);
        }

        return 0;
}
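/* The RDMA vectors are a contiguous slice of the PF's MSI-X table:
 * msix_table[rdma_msix_base] serves CNQ 0, the next entry CNQ 1, and so
 * on for rdma_msix_cnt entries; this is also why qed_rdma_set_int()
 * above rejects any interrupt mode other than MSI-X.
 */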
static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        u32 returned_id;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");

        /* Allocates an unused protection domain */
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn,
                                    &p_hwfn->p_rdma_info->pd_map, &returned_id);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

        *pd = (u16)returned_id;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
        return rc;
}

static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);

        /* Returns a previously allocated protection domain for reuse */
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static enum qed_rdma_toggle_bit
qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
{
        struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
        enum qed_rdma_toggle_bit toggle_bit;
        u32 bmap_id;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);

        /* the function toggle the bit that is related to a given icid
         * and returns the new toggle bit's value
         */
        bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);

        spin_lock_bh(&p_info->lock);
        toggle_bit = !test_and_change_bit(bmap_id,
                                          p_info->toggle_bits.bitmap);
        spin_unlock_bh(&p_info->lock);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
                   toggle_bit);

        return toggle_bit;
}

static int qed_rdma_create_cq(void *rdma_cxt,
                              struct qed_rdma_create_cq_in_params *params,
                              u16 *icid)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
        struct rdma_create_cq_ramrod_data *p_ramrod;
        enum qed_rdma_toggle_bit toggle_bit;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        u32 returned_id, start_cid;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
                   params->cq_handle_hi, params->cq_handle_lo);

        /* Allocate icid */
        spin_lock_bh(&p_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
        spin_unlock_bh(&p_info->lock);

        if (rc) {
                DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
                return rc;
        }

        start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
                                                p_info->proto);
        *icid = returned_id + start_cid;

        /* Check if icid requires a page allocation */
        rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
        if (rc)
                goto err;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = *icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        /* Send create CQ ramrod */
        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 RDMA_RAMROD_CREATE_CQ,
                                 p_info->proto, &init_data);
        if (rc)
                goto err;

        p_ramrod = &p_ent->ramrod.rdma_create_cq;

        p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
        p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
        p_ramrod->dpi = cpu_to_le16(params->dpi);
        p_ramrod->is_two_level_pbl = params->pbl_two_level;
        p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
        DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
        p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
        p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
                           params->cnq_id;
        p_ramrod->int_timeout = params->int_timeout;

        /* toggle the bit for every resize or create cq for a given icid */
        toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);

        p_ramrod->toggle_bit = toggle_bit;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc) {
                /* restore toggle bit */
                qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
                goto err;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
        return rc;

err:
        /* release allocated icid */
        spin_lock_bh(&p_info->lock);
        qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
        spin_unlock_bh(&p_info->lock);
        DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);

        return rc;
}
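/* CQ icid bookkeeping above, with illustrative numbers: if the protocol
 * cid range starts at 0x2000 and bitmap id 5 was allocated, the CQ icid
 * is 0x2005. The toggle bit records create/resize parity for that icid,
 * which is why a failed qed_spq_post() toggles it straight back before
 * releasing the id.
 */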
static int
qed_rdma_destroy_cq(void *rdma_cxt,
                    struct qed_rdma_destroy_cq_in_params *in_params,
                    struct qed_rdma_destroy_cq_out_params *out_params)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        struct rdma_destroy_cq_output_params *p_ramrod_res;
        struct rdma_destroy_cq_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        dma_addr_t ramrod_res_phys;
        enum protocol_type proto;
        int rc = -ENOMEM;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);

        p_ramrod_res =
            (struct rdma_destroy_cq_output_params *)
            dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                               sizeof(struct rdma_destroy_cq_output_params),
                               &ramrod_res_phys, GFP_KERNEL);
        if (!p_ramrod_res) {
                DP_NOTICE(p_hwfn,
                          "qed destroy cq failed: cannot allocate memory (ramrod)\n");
                return rc;
        }

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = in_params->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
        proto = p_hwfn->p_rdma_info->proto;

        /* Send destroy CQ ramrod */
        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 RDMA_RAMROD_DESTROY_CQ,
                                 proto, &init_data);
        if (rc)
                goto err;

        p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
        DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err;

        out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);

        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          sizeof(struct rdma_destroy_cq_output_params),
                          p_ramrod_res, ramrod_res_phys);

        /* Free icid */
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);

        qed_bmap_release_id(p_hwfn,
                            &p_hwfn->p_rdma_info->cq_map,
                            (in_params->icid -
                             qed_cxt_get_proto_cid_start(p_hwfn, proto)));

        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
        return rc;

err:
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          sizeof(struct rdma_destroy_cq_output_params),
                          p_ramrod_res, ramrod_res_phys);

        return rc;
}

void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
{
        p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
        p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
        p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
}
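/* qed_rdma_set_fw_mac() packs the six MAC bytes pairwise into three
 * 16-bit words, each stored little-endian. For an illustrative MAC of
 * aa:bb:cc:dd:ee:ff the result is
 * p_fw_mac[] = { cpu_to_le16(0xaabb), cpu_to_le16(0xccdd),
 *                cpu_to_le16(0xeeff) }.
 */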
static int qed_rdma_query_qp(void *rdma_cxt,
                             struct qed_rdma_qp *qp,
                             struct qed_rdma_query_qp_out_params *out_params)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        int rc = 0;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

        /* The following fields are filled in from qp and not FW as they can't
         * be modified by FW
         */
        out_params->mtu = qp->mtu;
        out_params->dest_qp = qp->dest_qp;
        out_params->incoming_atomic_en = qp->incoming_atomic_en;
        out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
        out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
        out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
        out_params->dgid = qp->dgid;
        out_params->flow_label = qp->flow_label;
        out_params->hop_limit_ttl = qp->hop_limit_ttl;
        out_params->traffic_class_tos = qp->traffic_class_tos;
        out_params->timeout = qp->ack_timeout;
        out_params->rnr_retry = qp->rnr_retry_cnt;
        out_params->retry_cnt = qp->retry_cnt;
        out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
        out_params->pkey_index = 0;
        out_params->max_rd_atomic = qp->max_rd_atomic_req;
        out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
        out_params->sqd_async = qp->sqd_async;

        if (QED_IS_IWARP_PERSONALITY(p_hwfn))
                qed_iwarp_query_qp(qp, out_params);
        else
                rc = qed_roce_query_qp(p_hwfn, qp, out_params);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
        return rc;
}

static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        int rc = 0;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

        if (QED_IS_IWARP_PERSONALITY(p_hwfn))
                rc = qed_iwarp_destroy_qp(p_hwfn, qp);
        else
                rc = qed_roce_destroy_qp(p_hwfn, qp);

        /* free qp params struct */
        kfree(qp);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
        return rc;
}
static struct qed_rdma_qp *
qed_rdma_create_qp(void *rdma_cxt,
                   struct qed_rdma_create_qp_in_params *in_params,
                   struct qed_rdma_create_qp_out_params *out_params)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        struct qed_rdma_qp *qp;
        u8 max_stats_queues;
        int rc;

        if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
                DP_ERR(p_hwfn->cdev,
                       "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?)\n",
                       rdma_cxt, in_params, out_params);
                return NULL;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "qed rdma create qp called with qp_handle = %08x%08x\n",
                   in_params->qp_handle_hi, in_params->qp_handle_lo);

        /* Some sanity checks... */
        max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
        if (in_params->stats_queue >= max_stats_queues) {
                DP_ERR(p_hwfn->cdev,
                       "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
                       in_params->stats_queue, max_stats_queues);
                return NULL;
        }

        if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
                if (in_params->sq_num_pages * sizeof(struct regpair) >
                    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) {
                        DP_NOTICE(p_hwfn->cdev,
                                  "Sq num pages: %d exceeds maximum\n",
                                  in_params->sq_num_pages);
                        return NULL;
                }
                if (in_params->rq_num_pages * sizeof(struct regpair) >
                    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) {
                        DP_NOTICE(p_hwfn->cdev,
                                  "Rq num pages: %d exceeds maximum\n",
                                  in_params->rq_num_pages);
                        return NULL;
                }
        }

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp)
                return NULL;

        qp->cur_state = QED_ROCE_QP_STATE_RESET;
        qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
        qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
        qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
        qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
        qp->use_srq = in_params->use_srq;
        qp->signal_all = in_params->signal_all;
        qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
        qp->pd = in_params->pd;
        qp->dpi = in_params->dpi;
        qp->sq_cq_id = in_params->sq_cq_id;
        qp->sq_num_pages = in_params->sq_num_pages;
        qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
        qp->rq_cq_id = in_params->rq_cq_id;
        qp->rq_num_pages = in_params->rq_num_pages;
        qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
        qp->srq_id = in_params->srq_id;
        qp->req_offloaded = false;
        qp->resp_offloaded = false;
        qp->e2e_flow_control_en = qp->use_srq ? false : true;
        qp->stats_queue = in_params->stats_queue;

        if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
                rc = qed_iwarp_create_qp(p_hwfn, qp, out_params);
                qp->qpid = qp->icid;
        } else {
                rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
                qp->qpid = ((0xFF << 16) | qp->icid);
        }

        if (rc) {
                kfree(qp);
                return NULL;
        }

        out_params->icid = qp->icid;
        out_params->qp_id = qp->qpid;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
        return qp;
}
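/* QP id composition above: iWARP reuses the icid directly, while RoCE
 * tags the icid with 0xFF in bits 16-23, i.e. icid 0x11 becomes qpid
 * 0xff0011. The two cids backing a RoCE QP (requester and responder)
 * are both derived from this single icid.
 */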
static int qed_rdma_modify_qp(void *rdma_cxt,
                              struct qed_rdma_qp *qp,
                              struct qed_rdma_modify_qp_in_params *params)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        enum qed_roce_qp_state prev_state;
        int rc = 0;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
                   qp->icid, params->new_state);

        if (GET_FIELD(params->modify_flags,
                      QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
                qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
                qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
                qp->incoming_atomic_en = params->incoming_atomic_en;
        }

        /* Update QP structure with the updated values */
        if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
                qp->roce_mode = params->roce_mode;
        if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
                qp->pkey = params->pkey;
        if (GET_FIELD(params->modify_flags,
                      QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
                qp->e2e_flow_control_en = params->e2e_flow_control_en;
        if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
                qp->dest_qp = params->dest_qp;
        if (GET_FIELD(params->modify_flags,
                      QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
                /* Indicates that the following parameters have changed:
                 * Traffic class, flow label, hop limit, source GID,
                 * destination GID, loopback indicator
                 */
                qp->traffic_class_tos = params->traffic_class_tos;
                qp->flow_label = params->flow_label;
                qp->hop_limit_ttl = params->hop_limit_ttl;

                qp->sgid = params->sgid;
                qp->dgid = params->dgid;
                qp->udp_src_port = 0;
                qp->vlan_id = params->vlan_id;
                qp->mtu = params->mtu;
                qp->lb_indication = params->lb_indication;
                memcpy((u8 *)&qp->remote_mac_addr[0],
                       (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
                if (params->use_local_mac) {
                        memcpy((u8 *)&qp->local_mac_addr[0],
                               (u8 *)&params->local_mac_addr[0], ETH_ALEN);
                } else {
                        memcpy((u8 *)&qp->local_mac_addr[0],
                               (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
                }
        }
        if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
                qp->rq_psn = params->rq_psn;
        if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
                qp->sq_psn = params->sq_psn;
        if (GET_FIELD(params->modify_flags,
                      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
                qp->max_rd_atomic_req = params->max_rd_atomic_req;
        if (GET_FIELD(params->modify_flags,
                      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
                qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
        if (GET_FIELD(params->modify_flags,
                      QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
                qp->ack_timeout = params->ack_timeout;
        if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
                qp->retry_cnt = params->retry_cnt;
        if (GET_FIELD(params->modify_flags,
                      QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
                qp->rnr_retry_cnt = params->rnr_retry_cnt;
        if (GET_FIELD(params->modify_flags,
                      QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
                qp->min_rnr_nak_timer = params->min_rnr_nak_timer;

        qp->sqd_async = params->sqd_async;

        prev_state = qp->cur_state;
        if (GET_FIELD(params->modify_flags,
                      QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
                qp->cur_state = params->new_state;
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
                           qp->cur_state);
        }

        if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
                enum qed_iwarp_qp_state new_state =
                    qed_roce2iwarp_state(qp->cur_state);

                rc = qed_iwarp_modify_qp(p_hwfn, qp, new_state, 0);
        } else {
                rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
        return rc;
}
static int
qed_rdma_register_tid(void *rdma_cxt,
                      struct qed_rdma_register_tid_in_params *params)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        struct rdma_register_tid_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        enum rdma_tid_type tid_type;
        u8 fw_return_code;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
                return rc;
        }

        if (p_hwfn->p_rdma_info->last_tid < params->itid)
                p_hwfn->p_rdma_info->last_tid = params->itid;

        p_ramrod = &p_ent->ramrod.rdma_register_tid;

        p_ramrod->flags = 0;
        SET_FIELD(p_ramrod->flags,
                  RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
                  params->pbl_two_level);

        SET_FIELD(p_ramrod->flags,
                  RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);

        SET_FIELD(p_ramrod->flags,
                  RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);

        /* Don't initialize D/C field, as it may override other bits. */
        if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
                SET_FIELD(p_ramrod->flags,
                          RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
                          params->page_size_log - 12);

        SET_FIELD(p_ramrod->flags,
                  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
                  params->remote_read);

        SET_FIELD(p_ramrod->flags,
                  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
                  params->remote_write);

        SET_FIELD(p_ramrod->flags,
                  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
                  params->remote_atomic);

        SET_FIELD(p_ramrod->flags,
                  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
                  params->local_write);

        SET_FIELD(p_ramrod->flags,
                  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);

        SET_FIELD(p_ramrod->flags,
                  RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
                  params->mw_bind);

        SET_FIELD(p_ramrod->flags1,
                  RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
                  params->pbl_page_size_log - 12);

        SET_FIELD(p_ramrod->flags2,
                  RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);

        switch (params->tid_type) {
        case QED_RDMA_TID_REGISTERED_MR:
                tid_type = RDMA_TID_REGISTERED_MR;
                break;
        case QED_RDMA_TID_FMR:
                tid_type = RDMA_TID_FMR;
                break;
        case QED_RDMA_TID_MW:
                tid_type = RDMA_TID_MW;
                break;
        default:
                rc = -EINVAL;
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
                qed_sp_destroy_request(p_hwfn, p_ent);
                return rc;
        }
        SET_FIELD(p_ramrod->flags1,
                  RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);

        p_ramrod->itid = cpu_to_le32(params->itid);
        p_ramrod->key = params->key;
        p_ramrod->pd = cpu_to_le16(params->pd);
        p_ramrod->length_hi = (u8)(params->length >> 32);
        p_ramrod->length_lo = DMA_LO_LE(params->length);
        if (params->zbva) {
                /* Lower 32 bits of the registered MR address.
                 * In case of zero based MR, will hold FBO
                 */
                p_ramrod->va.hi = 0;
                p_ramrod->va.lo = cpu_to_le32(params->fbo);
        } else {
                DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
        }
        DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);

        /* DIF */
        if (params->dif_enabled) {
                SET_FIELD(p_ramrod->flags2,
                          RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
                DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
                               params->dif_error_addr);
        }

        rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
        if (rc)
                return rc;

        if (fw_return_code != RDMA_RETURN_OK) {
                DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
        return rc;
}
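/* Page-size encoding used above: the ramrod carries page_size_log - 12,
 * i.e. log2 of the page size relative to 4K. So a 4K-page region
 * (log == 12) encodes as 0, 64K (log == 16) as 4 and 2M (log == 21)
 * as 9.
 */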
static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        struct rdma_deregister_tid_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        struct qed_ptt *p_ptt;
        u8 fw_return_code;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
                return rc;
        }

        p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
        p_ramrod->itid = cpu_to_le32(itid);

        rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
                return rc;
        }

        if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
                DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
                return -EINVAL;
        } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
                /* Bit indicating that the TID is in use and a nig drain is
                 * required before sending the ramrod again
                 */
                p_ptt = qed_ptt_acquire(p_hwfn);
                if (!p_ptt) {
                        rc = -EBUSY;
                        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                                   "Failed to acquire PTT\n");
                        return rc;
                }

                rc = qed_mcp_drain(p_hwfn, p_ptt);
                if (rc) {
                        qed_ptt_release(p_hwfn, p_ptt);
                        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                                   "Drain failed\n");
                        return rc;
                }

                qed_ptt_release(p_hwfn, p_ptt);

                /* Resend the ramrod */
                rc = qed_sp_init_request(p_hwfn, &p_ent,
                                         RDMA_RAMROD_DEREGISTER_MR,
                                         p_hwfn->p_rdma_info->proto,
                                         &init_data);
                if (rc) {
                        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                                   "Failed to init sp-element\n");
                        return rc;
                }

                rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
                if (rc) {
                        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                                   "Ramrod failed\n");
                        return rc;
                }

                if (fw_return_code != RDMA_RETURN_OK) {
                        DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
                                  fw_return_code);
                        return rc;
                }
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
        return rc;
}

static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
{
        return QED_LEADING_HWFN(cdev);
}
static int qed_rdma_modify_srq(void *rdma_cxt,
                               struct qed_rdma_modify_srq_in_params *in_params)
{
        struct rdma_srq_modify_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data = {};
        struct qed_hwfn *p_hwfn = rdma_cxt;
        struct qed_spq_entry *p_ent;
        u16 opaque_fid;
        int rc;

        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 RDMA_RAMROD_MODIFY_SRQ,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.rdma_modify_srq;
        p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
        opaque_fid = p_hwfn->hw_info.opaque_fid;
        p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
        p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                return rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x\n",
                   in_params->srq_id);

        return rc;
}

static int
qed_rdma_destroy_srq(void *rdma_cxt,
                     struct qed_rdma_destroy_srq_in_params *in_params)
{
        struct rdma_srq_destroy_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data = {};
        struct qed_hwfn *p_hwfn = rdma_cxt;
        struct qed_spq_entry *p_ent;
        struct qed_bmap *bmap;
        u16 opaque_fid;
        int rc;

        opaque_fid = p_hwfn->hw_info.opaque_fid;

        init_data.opaque_fid = opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 RDMA_RAMROD_DESTROY_SRQ,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.rdma_destroy_srq;
        p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
        p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                return rc;

        bmap = &p_hwfn->p_rdma_info->srq_map;

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "SRQ destroyed Id = %x\n",
                   in_params->srq_id);

        return rc;
}
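/* SRQ id lifecycle: qed_rdma_create_srq() below takes a bitmap id under
 * the rdma lock and backs it with a dynamically allocated ILT line before
 * sending CREATE_SRQ; qed_rdma_destroy_srq() above releases the id only
 * after the DESTROY_SRQ ramrod has completed, so the id cannot be
 * recycled while the firmware still owns it.
 */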
static int
qed_rdma_create_srq(void *rdma_cxt,
                    struct qed_rdma_create_srq_in_params *in_params,
                    struct qed_rdma_create_srq_out_params *out_params)
{
        struct rdma_srq_create_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data = {};
        struct qed_hwfn *p_hwfn = rdma_cxt;
        enum qed_cxt_elem_type elem_type;
        struct qed_spq_entry *p_ent;
        u16 opaque_fid, srq_id;
        struct qed_bmap *bmap;
        u32 returned_id;
        int rc;

        bmap = &p_hwfn->p_rdma_info->srq_map;
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

        if (rc) {
                DP_NOTICE(p_hwfn, "failed to allocate srq id\n");
                return rc;
        }

        elem_type = QED_ELEM_SRQ;
        rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
        if (rc)
                goto err;
        /* returned id is no greater than u16 */
        srq_id = (u16)returned_id;

        opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.opaque_fid = opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 RDMA_RAMROD_CREATE_SRQ,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                goto err;

        p_ramrod = &p_ent->ramrod.rdma_create_srq;
        DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr);
        p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages);
        p_ramrod->pd_id = cpu_to_le16(in_params->pd_id);
        p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);
        p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
        p_ramrod->page_size = cpu_to_le16(in_params->page_size);
        DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err;

        out_params->srq_id = srq_id;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "SRQ created Id = %x\n", out_params->srq_id);

        return rc;

err:
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        qed_bmap_release_id(p_hwfn, bmap, returned_id);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

        return rc;
}

bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
{
        bool result;

        /* if rdma info has not been allocated, naturally there are no qps */
        if (!p_hwfn->p_rdma_info)
                return false;

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        if (!p_hwfn->p_rdma_info->cid_map.bitmap)
                result = false;
        else
                result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
        return result;
}

void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 val;

        val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;

        qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
        DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
                   "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
                   val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
}

void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        p_hwfn->db_bar_no_edpm = true;

        qed_rdma_dpm_conf(p_hwfn, p_ptt);
}
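/* EDPM gating: DORQ_REG_PF_DPM_ENABLE is written with 1 only when neither
 * dcbx_no_edpm (set by DCBx) nor db_bar_no_edpm (latched by
 * qed_rdma_dpm_bar() above) vetoes it; either flag forces the register
 * to 0 and disables enhanced doorbell PM for the PF.
 */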
static int qed_rdma_start(void *rdma_cxt,
                          struct qed_rdma_start_in_params *params)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        struct qed_ptt *p_ptt;
        int rc = -EBUSY;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "desired_cnq = %08x\n", params->desired_cnq);

        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt)
                goto err;

        rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
        if (rc)
                goto err1;

        rc = qed_rdma_setup(p_hwfn, p_ptt, params);
        if (rc)
                goto err2;

        qed_ptt_release(p_hwfn, p_ptt);

        return rc;

err2:
        qed_rdma_free(p_hwfn);
err1:
        qed_ptt_release(p_hwfn, p_ptt);
err:
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
        return rc;
}

static int qed_rdma_init(struct qed_dev *cdev,
                         struct qed_rdma_start_in_params *params)
{
        return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
}

static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
                                       u8 *old_mac_address,
                                       u8 *new_mac_address)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *p_ptt;
        int rc = 0;

        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt) {
                DP_ERR(cdev,
                       "qed roce ll2 mac filter set: failed to acquire PTT\n");
                return -EINVAL;
        }

        if (old_mac_address)
                qed_llh_remove_mac_filter(p_hwfn, p_ptt, old_mac_address);
        if (new_mac_address)
                rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, new_mac_address);

        qed_ptt_release(p_hwfn, p_ptt);

        if (rc)
                DP_ERR(cdev,
                       "qed roce ll2 mac filter set: failed to add MAC filter\n");

        return rc;
}

static const struct qed_rdma_ops qed_rdma_ops_pass = {
        .common = &qed_common_ops_pass,
        .fill_dev_info = &qed_fill_rdma_dev_info,
        .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
        .rdma_init = &qed_rdma_init,
        .rdma_add_user = &qed_rdma_add_user,
        .rdma_remove_user = &qed_rdma_remove_user,
        .rdma_stop = &qed_rdma_stop,
        .rdma_query_port = &qed_rdma_query_port,
        .rdma_query_device = &qed_rdma_query_device,
        .rdma_get_start_sb = &qed_rdma_get_sb_start,
        .rdma_get_rdma_int = &qed_rdma_get_int,
        .rdma_set_rdma_int = &qed_rdma_set_int,
        .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
        .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
        .rdma_alloc_pd = &qed_rdma_alloc_pd,
        .rdma_dealloc_pd = &qed_rdma_free_pd,
        .rdma_create_cq = &qed_rdma_create_cq,
        .rdma_destroy_cq = &qed_rdma_destroy_cq,
        .rdma_create_qp = &qed_rdma_create_qp,
        .rdma_modify_qp = &qed_rdma_modify_qp,
        .rdma_query_qp = &qed_rdma_query_qp,
        .rdma_destroy_qp = &qed_rdma_destroy_qp,
        .rdma_alloc_tid = &qed_rdma_alloc_tid,
        .rdma_free_tid = &qed_rdma_free_tid,
        .rdma_register_tid = &qed_rdma_register_tid,
        .rdma_deregister_tid = &qed_rdma_deregister_tid,
        .rdma_create_srq = &qed_rdma_create_srq,
        .rdma_modify_srq = &qed_rdma_modify_srq,
        .rdma_destroy_srq = &qed_rdma_destroy_srq,
        .ll2_acquire_connection = &qed_ll2_acquire_connection,
        .ll2_establish_connection = &qed_ll2_establish_connection,
        .ll2_terminate_connection = &qed_ll2_terminate_connection,
        .ll2_release_connection = &qed_ll2_release_connection,
        .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer,
        .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet,
        .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
        .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
        .ll2_get_stats = &qed_ll2_get_stats,
        .iwarp_connect = &qed_iwarp_connect,
        .iwarp_create_listen = &qed_iwarp_create_listen,
        .iwarp_destroy_listen = &qed_iwarp_destroy_listen,
        .iwarp_accept = &qed_iwarp_accept,
        .iwarp_reject = &qed_iwarp_reject,
        .iwarp_send_rtr = &qed_iwarp_send_rtr,
};

const struct qed_rdma_ops *qed_get_rdma_ops(void)
{
        return &qed_rdma_ops_pass;
}
EXPORT_SYMBOL(qed_get_rdma_ops);