/* QLogic qed NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/qed/qed_roce_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_roce.h"

void qed_async_roce_event(struct qed_hwfn *p_hwfn,
			  struct event_ring_entry *p_eqe)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;

	p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
					     p_eqe->opcode, &p_eqe->data);
}

static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
			       struct qed_bmap *bmap, u32 max_count)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);

	bmap->max_count = max_count;

	bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long),
			       GFP_KERNEL);
	if (!bmap->bitmap) {
		DP_NOTICE(p_hwfn,
			  "qed bmap alloc failed: cannot allocate memory (bitmap)\n");
		return -ENOMEM;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n",
		   bmap->bitmap);
	return 0;
}
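
/* The bitmap helpers above and below form the id allocator used throughout
 * this file (PDs, DPIs, CQs, toggle bits, TIDs and CIDs).
 * qed_rdma_bmap_alloc_id() hands out the lowest free id and
 * qed_bmap_release_id() returns it. Neither helper takes a lock itself;
 * callers in this file serialize both under p_hwfn->p_rdma_info->lock.
 */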

static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
				  struct qed_bmap *bmap, u32 *id_num)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap);

	*id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);

	if (*id_num >= bmap->max_count) {
		DP_NOTICE(p_hwfn, "no id available max_count=%d\n",
			  bmap->max_count);
		return -EINVAL;
	}

	__set_bit(*id_num, bmap->bitmap);

	return 0;
}

static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
				struct qed_bmap *bmap, u32 id_num)
{
	bool b_acquired;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x", id_num);
	if (id_num >= bmap->max_count)
		return;

	b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
	if (!b_acquired) {
		DP_NOTICE(p_hwfn, "ID %d already released\n", id_num);
		return;
	}
}

u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
	/* First sb id for RoCE is after all the l2 sb */
	return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
}

u32 qed_rdma_query_cau_timer_res(void *rdma_cxt)
{
	return QED_CAU_DEF_RX_TIMER_RES;
}

static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_info *p_rdma_info;
	u32 num_cons, num_tasks;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");

	/* Allocate a struct with current pf rdma info */
	p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
	if (!p_rdma_info) {
		DP_NOTICE(p_hwfn,
			  "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n",
			  rc);
		return rc;
	}

	p_hwfn->p_rdma_info = p_rdma_info;
	p_rdma_info->proto = PROTOCOLID_ROCE;

	num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0);

	p_rdma_info->num_qps = num_cons / 2;

	num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);

	/* Each MR uses a single task */
	p_rdma_info->num_mrs = num_tasks;

	/* Queue zone lines are shared between RoCE and L2 in such a way that
	 * they can be used by each without obstructing the other.
	 */
	p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);

	/* Allocate a struct with device params and fill it */
	p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
	if (!p_rdma_info->dev) {
		DP_NOTICE(p_hwfn,
			  "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
			  rc);
		goto free_rdma_info;
	}

	/* Allocate a struct with port params and fill it */
	p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
	if (!p_rdma_info->port) {
		DP_NOTICE(p_hwfn,
			  "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n",
			  rc);
		goto free_rdma_dev;
	}

	/* Allocate bit map for pd's */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate pd_map, rc = %d\n",
			   rc);
		goto free_rdma_port;
	}

	/* Allocate DPI bitmap */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
				 p_hwfn->dpi_count);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate DPI bitmap, rc = %d\n", rc);
		goto free_pd_map;
	}

	/* Allocate bitmap for cq's. The maximum number of CQs is bounded to
	 * twice the number of QPs.
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
				 p_rdma_info->num_qps * 2);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate cq bitmap, rc = %d\n", rc);
		goto free_dpi_map;
	}

	/* Allocate bitmap for toggle bit for cq icids
	 * We toggle the bit every time we create or resize cq for a given icid.
	 * The maximum number of CQs is bounded to twice the number of QPs.
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
				 p_rdma_info->num_qps * 2);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate toggle bits, rc = %d\n", rc);
		goto free_cq_map;
	}

	/* Allocate bitmap for itids */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
				 p_rdma_info->num_mrs);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate itids bitmaps, rc = %d\n", rc);
		goto free_toggle_map;
	}

	/* Allocate bitmap for cids used for qps. */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate cid bitmap, rc = %d\n", rc);
		goto free_tid_map;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
	return 0;

free_tid_map:
	kfree(p_rdma_info->tid_map.bitmap);
free_toggle_map:
	kfree(p_rdma_info->toggle_bits.bitmap);
free_cq_map:
	kfree(p_rdma_info->cq_map.bitmap);
free_dpi_map:
	kfree(p_rdma_info->dpi_map.bitmap);
free_pd_map:
	kfree(p_rdma_info->pd_map.bitmap);
free_rdma_port:
	kfree(p_rdma_info->port);
free_rdma_dev:
	kfree(p_rdma_info->dev);
free_rdma_info:
	kfree(p_rdma_info);

	return rc;
}

void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;

	kfree(p_rdma_info->cid_map.bitmap);
	kfree(p_rdma_info->tid_map.bitmap);
	kfree(p_rdma_info->toggle_bits.bitmap);
	kfree(p_rdma_info->cq_map.bitmap);
	kfree(p_rdma_info->dpi_map.bitmap);
	kfree(p_rdma_info->pd_map.bitmap);

	kfree(p_rdma_info->port);
	kfree(p_rdma_info->dev);

	kfree(p_rdma_info);
}

static void qed_rdma_free(struct qed_hwfn *p_hwfn)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");

	qed_rdma_resc_free(p_hwfn);
}

static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
{
	guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
	guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
	guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
	guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
	guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
}
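
/* The GUID above is the port MAC address in modified EUI-64 form: the
 * locally-administered bit of the first octet is flipped (^ 2) and the
 * bytes 0xff, 0xfe are inserted between the OUI and the device part.
 * For example, MAC 00:0e:1e:aa:bb:cc becomes GUID 02:0e:1e:ff:fe:aa:bb:cc.
 */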

static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
				 struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_events *events;

	events = &p_hwfn->p_rdma_info->events;

	events->unaffiliated_event = params->events->unaffiliated_event;
	events->affiliated_event = params->events->affiliated_event;
	events->context = params->events->context;
}

static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
				  struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 pci_status_control;
	u32 num_qps;

	/* Vendor specific information */
	dev->vendor_id = cdev->vendor_id;
	dev->vendor_part_id = cdev->device_id;
	dev->hw_ver = 0;
	dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
		      (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);

	qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
	dev->node_guid = dev->sys_image_guid;

	dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
			     RDMA_MAX_SGE_PER_RQ_WQE);

	if (cdev->rdma_max_sge)
		dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);

	dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;

	dev->max_inline = (cdev->rdma_max_inline) ?
			  min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
			  dev->max_inline;

	dev->max_wqe = QED_RDMA_MAX_WQE;
	dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);

	/* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
	 * it is up-aligned to 16 and then to ILT page size within qed cxt.
	 * This is OK in terms of ILT but we don't want to configure the FW
	 * above its abilities
	 */
	num_qps = ROCE_MAX_QPS;
	num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
	dev->max_qp = num_qps;

	/* CQs uses the same icids that QPs use hence they are limited by the
	 * number of icids. There are two icids per QP.
	 */
	dev->max_cq = num_qps * 2;

	/* The number of mrs is smaller by 1 since the first is reserved */
	dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
	dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;

	/* The maximum CQE capacity per CQ supported.
	 * max number of cqes will be in two layer pbl,
	 * 8 is the pointer size in bytes
	 * 32 is the size of cq element in bytes
	 */
	if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
		dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
	else
		dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;

	dev->max_mw = 0;
	dev->max_fmr = QED_RDMA_MAX_FMR;
	dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
	dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
	dev->max_pkey = QED_RDMA_MAX_P_KEY;

	dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					  (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
	dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					 RDMA_REQ_RD_ATOMIC_ELM_SIZE;
	dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
					   p_hwfn->p_rdma_info->num_qps;
	dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
	dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
	dev->max_pd = RDMA_MAX_PDS;
	dev->max_ah = p_hwfn->p_rdma_info->num_qps;
	dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);

	/* Set capabilities */
	dev->dev_caps = 0;
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);

	/* Check atomic operations support in PCI configuration space. */
	pci_read_config_dword(cdev->pdev,
			      cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
			      &pci_status_control);

	if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
		SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
}

static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	port->port_state = p_hwfn->mcp_info->link_output.link_up ?
			   QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

	port->max_msg_size = min_t(u64,
				   (dev->max_mr_mw_fmr_size *
				    p_hwfn->cdev->rdma_max_sge),
				   BIT(31));

	port->pkey_bad_counter = 0;
}

static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 ll2_ethertype_en;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
	p_hwfn->b_rdma_enabled_in_prs = false;

	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;

	/* We delay writing to this reg until first cid is allocated. See
	 * qed_cxt_dynamic_ilt_alloc function for more details
	 */
	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en | 0x01));

	if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
		DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
	return 0;
}

static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
			     struct qed_rdma_start_in_params *params,
			     struct qed_ptt *p_ptt)
{
	struct rdma_init_func_ramrod_data *p_ramrod;
	struct qed_rdma_cnq_params *p_cnq_pbl_list;
	struct rdma_init_func_hdr *p_params_header;
	struct rdma_cnq_params *p_cnq_params;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 cnq_id, sb_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");

	/* Save the number of cnqs for the function close ramrod */
	p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.roce_init_func.rdma;

	p_params_header = &p_ramrod->params_header;
	p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
							    QED_RDMA_CNQ_RAM);
	p_params_header->num_cnqs = params->desired_cnq;

	if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
		p_params_header->cq_ring_mode = 1;
	else
		p_params_header->cq_ring_mode = 0;

	for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
		sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
		p_cnq_params = &p_ramrod->cnq_params[cnq_id];
		p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
		p_cnq_params->sb_num =
			cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id);

		p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
		p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;

		DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
			       p_cnq_pbl_list->pbl_ptr);

		/* we assume here that cnq_id and qz_offset are the same */
		p_cnq_params->queue_zone_num =
			cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
				    cnq_id);
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	/* The first DPI is reserved for the Kernel */
	__set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);

	/* Tid 0 will be used as the key for "reserved MR".
	 * The driver should allocate memory for it so it can be loaded but no
	 * ramrod should be passed on it.
	 */
	qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
	if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
		DP_NOTICE(p_hwfn,
			  "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
		return -EINVAL;
	}

	return 0;
}

static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_rdma_start_in_params *params)
{
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");

	spin_lock_init(&p_hwfn->p_rdma_info->lock);

	qed_rdma_init_devinfo(p_hwfn, params);
	qed_rdma_init_port(p_hwfn);
	qed_rdma_init_events(p_hwfn, params);

	rc = qed_rdma_reserve_lkey(p_hwfn);
	if (rc)
		return rc;

	rc = qed_rdma_init_hw(p_hwfn, p_ptt);
	if (rc)
		return rc;

	return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}

int qed_rdma_stop(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_close_func_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_ptt *p_ptt;
	u32 ll2_ethertype_en;
	int rc = -EBUSY;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
		return rc;
	}

	/* Disable RoCE search */
	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
	p_hwfn->b_rdma_enabled_in_prs = false;

	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);

	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en & 0xFFFE));

	qed_ptt_release(p_hwfn, p_ptt);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Stop RoCE */
	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		goto out;

	p_ramrod = &p_ent->ramrod.rdma_close_func;

	p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
	p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

out:
	qed_rdma_free(p_hwfn);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
	return rc;
}
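
/* Each RDMA user gets a dedicated DPI (doorbell page index). The doorbell
 * address handed back below is computed as
 *   doorbells + dpi_start_offset + dpi * dpi_size
 * so every user writes its doorbells into its own dpi_size-byte window of
 * the doorbell BAR, with the same layout mirrored for the physical address.
 */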

int qed_rdma_add_user(void *rdma_cxt,
		      struct qed_rdma_add_user_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 dpi_start_offset;
	u32 returned_id = 0;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");

	/* Allocate DPI */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
				    &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	out_params->dpi = (u16)returned_id;

	/* Calculate the corresponding DPI address */
	dpi_start_offset = p_hwfn->dpi_start_offset;

	out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
				     dpi_start_offset +
				     ((out_params->dpi) * p_hwfn->dpi_size));

	out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
				    dpi_start_offset +
				    ((out_params->dpi) * p_hwfn->dpi_size);

	out_params->dpi_size = p_hwfn->dpi_size;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
	return rc;
}

struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");

	/* Link may have changed */
	p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
			     QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

	p_port->link_speed = p_hwfn->mcp_info->link_output.speed;

	return p_port;
}

struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");

	/* Return struct with device parameters */
	return p_hwfn->p_rdma_info->dev;
}

void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->tid_map, itid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc)
		goto out;

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
out:
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
	return rc;
}

void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
	struct qed_hwfn *p_hwfn;
	u16 qz_num;
	u32 addr;

	p_hwfn = (struct qed_hwfn *)rdma_cxt;
	qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
	addr = GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	wmb();
}

static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
				  struct qed_dev_rdma_info *info)
{
	memset(info, 0, sizeof(*info));

	info->rdma_type = QED_RDMA_TYPE_ROCE;

	qed_fill_dev_info(cdev, &info->common);

	return 0;
}

static int qed_rdma_get_sb_start(struct qed_dev *cdev)
{
	int feat_num;

	if (cdev->num_hwfns > 1)
		feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
	else
		feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
			   cdev->num_hwfns;

	return feat_num;
}

static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
{
	int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
	int n_msix = cdev->int_params.rdma_msix_cnt;

	return min_t(int, n_cnq, n_msix);
}
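
/* RDMA interrupt vectors are carved out of the device's MSI-X table as a
 * contiguous slice: rdma_msix_cnt entries starting at rdma_msix_base, as
 * reported by qed_rdma_get_int() below. The CNQ count exposed to the
 * protocol driver is therefore clamped to the number of such vectors.
 */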

static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
		DP_ERR(cdev,
		       "qed roce supports only MSI-X interrupts (detected %d).\n",
		       cdev->int_params.out.int_mode);
		return -EINVAL;
	} else if (cdev->int_params.fp_msix_cnt) {
		limit = cdev->int_params.rdma_msix_cnt;
	}

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(*info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.rdma_msix_base;

		info->msix_cnt = cdev->int_params.rdma_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];

		DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
			   info->msix_cnt, msix_base);
	}

	return 0;
}

int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 returned_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");

	/* Allocates an unused protection domain */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->pd_map, &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	*pd = (u16)returned_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
	return rc;
}

void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);

	/* Returns a previously allocated protection domain for reuse */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static enum qed_rdma_toggle_bit
qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
{
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	enum qed_rdma_toggle_bit toggle_bit;
	u32 bmap_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);

	/* The function toggles the bit that is related to a given icid
	 * and returns the new toggle bit's value.
	 */
	bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);

	spin_lock_bh(&p_info->lock);
	toggle_bit = !test_and_change_bit(bmap_id,
					  p_info->toggle_bits.bitmap);
	spin_unlock_bh(&p_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
		   toggle_bit);

	return toggle_bit;
}
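
/* Every create or resize of a CQ on a given icid flips the per-icid toggle
 * bit above; the new value is carried in the ramrod and flipped back if
 * posting fails, keeping driver and firmware in agreement on the bit's
 * value for that icid.
 */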

int qed_rdma_create_cq(void *rdma_cxt,
		       struct qed_rdma_create_cq_in_params *params, u16 *icid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	struct rdma_create_cq_ramrod_data *p_ramrod;
	enum qed_rdma_toggle_bit toggle_bit;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 returned_id, start_cid;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
		   params->cq_handle_hi, params->cq_handle_lo);

	/* Allocate icid */
	spin_lock_bh(&p_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_info->cq_map, &returned_id);
	spin_unlock_bh(&p_info->lock);

	if (rc) {
		DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
		return rc;
	}

	start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
						p_info->proto);
	*icid = returned_id + start_cid;

	/* Check if icid requires a page allocation */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
	if (rc)
		goto err;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = *icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Send create CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_CREATE_CQ,
				 p_info->proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_create_cq;

	p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
	p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
	p_ramrod->dpi = cpu_to_le16(params->dpi);
	p_ramrod->is_two_level_pbl = params->pbl_two_level;
	p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
	p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
	p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
			   params->cnq_id;
	p_ramrod->int_timeout = params->int_timeout;

	/* toggle the bit for every resize or create cq for a given icid */
	toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);

	p_ramrod->toggle_bit = toggle_bit;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		/* restore toggle bit */
		qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
		goto err;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
	return rc;

err:
	/* release allocated icid */
	qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
	DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);

	return rc;
}

int qed_rdma_resize_cq(void *rdma_cxt,
		       struct qed_rdma_resize_cq_in_params *in_params,
		       struct qed_rdma_resize_cq_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_resize_cq_output_params *p_ramrod_res;
	struct rdma_resize_cq_ramrod_data *p_ramrod;
	enum qed_rdma_toggle_bit toggle_bit;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	u8 fw_return_code;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);

	p_ramrod_res =
	    (struct rdma_resize_cq_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(struct rdma_resize_cq_output_params),
			       &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed resize cq failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = in_params->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_RESIZE_CQ,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_resize_cq;

	p_ramrod->flags = 0;

	/* toggle the bit for every resize or create cq for a given icid */
	toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn,
							  in_params->icid);

	SET_FIELD(p_ramrod->flags,
		  RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit);

	SET_FIELD(p_ramrod->flags,
		  RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
		  in_params->pbl_two_level);

	p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
	p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages);
	p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc)
		goto err;

	if (fw_return_code != RDMA_RETURN_OK) {
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
		rc = -EINVAL;
		goto err;
	}

	out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod);
	out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_resize_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc);

	return rc;

err:	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_resize_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);
	DP_NOTICE(p_hwfn, "Resized CQ, Failed - rc = %d\n", rc);

	return rc;
}

int qed_rdma_destroy_cq(void *rdma_cxt,
			struct qed_rdma_destroy_cq_in_params *in_params,
			struct qed_rdma_destroy_cq_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_destroy_cq_output_params *p_ramrod_res;
	struct rdma_destroy_cq_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);

	p_ramrod_res =
	    (struct rdma_destroy_cq_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(struct rdma_destroy_cq_output_params),
			       &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy cq failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = in_params->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Send destroy CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_DESTROY_CQ,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	/* Free icid */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	qed_bmap_release_id(p_hwfn,
			    &p_hwfn->p_rdma_info->cq_map,
			    (in_params->icid -
			     qed_cxt_get_proto_cid_start(p_hwfn,
							 p_hwfn->
							 p_rdma_info->proto)));

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
	return rc;

err:	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}
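
/* qed_rdma_set_fw_mac() below packs the 6-byte MAC into three 16-bit words,
 * pairing consecutive bytes (aa:bb:cc:dd:ee:ff -> 0xaabb, 0xccdd, 0xeeff)
 * and converting each word to little-endian for the ramrod.
 */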

static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
{
	p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
	p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
	p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
}

static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
			       __le32 *dst_gid)
{
	u32 i;

	if (qp->roce_mode == ROCE_V2_IPV4) {
		/* The IPv4 addresses shall be aligned to the highest word.
		 * The lower words must be zero.
		 */
		memset(src_gid, 0, sizeof(union qed_gid));
		memset(dst_gid, 0, sizeof(union qed_gid));
		src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
		dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
	} else {
		/* GIDs and IPv6 addresses coincide in location and size */
		for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
			src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
			dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
		}
	}
}

static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
{
	enum roce_flavor flavor;

	switch (roce_mode) {
	case ROCE_V1:
		flavor = PLAIN_ROCE;
		break;
	case ROCE_V2_IPV4:
		flavor = RROCE_IPV4;
		break;
	case ROCE_V2_IPV6:
		flavor = RROCE_IPV6;
		break;
	default:
		flavor = MAX_ROCE_FLAVOR;
		break;
	}
	return flavor;
}

int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 responder_icid;
	u32 requester_icid;
	int rc;

	spin_lock_bh(&p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
				    &responder_icid);
	if (rc) {
		spin_unlock_bh(&p_rdma_info->lock);
		return rc;
	}

	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
				    &requester_icid);

	spin_unlock_bh(&p_rdma_info->lock);
	if (rc)
		goto err;

	/* the two icid's should be adjacent */
	if ((requester_icid - responder_icid) != 1) {
		DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's\n");
		rc = -EINVAL;
		goto err;
	}

	responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
						      p_rdma_info->proto);
	requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
						      p_rdma_info->proto);

	/* If these icids require a new ILT line allocate DMA-able context for
	 * an ILT page
	 */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
	if (rc)
		goto err;

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
	if (rc)
		goto err;

	*cid = (u16)responder_icid;
	return rc;

err:
	spin_lock_bh(&p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);

	spin_unlock_bh(&p_rdma_info->lock);
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "Allocate CID - failed, rc = %d\n", rc);
	return rc;
}
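
/* A RoCE QP consumes two adjacent connection icids: the even one (returned
 * from qed_roce_alloc_cid() above) backs the responder and the odd one,
 * icid + 1, backs the requester. The create/modify/query/destroy helpers
 * below rely on this by addressing the responder with qp->icid and the
 * requester with qp->icid + 1.
 */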

static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp)
{
	struct roce_create_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	union qed_qm_pq_params qm_params;
	enum roce_flavor roce_flavor;
	struct qed_spq_entry *p_ent;
	u16 physical_queue0 = 0;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for IRQ */
	qp->irq_num_pages = 1;
	qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     RDMA_RING_PAGE_SIZE,
				     &qp->irq_phys_addr, GFP_KERNEL);
	if (!qp->irq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_create_qp_resp;

	p_ramrod->flags = 0;

	roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->irq_num_pages = qp->irq_num_pages;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
	p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
				       qp->rq_cq_id);

	memset(&qm_params, 0, sizeof(qm_params));
	qm_params.roce.qpid = qp->icid >> 1;
	physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);

	p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = qp->udp_src_port;
	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);

	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
		   rc, physical_queue0);

	if (rc)
		goto err;

	qp->resp_offloaded = true;

	return rc;

err:
	DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->irq, qp->irq_phys_addr);

	return rc;
}

static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp)
{
	struct roce_create_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	union qed_qm_pq_params qm_params;
	enum roce_flavor roce_flavor;
	struct qed_spq_entry *p_ent;
	u16 physical_queue0 = 0;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for ORQ */
	qp->orq_num_pages = 1;
	qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     RDMA_RING_PAGE_SIZE,
				     &qp->orq_phys_addr, GFP_KERNEL);
	if (!qp->orq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_CREATE_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_create_qp_req;

	p_ramrod->flags = 0;

	roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->orq_num_pages = qp->orq_num_pages;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
	p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
				       qp->sq_cq_id);

	memset(&qm_params, 0, sizeof(qm_params));
	qm_params.roce.qpid = qp->icid >> 1;
	physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);

	p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = qp->udp_src_port;
	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);

	if (rc)
		goto err;

	qp->req_offloaded = true;

	return rc;

err:
	DP_NOTICE(p_hwfn, "Create requester - failed, rc = %d\n", rc);
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);
	return rc;
}
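
/* For the modify helpers below, each QED_ROCE_MODIFY_QP_VALID_* /
 * QED_RDMA_MODIFY_QP_VALID_* bit in modify_flags is translated into the
 * matching *_FLG bit of the ramrod, telling firmware which of the supplied
 * QP attributes it should actually apply.
 */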

static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !qp->resp_offloaded)
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;

	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
	return rc;
}

static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_sqd,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !(qp->req_offloaded))
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_req;

	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
		  qp->sqd_async);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);

	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
	return rc;
}

static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp,
					    u32 *num_invalidated_mw)
{
	struct roce_destroy_qp_resp_output_params *p_ramrod_res;
	struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (!qp->resp_offloaded)
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;

	p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
			       &ramrod_res_phys, GFP_KERNEL);

	if (!p_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
			  rc);
		return rc;
	}

	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	*num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);

	/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->irq, qp->irq_phys_addr);

	qp->resp_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct roce_destroy_qp_resp_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp,
					    u32 *num_bound_mw)
{
	struct roce_destroy_qp_req_output_params *p_ramrod_res;
	struct roce_destroy_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (!qp->req_offloaded)
		return 0;

	p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
		       dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					  sizeof(*p_ramrod_res),
					  &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy requester failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	*num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);

	/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);

	qp->req_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}
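
/* Querying a QP is split along the same responder/requester lines as
 * creation: a query ramrod on qp->icid returns the RQ PSN and error state,
 * and a second ramrod on qp->icid + 1 returns the SQ PSN, error state and
 * the SQ-draining flag. If neither half has been offloaded yet, the values
 * cached in the qp structure are returned instead.
 */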

int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
		      struct qed_rdma_qp *qp,
		      struct qed_rdma_query_qp_out_params *out_params)
{
	struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
	struct roce_query_qp_req_output_params *p_req_ramrod_res;
	struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
	struct roce_query_qp_req_ramrod_data *p_req_ramrod;
	struct qed_sp_init_data init_data;
	dma_addr_t resp_ramrod_res_phys;
	dma_addr_t req_ramrod_res_phys;
	struct qed_spq_entry *p_ent;
	bool rq_err_state;
	bool sq_err_state;
	bool sq_draining;
	int rc = -ENOMEM;

	if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
		/* We can't send ramrod to the fw since this qp wasn't offloaded
		 * to the fw yet
		 */
		out_params->draining = false;
		out_params->rq_psn = qp->rq_psn;
		out_params->sq_psn = qp->sq_psn;
		out_params->state = qp->cur_state;

		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
		return 0;
	}

	if (!(qp->resp_offloaded)) {
		DP_NOTICE(p_hwfn,
			  "The responder's qp should be offloaded before requester's\n");
		return -EINVAL;
	}

	/* Send a query responder ramrod to FW to get RQ-PSN and state */
	p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(*p_resp_ramrod_res),
			       &resp_ramrod_res_phys, GFP_KERNEL);
	if (!p_resp_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_resp;

	p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
	DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_resp;

	out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
	rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
				 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);

	if (!(qp->req_offloaded)) {
		/* Don't send query qp for the requester */
		out_params->sq_psn = qp->sq_psn;
out_params->draining = false; 1871 1872 if (rq_err_state) 1873 qp->cur_state = QED_ROCE_QP_STATE_ERR; 1874 1875 out_params->state = qp->cur_state; 1876 1877 return 0; 1878 } 1879 1880 /* Send a query requester ramrod to FW to get SQ-PSN and state */ 1881 p_req_ramrod_res = (struct roce_query_qp_req_output_params *) 1882 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 1883 sizeof(*p_req_ramrod_res), 1884 &req_ramrod_res_phys, 1885 GFP_KERNEL); 1886 if (!p_req_ramrod_res) { 1887 rc = -ENOMEM; 1888 DP_NOTICE(p_hwfn, 1889 "qed query qp failed: cannot allocate memory (ramrod)\n"); 1890 return rc; 1891 } 1892 1893 /* Get SPQ entry */ 1894 init_data.cid = qp->icid + 1; 1895 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP, 1896 PROTOCOLID_ROCE, &init_data); 1897 if (rc) 1898 goto err_req; 1899 1900 p_req_ramrod = &p_ent->ramrod.roce_query_qp_req; 1901 DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys); 1902 1903 rc = qed_spq_post(p_hwfn, p_ent, NULL); 1904 if (rc) 1905 goto err_req; 1906 1907 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res), 1908 p_req_ramrod_res, req_ramrod_res_phys); 1909 1910 out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn); 1911 sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags), 1912 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG); 1913 sq_draining = 1914 GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags), 1915 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG); 1916 1917 out_params->draining = false; 1918 1919 if (rq_err_state) 1920 qp->cur_state = QED_ROCE_QP_STATE_ERR; 1921 else if (sq_err_state) 1922 qp->cur_state = QED_ROCE_QP_STATE_SQE; 1923 else if (sq_draining) 1924 out_params->draining = true; 1925 out_params->state = qp->cur_state; 1926 1927 return 0; 1928 1929 err_req: 1930 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res), 1931 p_req_ramrod_res, req_ramrod_res_phys); 1932 return rc; 1933 err_resp: 1934 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res), 1935 p_resp_ramrod_res, resp_ramrod_res_phys); 1936 return rc; 1937 } 1938 1939 int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) 1940 { 1941 u32 num_invalidated_mw = 0; 1942 u32 num_bound_mw = 0; 1943 u32 start_cid; 1944 int rc; 1945 1946 /* Destroys the specified QP */ 1947 if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) && 1948 (qp->cur_state != QED_ROCE_QP_STATE_ERR) && 1949 (qp->cur_state != QED_ROCE_QP_STATE_INIT)) { 1950 DP_NOTICE(p_hwfn, 1951 "QP must be in error, reset or init state before destroying it\n"); 1952 return -EINVAL; 1953 } 1954 1955 rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, &num_invalidated_mw); 1956 if (rc) 1957 return rc; 1958 1959 /* Send destroy requester ramrod */ 1960 rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, &num_bound_mw); 1961 if (rc) 1962 return rc; 1963 1964 if (num_invalidated_mw != num_bound_mw) { 1965 DP_NOTICE(p_hwfn, 1966 "number of invalidate memory windows is different from bounded ones\n"); 1967 return -EINVAL; 1968 } 1969 1970 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 1971 1972 start_cid = qed_cxt_get_proto_cid_start(p_hwfn, 1973 p_hwfn->p_rdma_info->proto); 1974 1975 /* Release responder's icid */ 1976 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1977 qp->icid - start_cid); 1978 1979 /* Release requester's icid */ 1980 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1981 qp->icid + 1 - start_cid); 1982 1983 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 1984 1985 return 0; 1986 } 1987 1988 int qed_rdma_query_qp(void 
*rdma_cxt, 1989 struct qed_rdma_qp *qp, 1990 struct qed_rdma_query_qp_out_params *out_params) 1991 { 1992 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 1993 int rc; 1994 1995 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 1996 1997 /* The following fields are filled in from qp and not FW as they can't 1998 * be modified by FW 1999 */ 2000 out_params->mtu = qp->mtu; 2001 out_params->dest_qp = qp->dest_qp; 2002 out_params->incoming_atomic_en = qp->incoming_atomic_en; 2003 out_params->e2e_flow_control_en = qp->e2e_flow_control_en; 2004 out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en; 2005 out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en; 2006 out_params->dgid = qp->dgid; 2007 out_params->flow_label = qp->flow_label; 2008 out_params->hop_limit_ttl = qp->hop_limit_ttl; 2009 out_params->traffic_class_tos = qp->traffic_class_tos; 2010 out_params->timeout = qp->ack_timeout; 2011 out_params->rnr_retry = qp->rnr_retry_cnt; 2012 out_params->retry_cnt = qp->retry_cnt; 2013 out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer; 2014 out_params->pkey_index = 0; 2015 out_params->max_rd_atomic = qp->max_rd_atomic_req; 2016 out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp; 2017 out_params->sqd_async = qp->sqd_async; 2018 2019 rc = qed_roce_query_qp(p_hwfn, qp, out_params); 2020 2021 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc); 2022 return rc; 2023 } 2024 2025 int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) 2026 { 2027 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2028 int rc = 0; 2029 2030 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 2031 2032 rc = qed_roce_destroy_qp(p_hwfn, qp); 2033 2034 /* free qp params struct */ 2035 kfree(qp); 2036 2037 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n"); 2038 return rc; 2039 } 2040 2041 struct qed_rdma_qp * 2042 qed_rdma_create_qp(void *rdma_cxt, 2043 struct qed_rdma_create_qp_in_params *in_params, 2044 struct qed_rdma_create_qp_out_params *out_params) 2045 { 2046 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2047 struct qed_rdma_qp *qp; 2048 u8 max_stats_queues; 2049 int rc; 2050 2051 if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) { 2052 DP_ERR(p_hwfn->cdev, 2053 "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n", 2054 rdma_cxt, in_params, out_params); 2055 return NULL; 2056 } 2057 2058 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 2059 "qed rdma create qp called with qp_handle = %08x%08x\n", 2060 in_params->qp_handle_hi, in_params->qp_handle_lo); 2061 2062 /* Some sanity checks... */ 2063 max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues; 2064 if (in_params->stats_queue >= max_stats_queues) { 2065 DP_ERR(p_hwfn->cdev, 2066 "qed rdma create qp failed due to invalid statistics queue %d. 
maximum is %d\n", 2067 in_params->stats_queue, max_stats_queues); 2068 return NULL; 2069 } 2070 2071 qp = kzalloc(sizeof(*qp), GFP_KERNEL); 2072 if (!qp) { 2073 DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n"); 2074 return NULL; 2075 } 2076 2077 rc = qed_roce_alloc_cid(p_hwfn, &qp->icid); 2078 qp->qpid = ((0xFF << 16) | qp->icid); 2079 2080 DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid); 2081 2082 if (rc) { 2083 kfree(qp); 2084 return NULL; 2085 } 2086 2087 qp->cur_state = QED_ROCE_QP_STATE_RESET; 2088 qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi); 2089 qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo); 2090 qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi); 2091 qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo); 2092 qp->use_srq = in_params->use_srq; 2093 qp->signal_all = in_params->signal_all; 2094 qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey; 2095 qp->pd = in_params->pd; 2096 qp->dpi = in_params->dpi; 2097 qp->sq_cq_id = in_params->sq_cq_id; 2098 qp->sq_num_pages = in_params->sq_num_pages; 2099 qp->sq_pbl_ptr = in_params->sq_pbl_ptr; 2100 qp->rq_cq_id = in_params->rq_cq_id; 2101 qp->rq_num_pages = in_params->rq_num_pages; 2102 qp->rq_pbl_ptr = in_params->rq_pbl_ptr; 2103 qp->srq_id = in_params->srq_id; 2104 qp->req_offloaded = false; 2105 qp->resp_offloaded = false; 2106 qp->e2e_flow_control_en = qp->use_srq ? false : true; 2107 qp->stats_queue = in_params->stats_queue; 2108 2109 out_params->icid = qp->icid; 2110 out_params->qp_id = qp->qpid; 2111 2112 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc); 2113 return qp; 2114 } 2115 2116 static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, 2117 struct qed_rdma_qp *qp, 2118 enum qed_roce_qp_state prev_state, 2119 struct qed_rdma_modify_qp_in_params *params) 2120 { 2121 u32 num_invalidated_mw = 0, num_bound_mw = 0; 2122 int rc = 0; 2123 2124 /* Perform additional operations according to the current state and the 2125 * next state 2126 */ 2127 if (((prev_state == QED_ROCE_QP_STATE_INIT) || 2128 (prev_state == QED_ROCE_QP_STATE_RESET)) && 2129 (qp->cur_state == QED_ROCE_QP_STATE_RTR)) { 2130 /* Init->RTR or Reset->RTR */ 2131 rc = qed_roce_sp_create_responder(p_hwfn, qp); 2132 return rc; 2133 } else if ((prev_state == QED_ROCE_QP_STATE_RTR) && 2134 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) { 2135 /* RTR-> RTS */ 2136 rc = qed_roce_sp_create_requester(p_hwfn, qp); 2137 if (rc) 2138 return rc; 2139 2140 /* Send modify responder ramrod */ 2141 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, 2142 params->modify_flags); 2143 return rc; 2144 } else if ((prev_state == QED_ROCE_QP_STATE_RTS) && 2145 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) { 2146 /* RTS->RTS */ 2147 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, 2148 params->modify_flags); 2149 if (rc) 2150 return rc; 2151 2152 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false, 2153 params->modify_flags); 2154 return rc; 2155 } else if ((prev_state == QED_ROCE_QP_STATE_RTS) && 2156 (qp->cur_state == QED_ROCE_QP_STATE_SQD)) { 2157 /* RTS->SQD */ 2158 rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false, 2159 params->modify_flags); 2160 return rc; 2161 } else if ((prev_state == QED_ROCE_QP_STATE_SQD) && 2162 (qp->cur_state == QED_ROCE_QP_STATE_SQD)) { 2163 /* SQD->SQD */ 2164 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, 2165 params->modify_flags); 2166 if (rc) 2167 return rc; 2168 2169 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false, 2170 params->modify_flags); 2171 return rc; 
2172 } else if ((prev_state == QED_ROCE_QP_STATE_SQD) && 2173 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) { 2174 /* SQD->RTS */ 2175 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, 2176 params->modify_flags); 2177 if (rc) 2178 return rc; 2179 2180 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false, 2181 params->modify_flags); 2182 2183 return rc; 2184 } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR || 2185 qp->cur_state == QED_ROCE_QP_STATE_SQE) { 2186 /* ->ERR */ 2187 rc = qed_roce_sp_modify_responder(p_hwfn, qp, true, 2188 params->modify_flags); 2189 if (rc) 2190 return rc; 2191 2192 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true, 2193 params->modify_flags); 2194 return rc; 2195 } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) { 2196 /* Any state -> RESET */ 2197 2198 rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, 2199 &num_invalidated_mw); 2200 if (rc) 2201 return rc; 2202 2203 rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, 2204 &num_bound_mw); 2205 2206 if (num_invalidated_mw != num_bound_mw) { 2207 DP_NOTICE(p_hwfn, 2208 "number of invalidate memory windows is different from bounded ones\n"); 2209 return -EINVAL; 2210 } 2211 } else { 2212 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n"); 2213 } 2214 2215 return rc; 2216 } 2217 2218 int qed_rdma_modify_qp(void *rdma_cxt, 2219 struct qed_rdma_qp *qp, 2220 struct qed_rdma_modify_qp_in_params *params) 2221 { 2222 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2223 enum qed_roce_qp_state prev_state; 2224 int rc = 0; 2225 2226 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n", 2227 qp->icid, params->new_state); 2228 2229 if (rc) { 2230 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); 2231 return rc; 2232 } 2233 2234 if (GET_FIELD(params->modify_flags, 2235 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) { 2236 qp->incoming_rdma_read_en = params->incoming_rdma_read_en; 2237 qp->incoming_rdma_write_en = params->incoming_rdma_write_en; 2238 qp->incoming_atomic_en = params->incoming_atomic_en; 2239 } 2240 2241 /* Update QP structure with the updated values */ 2242 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE)) 2243 qp->roce_mode = params->roce_mode; 2244 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)) 2245 qp->pkey = params->pkey; 2246 if (GET_FIELD(params->modify_flags, 2247 QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN)) 2248 qp->e2e_flow_control_en = params->e2e_flow_control_en; 2249 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP)) 2250 qp->dest_qp = params->dest_qp; 2251 if (GET_FIELD(params->modify_flags, 2252 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) { 2253 /* Indicates that the following parameters have changed: 2254 * Traffic class, flow label, hop limit, source GID, 2255 * destination GID, loopback indicator 2256 */ 2257 qp->traffic_class_tos = params->traffic_class_tos; 2258 qp->flow_label = params->flow_label; 2259 qp->hop_limit_ttl = params->hop_limit_ttl; 2260 2261 qp->sgid = params->sgid; 2262 qp->dgid = params->dgid; 2263 qp->udp_src_port = 0; 2264 qp->vlan_id = params->vlan_id; 2265 qp->mtu = params->mtu; 2266 qp->lb_indication = params->lb_indication; 2267 memcpy((u8 *)&qp->remote_mac_addr[0], 2268 (u8 *)¶ms->remote_mac_addr[0], ETH_ALEN); 2269 if (params->use_local_mac) { 2270 memcpy((u8 *)&qp->local_mac_addr[0], 2271 (u8 *)¶ms->local_mac_addr[0], ETH_ALEN); 2272 } else { 2273 memcpy((u8 *)&qp->local_mac_addr[0], 2274 (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); 2275 } 2276 } 2277 if 
(GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN)) 2278 qp->rq_psn = params->rq_psn; 2279 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN)) 2280 qp->sq_psn = params->sq_psn; 2281 if (GET_FIELD(params->modify_flags, 2282 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ)) 2283 qp->max_rd_atomic_req = params->max_rd_atomic_req; 2284 if (GET_FIELD(params->modify_flags, 2285 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP)) 2286 qp->max_rd_atomic_resp = params->max_rd_atomic_resp; 2287 if (GET_FIELD(params->modify_flags, 2288 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT)) 2289 qp->ack_timeout = params->ack_timeout; 2290 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT)) 2291 qp->retry_cnt = params->retry_cnt; 2292 if (GET_FIELD(params->modify_flags, 2293 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT)) 2294 qp->rnr_retry_cnt = params->rnr_retry_cnt; 2295 if (GET_FIELD(params->modify_flags, 2296 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER)) 2297 qp->min_rnr_nak_timer = params->min_rnr_nak_timer; 2298 2299 qp->sqd_async = params->sqd_async; 2300 2301 prev_state = qp->cur_state; 2302 if (GET_FIELD(params->modify_flags, 2303 QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) { 2304 qp->cur_state = params->new_state; 2305 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n", 2306 qp->cur_state); 2307 } 2308 2309 rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params); 2310 2311 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc); 2312 return rc; 2313 } 2314 2315 int qed_rdma_register_tid(void *rdma_cxt, 2316 struct qed_rdma_register_tid_in_params *params) 2317 { 2318 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2319 struct rdma_register_tid_ramrod_data *p_ramrod; 2320 struct qed_sp_init_data init_data; 2321 struct qed_spq_entry *p_ent; 2322 enum rdma_tid_type tid_type; 2323 u8 fw_return_code; 2324 int rc; 2325 2326 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid); 2327 2328 /* Get SPQ entry */ 2329 memset(&init_data, 0, sizeof(init_data)); 2330 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 2331 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 2332 2333 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR, 2334 p_hwfn->p_rdma_info->proto, &init_data); 2335 if (rc) { 2336 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); 2337 return rc; 2338 } 2339 2340 if (p_hwfn->p_rdma_info->last_tid < params->itid) 2341 p_hwfn->p_rdma_info->last_tid = params->itid; 2342 2343 p_ramrod = &p_ent->ramrod.rdma_register_tid; 2344 2345 p_ramrod->flags = 0; 2346 SET_FIELD(p_ramrod->flags, 2347 RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL, 2348 params->pbl_two_level); 2349 2350 SET_FIELD(p_ramrod->flags, 2351 RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva); 2352 2353 SET_FIELD(p_ramrod->flags, 2354 RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr); 2355 2356 /* Don't initialize D/C field, as it may override other bits. 
*/ 2357 if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr)) 2358 SET_FIELD(p_ramrod->flags, 2359 RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG, 2360 params->page_size_log - 12); 2361 2362 SET_FIELD(p_ramrod->flags, 2363 RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID, 2364 p_hwfn->p_rdma_info->last_tid); 2365 2366 SET_FIELD(p_ramrod->flags, 2367 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ, 2368 params->remote_read); 2369 2370 SET_FIELD(p_ramrod->flags, 2371 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE, 2372 params->remote_write); 2373 2374 SET_FIELD(p_ramrod->flags, 2375 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC, 2376 params->remote_atomic); 2377 2378 SET_FIELD(p_ramrod->flags, 2379 RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE, 2380 params->local_write); 2381 2382 SET_FIELD(p_ramrod->flags, 2383 RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read); 2384 2385 SET_FIELD(p_ramrod->flags, 2386 RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND, 2387 params->mw_bind); 2388 2389 SET_FIELD(p_ramrod->flags1, 2390 RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG, 2391 params->pbl_page_size_log - 12); 2392 2393 SET_FIELD(p_ramrod->flags2, 2394 RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr); 2395 2396 switch (params->tid_type) { 2397 case QED_RDMA_TID_REGISTERED_MR: 2398 tid_type = RDMA_TID_REGISTERED_MR; 2399 break; 2400 case QED_RDMA_TID_FMR: 2401 tid_type = RDMA_TID_FMR; 2402 break; 2403 case QED_RDMA_TID_MW_TYPE1: 2404 tid_type = RDMA_TID_MW_TYPE1; 2405 break; 2406 case QED_RDMA_TID_MW_TYPE2A: 2407 tid_type = RDMA_TID_MW_TYPE2A; 2408 break; 2409 default: 2410 rc = -EINVAL; 2411 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); 2412 return rc; 2413 } 2414 SET_FIELD(p_ramrod->flags1, 2415 RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type); 2416 2417 p_ramrod->itid = cpu_to_le32(params->itid); 2418 p_ramrod->key = params->key; 2419 p_ramrod->pd = cpu_to_le16(params->pd); 2420 p_ramrod->length_hi = (u8)(params->length >> 32); 2421 p_ramrod->length_lo = DMA_LO_LE(params->length); 2422 if (params->zbva) { 2423 /* Lower 32 bits of the registered MR address. 
		 * In case of zero based MR, will hold FBO
		 */
		p_ramrod->va.hi = 0;
		p_ramrod->va.lo = cpu_to_le32(params->fbo);
	} else {
		DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
	}
	DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);

	/* DIF */
	if (params->dif_enabled) {
		SET_FIELD(p_ramrod->flags2,
			  RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
		DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
			       params->dif_error_addr);
		DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
	}

	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);

	/* Only inspect fw_return_code if the ramrod actually completed;
	 * it is not written on a failed post.
	 */
	if (rc)
		return rc;

	if (fw_return_code != RDMA_RETURN_OK) {
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
	return rc;
}

int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_deregister_tid_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_ptt *p_ptt;
	u8 fw_return_code;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
	p_ramrod->itid = cpu_to_le32(itid);

	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
		return -EINVAL;
	} else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
		/* Bit indicating that the TID is in use and a nig drain is
		 * required before sending the ramrod again
		 */
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt) {
			rc = -EBUSY;
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to acquire PTT\n");
			return rc;
		}

		rc = qed_mcp_drain(p_hwfn, p_ptt);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Drain failed\n");
			return rc;
		}

		qed_ptt_release(p_hwfn, p_ptt);

		/* Resend the ramrod */
		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 RDMA_RAMROD_DEREGISTER_MR,
					 p_hwfn->p_rdma_info->proto,
					 &init_data);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to init sp-element\n");
			return rc;
		}

		rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Ramrod failed\n");
			return rc;
		}

		if (fw_return_code != RDMA_RETURN_OK) {
			DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
				  fw_return_code);
			return rc;
		}
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
	return rc;
}

static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
{
	return QED_LEADING_HWFN(cdev);
}

static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 val;
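	/* EDPM is left enabled only if neither DCBx nor the doorbell BAR
	 * layout has forced it off; the combined result is written to
	 * DORQ_REG_PF_DPM_ENABLE below.
	 */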
2548 2549 val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1; 2550 2551 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val); 2552 DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA), 2553 "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n", 2554 val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm); 2555 } 2556 2557 void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2558 { 2559 p_hwfn->db_bar_no_edpm = true; 2560 2561 qed_rdma_dpm_conf(p_hwfn, p_ptt); 2562 } 2563 2564 int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params) 2565 { 2566 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2567 struct qed_ptt *p_ptt; 2568 int rc = -EBUSY; 2569 2570 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 2571 "desired_cnq = %08x\n", params->desired_cnq); 2572 2573 p_ptt = qed_ptt_acquire(p_hwfn); 2574 if (!p_ptt) 2575 goto err; 2576 2577 rc = qed_rdma_alloc(p_hwfn, p_ptt, params); 2578 if (rc) 2579 goto err1; 2580 2581 rc = qed_rdma_setup(p_hwfn, p_ptt, params); 2582 if (rc) 2583 goto err2; 2584 2585 qed_ptt_release(p_hwfn, p_ptt); 2586 2587 return rc; 2588 2589 err2: 2590 qed_rdma_free(p_hwfn); 2591 err1: 2592 qed_ptt_release(p_hwfn, p_ptt); 2593 err: 2594 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc); 2595 return rc; 2596 } 2597 2598 static int qed_rdma_init(struct qed_dev *cdev, 2599 struct qed_rdma_start_in_params *params) 2600 { 2601 return qed_rdma_start(QED_LEADING_HWFN(cdev), params); 2602 } 2603 2604 void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) 2605 { 2606 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2607 2608 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi); 2609 2610 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 2611 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi); 2612 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 2613 } 2614 2615 void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn, 2616 u8 connection_handle, 2617 void *cookie, 2618 dma_addr_t first_frag_addr, 2619 bool b_last_fragment, bool b_last_packet) 2620 { 2621 struct qed_roce_ll2_packet *packet = cookie; 2622 struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2; 2623 2624 roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet); 2625 } 2626 2627 void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn, 2628 u8 connection_handle, 2629 void *cookie, 2630 dma_addr_t first_frag_addr, 2631 bool b_last_fragment, bool b_last_packet) 2632 { 2633 qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle, 2634 cookie, first_frag_addr, 2635 b_last_fragment, b_last_packet); 2636 } 2637 2638 void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn, 2639 u8 connection_handle, 2640 void *cookie, 2641 dma_addr_t rx_buf_addr, 2642 u16 data_length, 2643 u8 data_length_error, 2644 u16 parse_flags, 2645 u16 vlan, 2646 u32 src_mac_addr_hi, 2647 u16 src_mac_addr_lo, bool b_last_packet) 2648 { 2649 struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2; 2650 struct qed_roce_ll2_rx_params params; 2651 struct qed_dev *cdev = p_hwfn->cdev; 2652 struct qed_roce_ll2_packet pkt; 2653 2654 DP_VERBOSE(cdev, 2655 QED_MSG_LL2, 2656 "roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n", 2657 (void *)(uintptr_t)rx_buf_addr, 2658 data_length, data_length_error); 2659 2660 memset(&pkt, 0, sizeof(pkt)); 2661 pkt.n_seg = 1; 2662 pkt.payload[0].baddr = rx_buf_addr; 2663 pkt.payload[0].len = data_length; 2664 2665 memset(¶ms, 0, sizeof(params)); 2666 params.vlan_id = vlan; 2667 *((u32 *)¶ms.smac[0]) = ntohl(src_mac_addr_hi); 2668 *((u16 *)¶ms.smac[4]) = ntohs(src_mac_addr_lo); 
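	/* params.smac now holds the peer MAC rebuilt from the two CQE words
	 * above (bytes 0-3 from src_mac_addr_hi, bytes 4-5 from
	 * src_mac_addr_lo) before the packet is handed to the upper layer's
	 * rx_cb below.
	 */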
2669 2670 if (data_length_error) { 2671 DP_ERR(cdev, 2672 "roce ll2 rx complete: data length error %d, length=%d\n", 2673 data_length_error, data_length); 2674 params.rc = -EINVAL; 2675 } 2676 2677 roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, ¶ms); 2678 } 2679 2680 static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev, 2681 u8 *old_mac_address, 2682 u8 *new_mac_address) 2683 { 2684 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2685 struct qed_ptt *p_ptt; 2686 int rc = 0; 2687 2688 if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) { 2689 DP_ERR(cdev, 2690 "qed roce mac filter failed - roce_info/ll2 NULL\n"); 2691 return -EINVAL; 2692 } 2693 2694 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 2695 if (!p_ptt) { 2696 DP_ERR(cdev, 2697 "qed roce ll2 mac filter set: failed to acquire PTT\n"); 2698 return -EINVAL; 2699 } 2700 2701 mutex_lock(&hwfn->ll2->lock); 2702 if (old_mac_address) 2703 qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt, 2704 old_mac_address); 2705 if (new_mac_address) 2706 rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt, 2707 new_mac_address); 2708 mutex_unlock(&hwfn->ll2->lock); 2709 2710 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt); 2711 2712 if (rc) 2713 DP_ERR(cdev, 2714 "qed roce ll2 mac filter set: failed to add mac filter\n"); 2715 2716 return rc; 2717 } 2718 2719 static int qed_roce_ll2_start(struct qed_dev *cdev, 2720 struct qed_roce_ll2_params *params) 2721 { 2722 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2723 struct qed_roce_ll2_info *roce_ll2; 2724 struct qed_ll2_info ll2_params; 2725 int rc; 2726 2727 if (!params) { 2728 DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n"); 2729 return -EINVAL; 2730 } 2731 if (!params->cbs.tx_cb || !params->cbs.rx_cb) { 2732 DP_ERR(cdev, 2733 "qed roce ll2 start: failed due to NULL tx/rx. 
tx_cb=%p, rx_cb=%p\n", 2734 params->cbs.tx_cb, params->cbs.rx_cb); 2735 return -EINVAL; 2736 } 2737 if (!is_valid_ether_addr(params->mac_address)) { 2738 DP_ERR(cdev, 2739 "qed roce ll2 start: failed due to invalid Ethernet address %pM\n", 2740 params->mac_address); 2741 return -EINVAL; 2742 } 2743 2744 /* Initialize */ 2745 roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC); 2746 if (!roce_ll2) { 2747 DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n"); 2748 return -ENOMEM; 2749 } 2750 memset(roce_ll2, 0, sizeof(*roce_ll2)); 2751 roce_ll2->handle = QED_LL2_UNUSED_HANDLE; 2752 roce_ll2->cbs = params->cbs; 2753 roce_ll2->cb_cookie = params->cb_cookie; 2754 mutex_init(&roce_ll2->lock); 2755 2756 memset(&ll2_params, 0, sizeof(ll2_params)); 2757 ll2_params.conn_type = QED_LL2_TYPE_ROCE; 2758 ll2_params.mtu = params->mtu; 2759 ll2_params.rx_drop_ttl0_flg = true; 2760 ll2_params.rx_vlan_removal_en = false; 2761 ll2_params.tx_dest = CORE_TX_DEST_NW; 2762 ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET; 2763 ll2_params.ai_err_no_buf = LL2_DROP_PACKET; 2764 ll2_params.gsi_enable = true; 2765 2766 rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params, 2767 params->max_rx_buffers, 2768 params->max_tx_buffers, 2769 &roce_ll2->handle); 2770 if (rc) { 2771 DP_ERR(cdev, 2772 "qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n", 2773 rc); 2774 goto err; 2775 } 2776 2777 rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev), 2778 roce_ll2->handle); 2779 if (rc) { 2780 DP_ERR(cdev, 2781 "qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n", 2782 rc); 2783 goto err1; 2784 } 2785 2786 hwfn->ll2 = roce_ll2; 2787 2788 rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address); 2789 if (rc) { 2790 hwfn->ll2 = NULL; 2791 goto err2; 2792 } 2793 ether_addr_copy(roce_ll2->mac_address, params->mac_address); 2794 2795 return 0; 2796 2797 err2: 2798 qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle); 2799 err1: 2800 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle); 2801 err: 2802 kfree(roce_ll2); 2803 return rc; 2804 } 2805 2806 static int qed_roce_ll2_stop(struct qed_dev *cdev) 2807 { 2808 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2809 struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2; 2810 int rc; 2811 2812 if (!cdev) { 2813 DP_ERR(cdev, "qed roce ll2 stop: invalid cdev\n"); 2814 return -EINVAL; 2815 } 2816 2817 if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) { 2818 DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n"); 2819 return -EINVAL; 2820 } 2821 2822 /* remove LL2 MAC address filter */ 2823 rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL); 2824 eth_zero_addr(roce_ll2->mac_address); 2825 2826 rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), 2827 roce_ll2->handle); 2828 if (rc) 2829 DP_ERR(cdev, 2830 "qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n", 2831 rc); 2832 2833 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle); 2834 2835 roce_ll2->handle = QED_LL2_UNUSED_HANDLE; 2836 2837 kfree(roce_ll2); 2838 2839 return rc; 2840 } 2841 2842 static int qed_roce_ll2_tx(struct qed_dev *cdev, 2843 struct qed_roce_ll2_packet *pkt, 2844 struct qed_roce_ll2_tx_params *params) 2845 { 2846 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2847 struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2; 2848 enum qed_ll2_roce_flavor_type qed_roce_flavor; 2849 u8 flags = 0; 2850 int rc; 2851 int i; 2852 2853 if (!cdev || !pkt || !params) { 2854 DP_ERR(cdev, 2855 "roce 
ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n", 2856 cdev, pkt, params); 2857 return -EINVAL; 2858 } 2859 2860 qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? QED_LL2_ROCE 2861 : QED_LL2_RROCE; 2862 2863 if (pkt->roce_mode == ROCE_V2_IPV4) 2864 flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT); 2865 2866 /* Tx header */ 2867 rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle, 2868 1 + pkt->n_seg, 0, flags, 0, 2869 qed_roce_flavor, pkt->header.baddr, 2870 pkt->header.len, pkt, 1); 2871 if (rc) { 2872 DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc); 2873 return QED_ROCE_TX_HEAD_FAILURE; 2874 } 2875 2876 /* Tx payload */ 2877 for (i = 0; i < pkt->n_seg; i++) { 2878 rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev), 2879 roce_ll2->handle, 2880 pkt->payload[i].baddr, 2881 pkt->payload[i].len); 2882 if (rc) { 2883 /* If failed not much to do here, partial packet has 2884 * been posted * we can't free memory, will need to wait 2885 * for completion 2886 */ 2887 DP_ERR(cdev, 2888 "roce ll2 tx: payload failed (rc=%d)\n", rc); 2889 return QED_ROCE_TX_FRAG_FAILURE; 2890 } 2891 } 2892 2893 return 0; 2894 } 2895 2896 static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev, 2897 struct qed_roce_ll2_buffer *buf, 2898 u64 cookie, u8 notify_fw) 2899 { 2900 return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), 2901 QED_LEADING_HWFN(cdev)->ll2->handle, 2902 buf->baddr, buf->len, 2903 (void *)(uintptr_t)cookie, notify_fw); 2904 } 2905 2906 static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats) 2907 { 2908 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2909 struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2; 2910 2911 return qed_ll2_get_stats(QED_LEADING_HWFN(cdev), 2912 roce_ll2->handle, stats); 2913 } 2914 2915 static const struct qed_rdma_ops qed_rdma_ops_pass = { 2916 .common = &qed_common_ops_pass, 2917 .fill_dev_info = &qed_fill_rdma_dev_info, 2918 .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx, 2919 .rdma_init = &qed_rdma_init, 2920 .rdma_add_user = &qed_rdma_add_user, 2921 .rdma_remove_user = &qed_rdma_remove_user, 2922 .rdma_stop = &qed_rdma_stop, 2923 .rdma_query_port = &qed_rdma_query_port, 2924 .rdma_query_device = &qed_rdma_query_device, 2925 .rdma_get_start_sb = &qed_rdma_get_sb_start, 2926 .rdma_get_rdma_int = &qed_rdma_get_int, 2927 .rdma_set_rdma_int = &qed_rdma_set_int, 2928 .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix, 2929 .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update, 2930 .rdma_alloc_pd = &qed_rdma_alloc_pd, 2931 .rdma_dealloc_pd = &qed_rdma_free_pd, 2932 .rdma_create_cq = &qed_rdma_create_cq, 2933 .rdma_destroy_cq = &qed_rdma_destroy_cq, 2934 .rdma_create_qp = &qed_rdma_create_qp, 2935 .rdma_modify_qp = &qed_rdma_modify_qp, 2936 .rdma_query_qp = &qed_rdma_query_qp, 2937 .rdma_destroy_qp = &qed_rdma_destroy_qp, 2938 .rdma_alloc_tid = &qed_rdma_alloc_tid, 2939 .rdma_free_tid = &qed_rdma_free_tid, 2940 .rdma_register_tid = &qed_rdma_register_tid, 2941 .rdma_deregister_tid = &qed_rdma_deregister_tid, 2942 .roce_ll2_start = &qed_roce_ll2_start, 2943 .roce_ll2_stop = &qed_roce_ll2_stop, 2944 .roce_ll2_tx = &qed_roce_ll2_tx, 2945 .roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer, 2946 .roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter, 2947 .roce_ll2_stats = &qed_roce_ll2_stats, 2948 }; 2949 2950 const struct qed_rdma_ops *qed_get_rdma_ops(void) 2951 { 2952 return &qed_rdma_ops_pass; 2953 } 2954 EXPORT_SYMBOL(qed_get_rdma_ops); 2955
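/* Usage sketch (illustrative only, not part of the driver): a RoCE
 * upper-layer driver is expected to obtain this ops table through
 * qed_get_rdma_ops() and then go through rdma_init()/rdma_add_user()
 * before creating CQs and QPs.  The function and variable names below
 * are hypothetical placeholders, not symbols exported by qed:
 *
 *	static int example_attach(struct qed_dev *cdev)
 *	{
 *		const struct qed_rdma_ops *ops = qed_get_rdma_ops();
 *		void *rdma_cxt;
 *
 *		if (!ops)
 *			return -ENODEV;
 *
 *		rdma_cxt = ops->rdma_get_rdma_ctx(cdev);
 *		if (!rdma_cxt)
 *			return -ENODEV;
 *
 *		return 0;
 *	}
 */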