1 /* QLogic qed NIC Driver 2 * Copyright (c) 2015-2017 QLogic Corporation 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and /or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 #include <linux/if_ether.h> 33 #include <linux/if_vlan.h> 34 #include <linux/ip.h> 35 #include <linux/ipv6.h> 36 #include <linux/spinlock.h> 37 #include <linux/tcp.h> 38 #include "qed_cxt.h" 39 #include "qed_hw.h" 40 #include "qed_ll2.h" 41 #include "qed_rdma.h" 42 #include "qed_reg_addr.h" 43 #include "qed_sp.h" 44 #include "qed_ooo.h" 45 46 #define QED_IWARP_ORD_DEFAULT 32 47 #define QED_IWARP_IRD_DEFAULT 32 48 #define QED_IWARP_MAX_FW_MSS 4120 49 50 #define QED_EP_SIG 0xecabcdef 51 52 struct mpa_v2_hdr { 53 __be16 ird; 54 __be16 ord; 55 }; 56 57 #define MPA_V2_PEER2PEER_MODEL 0x8000 58 #define MPA_V2_SEND_RTR 0x4000 /* on ird */ 59 #define MPA_V2_READ_RTR 0x4000 /* on ord */ 60 #define MPA_V2_WRITE_RTR 0x8000 61 #define MPA_V2_IRD_ORD_MASK 0x3FFF 62 63 #define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED) 64 65 #define QED_IWARP_INVALID_TCP_CID 0xffffffff 66 #define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024) 67 #define QED_IWARP_RCV_WND_SIZE_MIN (0xffff) 68 #define TIMESTAMP_HEADER_SIZE (12) 69 #define QED_IWARP_MAX_FIN_RT_DEFAULT (2) 70 71 #define QED_IWARP_TS_EN BIT(0) 72 #define QED_IWARP_DA_EN BIT(1) 73 #define QED_IWARP_PARAM_CRC_NEEDED (1) 74 #define QED_IWARP_PARAM_P2P (1) 75 76 #define QED_IWARP_DEF_MAX_RT_TIME (0) 77 #define QED_IWARP_DEF_CWND_FACTOR (4) 78 #define QED_IWARP_DEF_KA_MAX_PROBE_CNT (5) 79 #define QED_IWARP_DEF_KA_TIMEOUT (1200000) /* 20 min */ 80 #define QED_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */ 81 82 static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, 83 u8 fw_event_code, u16 echo, 84 union event_ring_data *data, 85 u8 fw_return_code); 86 87 /* Override devinfo with iWARP specific values */ 88 void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn) 89 { 90 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; 91 92 dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE; 93 dev->max_qp = min_t(u32, 94 IWARP_MAX_QPS, 95 p_hwfn->p_rdma_info->num_qps) - 96 QED_IWARP_PREALLOC_CNT; 97 98 dev->max_cq = dev->max_qp; 99 100 dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT; 101 dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT; 102 } 103 104 void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 105 { 106 p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP; 107 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1); 108 p_hwfn->b_rdma_enabled_in_prs = true; 109 } 110 111 /* We have two cid maps, one for tcp which should be used only from passive 112 * syn processing and replacing a pre-allocated ep in the list. The second 113 * for active tcp and for QPs. 114 */ 115 static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid) 116 { 117 cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); 118 119 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 120 121 if (cid < QED_IWARP_PREALLOC_CNT) 122 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 123 cid); 124 else 125 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); 126 127 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 128 } 129 130 void 131 qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn, 132 struct iwarp_init_func_ramrod_data *p_ramrod) 133 { 134 p_ramrod->iwarp.ll2_ooo_q_index = 135 RESC_START(p_hwfn, QED_LL2_QUEUE) + 136 p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle; 137 138 p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT; 139 140 return; 141 } 142 143 static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid) 144 { 145 int rc; 146 147 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 148 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); 149 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 150 if (rc) { 151 DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n"); 152 return rc; 153 } 154 *cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); 155 156 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid); 157 if (rc) 158 qed_iwarp_cid_cleaned(p_hwfn, *cid); 159 160 return rc; 161 } 162 163 static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid) 164 { 165 cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); 166 167 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 168 qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid); 169 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 170 } 171 172 /* This function allocates a cid for passive tcp (called from syn receive) 173 * the reason it's separate from the regular cid allocation is because it 174 * is assured that these cids already have ilt allocated. They are preallocated 175 * to ensure that we won't need to allocate memory during syn processing 176 */ 177 static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid) 178 { 179 int rc; 180 181 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 182 183 rc = qed_rdma_bmap_alloc_id(p_hwfn, 184 &p_hwfn->p_rdma_info->tcp_cid_map, cid); 185 186 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 187 188 if (rc) { 189 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 190 "can't allocate iwarp tcp cid max-count=%d\n", 191 p_hwfn->p_rdma_info->tcp_cid_map.max_count); 192 193 *cid = QED_IWARP_INVALID_TCP_CID; 194 return rc; 195 } 196 197 *cid += qed_cxt_get_proto_cid_start(p_hwfn, 198 p_hwfn->p_rdma_info->proto); 199 return 0; 200 } 201 202 int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn, 203 struct qed_rdma_qp *qp, 204 struct qed_rdma_create_qp_out_params *out_params) 205 { 206 struct iwarp_create_qp_ramrod_data *p_ramrod; 207 struct qed_sp_init_data init_data; 208 struct qed_spq_entry *p_ent; 209 u16 physical_queue; 210 u32 cid; 211 int rc; 212 213 qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 214 IWARP_SHARED_QUEUE_PAGE_SIZE, 215 &qp->shared_queue_phys_addr, 216 GFP_KERNEL); 217 if (!qp->shared_queue) 218 return -ENOMEM; 219 220 out_params->sq_pbl_virt = (u8 *)qp->shared_queue + 221 IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET; 222 out_params->sq_pbl_phys = qp->shared_queue_phys_addr + 223 IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET; 224 out_params->rq_pbl_virt = (u8 *)qp->shared_queue + 225 IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET; 226 out_params->rq_pbl_phys = qp->shared_queue_phys_addr + 227 IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET; 228 229 rc = qed_iwarp_alloc_cid(p_hwfn, &cid); 230 if (rc) 231 goto err1; 232 233 qp->icid = (u16)cid; 234 235 memset(&init_data, 0, sizeof(init_data)); 236 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 237 init_data.cid = qp->icid; 238 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 239 240 rc = qed_sp_init_request(p_hwfn, &p_ent, 241 IWARP_RAMROD_CMD_ID_CREATE_QP, 242 PROTOCOLID_IWARP, &init_data); 243 if (rc) 244 goto err2; 245 246 p_ramrod = &p_ent->ramrod.iwarp_create_qp; 247 248 SET_FIELD(p_ramrod->flags, 249 IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN, 250 qp->fmr_and_reserved_lkey); 251 252 SET_FIELD(p_ramrod->flags, 253 IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all); 254 255 SET_FIELD(p_ramrod->flags, 256 IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN, 257 qp->incoming_rdma_read_en); 258 259 SET_FIELD(p_ramrod->flags, 260 IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN, 261 qp->incoming_rdma_write_en); 262 263 SET_FIELD(p_ramrod->flags, 264 IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN, 265 qp->incoming_atomic_en); 266 267 SET_FIELD(p_ramrod->flags, 268 IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq); 269 270 p_ramrod->pd = qp->pd; 271 p_ramrod->sq_num_pages = qp->sq_num_pages; 272 p_ramrod->rq_num_pages = qp->rq_num_pages; 273 274 p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id); 275 p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid); 276 p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); 277 p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); 278 279 p_ramrod->cq_cid_for_sq = 280 cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id); 281 p_ramrod->cq_cid_for_rq = 282 cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id); 283 284 p_ramrod->dpi = cpu_to_le16(qp->dpi); 285 286 physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); 287 p_ramrod->physical_q0 = cpu_to_le16(physical_queue); 288 physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK); 289 p_ramrod->physical_q1 = cpu_to_le16(physical_queue); 290 291 rc = qed_spq_post(p_hwfn, p_ent, NULL); 292 if (rc) 293 goto err2; 294 295 return rc; 296 297 err2: 298 qed_iwarp_cid_cleaned(p_hwfn, cid); 299 err1: 300 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 301 IWARP_SHARED_QUEUE_PAGE_SIZE, 302 qp->shared_queue, qp->shared_queue_phys_addr); 303 304 return rc; 305 } 306 307 static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) 308 { 309 struct iwarp_modify_qp_ramrod_data *p_ramrod; 310 struct qed_sp_init_data init_data; 311 struct qed_spq_entry *p_ent; 312 int rc; 313 314 /* Get SPQ entry */ 315 memset(&init_data, 0, sizeof(init_data)); 316 init_data.cid = qp->icid; 317 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 318 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 319 320 rc = qed_sp_init_request(p_hwfn, &p_ent, 321 IWARP_RAMROD_CMD_ID_MODIFY_QP, 322 p_hwfn->p_rdma_info->proto, &init_data); 323 if (rc) 324 return rc; 325 326 p_ramrod = &p_ent->ramrod.iwarp_modify_qp; 327 SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, 328 0x1); 329 if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING) 330 p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING; 331 else 332 p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR; 333 334 rc = qed_spq_post(p_hwfn, p_ent, NULL); 335 336 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x)rc=%d\n", qp->icid, rc); 337 338 return rc; 339 } 340 341 enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state) 342 { 343 switch (state) { 344 case QED_ROCE_QP_STATE_RESET: 345 case QED_ROCE_QP_STATE_INIT: 346 case QED_ROCE_QP_STATE_RTR: 347 return QED_IWARP_QP_STATE_IDLE; 348 case QED_ROCE_QP_STATE_RTS: 349 return QED_IWARP_QP_STATE_RTS; 350 case QED_ROCE_QP_STATE_SQD: 351 return QED_IWARP_QP_STATE_CLOSING; 352 case QED_ROCE_QP_STATE_ERR: 353 return QED_IWARP_QP_STATE_ERROR; 354 case QED_ROCE_QP_STATE_SQE: 355 return QED_IWARP_QP_STATE_TERMINATE; 356 default: 357 return QED_IWARP_QP_STATE_ERROR; 358 } 359 } 360 361 static enum qed_roce_qp_state 362 qed_iwarp2roce_state(enum qed_iwarp_qp_state state) 363 { 364 switch (state) { 365 case QED_IWARP_QP_STATE_IDLE: 366 return QED_ROCE_QP_STATE_INIT; 367 case QED_IWARP_QP_STATE_RTS: 368 return QED_ROCE_QP_STATE_RTS; 369 case QED_IWARP_QP_STATE_TERMINATE: 370 return QED_ROCE_QP_STATE_SQE; 371 case QED_IWARP_QP_STATE_CLOSING: 372 return QED_ROCE_QP_STATE_SQD; 373 case QED_IWARP_QP_STATE_ERROR: 374 return QED_ROCE_QP_STATE_ERR; 375 default: 376 return QED_ROCE_QP_STATE_ERR; 377 } 378 } 379 380 const char *iwarp_state_names[] = { 381 "IDLE", 382 "RTS", 383 "TERMINATE", 384 "CLOSING", 385 "ERROR", 386 }; 387 388 int 389 qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn, 390 struct qed_rdma_qp *qp, 391 enum qed_iwarp_qp_state new_state, bool internal) 392 { 393 enum qed_iwarp_qp_state prev_iw_state; 394 bool modify_fw = false; 395 int rc = 0; 396 397 /* modify QP can be called from upper-layer or as a result of async 398 * RST/FIN... therefore need to protect 399 */ 400 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock); 401 prev_iw_state = qp->iwarp_state; 402 403 if (prev_iw_state == new_state) { 404 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock); 405 return 0; 406 } 407 408 switch (prev_iw_state) { 409 case QED_IWARP_QP_STATE_IDLE: 410 switch (new_state) { 411 case QED_IWARP_QP_STATE_RTS: 412 qp->iwarp_state = QED_IWARP_QP_STATE_RTS; 413 break; 414 case QED_IWARP_QP_STATE_ERROR: 415 qp->iwarp_state = QED_IWARP_QP_STATE_ERROR; 416 if (!internal) 417 modify_fw = true; 418 break; 419 default: 420 break; 421 } 422 break; 423 case QED_IWARP_QP_STATE_RTS: 424 switch (new_state) { 425 case QED_IWARP_QP_STATE_CLOSING: 426 if (!internal) 427 modify_fw = true; 428 429 qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING; 430 break; 431 case QED_IWARP_QP_STATE_ERROR: 432 if (!internal) 433 modify_fw = true; 434 qp->iwarp_state = QED_IWARP_QP_STATE_ERROR; 435 break; 436 default: 437 break; 438 } 439 break; 440 case QED_IWARP_QP_STATE_ERROR: 441 switch (new_state) { 442 case QED_IWARP_QP_STATE_IDLE: 443 444 qp->iwarp_state = new_state; 445 break; 446 case QED_IWARP_QP_STATE_CLOSING: 447 /* could happen due to race... do nothing.... */ 448 break; 449 default: 450 rc = -EINVAL; 451 } 452 break; 453 case QED_IWARP_QP_STATE_TERMINATE: 454 case QED_IWARP_QP_STATE_CLOSING: 455 qp->iwarp_state = new_state; 456 break; 457 default: 458 break; 459 } 460 461 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n", 462 qp->icid, 463 iwarp_state_names[prev_iw_state], 464 iwarp_state_names[qp->iwarp_state], 465 internal ? "internal" : ""); 466 467 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock); 468 469 if (modify_fw) 470 rc = qed_iwarp_modify_fw(p_hwfn, qp); 471 472 return rc; 473 } 474 475 int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) 476 { 477 struct qed_sp_init_data init_data; 478 struct qed_spq_entry *p_ent; 479 int rc; 480 481 /* Get SPQ entry */ 482 memset(&init_data, 0, sizeof(init_data)); 483 init_data.cid = qp->icid; 484 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 485 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 486 487 rc = qed_sp_init_request(p_hwfn, &p_ent, 488 IWARP_RAMROD_CMD_ID_DESTROY_QP, 489 p_hwfn->p_rdma_info->proto, &init_data); 490 if (rc) 491 return rc; 492 493 rc = qed_spq_post(p_hwfn, p_ent, NULL); 494 495 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc); 496 497 return rc; 498 } 499 500 static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn, 501 struct qed_iwarp_ep *ep, 502 bool remove_from_active_list) 503 { 504 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 505 sizeof(*ep->ep_buffer_virt), 506 ep->ep_buffer_virt, ep->ep_buffer_phys); 507 508 if (remove_from_active_list) { 509 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 510 list_del(&ep->list_entry); 511 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 512 } 513 514 if (ep->qp) 515 ep->qp->ep = NULL; 516 517 kfree(ep); 518 } 519 520 int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) 521 { 522 struct qed_iwarp_ep *ep = qp->ep; 523 int wait_count = 0; 524 int rc = 0; 525 526 if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) { 527 rc = qed_iwarp_modify_qp(p_hwfn, qp, 528 QED_IWARP_QP_STATE_ERROR, false); 529 if (rc) 530 return rc; 531 } 532 533 /* Make sure ep is closed before returning and freeing memory. */ 534 if (ep) { 535 while (ep->state != QED_IWARP_EP_CLOSED && wait_count++ < 200) 536 msleep(100); 537 538 if (ep->state != QED_IWARP_EP_CLOSED) 539 DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n", 540 ep->state); 541 542 qed_iwarp_destroy_ep(p_hwfn, ep, false); 543 } 544 545 rc = qed_iwarp_fw_destroy(p_hwfn, qp); 546 547 if (qp->shared_queue) 548 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 549 IWARP_SHARED_QUEUE_PAGE_SIZE, 550 qp->shared_queue, qp->shared_queue_phys_addr); 551 552 return rc; 553 } 554 555 static int 556 qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out) 557 { 558 struct qed_iwarp_ep *ep; 559 int rc; 560 561 ep = kzalloc(sizeof(*ep), GFP_KERNEL); 562 if (!ep) 563 return -ENOMEM; 564 565 ep->state = QED_IWARP_EP_INIT; 566 567 ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 568 sizeof(*ep->ep_buffer_virt), 569 &ep->ep_buffer_phys, 570 GFP_KERNEL); 571 if (!ep->ep_buffer_virt) { 572 rc = -ENOMEM; 573 goto err; 574 } 575 576 ep->sig = QED_EP_SIG; 577 578 *ep_out = ep; 579 580 return 0; 581 582 err: 583 kfree(ep); 584 return rc; 585 } 586 587 static void 588 qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn, 589 struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod) 590 { 591 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n", 592 p_tcp_ramrod->tcp.local_mac_addr_lo, 593 p_tcp_ramrod->tcp.local_mac_addr_mid, 594 p_tcp_ramrod->tcp.local_mac_addr_hi, 595 p_tcp_ramrod->tcp.remote_mac_addr_lo, 596 p_tcp_ramrod->tcp.remote_mac_addr_mid, 597 p_tcp_ramrod->tcp.remote_mac_addr_hi); 598 599 if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) { 600 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 601 "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n", 602 p_tcp_ramrod->tcp.local_ip, 603 p_tcp_ramrod->tcp.local_port, 604 p_tcp_ramrod->tcp.remote_ip, 605 p_tcp_ramrod->tcp.remote_port, 606 p_tcp_ramrod->tcp.vlan_id); 607 } else { 608 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 609 "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n", 610 p_tcp_ramrod->tcp.local_ip, 611 p_tcp_ramrod->tcp.local_port, 612 p_tcp_ramrod->tcp.remote_ip, 613 p_tcp_ramrod->tcp.remote_port, 614 p_tcp_ramrod->tcp.vlan_id); 615 } 616 617 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 618 "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n", 619 p_tcp_ramrod->tcp.flow_label, 620 p_tcp_ramrod->tcp.ttl, 621 p_tcp_ramrod->tcp.tos_or_tc, 622 p_tcp_ramrod->tcp.mss, 623 p_tcp_ramrod->tcp.rcv_wnd_scale, 624 p_tcp_ramrod->tcp.connect_mode, 625 p_tcp_ramrod->tcp.flags); 626 627 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n", 628 p_tcp_ramrod->tcp.syn_ip_payload_length, 629 p_tcp_ramrod->tcp.syn_phy_addr_lo, 630 p_tcp_ramrod->tcp.syn_phy_addr_hi); 631 } 632 633 static int 634 qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) 635 { 636 struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; 637 struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod; 638 struct tcp_offload_params_opt2 *tcp; 639 struct qed_sp_init_data init_data; 640 struct qed_spq_entry *p_ent; 641 dma_addr_t async_output_phys; 642 dma_addr_t in_pdata_phys; 643 u16 physical_q; 644 u8 tcp_flags; 645 int rc; 646 int i; 647 648 memset(&init_data, 0, sizeof(init_data)); 649 init_data.cid = ep->tcp_cid; 650 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 651 if (ep->connect_mode == TCP_CONNECT_PASSIVE) 652 init_data.comp_mode = QED_SPQ_MODE_CB; 653 else 654 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 655 656 rc = qed_sp_init_request(p_hwfn, &p_ent, 657 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD, 658 PROTOCOLID_IWARP, &init_data); 659 if (rc) 660 return rc; 661 662 p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload; 663 664 in_pdata_phys = ep->ep_buffer_phys + 665 offsetof(struct qed_iwarp_ep_memory, in_pdata); 666 DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr, 667 in_pdata_phys); 668 669 p_tcp_ramrod->iwarp.incoming_ulp_buffer.len = 670 cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata)); 671 672 async_output_phys = ep->ep_buffer_phys + 673 offsetof(struct qed_iwarp_ep_memory, async_output); 674 DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf, 675 async_output_phys); 676 677 p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep)); 678 p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep)); 679 680 physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); 681 p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q); 682 physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK); 683 p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q); 684 p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev; 685 686 tcp = &p_tcp_ramrod->tcp; 687 qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi, 688 &tcp->remote_mac_addr_mid, 689 &tcp->remote_mac_addr_lo, ep->remote_mac_addr); 690 qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid, 691 &tcp->local_mac_addr_lo, ep->local_mac_addr); 692 693 tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan); 694 695 tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags; 696 tcp->flags = 0; 697 SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN, 698 !!(tcp_flags & QED_IWARP_TS_EN)); 699 700 SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN, 701 !!(tcp_flags & QED_IWARP_DA_EN)); 702 703 tcp->ip_version = ep->cm_info.ip_version; 704 705 for (i = 0; i < 4; i++) { 706 tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]); 707 tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]); 708 } 709 710 tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port); 711 tcp->local_port = cpu_to_le16(ep->cm_info.local_port); 712 tcp->mss = cpu_to_le16(ep->mss); 713 tcp->flow_label = 0; 714 tcp->ttl = 0x40; 715 tcp->tos_or_tc = 0; 716 717 tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME; 718 tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss; 719 tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT; 720 tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT; 721 tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL; 722 723 tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale; 724 tcp->connect_mode = ep->connect_mode; 725 726 if (ep->connect_mode == TCP_CONNECT_PASSIVE) { 727 tcp->syn_ip_payload_length = 728 cpu_to_le16(ep->syn_ip_payload_length); 729 tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr); 730 tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr); 731 } 732 733 qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod); 734 735 rc = qed_spq_post(p_hwfn, p_ent, NULL); 736 737 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 738 "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc); 739 740 return rc; 741 } 742 743 static void 744 qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) 745 { 746 struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; 747 struct qed_iwarp_cm_event_params params; 748 struct mpa_v2_hdr *mpa_v2; 749 union async_output *async_data; 750 u16 mpa_ord, mpa_ird; 751 u8 mpa_hdr_size = 0; 752 u8 mpa_rev; 753 754 async_data = &ep->ep_buffer_virt->async_output; 755 756 mpa_rev = async_data->mpa_request.mpa_handshake_mode; 757 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 758 "private_data_len=%x handshake_mode=%x private_data=(%x)\n", 759 async_data->mpa_request.ulp_data_len, 760 mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata))); 761 762 if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) { 763 /* Read ord/ird values from private data buffer */ 764 mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata; 765 mpa_hdr_size = sizeof(*mpa_v2); 766 767 mpa_ord = ntohs(mpa_v2->ord); 768 mpa_ird = ntohs(mpa_v2->ird); 769 770 /* Temprary store in cm_info incoming ord/ird requested, later 771 * replace with negotiated value during accept 772 */ 773 ep->cm_info.ord = (u8)min_t(u16, 774 (mpa_ord & MPA_V2_IRD_ORD_MASK), 775 QED_IWARP_ORD_DEFAULT); 776 777 ep->cm_info.ird = (u8)min_t(u16, 778 (mpa_ird & MPA_V2_IRD_ORD_MASK), 779 QED_IWARP_IRD_DEFAULT); 780 781 /* Peer2Peer negotiation */ 782 ep->rtr_type = MPA_RTR_TYPE_NONE; 783 if (mpa_ird & MPA_V2_PEER2PEER_MODEL) { 784 if (mpa_ord & MPA_V2_WRITE_RTR) 785 ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE; 786 787 if (mpa_ord & MPA_V2_READ_RTR) 788 ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ; 789 790 if (mpa_ird & MPA_V2_SEND_RTR) 791 ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND; 792 793 ep->rtr_type &= iwarp_info->rtr_type; 794 795 /* if we're left with no match send our capabilities */ 796 if (ep->rtr_type == MPA_RTR_TYPE_NONE) 797 ep->rtr_type = iwarp_info->rtr_type; 798 } 799 800 ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED; 801 } else { 802 ep->cm_info.ord = QED_IWARP_ORD_DEFAULT; 803 ep->cm_info.ird = QED_IWARP_IRD_DEFAULT; 804 ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC; 805 } 806 807 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 808 "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n", 809 mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type, 810 async_data->mpa_request.ulp_data_len, mpa_hdr_size); 811 812 /* Strip mpa v2 hdr from private data before sending to upper layer */ 813 ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size; 814 815 ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len - 816 mpa_hdr_size; 817 818 params.event = QED_IWARP_EVENT_MPA_REQUEST; 819 params.cm_info = &ep->cm_info; 820 params.ep_context = ep; 821 params.status = 0; 822 823 ep->state = QED_IWARP_EP_MPA_REQ_RCVD; 824 ep->event_cb(ep->cb_context, ¶ms); 825 } 826 827 static int 828 qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) 829 { 830 struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod; 831 struct qed_iwarp_info *iwarp_info; 832 struct qed_sp_init_data init_data; 833 dma_addr_t async_output_phys; 834 struct qed_spq_entry *p_ent; 835 dma_addr_t out_pdata_phys; 836 dma_addr_t in_pdata_phys; 837 struct qed_rdma_qp *qp; 838 bool reject; 839 int rc; 840 841 if (!ep) 842 return -EINVAL; 843 844 qp = ep->qp; 845 reject = !qp; 846 847 memset(&init_data, 0, sizeof(init_data)); 848 init_data.cid = reject ? ep->tcp_cid : qp->icid; 849 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 850 851 if (ep->connect_mode == TCP_CONNECT_ACTIVE) 852 init_data.comp_mode = QED_SPQ_MODE_CB; 853 else 854 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 855 856 rc = qed_sp_init_request(p_hwfn, &p_ent, 857 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD, 858 PROTOCOLID_IWARP, &init_data); 859 if (rc) 860 return rc; 861 862 p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload; 863 out_pdata_phys = ep->ep_buffer_phys + 864 offsetof(struct qed_iwarp_ep_memory, out_pdata); 865 DMA_REGPAIR_LE(p_mpa_ramrod->common.outgoing_ulp_buffer.addr, 866 out_pdata_phys); 867 p_mpa_ramrod->common.outgoing_ulp_buffer.len = 868 ep->cm_info.private_data_len; 869 p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed; 870 871 p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord; 872 p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird; 873 874 p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid; 875 876 in_pdata_phys = ep->ep_buffer_phys + 877 offsetof(struct qed_iwarp_ep_memory, in_pdata); 878 p_mpa_ramrod->tcp_connect_side = ep->connect_mode; 879 DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr, 880 in_pdata_phys); 881 p_mpa_ramrod->incoming_ulp_buffer.len = 882 cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata)); 883 async_output_phys = ep->ep_buffer_phys + 884 offsetof(struct qed_iwarp_ep_memory, async_output); 885 DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf, 886 async_output_phys); 887 p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep)); 888 p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep)); 889 890 if (!reject) { 891 DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr, 892 qp->shared_queue_phys_addr); 893 p_mpa_ramrod->stats_counter_id = 894 RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue; 895 } else { 896 p_mpa_ramrod->common.reject = 1; 897 } 898 899 iwarp_info = &p_hwfn->p_rdma_info->iwarp; 900 p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size; 901 p_mpa_ramrod->mode = ep->mpa_rev; 902 SET_FIELD(p_mpa_ramrod->rtr_pref, 903 IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type); 904 905 ep->state = QED_IWARP_EP_MPA_OFFLOADED; 906 rc = qed_spq_post(p_hwfn, p_ent, NULL); 907 if (!reject) 908 ep->cid = qp->icid; /* Now they're migrated. */ 909 910 DP_VERBOSE(p_hwfn, 911 QED_MSG_RDMA, 912 "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n", 913 reject ? 0xffff : qp->icid, 914 ep->tcp_cid, 915 rc, 916 ep->cm_info.ird, 917 ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject); 918 return rc; 919 } 920 921 static void 922 qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) 923 { 924 ep->state = QED_IWARP_EP_INIT; 925 if (ep->qp) 926 ep->qp->ep = NULL; 927 ep->qp = NULL; 928 memset(&ep->cm_info, 0, sizeof(ep->cm_info)); 929 930 if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) { 931 /* We don't care about the return code, it's ok if tcp_cid 932 * remains invalid...in this case we'll defer allocation 933 */ 934 qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid); 935 } 936 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 937 938 list_del(&ep->list_entry); 939 list_add_tail(&ep->list_entry, 940 &p_hwfn->p_rdma_info->iwarp.ep_free_list); 941 942 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 943 } 944 945 void 946 qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) 947 { 948 struct mpa_v2_hdr *mpa_v2_params; 949 union async_output *async_data; 950 u16 mpa_ird, mpa_ord; 951 u8 mpa_data_size = 0; 952 953 if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) { 954 mpa_v2_params = 955 (struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata); 956 mpa_data_size = sizeof(*mpa_v2_params); 957 mpa_ird = ntohs(mpa_v2_params->ird); 958 mpa_ord = ntohs(mpa_v2_params->ord); 959 960 ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK); 961 ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK); 962 } 963 async_data = &ep->ep_buffer_virt->async_output; 964 965 ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size; 966 ep->cm_info.private_data_len = async_data->mpa_response.ulp_data_len - 967 mpa_data_size; 968 } 969 970 void 971 qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) 972 { 973 struct qed_iwarp_cm_event_params params; 974 975 if (ep->connect_mode == TCP_CONNECT_PASSIVE) { 976 DP_NOTICE(p_hwfn, 977 "MPA reply event not expected on passive side!\n"); 978 return; 979 } 980 981 params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY; 982 983 qed_iwarp_parse_private_data(p_hwfn, ep); 984 985 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 986 "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n", 987 ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird); 988 989 params.cm_info = &ep->cm_info; 990 params.ep_context = ep; 991 params.status = 0; 992 993 ep->mpa_reply_processed = true; 994 995 ep->event_cb(ep->cb_context, ¶ms); 996 } 997 998 #define QED_IWARP_CONNECT_MODE_STRING(ep) \ 999 ((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active" 1000 1001 /* Called as a result of the event: 1002 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE 1003 */ 1004 static void 1005 qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn, 1006 struct qed_iwarp_ep *ep, u8 fw_return_code) 1007 { 1008 struct qed_iwarp_cm_event_params params; 1009 1010 if (ep->connect_mode == TCP_CONNECT_ACTIVE) 1011 params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE; 1012 else 1013 params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE; 1014 1015 if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed) 1016 qed_iwarp_parse_private_data(p_hwfn, ep); 1017 1018 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1019 "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n", 1020 ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird); 1021 1022 params.cm_info = &ep->cm_info; 1023 1024 params.ep_context = ep; 1025 1026 ep->state = QED_IWARP_EP_CLOSED; 1027 1028 switch (fw_return_code) { 1029 case RDMA_RETURN_OK: 1030 ep->qp->max_rd_atomic_req = ep->cm_info.ord; 1031 ep->qp->max_rd_atomic_resp = ep->cm_info.ird; 1032 qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1); 1033 ep->state = QED_IWARP_EP_ESTABLISHED; 1034 params.status = 0; 1035 break; 1036 case IWARP_CONN_ERROR_MPA_TIMEOUT: 1037 DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n", 1038 QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1039 params.status = -EBUSY; 1040 break; 1041 case IWARP_CONN_ERROR_MPA_ERROR_REJECT: 1042 DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n", 1043 QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1044 params.status = -ECONNREFUSED; 1045 break; 1046 case IWARP_CONN_ERROR_MPA_RST: 1047 DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n", 1048 QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid, 1049 ep->tcp_cid); 1050 params.status = -ECONNRESET; 1051 break; 1052 case IWARP_CONN_ERROR_MPA_FIN: 1053 DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n", 1054 QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1055 params.status = -ECONNREFUSED; 1056 break; 1057 case IWARP_CONN_ERROR_MPA_INSUF_IRD: 1058 DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n", 1059 QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1060 params.status = -ECONNREFUSED; 1061 break; 1062 case IWARP_CONN_ERROR_MPA_RTR_MISMATCH: 1063 DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n", 1064 QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1065 params.status = -ECONNREFUSED; 1066 break; 1067 case IWARP_CONN_ERROR_MPA_INVALID_PACKET: 1068 DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n", 1069 QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1070 params.status = -ECONNREFUSED; 1071 break; 1072 case IWARP_CONN_ERROR_MPA_LOCAL_ERROR: 1073 DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n", 1074 QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1075 params.status = -ECONNREFUSED; 1076 break; 1077 case IWARP_CONN_ERROR_MPA_TERMINATE: 1078 DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n", 1079 QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1080 params.status = -ECONNREFUSED; 1081 break; 1082 default: 1083 params.status = -ECONNRESET; 1084 break; 1085 } 1086 1087 ep->event_cb(ep->cb_context, ¶ms); 1088 1089 /* on passive side, if there is no associated QP (REJECT) we need to 1090 * return the ep to the pool, (in the regular case we add an element 1091 * in accept instead of this one. 1092 * In both cases we need to remove it from the ep_list. 1093 */ 1094 if (fw_return_code != RDMA_RETURN_OK) { 1095 ep->tcp_cid = QED_IWARP_INVALID_TCP_CID; 1096 if ((ep->connect_mode == TCP_CONNECT_PASSIVE) && 1097 (!ep->qp)) { /* Rejected */ 1098 qed_iwarp_return_ep(p_hwfn, ep); 1099 } else { 1100 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1101 list_del(&ep->list_entry); 1102 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1103 } 1104 } 1105 } 1106 1107 static void 1108 qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn, 1109 struct qed_iwarp_ep *ep, u8 *mpa_data_size) 1110 { 1111 struct mpa_v2_hdr *mpa_v2_params; 1112 u16 mpa_ird, mpa_ord; 1113 1114 *mpa_data_size = 0; 1115 if (MPA_REV2(ep->mpa_rev)) { 1116 mpa_v2_params = 1117 (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata; 1118 *mpa_data_size = sizeof(*mpa_v2_params); 1119 1120 mpa_ird = (u16)ep->cm_info.ird; 1121 mpa_ord = (u16)ep->cm_info.ord; 1122 1123 if (ep->rtr_type != MPA_RTR_TYPE_NONE) { 1124 mpa_ird |= MPA_V2_PEER2PEER_MODEL; 1125 1126 if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND) 1127 mpa_ird |= MPA_V2_SEND_RTR; 1128 1129 if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE) 1130 mpa_ord |= MPA_V2_WRITE_RTR; 1131 1132 if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) 1133 mpa_ord |= MPA_V2_READ_RTR; 1134 } 1135 1136 mpa_v2_params->ird = htons(mpa_ird); 1137 mpa_v2_params->ord = htons(mpa_ord); 1138 1139 DP_VERBOSE(p_hwfn, 1140 QED_MSG_RDMA, 1141 "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n", 1142 mpa_v2_params->ird, 1143 mpa_v2_params->ord, 1144 *((u32 *)mpa_v2_params), 1145 mpa_ord & MPA_V2_IRD_ORD_MASK, 1146 mpa_ird & MPA_V2_IRD_ORD_MASK, 1147 !!(mpa_ird & MPA_V2_PEER2PEER_MODEL), 1148 !!(mpa_ird & MPA_V2_SEND_RTR), 1149 !!(mpa_ord & MPA_V2_WRITE_RTR), 1150 !!(mpa_ord & MPA_V2_READ_RTR)); 1151 } 1152 } 1153 1154 int qed_iwarp_connect(void *rdma_cxt, 1155 struct qed_iwarp_connect_in *iparams, 1156 struct qed_iwarp_connect_out *oparams) 1157 { 1158 struct qed_hwfn *p_hwfn = rdma_cxt; 1159 struct qed_iwarp_info *iwarp_info; 1160 struct qed_iwarp_ep *ep; 1161 u8 mpa_data_size = 0; 1162 u8 ts_hdr_size = 0; 1163 u32 cid; 1164 int rc; 1165 1166 if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) || 1167 (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) { 1168 DP_NOTICE(p_hwfn, 1169 "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n", 1170 iparams->qp->icid, iparams->cm_info.ord, 1171 iparams->cm_info.ird); 1172 1173 return -EINVAL; 1174 } 1175 1176 iwarp_info = &p_hwfn->p_rdma_info->iwarp; 1177 1178 /* Allocate ep object */ 1179 rc = qed_iwarp_alloc_cid(p_hwfn, &cid); 1180 if (rc) 1181 return rc; 1182 1183 rc = qed_iwarp_create_ep(p_hwfn, &ep); 1184 if (rc) 1185 goto err; 1186 1187 ep->tcp_cid = cid; 1188 1189 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1190 list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list); 1191 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1192 1193 ep->qp = iparams->qp; 1194 ep->qp->ep = ep; 1195 ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr); 1196 ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr); 1197 memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info)); 1198 1199 ep->cm_info.ord = iparams->cm_info.ord; 1200 ep->cm_info.ird = iparams->cm_info.ird; 1201 1202 ep->rtr_type = iwarp_info->rtr_type; 1203 if (!iwarp_info->peer2peer) 1204 ep->rtr_type = MPA_RTR_TYPE_NONE; 1205 1206 if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0)) 1207 ep->cm_info.ord = 1; 1208 1209 ep->mpa_rev = iwarp_info->mpa_rev; 1210 1211 qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); 1212 1213 ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; 1214 ep->cm_info.private_data_len = iparams->cm_info.private_data_len + 1215 mpa_data_size; 1216 1217 memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, 1218 iparams->cm_info.private_data, 1219 iparams->cm_info.private_data_len); 1220 1221 if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN) 1222 ts_hdr_size = TIMESTAMP_HEADER_SIZE; 1223 1224 ep->mss = iparams->mss - ts_hdr_size; 1225 ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss); 1226 1227 ep->event_cb = iparams->event_cb; 1228 ep->cb_context = iparams->cb_context; 1229 ep->connect_mode = TCP_CONNECT_ACTIVE; 1230 1231 oparams->ep_context = ep; 1232 1233 rc = qed_iwarp_tcp_offload(p_hwfn, ep); 1234 1235 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n", 1236 iparams->qp->icid, ep->tcp_cid, rc); 1237 1238 if (rc) { 1239 qed_iwarp_destroy_ep(p_hwfn, ep, true); 1240 goto err; 1241 } 1242 1243 return rc; 1244 err: 1245 qed_iwarp_cid_cleaned(p_hwfn, cid); 1246 1247 return rc; 1248 } 1249 1250 static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn) 1251 { 1252 struct qed_iwarp_ep *ep = NULL; 1253 int rc; 1254 1255 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1256 1257 if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) { 1258 DP_ERR(p_hwfn, "Ep list is empty\n"); 1259 goto out; 1260 } 1261 1262 ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list, 1263 struct qed_iwarp_ep, list_entry); 1264 1265 /* in some cases we could have failed allocating a tcp cid when added 1266 * from accept / failure... retry now..this is not the common case. 1267 */ 1268 if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) { 1269 rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid); 1270 1271 /* if we fail we could look for another entry with a valid 1272 * tcp_cid, but since we don't expect to reach this anyway 1273 * it's not worth the handling 1274 */ 1275 if (rc) { 1276 ep->tcp_cid = QED_IWARP_INVALID_TCP_CID; 1277 ep = NULL; 1278 goto out; 1279 } 1280 } 1281 1282 list_del(&ep->list_entry); 1283 1284 out: 1285 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1286 return ep; 1287 } 1288 1289 #define QED_IWARP_MAX_CID_CLEAN_TIME 100 1290 #define QED_IWARP_MAX_NO_PROGRESS_CNT 5 1291 1292 /* This function waits for all the bits of a bmap to be cleared, as long as 1293 * there is progress ( i.e. the number of bits left to be cleared decreases ) 1294 * the function continues. 1295 */ 1296 static int 1297 qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap) 1298 { 1299 int prev_weight = 0; 1300 int wait_count = 0; 1301 int weight = 0; 1302 1303 weight = bitmap_weight(bmap->bitmap, bmap->max_count); 1304 prev_weight = weight; 1305 1306 while (weight) { 1307 msleep(QED_IWARP_MAX_CID_CLEAN_TIME); 1308 1309 weight = bitmap_weight(bmap->bitmap, bmap->max_count); 1310 1311 if (prev_weight == weight) { 1312 wait_count++; 1313 } else { 1314 prev_weight = weight; 1315 wait_count = 0; 1316 } 1317 1318 if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) { 1319 DP_NOTICE(p_hwfn, 1320 "%s bitmap wait timed out (%d cids pending)\n", 1321 bmap->name, weight); 1322 return -EBUSY; 1323 } 1324 } 1325 return 0; 1326 } 1327 1328 static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn) 1329 { 1330 int rc; 1331 int i; 1332 1333 rc = qed_iwarp_wait_cid_map_cleared(p_hwfn, 1334 &p_hwfn->p_rdma_info->tcp_cid_map); 1335 if (rc) 1336 return rc; 1337 1338 /* Now free the tcp cids from the main cid map */ 1339 for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++) 1340 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i); 1341 1342 /* Now wait for all cids to be completed */ 1343 return qed_iwarp_wait_cid_map_cleared(p_hwfn, 1344 &p_hwfn->p_rdma_info->cid_map); 1345 } 1346 1347 static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn) 1348 { 1349 struct qed_iwarp_ep *ep; 1350 1351 while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) { 1352 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1353 1354 ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list, 1355 struct qed_iwarp_ep, list_entry); 1356 1357 if (!ep) { 1358 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1359 break; 1360 } 1361 list_del(&ep->list_entry); 1362 1363 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1364 1365 if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID) 1366 qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid); 1367 1368 qed_iwarp_destroy_ep(p_hwfn, ep, false); 1369 } 1370 } 1371 1372 static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init) 1373 { 1374 struct qed_iwarp_ep *ep; 1375 int rc = 0; 1376 int count; 1377 u32 cid; 1378 int i; 1379 1380 count = init ? QED_IWARP_PREALLOC_CNT : 1; 1381 for (i = 0; i < count; i++) { 1382 rc = qed_iwarp_create_ep(p_hwfn, &ep); 1383 if (rc) 1384 return rc; 1385 1386 /* During initialization we allocate from the main pool, 1387 * afterwards we allocate only from the tcp_cid. 1388 */ 1389 if (init) { 1390 rc = qed_iwarp_alloc_cid(p_hwfn, &cid); 1391 if (rc) 1392 goto err; 1393 qed_iwarp_set_tcp_cid(p_hwfn, cid); 1394 } else { 1395 /* We don't care about the return code, it's ok if 1396 * tcp_cid remains invalid...in this case we'll 1397 * defer allocation 1398 */ 1399 qed_iwarp_alloc_tcp_cid(p_hwfn, &cid); 1400 } 1401 1402 ep->tcp_cid = cid; 1403 1404 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1405 list_add_tail(&ep->list_entry, 1406 &p_hwfn->p_rdma_info->iwarp.ep_free_list); 1407 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1408 } 1409 1410 return rc; 1411 1412 err: 1413 qed_iwarp_destroy_ep(p_hwfn, ep, false); 1414 1415 return rc; 1416 } 1417 1418 int qed_iwarp_alloc(struct qed_hwfn *p_hwfn) 1419 { 1420 int rc; 1421 1422 /* Allocate bitmap for tcp cid. These are used by passive side 1423 * to ensure it can allocate a tcp cid during dpc that was 1424 * pre-acquired and doesn't require dynamic allocation of ilt 1425 */ 1426 rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1427 QED_IWARP_PREALLOC_CNT, "TCP_CID"); 1428 if (rc) { 1429 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1430 "Failed to allocate tcp cid, rc = %d\n", rc); 1431 return rc; 1432 } 1433 1434 INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list); 1435 spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1436 1437 rc = qed_iwarp_prealloc_ep(p_hwfn, true); 1438 if (rc) 1439 return rc; 1440 1441 return qed_ooo_alloc(p_hwfn); 1442 } 1443 1444 void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn) 1445 { 1446 struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; 1447 1448 qed_ooo_free(p_hwfn); 1449 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1); 1450 kfree(iwarp_info->mpa_bufs); 1451 kfree(iwarp_info->partial_fpdus); 1452 kfree(iwarp_info->mpa_intermediate_buf); 1453 } 1454 1455 int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams) 1456 { 1457 struct qed_hwfn *p_hwfn = rdma_cxt; 1458 struct qed_iwarp_ep *ep; 1459 u8 mpa_data_size = 0; 1460 int rc; 1461 1462 ep = iparams->ep_context; 1463 if (!ep) { 1464 DP_ERR(p_hwfn, "Ep Context receive in accept is NULL\n"); 1465 return -EINVAL; 1466 } 1467 1468 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n", 1469 iparams->qp->icid, ep->tcp_cid); 1470 1471 if ((iparams->ord > QED_IWARP_ORD_DEFAULT) || 1472 (iparams->ird > QED_IWARP_IRD_DEFAULT)) { 1473 DP_VERBOSE(p_hwfn, 1474 QED_MSG_RDMA, 1475 "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n", 1476 iparams->qp->icid, 1477 ep->tcp_cid, iparams->ord, iparams->ord); 1478 return -EINVAL; 1479 } 1480 1481 qed_iwarp_prealloc_ep(p_hwfn, false); 1482 1483 ep->cb_context = iparams->cb_context; 1484 ep->qp = iparams->qp; 1485 ep->qp->ep = ep; 1486 1487 if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) { 1488 /* Negotiate ord/ird: if upperlayer requested ord larger than 1489 * ird advertised by remote, we need to decrease our ord 1490 */ 1491 if (iparams->ord > ep->cm_info.ird) 1492 iparams->ord = ep->cm_info.ird; 1493 1494 if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && 1495 (iparams->ird == 0)) 1496 iparams->ird = 1; 1497 } 1498 1499 /* Update cm_info ord/ird to be negotiated values */ 1500 ep->cm_info.ord = iparams->ord; 1501 ep->cm_info.ird = iparams->ird; 1502 1503 qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); 1504 1505 ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; 1506 ep->cm_info.private_data_len = iparams->private_data_len + 1507 mpa_data_size; 1508 1509 memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, 1510 iparams->private_data, iparams->private_data_len); 1511 1512 rc = qed_iwarp_mpa_offload(p_hwfn, ep); 1513 if (rc) 1514 qed_iwarp_modify_qp(p_hwfn, 1515 iparams->qp, QED_IWARP_QP_STATE_ERROR, 1); 1516 1517 return rc; 1518 } 1519 1520 int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams) 1521 { 1522 struct qed_hwfn *p_hwfn = rdma_cxt; 1523 struct qed_iwarp_ep *ep; 1524 u8 mpa_data_size = 0; 1525 1526 ep = iparams->ep_context; 1527 if (!ep) { 1528 DP_ERR(p_hwfn, "Ep Context receive in reject is NULL\n"); 1529 return -EINVAL; 1530 } 1531 1532 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid); 1533 1534 ep->cb_context = iparams->cb_context; 1535 ep->qp = NULL; 1536 1537 qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); 1538 1539 ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; 1540 ep->cm_info.private_data_len = iparams->private_data_len + 1541 mpa_data_size; 1542 1543 memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, 1544 iparams->private_data, iparams->private_data_len); 1545 1546 return qed_iwarp_mpa_offload(p_hwfn, ep); 1547 } 1548 1549 static void 1550 qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn, 1551 struct qed_iwarp_cm_info *cm_info) 1552 { 1553 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n", 1554 cm_info->ip_version); 1555 1556 if (cm_info->ip_version == QED_TCP_IPV4) 1557 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1558 "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n", 1559 cm_info->remote_ip, cm_info->remote_port, 1560 cm_info->local_ip, cm_info->local_port, 1561 cm_info->vlan); 1562 else 1563 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1564 "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n", 1565 cm_info->remote_ip, cm_info->remote_port, 1566 cm_info->local_ip, cm_info->local_port, 1567 cm_info->vlan); 1568 1569 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1570 "private_data_len = %x ord = %d, ird = %d\n", 1571 cm_info->private_data_len, cm_info->ord, cm_info->ird); 1572 } 1573 1574 static int 1575 qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn, 1576 struct qed_iwarp_ll2_buff *buf, u8 handle) 1577 { 1578 int rc; 1579 1580 rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr, 1581 (u16)buf->buff_size, buf, 1); 1582 if (rc) { 1583 DP_NOTICE(p_hwfn, 1584 "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n", 1585 rc, handle); 1586 dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size, 1587 buf->data, buf->data_phys_addr); 1588 kfree(buf); 1589 } 1590 1591 return rc; 1592 } 1593 1594 static bool 1595 qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info) 1596 { 1597 struct qed_iwarp_ep *ep = NULL; 1598 bool found = false; 1599 1600 list_for_each_entry(ep, 1601 &p_hwfn->p_rdma_info->iwarp.ep_list, 1602 list_entry) { 1603 if ((ep->cm_info.local_port == cm_info->local_port) && 1604 (ep->cm_info.remote_port == cm_info->remote_port) && 1605 (ep->cm_info.vlan == cm_info->vlan) && 1606 !memcmp(&ep->cm_info.local_ip, cm_info->local_ip, 1607 sizeof(cm_info->local_ip)) && 1608 !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip, 1609 sizeof(cm_info->remote_ip))) { 1610 found = true; 1611 break; 1612 } 1613 } 1614 1615 if (found) { 1616 DP_NOTICE(p_hwfn, 1617 "SYN received on active connection - dropping\n"); 1618 qed_iwarp_print_cm_info(p_hwfn, cm_info); 1619 1620 return true; 1621 } 1622 1623 return false; 1624 } 1625 1626 static struct qed_iwarp_listener * 1627 qed_iwarp_get_listener(struct qed_hwfn *p_hwfn, 1628 struct qed_iwarp_cm_info *cm_info) 1629 { 1630 struct qed_iwarp_listener *listener = NULL; 1631 static const u32 ip_zero[4] = { 0, 0, 0, 0 }; 1632 bool found = false; 1633 1634 qed_iwarp_print_cm_info(p_hwfn, cm_info); 1635 1636 list_for_each_entry(listener, 1637 &p_hwfn->p_rdma_info->iwarp.listen_list, 1638 list_entry) { 1639 if (listener->port == cm_info->local_port) { 1640 if (!memcmp(listener->ip_addr, 1641 ip_zero, sizeof(ip_zero))) { 1642 found = true; 1643 break; 1644 } 1645 1646 if (!memcmp(listener->ip_addr, 1647 cm_info->local_ip, 1648 sizeof(cm_info->local_ip)) && 1649 (listener->vlan == cm_info->vlan)) { 1650 found = true; 1651 break; 1652 } 1653 } 1654 } 1655 1656 if (found) { 1657 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n", 1658 listener); 1659 return listener; 1660 } 1661 1662 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n"); 1663 return NULL; 1664 } 1665 1666 static int 1667 qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, 1668 struct qed_iwarp_cm_info *cm_info, 1669 void *buf, 1670 u8 *remote_mac_addr, 1671 u8 *local_mac_addr, 1672 int *payload_len, int *tcp_start_offset) 1673 { 1674 struct vlan_ethhdr *vethh; 1675 bool vlan_valid = false; 1676 struct ipv6hdr *ip6h; 1677 struct ethhdr *ethh; 1678 struct tcphdr *tcph; 1679 struct iphdr *iph; 1680 int eth_hlen; 1681 int ip_hlen; 1682 int eth_type; 1683 int i; 1684 1685 ethh = buf; 1686 eth_type = ntohs(ethh->h_proto); 1687 if (eth_type == ETH_P_8021Q) { 1688 vlan_valid = true; 1689 vethh = (struct vlan_ethhdr *)ethh; 1690 cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK; 1691 eth_type = ntohs(vethh->h_vlan_encapsulated_proto); 1692 } 1693 1694 eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0); 1695 1696 ether_addr_copy(remote_mac_addr, ethh->h_source); 1697 ether_addr_copy(local_mac_addr, ethh->h_dest); 1698 1699 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n", 1700 eth_type, ethh->h_source); 1701 1702 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n", 1703 eth_hlen, ethh->h_dest); 1704 1705 iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen); 1706 1707 if (eth_type == ETH_P_IP) { 1708 if (iph->protocol != IPPROTO_TCP) { 1709 DP_NOTICE(p_hwfn, 1710 "Unexpected ip protocol on ll2 %x\n", 1711 iph->protocol); 1712 return -EINVAL; 1713 } 1714 1715 cm_info->local_ip[0] = ntohl(iph->daddr); 1716 cm_info->remote_ip[0] = ntohl(iph->saddr); 1717 cm_info->ip_version = TCP_IPV4; 1718 1719 ip_hlen = (iph->ihl) * sizeof(u32); 1720 *payload_len = ntohs(iph->tot_len) - ip_hlen; 1721 } else if (eth_type == ETH_P_IPV6) { 1722 ip6h = (struct ipv6hdr *)iph; 1723 1724 if (ip6h->nexthdr != IPPROTO_TCP) { 1725 DP_NOTICE(p_hwfn, 1726 "Unexpected ip protocol on ll2 %x\n", 1727 iph->protocol); 1728 return -EINVAL; 1729 } 1730 1731 for (i = 0; i < 4; i++) { 1732 cm_info->local_ip[i] = 1733 ntohl(ip6h->daddr.in6_u.u6_addr32[i]); 1734 cm_info->remote_ip[i] = 1735 ntohl(ip6h->saddr.in6_u.u6_addr32[i]); 1736 } 1737 cm_info->ip_version = TCP_IPV6; 1738 1739 ip_hlen = sizeof(*ip6h); 1740 *payload_len = ntohs(ip6h->payload_len); 1741 } else { 1742 DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type); 1743 return -EINVAL; 1744 } 1745 1746 tcph = (struct tcphdr *)((u8 *)iph + ip_hlen); 1747 1748 if (!tcph->syn) { 1749 DP_NOTICE(p_hwfn, 1750 "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n", 1751 iph->ihl, tcph->source, tcph->dest); 1752 return -EINVAL; 1753 } 1754 1755 cm_info->local_port = ntohs(tcph->dest); 1756 cm_info->remote_port = ntohs(tcph->source); 1757 1758 qed_iwarp_print_cm_info(p_hwfn, cm_info); 1759 1760 *tcp_start_offset = eth_hlen + ip_hlen; 1761 1762 return 0; 1763 } 1764 1765 static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn, 1766 u16 cid) 1767 { 1768 struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; 1769 struct qed_iwarp_fpdu *partial_fpdu; 1770 u32 idx; 1771 1772 idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP); 1773 if (idx >= iwarp_info->max_num_partial_fpdus) { 1774 DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid, 1775 iwarp_info->max_num_partial_fpdus); 1776 return NULL; 1777 } 1778 1779 partial_fpdu = &iwarp_info->partial_fpdus[idx]; 1780 1781 return partial_fpdu; 1782 } 1783 1784 enum qed_iwarp_mpa_pkt_type { 1785 QED_IWARP_MPA_PKT_PACKED, 1786 QED_IWARP_MPA_PKT_PARTIAL, 1787 QED_IWARP_MPA_PKT_UNALIGNED 1788 }; 1789 1790 #define QED_IWARP_INVALID_FPDU_LENGTH 0xffff 1791 #define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2) 1792 #define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4) 1793 1794 /* Pad to multiple of 4 */ 1795 #define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4) 1796 #define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len) \ 1797 (QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) + \ 1798 QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \ 1799 QED_IWARP_MPA_CRC32_DIGEST_SIZE) 1800 1801 /* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */ 1802 #define QED_IWARP_MAX_BDS_PER_FPDU 3 1803 1804 static const char * const pkt_type_str[] = { 1805 "QED_IWARP_MPA_PKT_PACKED", 1806 "QED_IWARP_MPA_PKT_PARTIAL", 1807 "QED_IWARP_MPA_PKT_UNALIGNED" 1808 }; 1809 1810 static int 1811 qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn, 1812 struct qed_iwarp_fpdu *fpdu, 1813 struct qed_iwarp_ll2_buff *buf); 1814 1815 static enum qed_iwarp_mpa_pkt_type 1816 qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn, 1817 struct qed_iwarp_fpdu *fpdu, 1818 u16 tcp_payload_len, u8 *mpa_data) 1819 { 1820 enum qed_iwarp_mpa_pkt_type pkt_type; 1821 u16 mpa_len; 1822 1823 if (fpdu->incomplete_bytes) { 1824 pkt_type = QED_IWARP_MPA_PKT_UNALIGNED; 1825 goto out; 1826 } 1827 1828 /* special case of one byte remaining... 1829 * lower byte will be read next packet 1830 */ 1831 if (tcp_payload_len == 1) { 1832 fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE; 1833 pkt_type = QED_IWARP_MPA_PKT_PARTIAL; 1834 goto out; 1835 } 1836 1837 mpa_len = ntohs(*((u16 *)(mpa_data))); 1838 fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); 1839 1840 if (fpdu->fpdu_length <= tcp_payload_len) 1841 pkt_type = QED_IWARP_MPA_PKT_PACKED; 1842 else 1843 pkt_type = QED_IWARP_MPA_PKT_PARTIAL; 1844 1845 out: 1846 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1847 "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n", 1848 pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len); 1849 1850 return pkt_type; 1851 } 1852 1853 static void 1854 qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf, 1855 struct qed_iwarp_fpdu *fpdu, 1856 struct unaligned_opaque_data *pkt_data, 1857 u16 tcp_payload_size, u8 placement_offset) 1858 { 1859 fpdu->mpa_buf = buf; 1860 fpdu->pkt_hdr = buf->data_phys_addr + placement_offset; 1861 fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset; 1862 fpdu->mpa_frag = buf->data_phys_addr + pkt_data->first_mpa_offset; 1863 fpdu->mpa_frag_virt = (u8 *)(buf->data) + pkt_data->first_mpa_offset; 1864 1865 if (tcp_payload_size == 1) 1866 fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH; 1867 else if (tcp_payload_size < fpdu->fpdu_length) 1868 fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size; 1869 else 1870 fpdu->incomplete_bytes = 0; /* complete fpdu */ 1871 1872 fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes; 1873 } 1874 1875 static int 1876 qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn, 1877 struct qed_iwarp_fpdu *fpdu, 1878 struct unaligned_opaque_data *pkt_data, 1879 struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size) 1880 { 1881 u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf; 1882 int rc; 1883 1884 /* need to copy the data from the partial packet stored in fpdu 1885 * to the new buf, for this we also need to move the data currently 1886 * placed on the buf. The assumption is that the buffer is big enough 1887 * since fpdu_length <= mss, we use an intermediate buffer since 1888 * we may need to copy the new data to an overlapping location 1889 */ 1890 if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) { 1891 DP_ERR(p_hwfn, 1892 "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n", 1893 buf->buff_size, fpdu->mpa_frag_len, 1894 tcp_payload_size, fpdu->incomplete_bytes); 1895 return -EINVAL; 1896 } 1897 1898 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1899 "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n", 1900 fpdu->mpa_frag_virt, fpdu->mpa_frag_len, 1901 (u8 *)(buf->data) + pkt_data->first_mpa_offset, 1902 tcp_payload_size); 1903 1904 memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len); 1905 memcpy(tmp_buf + fpdu->mpa_frag_len, 1906 (u8 *)(buf->data) + pkt_data->first_mpa_offset, 1907 tcp_payload_size); 1908 1909 rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf); 1910 if (rc) 1911 return rc; 1912 1913 /* If we managed to post the buffer copy the data to the new buffer 1914 * o/w this will occur in the next round... 1915 */ 1916 memcpy((u8 *)(buf->data), tmp_buf, 1917 fpdu->mpa_frag_len + tcp_payload_size); 1918 1919 fpdu->mpa_buf = buf; 1920 /* fpdu->pkt_hdr remains as is */ 1921 /* fpdu->mpa_frag is overridden with new buf */ 1922 fpdu->mpa_frag = buf->data_phys_addr; 1923 fpdu->mpa_frag_virt = buf->data; 1924 fpdu->mpa_frag_len += tcp_payload_size; 1925 1926 fpdu->incomplete_bytes -= tcp_payload_size; 1927 1928 DP_VERBOSE(p_hwfn, 1929 QED_MSG_RDMA, 1930 "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n", 1931 buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size, 1932 fpdu->incomplete_bytes); 1933 1934 return 0; 1935 } 1936 1937 static void 1938 qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn, 1939 struct qed_iwarp_fpdu *fpdu, u8 *mpa_data) 1940 { 1941 u16 mpa_len; 1942 1943 /* Update incomplete packets if needed */ 1944 if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) { 1945 /* Missing lower byte is now available */ 1946 mpa_len = fpdu->fpdu_length | *mpa_data; 1947 fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); 1948 /* one byte of hdr */ 1949 fpdu->mpa_frag_len = 1; 1950 fpdu->incomplete_bytes = fpdu->fpdu_length - 1; 1951 DP_VERBOSE(p_hwfn, 1952 QED_MSG_RDMA, 1953 "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n", 1954 mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes); 1955 } 1956 } 1957 1958 #define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \ 1959 (GET_FIELD((_curr_pkt)->flags, \ 1960 UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE)) 1961 1962 /* This function is used to recycle a buffer using the ll2 drop option. It 1963 * uses the mechanism to ensure that all buffers posted to tx before this one 1964 * were completed. The buffer sent here will be sent as a cookie in the tx 1965 * completion function and can then be reposted to rx chain when done. The flow 1966 * that requires this is the flow where a FPDU splits over more than 3 tcp 1967 * segments. In this case the driver needs to re-post a rx buffer instead of 1968 * the one received, but driver can't simply repost a buffer it copied from 1969 * as there is a case where the buffer was originally a packed FPDU, and is 1970 * partially posted to FW. Driver needs to ensure FW is done with it. 1971 */ 1972 static int 1973 qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn, 1974 struct qed_iwarp_fpdu *fpdu, 1975 struct qed_iwarp_ll2_buff *buf) 1976 { 1977 struct qed_ll2_tx_pkt_info tx_pkt; 1978 u8 ll2_handle; 1979 int rc; 1980 1981 memset(&tx_pkt, 0, sizeof(tx_pkt)); 1982 tx_pkt.num_of_bds = 1; 1983 tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP; 1984 tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; 1985 tx_pkt.first_frag = fpdu->pkt_hdr; 1986 tx_pkt.first_frag_len = fpdu->pkt_hdr_size; 1987 buf->piggy_buf = NULL; 1988 tx_pkt.cookie = buf; 1989 1990 ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; 1991 1992 rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true); 1993 if (rc) 1994 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1995 "Can't drop packet rc=%d\n", rc); 1996 1997 DP_VERBOSE(p_hwfn, 1998 QED_MSG_RDMA, 1999 "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n", 2000 (unsigned long int)tx_pkt.first_frag, 2001 tx_pkt.first_frag_len, buf, rc); 2002 2003 return rc; 2004 } 2005 2006 static int 2007 qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu) 2008 { 2009 struct qed_ll2_tx_pkt_info tx_pkt; 2010 u8 ll2_handle; 2011 int rc; 2012 2013 memset(&tx_pkt, 0, sizeof(tx_pkt)); 2014 tx_pkt.num_of_bds = 1; 2015 tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; 2016 tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; 2017 2018 tx_pkt.first_frag = fpdu->pkt_hdr; 2019 tx_pkt.first_frag_len = fpdu->pkt_hdr_size; 2020 tx_pkt.enable_ip_cksum = true; 2021 tx_pkt.enable_l4_cksum = true; 2022 tx_pkt.calc_ip_len = true; 2023 /* vlan overload with enum iwarp_ll2_tx_queues */ 2024 tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE; 2025 2026 ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; 2027 2028 rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true); 2029 if (rc) 2030 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 2031 "Can't send right edge rc=%d\n", rc); 2032 DP_VERBOSE(p_hwfn, 2033 QED_MSG_RDMA, 2034 "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n", 2035 tx_pkt.num_of_bds, 2036 (unsigned long int)tx_pkt.first_frag, 2037 tx_pkt.first_frag_len, rc); 2038 2039 return rc; 2040 } 2041 2042 static int 2043 qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn, 2044 struct qed_iwarp_fpdu *fpdu, 2045 struct unaligned_opaque_data *curr_pkt, 2046 struct qed_iwarp_ll2_buff *buf, 2047 u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type) 2048 { 2049 struct qed_ll2_tx_pkt_info tx_pkt; 2050 u8 ll2_handle; 2051 int rc; 2052 2053 memset(&tx_pkt, 0, sizeof(tx_pkt)); 2054 2055 /* An unaligned packet means it's split over two tcp segments. So the 2056 * complete packet requires 3 bds, one for the header, one for the 2057 * part of the fpdu of the first tcp segment, and the last fragment 2058 * will point to the remainder of the fpdu. A packed pdu, requires only 2059 * two bds, one for the header and one for the data. 2060 */ 2061 tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2; 2062 tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; 2063 tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */ 2064 2065 /* Send the mpa_buf only with the last fpdu (in case of packed) */ 2066 if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED || 2067 tcp_payload_size <= fpdu->fpdu_length) 2068 tx_pkt.cookie = fpdu->mpa_buf; 2069 2070 tx_pkt.first_frag = fpdu->pkt_hdr; 2071 tx_pkt.first_frag_len = fpdu->pkt_hdr_size; 2072 tx_pkt.enable_ip_cksum = true; 2073 tx_pkt.enable_l4_cksum = true; 2074 tx_pkt.calc_ip_len = true; 2075 /* vlan overload with enum iwarp_ll2_tx_queues */ 2076 tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE; 2077 2078 /* special case of unaligned packet and not packed, need to send 2079 * both buffers as cookie to release. 2080 */ 2081 if (tcp_payload_size == fpdu->incomplete_bytes) 2082 fpdu->mpa_buf->piggy_buf = buf; 2083 2084 ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; 2085 2086 /* Set first fragment to header */ 2087 rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true); 2088 if (rc) 2089 goto out; 2090 2091 /* Set second fragment to first part of packet */ 2092 rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle, 2093 fpdu->mpa_frag, 2094 fpdu->mpa_frag_len); 2095 if (rc) 2096 goto out; 2097 2098 if (!fpdu->incomplete_bytes) 2099 goto out; 2100 2101 /* Set third fragment to second part of the packet */ 2102 rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, 2103 ll2_handle, 2104 buf->data_phys_addr + 2105 curr_pkt->first_mpa_offset, 2106 fpdu->incomplete_bytes); 2107 out: 2108 DP_VERBOSE(p_hwfn, 2109 QED_MSG_RDMA, 2110 "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n", 2111 tx_pkt.num_of_bds, 2112 tx_pkt.first_frag_len, 2113 fpdu->mpa_frag_len, 2114 fpdu->incomplete_bytes, rc); 2115 2116 return rc; 2117 } 2118 2119 static void 2120 qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn, 2121 struct unaligned_opaque_data *curr_pkt, 2122 u32 opaque_data0, u32 opaque_data1) 2123 { 2124 u64 opaque_data; 2125 2126 opaque_data = HILO_64(opaque_data1, opaque_data0); 2127 *curr_pkt = *((struct unaligned_opaque_data *)&opaque_data); 2128 2129 curr_pkt->first_mpa_offset = curr_pkt->tcp_payload_offset + 2130 le16_to_cpu(curr_pkt->first_mpa_offset); 2131 curr_pkt->cid = le32_to_cpu(curr_pkt->cid); 2132 } 2133 2134 /* This function is called when an unaligned or incomplete MPA packet arrives 2135 * driver needs to align the packet, perhaps using previous data and send 2136 * it down to FW once it is aligned. 2137 */ 2138 static int 2139 qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn, 2140 struct qed_iwarp_ll2_mpa_buf *mpa_buf) 2141 { 2142 struct unaligned_opaque_data *curr_pkt = &mpa_buf->data; 2143 struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf; 2144 enum qed_iwarp_mpa_pkt_type pkt_type; 2145 struct qed_iwarp_fpdu *fpdu; 2146 int rc = -EINVAL; 2147 u8 *mpa_data; 2148 2149 fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, curr_pkt->cid & 0xffff); 2150 if (!fpdu) { /* something corrupt with cid, post rx back */ 2151 DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n", 2152 curr_pkt->cid); 2153 goto err; 2154 } 2155 2156 do { 2157 mpa_data = ((u8 *)(buf->data) + curr_pkt->first_mpa_offset); 2158 2159 pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu, 2160 mpa_buf->tcp_payload_len, 2161 mpa_data); 2162 2163 switch (pkt_type) { 2164 case QED_IWARP_MPA_PKT_PARTIAL: 2165 qed_iwarp_init_fpdu(buf, fpdu, 2166 curr_pkt, 2167 mpa_buf->tcp_payload_len, 2168 mpa_buf->placement_offset); 2169 2170 if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) { 2171 mpa_buf->tcp_payload_len = 0; 2172 break; 2173 } 2174 2175 rc = qed_iwarp_win_right_edge(p_hwfn, fpdu); 2176 2177 if (rc) { 2178 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 2179 "Can't send FPDU:reset rc=%d\n", rc); 2180 memset(fpdu, 0, sizeof(*fpdu)); 2181 break; 2182 } 2183 2184 mpa_buf->tcp_payload_len = 0; 2185 break; 2186 case QED_IWARP_MPA_PKT_PACKED: 2187 qed_iwarp_init_fpdu(buf, fpdu, 2188 curr_pkt, 2189 mpa_buf->tcp_payload_len, 2190 mpa_buf->placement_offset); 2191 2192 rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf, 2193 mpa_buf->tcp_payload_len, 2194 pkt_type); 2195 if (rc) { 2196 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 2197 "Can't send FPDU:reset rc=%d\n", rc); 2198 memset(fpdu, 0, sizeof(*fpdu)); 2199 break; 2200 } 2201 2202 mpa_buf->tcp_payload_len -= fpdu->fpdu_length; 2203 curr_pkt->first_mpa_offset += fpdu->fpdu_length; 2204 break; 2205 case QED_IWARP_MPA_PKT_UNALIGNED: 2206 qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data); 2207 if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) { 2208 /* special handling of fpdu split over more 2209 * than 2 segments 2210 */ 2211 if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) { 2212 rc = qed_iwarp_win_right_edge(p_hwfn, 2213 fpdu); 2214 /* packet will be re-processed later */ 2215 if (rc) 2216 return rc; 2217 } 2218 2219 rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt, 2220 buf, 2221 mpa_buf->tcp_payload_len); 2222 if (rc) /* packet will be re-processed later */ 2223 return rc; 2224 2225 mpa_buf->tcp_payload_len = 0; 2226 break; 2227 } 2228 2229 rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf, 2230 mpa_buf->tcp_payload_len, 2231 pkt_type); 2232 if (rc) { 2233 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 2234 "Can't send FPDU:delay rc=%d\n", rc); 2235 /* don't reset fpdu -> we need it for next 2236 * classify 2237 */ 2238 break; 2239 } 2240 2241 mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes; 2242 curr_pkt->first_mpa_offset += fpdu->incomplete_bytes; 2243 /* The framed PDU was sent - no more incomplete bytes */ 2244 fpdu->incomplete_bytes = 0; 2245 break; 2246 } 2247 } while (mpa_buf->tcp_payload_len && !rc); 2248 2249 return rc; 2250 2251 err: 2252 qed_iwarp_ll2_post_rx(p_hwfn, 2253 buf, 2254 p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle); 2255 return rc; 2256 } 2257 2258 static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn) 2259 { 2260 struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; 2261 struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL; 2262 int rc; 2263 2264 while (!list_empty(&iwarp_info->mpa_buf_pending_list)) { 2265 mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list, 2266 struct qed_iwarp_ll2_mpa_buf, 2267 list_entry); 2268 2269 rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf); 2270 2271 /* busy means break and continue processing later, don't 2272 * remove the buf from the pending list. 2273 */ 2274 if (rc == -EBUSY) 2275 break; 2276 2277 list_del(&mpa_buf->list_entry); 2278 list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_list); 2279 2280 if (rc) { /* different error, don't continue */ 2281 DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc); 2282 break; 2283 } 2284 } 2285 } 2286 2287 static void 2288 qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) 2289 { 2290 struct qed_iwarp_ll2_mpa_buf *mpa_buf; 2291 struct qed_iwarp_info *iwarp_info; 2292 struct qed_hwfn *p_hwfn = cxt; 2293 2294 iwarp_info = &p_hwfn->p_rdma_info->iwarp; 2295 mpa_buf = list_first_entry(&iwarp_info->mpa_buf_list, 2296 struct qed_iwarp_ll2_mpa_buf, list_entry); 2297 if (!mpa_buf) { 2298 DP_ERR(p_hwfn, "No free mpa buf\n"); 2299 goto err; 2300 } 2301 2302 list_del(&mpa_buf->list_entry); 2303 qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data, 2304 data->opaque_data_0, data->opaque_data_1); 2305 2306 DP_VERBOSE(p_hwfn, 2307 QED_MSG_RDMA, 2308 "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n", 2309 data->length.packet_length, mpa_buf->data.first_mpa_offset, 2310 mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags, 2311 mpa_buf->data.cid); 2312 2313 mpa_buf->ll2_buf = data->cookie; 2314 mpa_buf->tcp_payload_len = data->length.packet_length - 2315 mpa_buf->data.first_mpa_offset; 2316 mpa_buf->data.first_mpa_offset += data->u.placement_offset; 2317 mpa_buf->placement_offset = data->u.placement_offset; 2318 2319 list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list); 2320 2321 qed_iwarp_process_pending_pkts(p_hwfn); 2322 return; 2323 err: 2324 qed_iwarp_ll2_post_rx(p_hwfn, data->cookie, 2325 iwarp_info->ll2_mpa_handle); 2326 } 2327 2328 static void 2329 qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) 2330 { 2331 struct qed_iwarp_ll2_buff *buf = data->cookie; 2332 struct qed_iwarp_listener *listener; 2333 struct qed_ll2_tx_pkt_info tx_pkt; 2334 struct qed_iwarp_cm_info cm_info; 2335 struct qed_hwfn *p_hwfn = cxt; 2336 u8 remote_mac_addr[ETH_ALEN]; 2337 u8 local_mac_addr[ETH_ALEN]; 2338 struct qed_iwarp_ep *ep; 2339 int tcp_start_offset; 2340 u8 ts_hdr_size = 0; 2341 u8 ll2_syn_handle; 2342 int payload_len; 2343 u32 hdr_size; 2344 int rc; 2345 2346 memset(&cm_info, 0, sizeof(cm_info)); 2347 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle; 2348 2349 /* Check if packet was received with errors... */ 2350 if (data->err_flags) { 2351 DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n", 2352 data->err_flags); 2353 goto err; 2354 } 2355 2356 if (GET_FIELD(data->parse_flags, 2357 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) && 2358 GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) { 2359 DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n"); 2360 goto err; 2361 } 2362 2363 rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) + 2364 data->u.placement_offset, remote_mac_addr, 2365 local_mac_addr, &payload_len, 2366 &tcp_start_offset); 2367 if (rc) 2368 goto err; 2369 2370 /* Check if there is a listener for this 4-tuple+vlan */ 2371 listener = qed_iwarp_get_listener(p_hwfn, &cm_info); 2372 if (!listener) { 2373 DP_VERBOSE(p_hwfn, 2374 QED_MSG_RDMA, 2375 "SYN received on tuple not listened on parse_flags=%d packet len=%d\n", 2376 data->parse_flags, data->length.packet_length); 2377 2378 memset(&tx_pkt, 0, sizeof(tx_pkt)); 2379 tx_pkt.num_of_bds = 1; 2380 tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2; 2381 tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; 2382 tx_pkt.first_frag = buf->data_phys_addr + 2383 data->u.placement_offset; 2384 tx_pkt.first_frag_len = data->length.packet_length; 2385 tx_pkt.cookie = buf; 2386 2387 rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle, 2388 &tx_pkt, true); 2389 2390 if (rc) { 2391 DP_NOTICE(p_hwfn, 2392 "Can't post SYN back to chip rc=%d\n", rc); 2393 goto err; 2394 } 2395 return; 2396 } 2397 2398 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n"); 2399 /* There may be an open ep on this connection if this is a syn 2400 * retrasnmit... need to make sure there isn't... 2401 */ 2402 if (qed_iwarp_ep_exists(p_hwfn, &cm_info)) 2403 goto err; 2404 2405 ep = qed_iwarp_get_free_ep(p_hwfn); 2406 if (!ep) 2407 goto err; 2408 2409 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 2410 list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list); 2411 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 2412 2413 ether_addr_copy(ep->remote_mac_addr, remote_mac_addr); 2414 ether_addr_copy(ep->local_mac_addr, local_mac_addr); 2415 2416 memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info)); 2417 2418 if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN) 2419 ts_hdr_size = TIMESTAMP_HEADER_SIZE; 2420 2421 hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60) + 2422 ts_hdr_size; 2423 ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size; 2424 ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss); 2425 2426 ep->event_cb = listener->event_cb; 2427 ep->cb_context = listener->cb_context; 2428 ep->connect_mode = TCP_CONNECT_PASSIVE; 2429 2430 ep->syn = buf; 2431 ep->syn_ip_payload_length = (u16)payload_len; 2432 ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset + 2433 tcp_start_offset; 2434 2435 rc = qed_iwarp_tcp_offload(p_hwfn, ep); 2436 if (rc) { 2437 qed_iwarp_return_ep(p_hwfn, ep); 2438 goto err; 2439 } 2440 2441 return; 2442 err: 2443 qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle); 2444 } 2445 2446 static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle, 2447 void *cookie, dma_addr_t rx_buf_addr, 2448 bool b_last_packet) 2449 { 2450 struct qed_iwarp_ll2_buff *buffer = cookie; 2451 struct qed_hwfn *p_hwfn = cxt; 2452 2453 dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size, 2454 buffer->data, buffer->data_phys_addr); 2455 kfree(buffer); 2456 } 2457 2458 static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle, 2459 void *cookie, dma_addr_t first_frag_addr, 2460 bool b_last_fragment, bool b_last_packet) 2461 { 2462 struct qed_iwarp_ll2_buff *buffer = cookie; 2463 struct qed_iwarp_ll2_buff *piggy; 2464 struct qed_hwfn *p_hwfn = cxt; 2465 2466 if (!buffer) /* can happen in packed mpa unaligned... */ 2467 return; 2468 2469 /* this was originally an rx packet, post it back */ 2470 piggy = buffer->piggy_buf; 2471 if (piggy) { 2472 buffer->piggy_buf = NULL; 2473 qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle); 2474 } 2475 2476 qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle); 2477 2478 if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle) 2479 qed_iwarp_process_pending_pkts(p_hwfn); 2480 2481 return; 2482 } 2483 2484 static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle, 2485 void *cookie, dma_addr_t first_frag_addr, 2486 bool b_last_fragment, bool b_last_packet) 2487 { 2488 struct qed_iwarp_ll2_buff *buffer = cookie; 2489 struct qed_hwfn *p_hwfn = cxt; 2490 2491 if (!buffer) 2492 return; 2493 2494 if (buffer->piggy_buf) { 2495 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2496 buffer->piggy_buf->buff_size, 2497 buffer->piggy_buf->data, 2498 buffer->piggy_buf->data_phys_addr); 2499 2500 kfree(buffer->piggy_buf); 2501 } 2502 2503 dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size, 2504 buffer->data, buffer->data_phys_addr); 2505 2506 kfree(buffer); 2507 } 2508 2509 /* The only slowpath for iwarp ll2 is unalign flush. When this completion 2510 * is received, need to reset the FPDU. 2511 */ 2512 void 2513 qed_iwarp_ll2_slowpath(void *cxt, 2514 u8 connection_handle, 2515 u32 opaque_data_0, u32 opaque_data_1) 2516 { 2517 struct unaligned_opaque_data unalign_data; 2518 struct qed_hwfn *p_hwfn = cxt; 2519 struct qed_iwarp_fpdu *fpdu; 2520 2521 qed_iwarp_mpa_get_data(p_hwfn, &unalign_data, 2522 opaque_data_0, opaque_data_1); 2523 2524 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n", 2525 unalign_data.cid); 2526 2527 fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)unalign_data.cid); 2528 if (fpdu) 2529 memset(fpdu, 0, sizeof(*fpdu)); 2530 } 2531 2532 static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2533 { 2534 struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; 2535 int rc = 0; 2536 2537 if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) { 2538 rc = qed_ll2_terminate_connection(p_hwfn, 2539 iwarp_info->ll2_syn_handle); 2540 if (rc) 2541 DP_INFO(p_hwfn, "Failed to terminate syn connection\n"); 2542 2543 qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle); 2544 iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL; 2545 } 2546 2547 if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) { 2548 rc = qed_ll2_terminate_connection(p_hwfn, 2549 iwarp_info->ll2_ooo_handle); 2550 if (rc) 2551 DP_INFO(p_hwfn, "Failed to terminate ooo connection\n"); 2552 2553 qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle); 2554 iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL; 2555 } 2556 2557 if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) { 2558 rc = qed_ll2_terminate_connection(p_hwfn, 2559 iwarp_info->ll2_mpa_handle); 2560 if (rc) 2561 DP_INFO(p_hwfn, "Failed to terminate mpa connection\n"); 2562 2563 qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle); 2564 iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL; 2565 } 2566 2567 qed_llh_remove_mac_filter(p_hwfn, 2568 p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr); 2569 return rc; 2570 } 2571 2572 static int 2573 qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn, 2574 int num_rx_bufs, int buff_size, u8 ll2_handle) 2575 { 2576 struct qed_iwarp_ll2_buff *buffer; 2577 int rc = 0; 2578 int i; 2579 2580 for (i = 0; i < num_rx_bufs; i++) { 2581 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); 2582 if (!buffer) { 2583 rc = -ENOMEM; 2584 break; 2585 } 2586 2587 buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 2588 buff_size, 2589 &buffer->data_phys_addr, 2590 GFP_KERNEL); 2591 if (!buffer->data) { 2592 kfree(buffer); 2593 rc = -ENOMEM; 2594 break; 2595 } 2596 2597 buffer->buff_size = buff_size; 2598 rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle); 2599 if (rc) 2600 /* buffers will be deallocated by qed_ll2 */ 2601 break; 2602 } 2603 return rc; 2604 } 2605 2606 #define QED_IWARP_MAX_BUF_SIZE(mtu) \ 2607 ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \ 2608 ETH_CACHE_LINE_SIZE) 2609 2610 static int 2611 qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, 2612 struct qed_rdma_start_in_params *params, 2613 struct qed_ptt *p_ptt) 2614 { 2615 struct qed_iwarp_info *iwarp_info; 2616 struct qed_ll2_acquire_data data; 2617 struct qed_ll2_cbs cbs; 2618 u32 mpa_buff_size; 2619 u16 n_ooo_bufs; 2620 int rc = 0; 2621 int i; 2622 2623 iwarp_info = &p_hwfn->p_rdma_info->iwarp; 2624 iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL; 2625 iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL; 2626 iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL; 2627 2628 iwarp_info->max_mtu = params->max_mtu; 2629 2630 ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr); 2631 2632 rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, params->mac_addr); 2633 if (rc) 2634 return rc; 2635 2636 /* Start SYN connection */ 2637 cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt; 2638 cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt; 2639 cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt; 2640 cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt; 2641 cbs.cookie = p_hwfn; 2642 2643 memset(&data, 0, sizeof(data)); 2644 data.input.conn_type = QED_LL2_TYPE_IWARP; 2645 data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE; 2646 data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; 2647 data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; 2648 data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ 2649 data.input.tx_tc = PKT_LB_TC; 2650 data.input.tx_dest = QED_LL2_TX_DEST_LB; 2651 data.p_connection_handle = &iwarp_info->ll2_syn_handle; 2652 data.cbs = &cbs; 2653 2654 rc = qed_ll2_acquire_connection(p_hwfn, &data); 2655 if (rc) { 2656 DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n"); 2657 qed_llh_remove_mac_filter(p_hwfn, p_ptt, params->mac_addr); 2658 return rc; 2659 } 2660 2661 rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle); 2662 if (rc) { 2663 DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n"); 2664 goto err; 2665 } 2666 2667 rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, 2668 QED_IWARP_LL2_SYN_RX_SIZE, 2669 QED_IWARP_MAX_SYN_PKT_SIZE, 2670 iwarp_info->ll2_syn_handle); 2671 if (rc) 2672 goto err; 2673 2674 /* Start OOO connection */ 2675 data.input.conn_type = QED_LL2_TYPE_OOO; 2676 data.input.mtu = params->max_mtu; 2677 2678 n_ooo_bufs = (QED_IWARP_MAX_OOO * QED_IWARP_RCV_WND_SIZE_DEF) / 2679 iwarp_info->max_mtu; 2680 n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE); 2681 2682 data.input.rx_num_desc = n_ooo_bufs; 2683 data.input.rx_num_ooo_buffers = n_ooo_bufs; 2684 2685 data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ 2686 data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE; 2687 data.p_connection_handle = &iwarp_info->ll2_ooo_handle; 2688 2689 rc = qed_ll2_acquire_connection(p_hwfn, &data); 2690 if (rc) 2691 goto err; 2692 2693 rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle); 2694 if (rc) 2695 goto err; 2696 2697 /* Start Unaligned MPA connection */ 2698 cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt; 2699 cbs.slowpath_cb = qed_iwarp_ll2_slowpath; 2700 2701 memset(&data, 0, sizeof(data)); 2702 data.input.conn_type = QED_LL2_TYPE_IWARP; 2703 data.input.mtu = params->max_mtu; 2704 /* FW requires that once a packet arrives OOO, it must have at 2705 * least 2 rx buffers available on the unaligned connection 2706 * for handling the case that it is a partial fpdu. 2707 */ 2708 data.input.rx_num_desc = n_ooo_bufs * 2; 2709 data.input.tx_num_desc = data.input.rx_num_desc; 2710 data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU; 2711 data.p_connection_handle = &iwarp_info->ll2_mpa_handle; 2712 data.input.secondary_queue = true; 2713 data.cbs = &cbs; 2714 2715 rc = qed_ll2_acquire_connection(p_hwfn, &data); 2716 if (rc) 2717 goto err; 2718 2719 rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle); 2720 if (rc) 2721 goto err; 2722 2723 mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); 2724 rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, 2725 data.input.rx_num_desc, 2726 mpa_buff_size, 2727 iwarp_info->ll2_mpa_handle); 2728 if (rc) 2729 goto err; 2730 2731 iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps, 2732 sizeof(*iwarp_info->partial_fpdus), 2733 GFP_KERNEL); 2734 if (!iwarp_info->partial_fpdus) 2735 goto err; 2736 2737 iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; 2738 2739 iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL); 2740 if (!iwarp_info->mpa_intermediate_buf) 2741 goto err; 2742 2743 /* The mpa_bufs array serves for pending RX packets received on the 2744 * mpa ll2 that don't have place on the tx ring and require later 2745 * processing. We can't fail on allocation of such a struct therefore 2746 * we allocate enough to take care of all rx packets 2747 */ 2748 iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc, 2749 sizeof(*iwarp_info->mpa_bufs), 2750 GFP_KERNEL); 2751 if (!iwarp_info->mpa_bufs) 2752 goto err; 2753 2754 INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list); 2755 INIT_LIST_HEAD(&iwarp_info->mpa_buf_list); 2756 for (i = 0; i < data.input.rx_num_desc; i++) 2757 list_add_tail(&iwarp_info->mpa_bufs[i].list_entry, 2758 &iwarp_info->mpa_buf_list); 2759 return rc; 2760 err: 2761 qed_iwarp_ll2_stop(p_hwfn, p_ptt); 2762 2763 return rc; 2764 } 2765 2766 int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 2767 struct qed_rdma_start_in_params *params) 2768 { 2769 struct qed_iwarp_info *iwarp_info; 2770 u32 rcv_wnd_size; 2771 2772 iwarp_info = &p_hwfn->p_rdma_info->iwarp; 2773 2774 iwarp_info->tcp_flags = QED_IWARP_TS_EN; 2775 rcv_wnd_size = QED_IWARP_RCV_WND_SIZE_DEF; 2776 2777 /* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */ 2778 iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) - 2779 ilog2(QED_IWARP_RCV_WND_SIZE_MIN); 2780 iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale; 2781 iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED; 2782 iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED; 2783 2784 iwarp_info->peer2peer = QED_IWARP_PARAM_P2P; 2785 2786 iwarp_info->rtr_type = MPA_RTR_TYPE_ZERO_SEND | 2787 MPA_RTR_TYPE_ZERO_WRITE | 2788 MPA_RTR_TYPE_ZERO_READ; 2789 2790 spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock); 2791 INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list); 2792 INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list); 2793 2794 qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP, 2795 qed_iwarp_async_event); 2796 qed_ooo_setup(p_hwfn); 2797 2798 return qed_iwarp_ll2_start(p_hwfn, params, p_ptt); 2799 } 2800 2801 int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2802 { 2803 int rc; 2804 2805 qed_iwarp_free_prealloc_ep(p_hwfn); 2806 rc = qed_iwarp_wait_for_all_cids(p_hwfn); 2807 if (rc) 2808 return rc; 2809 2810 qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP); 2811 2812 return qed_iwarp_ll2_stop(p_hwfn, p_ptt); 2813 } 2814 2815 void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn, 2816 struct qed_iwarp_ep *ep, u8 fw_return_code) 2817 { 2818 struct qed_iwarp_cm_event_params params; 2819 2820 qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true); 2821 2822 params.event = QED_IWARP_EVENT_CLOSE; 2823 params.ep_context = ep; 2824 params.cm_info = &ep->cm_info; 2825 params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ? 2826 0 : -ECONNRESET; 2827 2828 ep->state = QED_IWARP_EP_CLOSED; 2829 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 2830 list_del(&ep->list_entry); 2831 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 2832 2833 ep->event_cb(ep->cb_context, ¶ms); 2834 } 2835 2836 void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn, 2837 struct qed_iwarp_ep *ep, int fw_ret_code) 2838 { 2839 struct qed_iwarp_cm_event_params params; 2840 bool event_cb = false; 2841 2842 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n", 2843 ep->cid, fw_ret_code); 2844 2845 switch (fw_ret_code) { 2846 case IWARP_EXCEPTION_DETECTED_LLP_CLOSED: 2847 params.status = 0; 2848 params.event = QED_IWARP_EVENT_DISCONNECT; 2849 event_cb = true; 2850 break; 2851 case IWARP_EXCEPTION_DETECTED_LLP_RESET: 2852 params.status = -ECONNRESET; 2853 params.event = QED_IWARP_EVENT_DISCONNECT; 2854 event_cb = true; 2855 break; 2856 case IWARP_EXCEPTION_DETECTED_RQ_EMPTY: 2857 params.event = QED_IWARP_EVENT_RQ_EMPTY; 2858 event_cb = true; 2859 break; 2860 case IWARP_EXCEPTION_DETECTED_IRQ_FULL: 2861 params.event = QED_IWARP_EVENT_IRQ_FULL; 2862 event_cb = true; 2863 break; 2864 case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT: 2865 params.event = QED_IWARP_EVENT_LLP_TIMEOUT; 2866 event_cb = true; 2867 break; 2868 case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR: 2869 params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR; 2870 event_cb = true; 2871 break; 2872 case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW: 2873 params.event = QED_IWARP_EVENT_CQ_OVERFLOW; 2874 event_cb = true; 2875 break; 2876 case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC: 2877 params.event = QED_IWARP_EVENT_QP_CATASTROPHIC; 2878 event_cb = true; 2879 break; 2880 case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR: 2881 params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR; 2882 event_cb = true; 2883 break; 2884 case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR: 2885 params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR; 2886 event_cb = true; 2887 break; 2888 case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED: 2889 params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED; 2890 event_cb = true; 2891 break; 2892 default: 2893 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 2894 "Unhandled exception received...fw_ret_code=%d\n", 2895 fw_ret_code); 2896 break; 2897 } 2898 2899 if (event_cb) { 2900 params.ep_context = ep; 2901 params.cm_info = &ep->cm_info; 2902 ep->event_cb(ep->cb_context, ¶ms); 2903 } 2904 } 2905 2906 static void 2907 qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn, 2908 struct qed_iwarp_ep *ep, u8 fw_return_code) 2909 { 2910 struct qed_iwarp_cm_event_params params; 2911 2912 memset(¶ms, 0, sizeof(params)); 2913 params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE; 2914 params.ep_context = ep; 2915 params.cm_info = &ep->cm_info; 2916 ep->state = QED_IWARP_EP_CLOSED; 2917 2918 switch (fw_return_code) { 2919 case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET: 2920 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 2921 "%s(0x%x) TCP connect got invalid packet\n", 2922 QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); 2923 params.status = -ECONNRESET; 2924 break; 2925 case IWARP_CONN_ERROR_TCP_CONNECTION_RST: 2926 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 2927 "%s(0x%x) TCP Connection Reset\n", 2928 QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); 2929 params.status = -ECONNRESET; 2930 break; 2931 case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT: 2932 DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n", 2933 QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); 2934 params.status = -EBUSY; 2935 break; 2936 case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER: 2937 DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n", 2938 QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); 2939 params.status = -ECONNREFUSED; 2940 break; 2941 case IWARP_CONN_ERROR_MPA_INVALID_PACKET: 2942 DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n", 2943 QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); 2944 params.status = -ECONNRESET; 2945 break; 2946 default: 2947 DP_ERR(p_hwfn, 2948 "%s(0x%x) Unexpected return code tcp connect: %d\n", 2949 QED_IWARP_CONNECT_MODE_STRING(ep), 2950 ep->tcp_cid, fw_return_code); 2951 params.status = -ECONNRESET; 2952 break; 2953 } 2954 2955 if (ep->connect_mode == TCP_CONNECT_PASSIVE) { 2956 ep->tcp_cid = QED_IWARP_INVALID_TCP_CID; 2957 qed_iwarp_return_ep(p_hwfn, ep); 2958 } else { 2959 ep->event_cb(ep->cb_context, ¶ms); 2960 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 2961 list_del(&ep->list_entry); 2962 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 2963 } 2964 } 2965 2966 void 2967 qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn, 2968 struct qed_iwarp_ep *ep, u8 fw_return_code) 2969 { 2970 u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle; 2971 2972 if (ep->connect_mode == TCP_CONNECT_PASSIVE) { 2973 /* Done with the SYN packet, post back to ll2 rx */ 2974 qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle); 2975 2976 ep->syn = NULL; 2977 2978 /* If connect failed - upper layer doesn't know about it */ 2979 if (fw_return_code == RDMA_RETURN_OK) 2980 qed_iwarp_mpa_received(p_hwfn, ep); 2981 else 2982 qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep, 2983 fw_return_code); 2984 } else { 2985 if (fw_return_code == RDMA_RETURN_OK) 2986 qed_iwarp_mpa_offload(p_hwfn, ep); 2987 else 2988 qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep, 2989 fw_return_code); 2990 } 2991 } 2992 2993 static inline bool 2994 qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) 2995 { 2996 if (!ep || (ep->sig != QED_EP_SIG)) { 2997 DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep); 2998 return false; 2999 } 3000 3001 return true; 3002 } 3003 3004 static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, 3005 u8 fw_event_code, u16 echo, 3006 union event_ring_data *data, 3007 u8 fw_return_code) 3008 { 3009 struct qed_rdma_events events = p_hwfn->p_rdma_info->events; 3010 struct regpair *fw_handle = &data->rdma_data.async_handle; 3011 struct qed_iwarp_ep *ep = NULL; 3012 u16 srq_offset; 3013 u16 srq_id; 3014 u16 cid; 3015 3016 ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi, 3017 fw_handle->lo); 3018 3019 switch (fw_event_code) { 3020 case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE: 3021 /* Async completion after TCP 3-way handshake */ 3022 if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) 3023 return -EINVAL; 3024 DP_VERBOSE(p_hwfn, 3025 QED_MSG_RDMA, 3026 "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n", 3027 ep->tcp_cid, fw_return_code); 3028 qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code); 3029 break; 3030 case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED: 3031 if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) 3032 return -EINVAL; 3033 DP_VERBOSE(p_hwfn, 3034 QED_MSG_RDMA, 3035 "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n", 3036 ep->cid, fw_return_code); 3037 qed_iwarp_exception_received(p_hwfn, ep, fw_return_code); 3038 break; 3039 case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE: 3040 /* Async completion for Close Connection ramrod */ 3041 if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) 3042 return -EINVAL; 3043 DP_VERBOSE(p_hwfn, 3044 QED_MSG_RDMA, 3045 "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n", 3046 ep->cid, fw_return_code); 3047 qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code); 3048 break; 3049 case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED: 3050 /* Async event for active side only */ 3051 if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) 3052 return -EINVAL; 3053 DP_VERBOSE(p_hwfn, 3054 QED_MSG_RDMA, 3055 "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n", 3056 ep->cid, fw_return_code); 3057 qed_iwarp_mpa_reply_arrived(p_hwfn, ep); 3058 break; 3059 case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE: 3060 if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) 3061 return -EINVAL; 3062 DP_VERBOSE(p_hwfn, 3063 QED_MSG_RDMA, 3064 "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n", 3065 ep->cid, fw_return_code); 3066 qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code); 3067 break; 3068 case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED: 3069 cid = (u16)le32_to_cpu(fw_handle->lo); 3070 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 3071 "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid); 3072 qed_iwarp_cid_cleaned(p_hwfn, cid); 3073 3074 break; 3075 case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY: 3076 DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n"); 3077 srq_offset = p_hwfn->p_rdma_info->srq_id_offset; 3078 /* FW assigns value that is no greater than u16 */ 3079 srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset; 3080 events.affiliated_event(events.context, 3081 QED_IWARP_EVENT_SRQ_EMPTY, 3082 &srq_id); 3083 break; 3084 case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT: 3085 DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n"); 3086 srq_offset = p_hwfn->p_rdma_info->srq_id_offset; 3087 /* FW assigns value that is no greater than u16 */ 3088 srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset; 3089 events.affiliated_event(events.context, 3090 QED_IWARP_EVENT_SRQ_LIMIT, 3091 &srq_id); 3092 break; 3093 case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW: 3094 DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n"); 3095 3096 p_hwfn->p_rdma_info->events.affiliated_event( 3097 p_hwfn->p_rdma_info->events.context, 3098 QED_IWARP_EVENT_CQ_OVERFLOW, 3099 (void *)fw_handle); 3100 break; 3101 default: 3102 DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n", 3103 fw_event_code); 3104 return -EINVAL; 3105 } 3106 return 0; 3107 } 3108 3109 int 3110 qed_iwarp_create_listen(void *rdma_cxt, 3111 struct qed_iwarp_listen_in *iparams, 3112 struct qed_iwarp_listen_out *oparams) 3113 { 3114 struct qed_hwfn *p_hwfn = rdma_cxt; 3115 struct qed_iwarp_listener *listener; 3116 3117 listener = kzalloc(sizeof(*listener), GFP_KERNEL); 3118 if (!listener) 3119 return -ENOMEM; 3120 3121 listener->ip_version = iparams->ip_version; 3122 memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr)); 3123 listener->port = iparams->port; 3124 listener->vlan = iparams->vlan; 3125 3126 listener->event_cb = iparams->event_cb; 3127 listener->cb_context = iparams->cb_context; 3128 listener->max_backlog = iparams->max_backlog; 3129 oparams->handle = listener; 3130 3131 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 3132 list_add_tail(&listener->list_entry, 3133 &p_hwfn->p_rdma_info->iwarp.listen_list); 3134 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 3135 3136 DP_VERBOSE(p_hwfn, 3137 QED_MSG_RDMA, 3138 "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n", 3139 listener->event_cb, 3140 listener, 3141 listener->ip_addr[0], 3142 listener->ip_addr[1], 3143 listener->ip_addr[2], 3144 listener->ip_addr[3], listener->port, listener->vlan); 3145 3146 return 0; 3147 } 3148 3149 int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle) 3150 { 3151 struct qed_iwarp_listener *listener = handle; 3152 struct qed_hwfn *p_hwfn = rdma_cxt; 3153 3154 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle); 3155 3156 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 3157 list_del(&listener->list_entry); 3158 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 3159 3160 kfree(listener); 3161 3162 return 0; 3163 } 3164 3165 int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams) 3166 { 3167 struct qed_hwfn *p_hwfn = rdma_cxt; 3168 struct qed_sp_init_data init_data; 3169 struct qed_spq_entry *p_ent; 3170 struct qed_iwarp_ep *ep; 3171 struct qed_rdma_qp *qp; 3172 int rc; 3173 3174 ep = iparams->ep_context; 3175 if (!ep) { 3176 DP_ERR(p_hwfn, "Ep Context receive in send_rtr is NULL\n"); 3177 return -EINVAL; 3178 } 3179 3180 qp = ep->qp; 3181 3182 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n", 3183 qp->icid, ep->tcp_cid); 3184 3185 memset(&init_data, 0, sizeof(init_data)); 3186 init_data.cid = qp->icid; 3187 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 3188 init_data.comp_mode = QED_SPQ_MODE_CB; 3189 3190 rc = qed_sp_init_request(p_hwfn, &p_ent, 3191 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR, 3192 PROTOCOLID_IWARP, &init_data); 3193 3194 if (rc) 3195 return rc; 3196 3197 rc = qed_spq_post(p_hwfn, p_ent, NULL); 3198 3199 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc); 3200 3201 return rc; 3202 } 3203 3204 void 3205 qed_iwarp_query_qp(struct qed_rdma_qp *qp, 3206 struct qed_rdma_query_qp_out_params *out_params) 3207 { 3208 out_params->state = qed_iwarp2roce_state(qp->iwarp_state); 3209 } 3210