1 /* QLogic qed NIC Driver 2 * Copyright (c) 2015-2017 QLogic Corporation 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and /or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 #include <linux/if_ether.h> 33 #include <linux/if_vlan.h> 34 #include <linux/ip.h> 35 #include <linux/ipv6.h> 36 #include <linux/spinlock.h> 37 #include <linux/tcp.h> 38 #include "qed_cxt.h" 39 #include "qed_hw.h" 40 #include "qed_ll2.h" 41 #include "qed_rdma.h" 42 #include "qed_reg_addr.h" 43 #include "qed_sp.h" 44 #include "qed_ooo.h" 45 46 #define QED_IWARP_ORD_DEFAULT 32 47 #define QED_IWARP_IRD_DEFAULT 32 48 #define QED_IWARP_MAX_FW_MSS 4120 49 50 #define QED_EP_SIG 0xecabcdef 51 52 struct mpa_v2_hdr { 53 __be16 ird; 54 __be16 ord; 55 }; 56 57 #define MPA_V2_PEER2PEER_MODEL 0x8000 58 #define MPA_V2_SEND_RTR 0x4000 /* on ird */ 59 #define MPA_V2_READ_RTR 0x4000 /* on ord */ 60 #define MPA_V2_WRITE_RTR 0x8000 61 #define MPA_V2_IRD_ORD_MASK 0x3FFF 62 63 #define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED) 64 65 #define QED_IWARP_INVALID_TCP_CID 0xffffffff 66 #define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024) 67 #define QED_IWARP_RCV_WND_SIZE_MIN (0xffff) 68 #define TIMESTAMP_HEADER_SIZE (12) 69 #define QED_IWARP_MAX_FIN_RT_DEFAULT (2) 70 71 #define QED_IWARP_TS_EN BIT(0) 72 #define QED_IWARP_DA_EN BIT(1) 73 #define QED_IWARP_PARAM_CRC_NEEDED (1) 74 #define QED_IWARP_PARAM_P2P (1) 75 76 #define QED_IWARP_DEF_MAX_RT_TIME (0) 77 #define QED_IWARP_DEF_CWND_FACTOR (4) 78 #define QED_IWARP_DEF_KA_MAX_PROBE_CNT (5) 79 #define QED_IWARP_DEF_KA_TIMEOUT (1200000) /* 20 min */ 80 #define QED_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */ 81 82 static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, 83 u8 fw_event_code, u16 echo, 84 union event_ring_data *data, 85 u8 fw_return_code); 86 87 /* Override devinfo with iWARP specific values */ 88 void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn) 89 { 90 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; 91 92 dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE; 93 dev->max_qp = min_t(u32, 94 IWARP_MAX_QPS, 95 p_hwfn->p_rdma_info->num_qps) - 96 QED_IWARP_PREALLOC_CNT; 97 98 dev->max_cq = dev->max_qp; 99 100 dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT; 101 dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT; 102 } 103 104 void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 105 { 106 p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP; 107 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1); 108 p_hwfn->b_rdma_enabled_in_prs = true; 109 } 110 111 /* We have two cid maps, one for tcp which should be used only from passive 112 * syn processing and replacing a pre-allocated ep in the list. The second 113 * for active tcp and for QPs. 114 */ 115 static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid) 116 { 117 cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); 118 119 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 120 121 if (cid < QED_IWARP_PREALLOC_CNT) 122 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 123 cid); 124 else 125 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); 126 127 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 128 } 129 130 void 131 qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn, 132 struct iwarp_init_func_ramrod_data *p_ramrod) 133 { 134 p_ramrod->iwarp.ll2_ooo_q_index = 135 RESC_START(p_hwfn, QED_LL2_QUEUE) + 136 p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle; 137 138 p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT; 139 140 return; 141 } 142 143 static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid) 144 { 145 int rc; 146 147 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 148 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); 149 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 150 if (rc) { 151 DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n"); 152 return rc; 153 } 154 *cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); 155 156 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid); 157 if (rc) 158 qed_iwarp_cid_cleaned(p_hwfn, *cid); 159 160 return rc; 161 } 162 163 static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid) 164 { 165 cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); 166 167 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 168 qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid); 169 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 170 } 171 172 /* This function allocates a cid for passive tcp (called from syn receive) 173 * the reason it's separate from the regular cid allocation is because it 174 * is assured that these cids already have ilt allocated. They are preallocated 175 * to ensure that we won't need to allocate memory during syn processing 176 */ 177 static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid) 178 { 179 int rc; 180 181 spin_lock_bh(&p_hwfn->p_rdma_info->lock); 182 183 rc = qed_rdma_bmap_alloc_id(p_hwfn, 184 &p_hwfn->p_rdma_info->tcp_cid_map, cid); 185 186 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 187 188 if (rc) { 189 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 190 "can't allocate iwarp tcp cid max-count=%d\n", 191 p_hwfn->p_rdma_info->tcp_cid_map.max_count); 192 193 *cid = QED_IWARP_INVALID_TCP_CID; 194 return rc; 195 } 196 197 *cid += qed_cxt_get_proto_cid_start(p_hwfn, 198 p_hwfn->p_rdma_info->proto); 199 return 0; 200 } 201 202 int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn, 203 struct qed_rdma_qp *qp, 204 struct qed_rdma_create_qp_out_params *out_params) 205 { 206 struct iwarp_create_qp_ramrod_data *p_ramrod; 207 struct qed_sp_init_data init_data; 208 struct qed_spq_entry *p_ent; 209 u16 physical_queue; 210 u32 cid; 211 int rc; 212 213 qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 214 IWARP_SHARED_QUEUE_PAGE_SIZE, 215 &qp->shared_queue_phys_addr, 216 GFP_KERNEL); 217 if (!qp->shared_queue) 218 return -ENOMEM; 219 220 out_params->sq_pbl_virt = (u8 *)qp->shared_queue + 221 IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET; 222 out_params->sq_pbl_phys = qp->shared_queue_phys_addr + 223 IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET; 224 out_params->rq_pbl_virt = (u8 *)qp->shared_queue + 225 IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET; 226 out_params->rq_pbl_phys = qp->shared_queue_phys_addr + 227 IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET; 228 229 rc = qed_iwarp_alloc_cid(p_hwfn, &cid); 230 if (rc) 231 goto err1; 232 233 qp->icid = (u16)cid; 234 235 memset(&init_data, 0, sizeof(init_data)); 236 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 237 init_data.cid = qp->icid; 238 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 239 240 rc = qed_sp_init_request(p_hwfn, &p_ent, 241 IWARP_RAMROD_CMD_ID_CREATE_QP, 242 PROTOCOLID_IWARP, &init_data); 243 if (rc) 244 goto err2; 245 246 p_ramrod = &p_ent->ramrod.iwarp_create_qp; 247 248 SET_FIELD(p_ramrod->flags, 249 IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN, 250 qp->fmr_and_reserved_lkey); 251 252 SET_FIELD(p_ramrod->flags, 253 IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all); 254 255 SET_FIELD(p_ramrod->flags, 256 IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN, 257 qp->incoming_rdma_read_en); 258 259 SET_FIELD(p_ramrod->flags, 260 IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN, 261 qp->incoming_rdma_write_en); 262 263 SET_FIELD(p_ramrod->flags, 264 IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN, 265 qp->incoming_atomic_en); 266 267 SET_FIELD(p_ramrod->flags, 268 IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq); 269 270 p_ramrod->pd = qp->pd; 271 p_ramrod->sq_num_pages = qp->sq_num_pages; 272 p_ramrod->rq_num_pages = qp->rq_num_pages; 273 274 p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id); 275 p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid); 276 p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); 277 p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); 278 279 p_ramrod->cq_cid_for_sq = 280 cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id); 281 p_ramrod->cq_cid_for_rq = 282 cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id); 283 284 p_ramrod->dpi = cpu_to_le16(qp->dpi); 285 286 physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); 287 p_ramrod->physical_q0 = cpu_to_le16(physical_queue); 288 physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK); 289 p_ramrod->physical_q1 = cpu_to_le16(physical_queue); 290 291 rc = qed_spq_post(p_hwfn, p_ent, NULL); 292 if (rc) 293 goto err2; 294 295 return rc; 296 297 err2: 298 qed_iwarp_cid_cleaned(p_hwfn, cid); 299 err1: 300 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 301 IWARP_SHARED_QUEUE_PAGE_SIZE, 302 qp->shared_queue, qp->shared_queue_phys_addr); 303 304 return rc; 305 } 306 307 static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) 308 { 309 struct iwarp_modify_qp_ramrod_data *p_ramrod; 310 struct qed_sp_init_data init_data; 311 struct qed_spq_entry *p_ent; 312 int rc; 313 314 /* Get SPQ entry */ 315 memset(&init_data, 0, sizeof(init_data)); 316 init_data.cid = qp->icid; 317 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 318 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 319 320 rc = qed_sp_init_request(p_hwfn, &p_ent, 321 IWARP_RAMROD_CMD_ID_MODIFY_QP, 322 p_hwfn->p_rdma_info->proto, &init_data); 323 if (rc) 324 return rc; 325 326 p_ramrod = &p_ent->ramrod.iwarp_modify_qp; 327 SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, 328 0x1); 329 if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING) 330 p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING; 331 else 332 p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR; 333 334 rc = qed_spq_post(p_hwfn, p_ent, NULL); 335 336 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x)rc=%d\n", qp->icid, rc); 337 338 return rc; 339 } 340 341 enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state) 342 { 343 switch (state) { 344 case QED_ROCE_QP_STATE_RESET: 345 case QED_ROCE_QP_STATE_INIT: 346 case QED_ROCE_QP_STATE_RTR: 347 return QED_IWARP_QP_STATE_IDLE; 348 case QED_ROCE_QP_STATE_RTS: 349 return QED_IWARP_QP_STATE_RTS; 350 case QED_ROCE_QP_STATE_SQD: 351 return QED_IWARP_QP_STATE_CLOSING; 352 case QED_ROCE_QP_STATE_ERR: 353 return QED_IWARP_QP_STATE_ERROR; 354 case QED_ROCE_QP_STATE_SQE: 355 return QED_IWARP_QP_STATE_TERMINATE; 356 default: 357 return QED_IWARP_QP_STATE_ERROR; 358 } 359 } 360 361 static enum qed_roce_qp_state 362 qed_iwarp2roce_state(enum qed_iwarp_qp_state state) 363 { 364 switch (state) { 365 case QED_IWARP_QP_STATE_IDLE: 366 return QED_ROCE_QP_STATE_INIT; 367 case QED_IWARP_QP_STATE_RTS: 368 return QED_ROCE_QP_STATE_RTS; 369 case QED_IWARP_QP_STATE_TERMINATE: 370 return QED_ROCE_QP_STATE_SQE; 371 case QED_IWARP_QP_STATE_CLOSING: 372 return QED_ROCE_QP_STATE_SQD; 373 case QED_IWARP_QP_STATE_ERROR: 374 return QED_ROCE_QP_STATE_ERR; 375 default: 376 return QED_ROCE_QP_STATE_ERR; 377 } 378 } 379 380 const static char *iwarp_state_names[] = { 381 "IDLE", 382 "RTS", 383 "TERMINATE", 384 "CLOSING", 385 "ERROR", 386 }; 387 388 int 389 qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn, 390 struct qed_rdma_qp *qp, 391 enum qed_iwarp_qp_state new_state, bool internal) 392 { 393 enum qed_iwarp_qp_state prev_iw_state; 394 bool modify_fw = false; 395 int rc = 0; 396 397 /* modify QP can be called from upper-layer or as a result of async 398 * RST/FIN... therefore need to protect 399 */ 400 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock); 401 prev_iw_state = qp->iwarp_state; 402 403 if (prev_iw_state == new_state) { 404 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock); 405 return 0; 406 } 407 408 switch (prev_iw_state) { 409 case QED_IWARP_QP_STATE_IDLE: 410 switch (new_state) { 411 case QED_IWARP_QP_STATE_RTS: 412 qp->iwarp_state = QED_IWARP_QP_STATE_RTS; 413 break; 414 case QED_IWARP_QP_STATE_ERROR: 415 qp->iwarp_state = QED_IWARP_QP_STATE_ERROR; 416 if (!internal) 417 modify_fw = true; 418 break; 419 default: 420 break; 421 } 422 break; 423 case QED_IWARP_QP_STATE_RTS: 424 switch (new_state) { 425 case QED_IWARP_QP_STATE_CLOSING: 426 if (!internal) 427 modify_fw = true; 428 429 qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING; 430 break; 431 case QED_IWARP_QP_STATE_ERROR: 432 if (!internal) 433 modify_fw = true; 434 qp->iwarp_state = QED_IWARP_QP_STATE_ERROR; 435 break; 436 default: 437 break; 438 } 439 break; 440 case QED_IWARP_QP_STATE_ERROR: 441 switch (new_state) { 442 case QED_IWARP_QP_STATE_IDLE: 443 444 qp->iwarp_state = new_state; 445 break; 446 case QED_IWARP_QP_STATE_CLOSING: 447 /* could happen due to race... do nothing.... */ 448 break; 449 default: 450 rc = -EINVAL; 451 } 452 break; 453 case QED_IWARP_QP_STATE_TERMINATE: 454 case QED_IWARP_QP_STATE_CLOSING: 455 qp->iwarp_state = new_state; 456 break; 457 default: 458 break; 459 } 460 461 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n", 462 qp->icid, 463 iwarp_state_names[prev_iw_state], 464 iwarp_state_names[qp->iwarp_state], 465 internal ? "internal" : ""); 466 467 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock); 468 469 if (modify_fw) 470 rc = qed_iwarp_modify_fw(p_hwfn, qp); 471 472 return rc; 473 } 474 475 int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) 476 { 477 struct qed_sp_init_data init_data; 478 struct qed_spq_entry *p_ent; 479 int rc; 480 481 /* Get SPQ entry */ 482 memset(&init_data, 0, sizeof(init_data)); 483 init_data.cid = qp->icid; 484 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 485 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 486 487 rc = qed_sp_init_request(p_hwfn, &p_ent, 488 IWARP_RAMROD_CMD_ID_DESTROY_QP, 489 p_hwfn->p_rdma_info->proto, &init_data); 490 if (rc) 491 return rc; 492 493 rc = qed_spq_post(p_hwfn, p_ent, NULL); 494 495 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc); 496 497 return rc; 498 } 499 500 static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn, 501 struct qed_iwarp_ep *ep, 502 bool remove_from_active_list) 503 { 504 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 505 sizeof(*ep->ep_buffer_virt), 506 ep->ep_buffer_virt, ep->ep_buffer_phys); 507 508 if (remove_from_active_list) { 509 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 510 list_del(&ep->list_entry); 511 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 512 } 513 514 if (ep->qp) 515 ep->qp->ep = NULL; 516 517 kfree(ep); 518 } 519 520 int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) 521 { 522 struct qed_iwarp_ep *ep = qp->ep; 523 int wait_count = 0; 524 int rc = 0; 525 526 if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) { 527 rc = qed_iwarp_modify_qp(p_hwfn, qp, 528 QED_IWARP_QP_STATE_ERROR, false); 529 if (rc) 530 return rc; 531 } 532 533 /* Make sure ep is closed before returning and freeing memory. */ 534 if (ep) { 535 while (READ_ONCE(ep->state) != QED_IWARP_EP_CLOSED && 536 wait_count++ < 200) 537 msleep(100); 538 539 if (ep->state != QED_IWARP_EP_CLOSED) 540 DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n", 541 ep->state); 542 543 qed_iwarp_destroy_ep(p_hwfn, ep, false); 544 } 545 546 rc = qed_iwarp_fw_destroy(p_hwfn, qp); 547 548 if (qp->shared_queue) 549 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 550 IWARP_SHARED_QUEUE_PAGE_SIZE, 551 qp->shared_queue, qp->shared_queue_phys_addr); 552 553 return rc; 554 } 555 556 static int 557 qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out) 558 { 559 struct qed_iwarp_ep *ep; 560 int rc; 561 562 ep = kzalloc(sizeof(*ep), GFP_KERNEL); 563 if (!ep) 564 return -ENOMEM; 565 566 ep->state = QED_IWARP_EP_INIT; 567 568 ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 569 sizeof(*ep->ep_buffer_virt), 570 &ep->ep_buffer_phys, 571 GFP_KERNEL); 572 if (!ep->ep_buffer_virt) { 573 rc = -ENOMEM; 574 goto err; 575 } 576 577 ep->sig = QED_EP_SIG; 578 579 *ep_out = ep; 580 581 return 0; 582 583 err: 584 kfree(ep); 585 return rc; 586 } 587 588 static void 589 qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn, 590 struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod) 591 { 592 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n", 593 p_tcp_ramrod->tcp.local_mac_addr_lo, 594 p_tcp_ramrod->tcp.local_mac_addr_mid, 595 p_tcp_ramrod->tcp.local_mac_addr_hi, 596 p_tcp_ramrod->tcp.remote_mac_addr_lo, 597 p_tcp_ramrod->tcp.remote_mac_addr_mid, 598 p_tcp_ramrod->tcp.remote_mac_addr_hi); 599 600 if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) { 601 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 602 "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n", 603 p_tcp_ramrod->tcp.local_ip, 604 p_tcp_ramrod->tcp.local_port, 605 p_tcp_ramrod->tcp.remote_ip, 606 p_tcp_ramrod->tcp.remote_port, 607 p_tcp_ramrod->tcp.vlan_id); 608 } else { 609 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 610 "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n", 611 p_tcp_ramrod->tcp.local_ip, 612 p_tcp_ramrod->tcp.local_port, 613 p_tcp_ramrod->tcp.remote_ip, 614 p_tcp_ramrod->tcp.remote_port, 615 p_tcp_ramrod->tcp.vlan_id); 616 } 617 618 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 619 "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n", 620 p_tcp_ramrod->tcp.flow_label, 621 p_tcp_ramrod->tcp.ttl, 622 p_tcp_ramrod->tcp.tos_or_tc, 623 p_tcp_ramrod->tcp.mss, 624 p_tcp_ramrod->tcp.rcv_wnd_scale, 625 p_tcp_ramrod->tcp.connect_mode, 626 p_tcp_ramrod->tcp.flags); 627 628 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n", 629 p_tcp_ramrod->tcp.syn_ip_payload_length, 630 p_tcp_ramrod->tcp.syn_phy_addr_lo, 631 p_tcp_ramrod->tcp.syn_phy_addr_hi); 632 } 633 634 static int 635 qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) 636 { 637 struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; 638 struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod; 639 struct tcp_offload_params_opt2 *tcp; 640 struct qed_sp_init_data init_data; 641 struct qed_spq_entry *p_ent; 642 dma_addr_t async_output_phys; 643 dma_addr_t in_pdata_phys; 644 u16 physical_q; 645 u8 tcp_flags; 646 int rc; 647 int i; 648 649 memset(&init_data, 0, sizeof(init_data)); 650 init_data.cid = ep->tcp_cid; 651 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 652 if (ep->connect_mode == TCP_CONNECT_PASSIVE) 653 init_data.comp_mode = QED_SPQ_MODE_CB; 654 else 655 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 656 657 rc = qed_sp_init_request(p_hwfn, &p_ent, 658 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD, 659 PROTOCOLID_IWARP, &init_data); 660 if (rc) 661 return rc; 662 663 p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload; 664 665 in_pdata_phys = ep->ep_buffer_phys + 666 offsetof(struct qed_iwarp_ep_memory, in_pdata); 667 DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr, 668 in_pdata_phys); 669 670 p_tcp_ramrod->iwarp.incoming_ulp_buffer.len = 671 cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata)); 672 673 async_output_phys = ep->ep_buffer_phys + 674 offsetof(struct qed_iwarp_ep_memory, async_output); 675 DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf, 676 async_output_phys); 677 678 p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep)); 679 p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep)); 680 681 physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); 682 p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q); 683 physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK); 684 p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q); 685 p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev; 686 687 tcp = &p_tcp_ramrod->tcp; 688 qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi, 689 &tcp->remote_mac_addr_mid, 690 &tcp->remote_mac_addr_lo, ep->remote_mac_addr); 691 qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid, 692 &tcp->local_mac_addr_lo, ep->local_mac_addr); 693 694 tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan); 695 696 tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags; 697 tcp->flags = 0; 698 SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN, 699 !!(tcp_flags & QED_IWARP_TS_EN)); 700 701 SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN, 702 !!(tcp_flags & QED_IWARP_DA_EN)); 703 704 tcp->ip_version = ep->cm_info.ip_version; 705 706 for (i = 0; i < 4; i++) { 707 tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]); 708 tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]); 709 } 710 711 tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port); 712 tcp->local_port = cpu_to_le16(ep->cm_info.local_port); 713 tcp->mss = cpu_to_le16(ep->mss); 714 tcp->flow_label = 0; 715 tcp->ttl = 0x40; 716 tcp->tos_or_tc = 0; 717 718 tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME; 719 tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss; 720 tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT; 721 tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT; 722 tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL; 723 724 tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale; 725 tcp->connect_mode = ep->connect_mode; 726 727 if (ep->connect_mode == TCP_CONNECT_PASSIVE) { 728 tcp->syn_ip_payload_length = 729 cpu_to_le16(ep->syn_ip_payload_length); 730 tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr); 731 tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr); 732 } 733 734 qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod); 735 736 rc = qed_spq_post(p_hwfn, p_ent, NULL); 737 738 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 739 "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc); 740 741 return rc; 742 } 743 744 static void 745 qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) 746 { 747 struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; 748 struct qed_iwarp_cm_event_params params; 749 struct mpa_v2_hdr *mpa_v2; 750 union async_output *async_data; 751 u16 mpa_ord, mpa_ird; 752 u8 mpa_hdr_size = 0; 753 u8 mpa_rev; 754 755 async_data = &ep->ep_buffer_virt->async_output; 756 757 mpa_rev = async_data->mpa_request.mpa_handshake_mode; 758 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 759 "private_data_len=%x handshake_mode=%x private_data=(%x)\n", 760 async_data->mpa_request.ulp_data_len, 761 mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata))); 762 763 if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) { 764 /* Read ord/ird values from private data buffer */ 765 mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata; 766 mpa_hdr_size = sizeof(*mpa_v2); 767 768 mpa_ord = ntohs(mpa_v2->ord); 769 mpa_ird = ntohs(mpa_v2->ird); 770 771 /* Temprary store in cm_info incoming ord/ird requested, later 772 * replace with negotiated value during accept 773 */ 774 ep->cm_info.ord = (u8)min_t(u16, 775 (mpa_ord & MPA_V2_IRD_ORD_MASK), 776 QED_IWARP_ORD_DEFAULT); 777 778 ep->cm_info.ird = (u8)min_t(u16, 779 (mpa_ird & MPA_V2_IRD_ORD_MASK), 780 QED_IWARP_IRD_DEFAULT); 781 782 /* Peer2Peer negotiation */ 783 ep->rtr_type = MPA_RTR_TYPE_NONE; 784 if (mpa_ird & MPA_V2_PEER2PEER_MODEL) { 785 if (mpa_ord & MPA_V2_WRITE_RTR) 786 ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE; 787 788 if (mpa_ord & MPA_V2_READ_RTR) 789 ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ; 790 791 if (mpa_ird & MPA_V2_SEND_RTR) 792 ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND; 793 794 ep->rtr_type &= iwarp_info->rtr_type; 795 796 /* if we're left with no match send our capabilities */ 797 if (ep->rtr_type == MPA_RTR_TYPE_NONE) 798 ep->rtr_type = iwarp_info->rtr_type; 799 } 800 801 ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED; 802 } else { 803 ep->cm_info.ord = QED_IWARP_ORD_DEFAULT; 804 ep->cm_info.ird = QED_IWARP_IRD_DEFAULT; 805 ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC; 806 } 807 808 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 809 "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n", 810 mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type, 811 async_data->mpa_request.ulp_data_len, mpa_hdr_size); 812 813 /* Strip mpa v2 hdr from private data before sending to upper layer */ 814 ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size; 815 816 ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len - 817 mpa_hdr_size; 818 819 params.event = QED_IWARP_EVENT_MPA_REQUEST; 820 params.cm_info = &ep->cm_info; 821 params.ep_context = ep; 822 params.status = 0; 823 824 ep->state = QED_IWARP_EP_MPA_REQ_RCVD; 825 ep->event_cb(ep->cb_context, ¶ms); 826 } 827 828 static int 829 qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) 830 { 831 struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod; 832 struct qed_iwarp_info *iwarp_info; 833 struct qed_sp_init_data init_data; 834 dma_addr_t async_output_phys; 835 struct qed_spq_entry *p_ent; 836 dma_addr_t out_pdata_phys; 837 dma_addr_t in_pdata_phys; 838 struct qed_rdma_qp *qp; 839 bool reject; 840 int rc; 841 842 if (!ep) 843 return -EINVAL; 844 845 qp = ep->qp; 846 reject = !qp; 847 848 memset(&init_data, 0, sizeof(init_data)); 849 init_data.cid = reject ? ep->tcp_cid : qp->icid; 850 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 851 852 if (ep->connect_mode == TCP_CONNECT_ACTIVE) 853 init_data.comp_mode = QED_SPQ_MODE_CB; 854 else 855 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 856 857 rc = qed_sp_init_request(p_hwfn, &p_ent, 858 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD, 859 PROTOCOLID_IWARP, &init_data); 860 if (rc) 861 return rc; 862 863 p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload; 864 out_pdata_phys = ep->ep_buffer_phys + 865 offsetof(struct qed_iwarp_ep_memory, out_pdata); 866 DMA_REGPAIR_LE(p_mpa_ramrod->common.outgoing_ulp_buffer.addr, 867 out_pdata_phys); 868 p_mpa_ramrod->common.outgoing_ulp_buffer.len = 869 ep->cm_info.private_data_len; 870 p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed; 871 872 p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord; 873 p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird; 874 875 p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid; 876 877 in_pdata_phys = ep->ep_buffer_phys + 878 offsetof(struct qed_iwarp_ep_memory, in_pdata); 879 p_mpa_ramrod->tcp_connect_side = ep->connect_mode; 880 DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr, 881 in_pdata_phys); 882 p_mpa_ramrod->incoming_ulp_buffer.len = 883 cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata)); 884 async_output_phys = ep->ep_buffer_phys + 885 offsetof(struct qed_iwarp_ep_memory, async_output); 886 DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf, 887 async_output_phys); 888 p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep)); 889 p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep)); 890 891 if (!reject) { 892 DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr, 893 qp->shared_queue_phys_addr); 894 p_mpa_ramrod->stats_counter_id = 895 RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue; 896 } else { 897 p_mpa_ramrod->common.reject = 1; 898 } 899 900 iwarp_info = &p_hwfn->p_rdma_info->iwarp; 901 p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size; 902 p_mpa_ramrod->mode = ep->mpa_rev; 903 SET_FIELD(p_mpa_ramrod->rtr_pref, 904 IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type); 905 906 ep->state = QED_IWARP_EP_MPA_OFFLOADED; 907 rc = qed_spq_post(p_hwfn, p_ent, NULL); 908 if (!reject) 909 ep->cid = qp->icid; /* Now they're migrated. */ 910 911 DP_VERBOSE(p_hwfn, 912 QED_MSG_RDMA, 913 "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n", 914 reject ? 0xffff : qp->icid, 915 ep->tcp_cid, 916 rc, 917 ep->cm_info.ird, 918 ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject); 919 return rc; 920 } 921 922 static void 923 qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) 924 { 925 ep->state = QED_IWARP_EP_INIT; 926 if (ep->qp) 927 ep->qp->ep = NULL; 928 ep->qp = NULL; 929 memset(&ep->cm_info, 0, sizeof(ep->cm_info)); 930 931 if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) { 932 /* We don't care about the return code, it's ok if tcp_cid 933 * remains invalid...in this case we'll defer allocation 934 */ 935 qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid); 936 } 937 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 938 939 list_move_tail(&ep->list_entry, 940 &p_hwfn->p_rdma_info->iwarp.ep_free_list); 941 942 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 943 } 944 945 static void 946 qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) 947 { 948 struct mpa_v2_hdr *mpa_v2_params; 949 union async_output *async_data; 950 u16 mpa_ird, mpa_ord; 951 u8 mpa_data_size = 0; 952 953 if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) { 954 mpa_v2_params = 955 (struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata); 956 mpa_data_size = sizeof(*mpa_v2_params); 957 mpa_ird = ntohs(mpa_v2_params->ird); 958 mpa_ord = ntohs(mpa_v2_params->ord); 959 960 ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK); 961 ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK); 962 } 963 async_data = &ep->ep_buffer_virt->async_output; 964 965 ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size; 966 ep->cm_info.private_data_len = async_data->mpa_response.ulp_data_len - 967 mpa_data_size; 968 } 969 970 static void 971 qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) 972 { 973 struct qed_iwarp_cm_event_params params; 974 975 if (ep->connect_mode == TCP_CONNECT_PASSIVE) { 976 DP_NOTICE(p_hwfn, 977 "MPA reply event not expected on passive side!\n"); 978 return; 979 } 980 981 params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY; 982 983 qed_iwarp_parse_private_data(p_hwfn, ep); 984 985 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 986 "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n", 987 ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird); 988 989 params.cm_info = &ep->cm_info; 990 params.ep_context = ep; 991 params.status = 0; 992 993 ep->mpa_reply_processed = true; 994 995 ep->event_cb(ep->cb_context, ¶ms); 996 } 997 998 #define QED_IWARP_CONNECT_MODE_STRING(ep) \ 999 ((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active" 1000 1001 /* Called as a result of the event: 1002 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE 1003 */ 1004 static void 1005 qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn, 1006 struct qed_iwarp_ep *ep, u8 fw_return_code) 1007 { 1008 struct qed_iwarp_cm_event_params params; 1009 1010 if (ep->connect_mode == TCP_CONNECT_ACTIVE) 1011 params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE; 1012 else 1013 params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE; 1014 1015 if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed) 1016 qed_iwarp_parse_private_data(p_hwfn, ep); 1017 1018 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1019 "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n", 1020 ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird); 1021 1022 params.cm_info = &ep->cm_info; 1023 1024 params.ep_context = ep; 1025 1026 switch (fw_return_code) { 1027 case RDMA_RETURN_OK: 1028 ep->qp->max_rd_atomic_req = ep->cm_info.ord; 1029 ep->qp->max_rd_atomic_resp = ep->cm_info.ird; 1030 qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1); 1031 ep->state = QED_IWARP_EP_ESTABLISHED; 1032 params.status = 0; 1033 break; 1034 case IWARP_CONN_ERROR_MPA_TIMEOUT: 1035 DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n", 1036 QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1037 params.status = -EBUSY; 1038 break; 1039 case IWARP_CONN_ERROR_MPA_ERROR_REJECT: 1040 DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n", 1041 QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1042 params.status = -ECONNREFUSED; 1043 break; 1044 case IWARP_CONN_ERROR_MPA_RST: 1045 DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n", 1046 QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid, 1047 ep->tcp_cid); 1048 params.status = -ECONNRESET; 1049 break; 1050 case IWARP_CONN_ERROR_MPA_FIN: 1051 DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n", 1052 QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1053 params.status = -ECONNREFUSED; 1054 break; 1055 case IWARP_CONN_ERROR_MPA_INSUF_IRD: 1056 DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n", 1057 QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1058 params.status = -ECONNREFUSED; 1059 break; 1060 case IWARP_CONN_ERROR_MPA_RTR_MISMATCH: 1061 DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n", 1062 QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1063 params.status = -ECONNREFUSED; 1064 break; 1065 case IWARP_CONN_ERROR_MPA_INVALID_PACKET: 1066 DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n", 1067 QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1068 params.status = -ECONNREFUSED; 1069 break; 1070 case IWARP_CONN_ERROR_MPA_LOCAL_ERROR: 1071 DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n", 1072 QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1073 params.status = -ECONNREFUSED; 1074 break; 1075 case IWARP_CONN_ERROR_MPA_TERMINATE: 1076 DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n", 1077 QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1078 params.status = -ECONNREFUSED; 1079 break; 1080 default: 1081 params.status = -ECONNRESET; 1082 break; 1083 } 1084 1085 if (fw_return_code != RDMA_RETURN_OK) 1086 /* paired with READ_ONCE in destroy_qp */ 1087 smp_store_release(&ep->state, QED_IWARP_EP_CLOSED); 1088 1089 ep->event_cb(ep->cb_context, ¶ms); 1090 1091 /* on passive side, if there is no associated QP (REJECT) we need to 1092 * return the ep to the pool, (in the regular case we add an element 1093 * in accept instead of this one. 1094 * In both cases we need to remove it from the ep_list. 1095 */ 1096 if (fw_return_code != RDMA_RETURN_OK) { 1097 ep->tcp_cid = QED_IWARP_INVALID_TCP_CID; 1098 if ((ep->connect_mode == TCP_CONNECT_PASSIVE) && 1099 (!ep->qp)) { /* Rejected */ 1100 qed_iwarp_return_ep(p_hwfn, ep); 1101 } else { 1102 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1103 list_del(&ep->list_entry); 1104 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1105 } 1106 } 1107 } 1108 1109 static void 1110 qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn, 1111 struct qed_iwarp_ep *ep, u8 *mpa_data_size) 1112 { 1113 struct mpa_v2_hdr *mpa_v2_params; 1114 u16 mpa_ird, mpa_ord; 1115 1116 *mpa_data_size = 0; 1117 if (MPA_REV2(ep->mpa_rev)) { 1118 mpa_v2_params = 1119 (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata; 1120 *mpa_data_size = sizeof(*mpa_v2_params); 1121 1122 mpa_ird = (u16)ep->cm_info.ird; 1123 mpa_ord = (u16)ep->cm_info.ord; 1124 1125 if (ep->rtr_type != MPA_RTR_TYPE_NONE) { 1126 mpa_ird |= MPA_V2_PEER2PEER_MODEL; 1127 1128 if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND) 1129 mpa_ird |= MPA_V2_SEND_RTR; 1130 1131 if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE) 1132 mpa_ord |= MPA_V2_WRITE_RTR; 1133 1134 if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) 1135 mpa_ord |= MPA_V2_READ_RTR; 1136 } 1137 1138 mpa_v2_params->ird = htons(mpa_ird); 1139 mpa_v2_params->ord = htons(mpa_ord); 1140 1141 DP_VERBOSE(p_hwfn, 1142 QED_MSG_RDMA, 1143 "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n", 1144 mpa_v2_params->ird, 1145 mpa_v2_params->ord, 1146 *((u32 *)mpa_v2_params), 1147 mpa_ord & MPA_V2_IRD_ORD_MASK, 1148 mpa_ird & MPA_V2_IRD_ORD_MASK, 1149 !!(mpa_ird & MPA_V2_PEER2PEER_MODEL), 1150 !!(mpa_ird & MPA_V2_SEND_RTR), 1151 !!(mpa_ord & MPA_V2_WRITE_RTR), 1152 !!(mpa_ord & MPA_V2_READ_RTR)); 1153 } 1154 } 1155 1156 int qed_iwarp_connect(void *rdma_cxt, 1157 struct qed_iwarp_connect_in *iparams, 1158 struct qed_iwarp_connect_out *oparams) 1159 { 1160 struct qed_hwfn *p_hwfn = rdma_cxt; 1161 struct qed_iwarp_info *iwarp_info; 1162 struct qed_iwarp_ep *ep; 1163 u8 mpa_data_size = 0; 1164 u32 cid; 1165 int rc; 1166 1167 if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) || 1168 (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) { 1169 DP_NOTICE(p_hwfn, 1170 "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n", 1171 iparams->qp->icid, iparams->cm_info.ord, 1172 iparams->cm_info.ird); 1173 1174 return -EINVAL; 1175 } 1176 1177 iwarp_info = &p_hwfn->p_rdma_info->iwarp; 1178 1179 /* Allocate ep object */ 1180 rc = qed_iwarp_alloc_cid(p_hwfn, &cid); 1181 if (rc) 1182 return rc; 1183 1184 rc = qed_iwarp_create_ep(p_hwfn, &ep); 1185 if (rc) 1186 goto err; 1187 1188 ep->tcp_cid = cid; 1189 1190 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1191 list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list); 1192 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1193 1194 ep->qp = iparams->qp; 1195 ep->qp->ep = ep; 1196 ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr); 1197 ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr); 1198 memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info)); 1199 1200 ep->cm_info.ord = iparams->cm_info.ord; 1201 ep->cm_info.ird = iparams->cm_info.ird; 1202 1203 ep->rtr_type = iwarp_info->rtr_type; 1204 if (!iwarp_info->peer2peer) 1205 ep->rtr_type = MPA_RTR_TYPE_NONE; 1206 1207 if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0)) 1208 ep->cm_info.ord = 1; 1209 1210 ep->mpa_rev = iwarp_info->mpa_rev; 1211 1212 qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); 1213 1214 ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; 1215 ep->cm_info.private_data_len = iparams->cm_info.private_data_len + 1216 mpa_data_size; 1217 1218 memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, 1219 iparams->cm_info.private_data, 1220 iparams->cm_info.private_data_len); 1221 1222 ep->mss = iparams->mss; 1223 ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss); 1224 1225 ep->event_cb = iparams->event_cb; 1226 ep->cb_context = iparams->cb_context; 1227 ep->connect_mode = TCP_CONNECT_ACTIVE; 1228 1229 oparams->ep_context = ep; 1230 1231 rc = qed_iwarp_tcp_offload(p_hwfn, ep); 1232 1233 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n", 1234 iparams->qp->icid, ep->tcp_cid, rc); 1235 1236 if (rc) { 1237 qed_iwarp_destroy_ep(p_hwfn, ep, true); 1238 goto err; 1239 } 1240 1241 return rc; 1242 err: 1243 qed_iwarp_cid_cleaned(p_hwfn, cid); 1244 1245 return rc; 1246 } 1247 1248 static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn) 1249 { 1250 struct qed_iwarp_ep *ep = NULL; 1251 int rc; 1252 1253 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1254 1255 if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) { 1256 DP_ERR(p_hwfn, "Ep list is empty\n"); 1257 goto out; 1258 } 1259 1260 ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list, 1261 struct qed_iwarp_ep, list_entry); 1262 1263 /* in some cases we could have failed allocating a tcp cid when added 1264 * from accept / failure... retry now..this is not the common case. 1265 */ 1266 if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) { 1267 rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid); 1268 1269 /* if we fail we could look for another entry with a valid 1270 * tcp_cid, but since we don't expect to reach this anyway 1271 * it's not worth the handling 1272 */ 1273 if (rc) { 1274 ep->tcp_cid = QED_IWARP_INVALID_TCP_CID; 1275 ep = NULL; 1276 goto out; 1277 } 1278 } 1279 1280 list_del(&ep->list_entry); 1281 1282 out: 1283 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1284 return ep; 1285 } 1286 1287 #define QED_IWARP_MAX_CID_CLEAN_TIME 100 1288 #define QED_IWARP_MAX_NO_PROGRESS_CNT 5 1289 1290 /* This function waits for all the bits of a bmap to be cleared, as long as 1291 * there is progress ( i.e. the number of bits left to be cleared decreases ) 1292 * the function continues. 1293 */ 1294 static int 1295 qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap) 1296 { 1297 int prev_weight = 0; 1298 int wait_count = 0; 1299 int weight = 0; 1300 1301 weight = bitmap_weight(bmap->bitmap, bmap->max_count); 1302 prev_weight = weight; 1303 1304 while (weight) { 1305 msleep(QED_IWARP_MAX_CID_CLEAN_TIME); 1306 1307 weight = bitmap_weight(bmap->bitmap, bmap->max_count); 1308 1309 if (prev_weight == weight) { 1310 wait_count++; 1311 } else { 1312 prev_weight = weight; 1313 wait_count = 0; 1314 } 1315 1316 if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) { 1317 DP_NOTICE(p_hwfn, 1318 "%s bitmap wait timed out (%d cids pending)\n", 1319 bmap->name, weight); 1320 return -EBUSY; 1321 } 1322 } 1323 return 0; 1324 } 1325 1326 static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn) 1327 { 1328 int rc; 1329 int i; 1330 1331 rc = qed_iwarp_wait_cid_map_cleared(p_hwfn, 1332 &p_hwfn->p_rdma_info->tcp_cid_map); 1333 if (rc) 1334 return rc; 1335 1336 /* Now free the tcp cids from the main cid map */ 1337 for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++) 1338 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i); 1339 1340 /* Now wait for all cids to be completed */ 1341 return qed_iwarp_wait_cid_map_cleared(p_hwfn, 1342 &p_hwfn->p_rdma_info->cid_map); 1343 } 1344 1345 static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn) 1346 { 1347 struct qed_iwarp_ep *ep; 1348 1349 while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) { 1350 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1351 1352 ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list, 1353 struct qed_iwarp_ep, list_entry); 1354 1355 if (!ep) { 1356 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1357 break; 1358 } 1359 list_del(&ep->list_entry); 1360 1361 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1362 1363 if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID) 1364 qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid); 1365 1366 qed_iwarp_destroy_ep(p_hwfn, ep, false); 1367 } 1368 } 1369 1370 static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init) 1371 { 1372 struct qed_iwarp_ep *ep; 1373 int rc = 0; 1374 int count; 1375 u32 cid; 1376 int i; 1377 1378 count = init ? QED_IWARP_PREALLOC_CNT : 1; 1379 for (i = 0; i < count; i++) { 1380 rc = qed_iwarp_create_ep(p_hwfn, &ep); 1381 if (rc) 1382 return rc; 1383 1384 /* During initialization we allocate from the main pool, 1385 * afterwards we allocate only from the tcp_cid. 1386 */ 1387 if (init) { 1388 rc = qed_iwarp_alloc_cid(p_hwfn, &cid); 1389 if (rc) 1390 goto err; 1391 qed_iwarp_set_tcp_cid(p_hwfn, cid); 1392 } else { 1393 /* We don't care about the return code, it's ok if 1394 * tcp_cid remains invalid...in this case we'll 1395 * defer allocation 1396 */ 1397 qed_iwarp_alloc_tcp_cid(p_hwfn, &cid); 1398 } 1399 1400 ep->tcp_cid = cid; 1401 1402 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1403 list_add_tail(&ep->list_entry, 1404 &p_hwfn->p_rdma_info->iwarp.ep_free_list); 1405 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1406 } 1407 1408 return rc; 1409 1410 err: 1411 qed_iwarp_destroy_ep(p_hwfn, ep, false); 1412 1413 return rc; 1414 } 1415 1416 int qed_iwarp_alloc(struct qed_hwfn *p_hwfn) 1417 { 1418 int rc; 1419 1420 /* Allocate bitmap for tcp cid. These are used by passive side 1421 * to ensure it can allocate a tcp cid during dpc that was 1422 * pre-acquired and doesn't require dynamic allocation of ilt 1423 */ 1424 rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1425 QED_IWARP_PREALLOC_CNT, "TCP_CID"); 1426 if (rc) { 1427 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1428 "Failed to allocate tcp cid, rc = %d\n", rc); 1429 return rc; 1430 } 1431 1432 INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list); 1433 spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1434 1435 rc = qed_iwarp_prealloc_ep(p_hwfn, true); 1436 if (rc) 1437 return rc; 1438 1439 return qed_ooo_alloc(p_hwfn); 1440 } 1441 1442 void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn) 1443 { 1444 struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; 1445 1446 qed_ooo_free(p_hwfn); 1447 qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1); 1448 kfree(iwarp_info->mpa_bufs); 1449 kfree(iwarp_info->partial_fpdus); 1450 kfree(iwarp_info->mpa_intermediate_buf); 1451 } 1452 1453 int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams) 1454 { 1455 struct qed_hwfn *p_hwfn = rdma_cxt; 1456 struct qed_iwarp_ep *ep; 1457 u8 mpa_data_size = 0; 1458 int rc; 1459 1460 ep = iparams->ep_context; 1461 if (!ep) { 1462 DP_ERR(p_hwfn, "Ep Context receive in accept is NULL\n"); 1463 return -EINVAL; 1464 } 1465 1466 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n", 1467 iparams->qp->icid, ep->tcp_cid); 1468 1469 if ((iparams->ord > QED_IWARP_ORD_DEFAULT) || 1470 (iparams->ird > QED_IWARP_IRD_DEFAULT)) { 1471 DP_VERBOSE(p_hwfn, 1472 QED_MSG_RDMA, 1473 "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n", 1474 iparams->qp->icid, 1475 ep->tcp_cid, iparams->ord, iparams->ord); 1476 return -EINVAL; 1477 } 1478 1479 qed_iwarp_prealloc_ep(p_hwfn, false); 1480 1481 ep->cb_context = iparams->cb_context; 1482 ep->qp = iparams->qp; 1483 ep->qp->ep = ep; 1484 1485 if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) { 1486 /* Negotiate ord/ird: if upperlayer requested ord larger than 1487 * ird advertised by remote, we need to decrease our ord 1488 */ 1489 if (iparams->ord > ep->cm_info.ird) 1490 iparams->ord = ep->cm_info.ird; 1491 1492 if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && 1493 (iparams->ird == 0)) 1494 iparams->ird = 1; 1495 } 1496 1497 /* Update cm_info ord/ird to be negotiated values */ 1498 ep->cm_info.ord = iparams->ord; 1499 ep->cm_info.ird = iparams->ird; 1500 1501 qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); 1502 1503 ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; 1504 ep->cm_info.private_data_len = iparams->private_data_len + 1505 mpa_data_size; 1506 1507 memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, 1508 iparams->private_data, iparams->private_data_len); 1509 1510 rc = qed_iwarp_mpa_offload(p_hwfn, ep); 1511 if (rc) 1512 qed_iwarp_modify_qp(p_hwfn, 1513 iparams->qp, QED_IWARP_QP_STATE_ERROR, 1); 1514 1515 return rc; 1516 } 1517 1518 int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams) 1519 { 1520 struct qed_hwfn *p_hwfn = rdma_cxt; 1521 struct qed_iwarp_ep *ep; 1522 u8 mpa_data_size = 0; 1523 1524 ep = iparams->ep_context; 1525 if (!ep) { 1526 DP_ERR(p_hwfn, "Ep Context receive in reject is NULL\n"); 1527 return -EINVAL; 1528 } 1529 1530 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid); 1531 1532 ep->cb_context = iparams->cb_context; 1533 ep->qp = NULL; 1534 1535 qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); 1536 1537 ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; 1538 ep->cm_info.private_data_len = iparams->private_data_len + 1539 mpa_data_size; 1540 1541 memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, 1542 iparams->private_data, iparams->private_data_len); 1543 1544 return qed_iwarp_mpa_offload(p_hwfn, ep); 1545 } 1546 1547 static void 1548 qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn, 1549 struct qed_iwarp_cm_info *cm_info) 1550 { 1551 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n", 1552 cm_info->ip_version); 1553 1554 if (cm_info->ip_version == QED_TCP_IPV4) 1555 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1556 "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n", 1557 cm_info->remote_ip, cm_info->remote_port, 1558 cm_info->local_ip, cm_info->local_port, 1559 cm_info->vlan); 1560 else 1561 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1562 "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n", 1563 cm_info->remote_ip, cm_info->remote_port, 1564 cm_info->local_ip, cm_info->local_port, 1565 cm_info->vlan); 1566 1567 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1568 "private_data_len = %x ord = %d, ird = %d\n", 1569 cm_info->private_data_len, cm_info->ord, cm_info->ird); 1570 } 1571 1572 static int 1573 qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn, 1574 struct qed_iwarp_ll2_buff *buf, u8 handle) 1575 { 1576 int rc; 1577 1578 rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr, 1579 (u16)buf->buff_size, buf, 1); 1580 if (rc) { 1581 DP_NOTICE(p_hwfn, 1582 "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n", 1583 rc, handle); 1584 dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size, 1585 buf->data, buf->data_phys_addr); 1586 kfree(buf); 1587 } 1588 1589 return rc; 1590 } 1591 1592 static bool 1593 qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info) 1594 { 1595 struct qed_iwarp_ep *ep = NULL; 1596 bool found = false; 1597 1598 list_for_each_entry(ep, 1599 &p_hwfn->p_rdma_info->iwarp.ep_list, 1600 list_entry) { 1601 if ((ep->cm_info.local_port == cm_info->local_port) && 1602 (ep->cm_info.remote_port == cm_info->remote_port) && 1603 (ep->cm_info.vlan == cm_info->vlan) && 1604 !memcmp(&ep->cm_info.local_ip, cm_info->local_ip, 1605 sizeof(cm_info->local_ip)) && 1606 !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip, 1607 sizeof(cm_info->remote_ip))) { 1608 found = true; 1609 break; 1610 } 1611 } 1612 1613 if (found) { 1614 DP_NOTICE(p_hwfn, 1615 "SYN received on active connection - dropping\n"); 1616 qed_iwarp_print_cm_info(p_hwfn, cm_info); 1617 1618 return true; 1619 } 1620 1621 return false; 1622 } 1623 1624 static struct qed_iwarp_listener * 1625 qed_iwarp_get_listener(struct qed_hwfn *p_hwfn, 1626 struct qed_iwarp_cm_info *cm_info) 1627 { 1628 struct qed_iwarp_listener *listener = NULL; 1629 static const u32 ip_zero[4] = { 0, 0, 0, 0 }; 1630 bool found = false; 1631 1632 qed_iwarp_print_cm_info(p_hwfn, cm_info); 1633 1634 list_for_each_entry(listener, 1635 &p_hwfn->p_rdma_info->iwarp.listen_list, 1636 list_entry) { 1637 if (listener->port == cm_info->local_port) { 1638 if (!memcmp(listener->ip_addr, 1639 ip_zero, sizeof(ip_zero))) { 1640 found = true; 1641 break; 1642 } 1643 1644 if (!memcmp(listener->ip_addr, 1645 cm_info->local_ip, 1646 sizeof(cm_info->local_ip)) && 1647 (listener->vlan == cm_info->vlan)) { 1648 found = true; 1649 break; 1650 } 1651 } 1652 } 1653 1654 if (found) { 1655 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n", 1656 listener); 1657 return listener; 1658 } 1659 1660 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n"); 1661 return NULL; 1662 } 1663 1664 static int 1665 qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, 1666 struct qed_iwarp_cm_info *cm_info, 1667 void *buf, 1668 u8 *remote_mac_addr, 1669 u8 *local_mac_addr, 1670 int *payload_len, int *tcp_start_offset) 1671 { 1672 struct vlan_ethhdr *vethh; 1673 bool vlan_valid = false; 1674 struct ipv6hdr *ip6h; 1675 struct ethhdr *ethh; 1676 struct tcphdr *tcph; 1677 struct iphdr *iph; 1678 int eth_hlen; 1679 int ip_hlen; 1680 int eth_type; 1681 int i; 1682 1683 ethh = buf; 1684 eth_type = ntohs(ethh->h_proto); 1685 if (eth_type == ETH_P_8021Q) { 1686 vlan_valid = true; 1687 vethh = (struct vlan_ethhdr *)ethh; 1688 cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK; 1689 eth_type = ntohs(vethh->h_vlan_encapsulated_proto); 1690 } 1691 1692 eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0); 1693 1694 if (!ether_addr_equal(ethh->h_dest, 1695 p_hwfn->p_rdma_info->iwarp.mac_addr)) { 1696 DP_VERBOSE(p_hwfn, 1697 QED_MSG_RDMA, 1698 "Got unexpected mac %pM instead of %pM\n", 1699 ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr); 1700 return -EINVAL; 1701 } 1702 1703 ether_addr_copy(remote_mac_addr, ethh->h_source); 1704 ether_addr_copy(local_mac_addr, ethh->h_dest); 1705 1706 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n", 1707 eth_type, ethh->h_source); 1708 1709 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n", 1710 eth_hlen, ethh->h_dest); 1711 1712 iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen); 1713 1714 if (eth_type == ETH_P_IP) { 1715 if (iph->protocol != IPPROTO_TCP) { 1716 DP_NOTICE(p_hwfn, 1717 "Unexpected ip protocol on ll2 %x\n", 1718 iph->protocol); 1719 return -EINVAL; 1720 } 1721 1722 cm_info->local_ip[0] = ntohl(iph->daddr); 1723 cm_info->remote_ip[0] = ntohl(iph->saddr); 1724 cm_info->ip_version = QED_TCP_IPV4; 1725 1726 ip_hlen = (iph->ihl) * sizeof(u32); 1727 *payload_len = ntohs(iph->tot_len) - ip_hlen; 1728 } else if (eth_type == ETH_P_IPV6) { 1729 ip6h = (struct ipv6hdr *)iph; 1730 1731 if (ip6h->nexthdr != IPPROTO_TCP) { 1732 DP_NOTICE(p_hwfn, 1733 "Unexpected ip protocol on ll2 %x\n", 1734 iph->protocol); 1735 return -EINVAL; 1736 } 1737 1738 for (i = 0; i < 4; i++) { 1739 cm_info->local_ip[i] = 1740 ntohl(ip6h->daddr.in6_u.u6_addr32[i]); 1741 cm_info->remote_ip[i] = 1742 ntohl(ip6h->saddr.in6_u.u6_addr32[i]); 1743 } 1744 cm_info->ip_version = QED_TCP_IPV6; 1745 1746 ip_hlen = sizeof(*ip6h); 1747 *payload_len = ntohs(ip6h->payload_len); 1748 } else { 1749 DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type); 1750 return -EINVAL; 1751 } 1752 1753 tcph = (struct tcphdr *)((u8 *)iph + ip_hlen); 1754 1755 if (!tcph->syn) { 1756 DP_NOTICE(p_hwfn, 1757 "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n", 1758 iph->ihl, tcph->source, tcph->dest); 1759 return -EINVAL; 1760 } 1761 1762 cm_info->local_port = ntohs(tcph->dest); 1763 cm_info->remote_port = ntohs(tcph->source); 1764 1765 qed_iwarp_print_cm_info(p_hwfn, cm_info); 1766 1767 *tcp_start_offset = eth_hlen + ip_hlen; 1768 1769 return 0; 1770 } 1771 1772 static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn, 1773 u16 cid) 1774 { 1775 struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; 1776 struct qed_iwarp_fpdu *partial_fpdu; 1777 u32 idx; 1778 1779 idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP); 1780 if (idx >= iwarp_info->max_num_partial_fpdus) { 1781 DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid, 1782 iwarp_info->max_num_partial_fpdus); 1783 return NULL; 1784 } 1785 1786 partial_fpdu = &iwarp_info->partial_fpdus[idx]; 1787 1788 return partial_fpdu; 1789 } 1790 1791 enum qed_iwarp_mpa_pkt_type { 1792 QED_IWARP_MPA_PKT_PACKED, 1793 QED_IWARP_MPA_PKT_PARTIAL, 1794 QED_IWARP_MPA_PKT_UNALIGNED 1795 }; 1796 1797 #define QED_IWARP_INVALID_FPDU_LENGTH 0xffff 1798 #define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2) 1799 #define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4) 1800 1801 /* Pad to multiple of 4 */ 1802 #define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4) 1803 #define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len) \ 1804 (QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) + \ 1805 QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \ 1806 QED_IWARP_MPA_CRC32_DIGEST_SIZE) 1807 1808 /* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */ 1809 #define QED_IWARP_MAX_BDS_PER_FPDU 3 1810 1811 static const char * const pkt_type_str[] = { 1812 "QED_IWARP_MPA_PKT_PACKED", 1813 "QED_IWARP_MPA_PKT_PARTIAL", 1814 "QED_IWARP_MPA_PKT_UNALIGNED" 1815 }; 1816 1817 static int 1818 qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn, 1819 struct qed_iwarp_fpdu *fpdu, 1820 struct qed_iwarp_ll2_buff *buf); 1821 1822 static enum qed_iwarp_mpa_pkt_type 1823 qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn, 1824 struct qed_iwarp_fpdu *fpdu, 1825 u16 tcp_payload_len, u8 *mpa_data) 1826 { 1827 enum qed_iwarp_mpa_pkt_type pkt_type; 1828 u16 mpa_len; 1829 1830 if (fpdu->incomplete_bytes) { 1831 pkt_type = QED_IWARP_MPA_PKT_UNALIGNED; 1832 goto out; 1833 } 1834 1835 /* special case of one byte remaining... 1836 * lower byte will be read next packet 1837 */ 1838 if (tcp_payload_len == 1) { 1839 fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE; 1840 pkt_type = QED_IWARP_MPA_PKT_PARTIAL; 1841 goto out; 1842 } 1843 1844 mpa_len = ntohs(*((u16 *)(mpa_data))); 1845 fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); 1846 1847 if (fpdu->fpdu_length <= tcp_payload_len) 1848 pkt_type = QED_IWARP_MPA_PKT_PACKED; 1849 else 1850 pkt_type = QED_IWARP_MPA_PKT_PARTIAL; 1851 1852 out: 1853 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1854 "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n", 1855 pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len); 1856 1857 return pkt_type; 1858 } 1859 1860 static void 1861 qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf, 1862 struct qed_iwarp_fpdu *fpdu, 1863 struct unaligned_opaque_data *pkt_data, 1864 u16 tcp_payload_size, u8 placement_offset) 1865 { 1866 fpdu->mpa_buf = buf; 1867 fpdu->pkt_hdr = buf->data_phys_addr + placement_offset; 1868 fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset; 1869 fpdu->mpa_frag = buf->data_phys_addr + pkt_data->first_mpa_offset; 1870 fpdu->mpa_frag_virt = (u8 *)(buf->data) + pkt_data->first_mpa_offset; 1871 1872 if (tcp_payload_size == 1) 1873 fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH; 1874 else if (tcp_payload_size < fpdu->fpdu_length) 1875 fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size; 1876 else 1877 fpdu->incomplete_bytes = 0; /* complete fpdu */ 1878 1879 fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes; 1880 } 1881 1882 static int 1883 qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn, 1884 struct qed_iwarp_fpdu *fpdu, 1885 struct unaligned_opaque_data *pkt_data, 1886 struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size) 1887 { 1888 u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf; 1889 int rc; 1890 1891 /* need to copy the data from the partial packet stored in fpdu 1892 * to the new buf, for this we also need to move the data currently 1893 * placed on the buf. The assumption is that the buffer is big enough 1894 * since fpdu_length <= mss, we use an intermediate buffer since 1895 * we may need to copy the new data to an overlapping location 1896 */ 1897 if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) { 1898 DP_ERR(p_hwfn, 1899 "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n", 1900 buf->buff_size, fpdu->mpa_frag_len, 1901 tcp_payload_size, fpdu->incomplete_bytes); 1902 return -EINVAL; 1903 } 1904 1905 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1906 "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n", 1907 fpdu->mpa_frag_virt, fpdu->mpa_frag_len, 1908 (u8 *)(buf->data) + pkt_data->first_mpa_offset, 1909 tcp_payload_size); 1910 1911 memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len); 1912 memcpy(tmp_buf + fpdu->mpa_frag_len, 1913 (u8 *)(buf->data) + pkt_data->first_mpa_offset, 1914 tcp_payload_size); 1915 1916 rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf); 1917 if (rc) 1918 return rc; 1919 1920 /* If we managed to post the buffer copy the data to the new buffer 1921 * o/w this will occur in the next round... 1922 */ 1923 memcpy((u8 *)(buf->data), tmp_buf, 1924 fpdu->mpa_frag_len + tcp_payload_size); 1925 1926 fpdu->mpa_buf = buf; 1927 /* fpdu->pkt_hdr remains as is */ 1928 /* fpdu->mpa_frag is overridden with new buf */ 1929 fpdu->mpa_frag = buf->data_phys_addr; 1930 fpdu->mpa_frag_virt = buf->data; 1931 fpdu->mpa_frag_len += tcp_payload_size; 1932 1933 fpdu->incomplete_bytes -= tcp_payload_size; 1934 1935 DP_VERBOSE(p_hwfn, 1936 QED_MSG_RDMA, 1937 "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n", 1938 buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size, 1939 fpdu->incomplete_bytes); 1940 1941 return 0; 1942 } 1943 1944 static void 1945 qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn, 1946 struct qed_iwarp_fpdu *fpdu, u8 *mpa_data) 1947 { 1948 u16 mpa_len; 1949 1950 /* Update incomplete packets if needed */ 1951 if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) { 1952 /* Missing lower byte is now available */ 1953 mpa_len = fpdu->fpdu_length | *mpa_data; 1954 fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); 1955 /* one byte of hdr */ 1956 fpdu->mpa_frag_len = 1; 1957 fpdu->incomplete_bytes = fpdu->fpdu_length - 1; 1958 DP_VERBOSE(p_hwfn, 1959 QED_MSG_RDMA, 1960 "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n", 1961 mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes); 1962 } 1963 } 1964 1965 #define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \ 1966 (GET_FIELD((_curr_pkt)->flags, \ 1967 UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE)) 1968 1969 /* This function is used to recycle a buffer using the ll2 drop option. It 1970 * uses the mechanism to ensure that all buffers posted to tx before this one 1971 * were completed. The buffer sent here will be sent as a cookie in the tx 1972 * completion function and can then be reposted to rx chain when done. The flow 1973 * that requires this is the flow where a FPDU splits over more than 3 tcp 1974 * segments. In this case the driver needs to re-post a rx buffer instead of 1975 * the one received, but driver can't simply repost a buffer it copied from 1976 * as there is a case where the buffer was originally a packed FPDU, and is 1977 * partially posted to FW. Driver needs to ensure FW is done with it. 1978 */ 1979 static int 1980 qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn, 1981 struct qed_iwarp_fpdu *fpdu, 1982 struct qed_iwarp_ll2_buff *buf) 1983 { 1984 struct qed_ll2_tx_pkt_info tx_pkt; 1985 u8 ll2_handle; 1986 int rc; 1987 1988 memset(&tx_pkt, 0, sizeof(tx_pkt)); 1989 tx_pkt.num_of_bds = 1; 1990 tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP; 1991 tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; 1992 tx_pkt.first_frag = fpdu->pkt_hdr; 1993 tx_pkt.first_frag_len = fpdu->pkt_hdr_size; 1994 buf->piggy_buf = NULL; 1995 tx_pkt.cookie = buf; 1996 1997 ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; 1998 1999 rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true); 2000 if (rc) 2001 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 2002 "Can't drop packet rc=%d\n", rc); 2003 2004 DP_VERBOSE(p_hwfn, 2005 QED_MSG_RDMA, 2006 "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n", 2007 (unsigned long int)tx_pkt.first_frag, 2008 tx_pkt.first_frag_len, buf, rc); 2009 2010 return rc; 2011 } 2012 2013 static int 2014 qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu) 2015 { 2016 struct qed_ll2_tx_pkt_info tx_pkt; 2017 u8 ll2_handle; 2018 int rc; 2019 2020 memset(&tx_pkt, 0, sizeof(tx_pkt)); 2021 tx_pkt.num_of_bds = 1; 2022 tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; 2023 tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; 2024 2025 tx_pkt.first_frag = fpdu->pkt_hdr; 2026 tx_pkt.first_frag_len = fpdu->pkt_hdr_size; 2027 tx_pkt.enable_ip_cksum = true; 2028 tx_pkt.enable_l4_cksum = true; 2029 tx_pkt.calc_ip_len = true; 2030 /* vlan overload with enum iwarp_ll2_tx_queues */ 2031 tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE; 2032 2033 ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; 2034 2035 rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true); 2036 if (rc) 2037 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 2038 "Can't send right edge rc=%d\n", rc); 2039 DP_VERBOSE(p_hwfn, 2040 QED_MSG_RDMA, 2041 "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n", 2042 tx_pkt.num_of_bds, 2043 (unsigned long int)tx_pkt.first_frag, 2044 tx_pkt.first_frag_len, rc); 2045 2046 return rc; 2047 } 2048 2049 static int 2050 qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn, 2051 struct qed_iwarp_fpdu *fpdu, 2052 struct unaligned_opaque_data *curr_pkt, 2053 struct qed_iwarp_ll2_buff *buf, 2054 u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type) 2055 { 2056 struct qed_ll2_tx_pkt_info tx_pkt; 2057 u8 ll2_handle; 2058 int rc; 2059 2060 memset(&tx_pkt, 0, sizeof(tx_pkt)); 2061 2062 /* An unaligned packet means it's split over two tcp segments. So the 2063 * complete packet requires 3 bds, one for the header, one for the 2064 * part of the fpdu of the first tcp segment, and the last fragment 2065 * will point to the remainder of the fpdu. A packed pdu, requires only 2066 * two bds, one for the header and one for the data. 2067 */ 2068 tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2; 2069 tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; 2070 tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */ 2071 2072 /* Send the mpa_buf only with the last fpdu (in case of packed) */ 2073 if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED || 2074 tcp_payload_size <= fpdu->fpdu_length) 2075 tx_pkt.cookie = fpdu->mpa_buf; 2076 2077 tx_pkt.first_frag = fpdu->pkt_hdr; 2078 tx_pkt.first_frag_len = fpdu->pkt_hdr_size; 2079 tx_pkt.enable_ip_cksum = true; 2080 tx_pkt.enable_l4_cksum = true; 2081 tx_pkt.calc_ip_len = true; 2082 /* vlan overload with enum iwarp_ll2_tx_queues */ 2083 tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE; 2084 2085 /* special case of unaligned packet and not packed, need to send 2086 * both buffers as cookie to release. 2087 */ 2088 if (tcp_payload_size == fpdu->incomplete_bytes) 2089 fpdu->mpa_buf->piggy_buf = buf; 2090 2091 ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; 2092 2093 /* Set first fragment to header */ 2094 rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true); 2095 if (rc) 2096 goto out; 2097 2098 /* Set second fragment to first part of packet */ 2099 rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle, 2100 fpdu->mpa_frag, 2101 fpdu->mpa_frag_len); 2102 if (rc) 2103 goto out; 2104 2105 if (!fpdu->incomplete_bytes) 2106 goto out; 2107 2108 /* Set third fragment to second part of the packet */ 2109 rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, 2110 ll2_handle, 2111 buf->data_phys_addr + 2112 curr_pkt->first_mpa_offset, 2113 fpdu->incomplete_bytes); 2114 out: 2115 DP_VERBOSE(p_hwfn, 2116 QED_MSG_RDMA, 2117 "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n", 2118 tx_pkt.num_of_bds, 2119 tx_pkt.first_frag_len, 2120 fpdu->mpa_frag_len, 2121 fpdu->incomplete_bytes, rc); 2122 2123 return rc; 2124 } 2125 2126 static void 2127 qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn, 2128 struct unaligned_opaque_data *curr_pkt, 2129 u32 opaque_data0, u32 opaque_data1) 2130 { 2131 u64 opaque_data; 2132 2133 opaque_data = HILO_64(opaque_data1, opaque_data0); 2134 *curr_pkt = *((struct unaligned_opaque_data *)&opaque_data); 2135 2136 curr_pkt->first_mpa_offset = curr_pkt->tcp_payload_offset + 2137 le16_to_cpu(curr_pkt->first_mpa_offset); 2138 curr_pkt->cid = le32_to_cpu(curr_pkt->cid); 2139 } 2140 2141 /* This function is called when an unaligned or incomplete MPA packet arrives 2142 * driver needs to align the packet, perhaps using previous data and send 2143 * it down to FW once it is aligned. 2144 */ 2145 static int 2146 qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn, 2147 struct qed_iwarp_ll2_mpa_buf *mpa_buf) 2148 { 2149 struct unaligned_opaque_data *curr_pkt = &mpa_buf->data; 2150 struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf; 2151 enum qed_iwarp_mpa_pkt_type pkt_type; 2152 struct qed_iwarp_fpdu *fpdu; 2153 int rc = -EINVAL; 2154 u8 *mpa_data; 2155 2156 fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, curr_pkt->cid & 0xffff); 2157 if (!fpdu) { /* something corrupt with cid, post rx back */ 2158 DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n", 2159 curr_pkt->cid); 2160 goto err; 2161 } 2162 2163 do { 2164 mpa_data = ((u8 *)(buf->data) + curr_pkt->first_mpa_offset); 2165 2166 pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu, 2167 mpa_buf->tcp_payload_len, 2168 mpa_data); 2169 2170 switch (pkt_type) { 2171 case QED_IWARP_MPA_PKT_PARTIAL: 2172 qed_iwarp_init_fpdu(buf, fpdu, 2173 curr_pkt, 2174 mpa_buf->tcp_payload_len, 2175 mpa_buf->placement_offset); 2176 2177 if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) { 2178 mpa_buf->tcp_payload_len = 0; 2179 break; 2180 } 2181 2182 rc = qed_iwarp_win_right_edge(p_hwfn, fpdu); 2183 2184 if (rc) { 2185 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 2186 "Can't send FPDU:reset rc=%d\n", rc); 2187 memset(fpdu, 0, sizeof(*fpdu)); 2188 break; 2189 } 2190 2191 mpa_buf->tcp_payload_len = 0; 2192 break; 2193 case QED_IWARP_MPA_PKT_PACKED: 2194 qed_iwarp_init_fpdu(buf, fpdu, 2195 curr_pkt, 2196 mpa_buf->tcp_payload_len, 2197 mpa_buf->placement_offset); 2198 2199 rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf, 2200 mpa_buf->tcp_payload_len, 2201 pkt_type); 2202 if (rc) { 2203 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 2204 "Can't send FPDU:reset rc=%d\n", rc); 2205 memset(fpdu, 0, sizeof(*fpdu)); 2206 break; 2207 } 2208 2209 mpa_buf->tcp_payload_len -= fpdu->fpdu_length; 2210 curr_pkt->first_mpa_offset += fpdu->fpdu_length; 2211 break; 2212 case QED_IWARP_MPA_PKT_UNALIGNED: 2213 qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data); 2214 if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) { 2215 /* special handling of fpdu split over more 2216 * than 2 segments 2217 */ 2218 if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) { 2219 rc = qed_iwarp_win_right_edge(p_hwfn, 2220 fpdu); 2221 /* packet will be re-processed later */ 2222 if (rc) 2223 return rc; 2224 } 2225 2226 rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt, 2227 buf, 2228 mpa_buf->tcp_payload_len); 2229 if (rc) /* packet will be re-processed later */ 2230 return rc; 2231 2232 mpa_buf->tcp_payload_len = 0; 2233 break; 2234 } 2235 2236 rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf, 2237 mpa_buf->tcp_payload_len, 2238 pkt_type); 2239 if (rc) { 2240 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 2241 "Can't send FPDU:delay rc=%d\n", rc); 2242 /* don't reset fpdu -> we need it for next 2243 * classify 2244 */ 2245 break; 2246 } 2247 2248 mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes; 2249 curr_pkt->first_mpa_offset += fpdu->incomplete_bytes; 2250 /* The framed PDU was sent - no more incomplete bytes */ 2251 fpdu->incomplete_bytes = 0; 2252 break; 2253 } 2254 } while (mpa_buf->tcp_payload_len && !rc); 2255 2256 return rc; 2257 2258 err: 2259 qed_iwarp_ll2_post_rx(p_hwfn, 2260 buf, 2261 p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle); 2262 return rc; 2263 } 2264 2265 static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn) 2266 { 2267 struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; 2268 struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL; 2269 int rc; 2270 2271 while (!list_empty(&iwarp_info->mpa_buf_pending_list)) { 2272 mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list, 2273 struct qed_iwarp_ll2_mpa_buf, 2274 list_entry); 2275 2276 rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf); 2277 2278 /* busy means break and continue processing later, don't 2279 * remove the buf from the pending list. 2280 */ 2281 if (rc == -EBUSY) 2282 break; 2283 2284 list_move_tail(&mpa_buf->list_entry, 2285 &iwarp_info->mpa_buf_list); 2286 2287 if (rc) { /* different error, don't continue */ 2288 DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc); 2289 break; 2290 } 2291 } 2292 } 2293 2294 static void 2295 qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) 2296 { 2297 struct qed_iwarp_ll2_mpa_buf *mpa_buf; 2298 struct qed_iwarp_info *iwarp_info; 2299 struct qed_hwfn *p_hwfn = cxt; 2300 2301 iwarp_info = &p_hwfn->p_rdma_info->iwarp; 2302 mpa_buf = list_first_entry(&iwarp_info->mpa_buf_list, 2303 struct qed_iwarp_ll2_mpa_buf, list_entry); 2304 if (!mpa_buf) { 2305 DP_ERR(p_hwfn, "No free mpa buf\n"); 2306 goto err; 2307 } 2308 2309 list_del(&mpa_buf->list_entry); 2310 qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data, 2311 data->opaque_data_0, data->opaque_data_1); 2312 2313 DP_VERBOSE(p_hwfn, 2314 QED_MSG_RDMA, 2315 "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n", 2316 data->length.packet_length, mpa_buf->data.first_mpa_offset, 2317 mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags, 2318 mpa_buf->data.cid); 2319 2320 mpa_buf->ll2_buf = data->cookie; 2321 mpa_buf->tcp_payload_len = data->length.packet_length - 2322 mpa_buf->data.first_mpa_offset; 2323 mpa_buf->data.first_mpa_offset += data->u.placement_offset; 2324 mpa_buf->placement_offset = data->u.placement_offset; 2325 2326 list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list); 2327 2328 qed_iwarp_process_pending_pkts(p_hwfn); 2329 return; 2330 err: 2331 qed_iwarp_ll2_post_rx(p_hwfn, data->cookie, 2332 iwarp_info->ll2_mpa_handle); 2333 } 2334 2335 static void 2336 qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) 2337 { 2338 struct qed_iwarp_ll2_buff *buf = data->cookie; 2339 struct qed_iwarp_listener *listener; 2340 struct qed_ll2_tx_pkt_info tx_pkt; 2341 struct qed_iwarp_cm_info cm_info; 2342 struct qed_hwfn *p_hwfn = cxt; 2343 u8 remote_mac_addr[ETH_ALEN]; 2344 u8 local_mac_addr[ETH_ALEN]; 2345 struct qed_iwarp_ep *ep; 2346 int tcp_start_offset; 2347 u8 ll2_syn_handle; 2348 int payload_len; 2349 u32 hdr_size; 2350 int rc; 2351 2352 memset(&cm_info, 0, sizeof(cm_info)); 2353 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle; 2354 2355 /* Check if packet was received with errors... */ 2356 if (data->err_flags) { 2357 DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n", 2358 data->err_flags); 2359 goto err; 2360 } 2361 2362 if (GET_FIELD(data->parse_flags, 2363 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) && 2364 GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) { 2365 DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n"); 2366 goto err; 2367 } 2368 2369 rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) + 2370 data->u.placement_offset, remote_mac_addr, 2371 local_mac_addr, &payload_len, 2372 &tcp_start_offset); 2373 if (rc) 2374 goto err; 2375 2376 /* Check if there is a listener for this 4-tuple+vlan */ 2377 listener = qed_iwarp_get_listener(p_hwfn, &cm_info); 2378 if (!listener) { 2379 DP_VERBOSE(p_hwfn, 2380 QED_MSG_RDMA, 2381 "SYN received on tuple not listened on parse_flags=%d packet len=%d\n", 2382 data->parse_flags, data->length.packet_length); 2383 2384 memset(&tx_pkt, 0, sizeof(tx_pkt)); 2385 tx_pkt.num_of_bds = 1; 2386 tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2; 2387 tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; 2388 tx_pkt.first_frag = buf->data_phys_addr + 2389 data->u.placement_offset; 2390 tx_pkt.first_frag_len = data->length.packet_length; 2391 tx_pkt.cookie = buf; 2392 2393 rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle, 2394 &tx_pkt, true); 2395 2396 if (rc) { 2397 DP_NOTICE(p_hwfn, 2398 "Can't post SYN back to chip rc=%d\n", rc); 2399 goto err; 2400 } 2401 return; 2402 } 2403 2404 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n"); 2405 /* There may be an open ep on this connection if this is a syn 2406 * retrasnmit... need to make sure there isn't... 2407 */ 2408 if (qed_iwarp_ep_exists(p_hwfn, &cm_info)) 2409 goto err; 2410 2411 ep = qed_iwarp_get_free_ep(p_hwfn); 2412 if (!ep) 2413 goto err; 2414 2415 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 2416 list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list); 2417 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 2418 2419 ether_addr_copy(ep->remote_mac_addr, remote_mac_addr); 2420 ether_addr_copy(ep->local_mac_addr, local_mac_addr); 2421 2422 memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info)); 2423 2424 hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60); 2425 ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size; 2426 ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss); 2427 2428 ep->event_cb = listener->event_cb; 2429 ep->cb_context = listener->cb_context; 2430 ep->connect_mode = TCP_CONNECT_PASSIVE; 2431 2432 ep->syn = buf; 2433 ep->syn_ip_payload_length = (u16)payload_len; 2434 ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset + 2435 tcp_start_offset; 2436 2437 rc = qed_iwarp_tcp_offload(p_hwfn, ep); 2438 if (rc) { 2439 qed_iwarp_return_ep(p_hwfn, ep); 2440 goto err; 2441 } 2442 2443 return; 2444 err: 2445 qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle); 2446 } 2447 2448 static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle, 2449 void *cookie, dma_addr_t rx_buf_addr, 2450 bool b_last_packet) 2451 { 2452 struct qed_iwarp_ll2_buff *buffer = cookie; 2453 struct qed_hwfn *p_hwfn = cxt; 2454 2455 dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size, 2456 buffer->data, buffer->data_phys_addr); 2457 kfree(buffer); 2458 } 2459 2460 static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle, 2461 void *cookie, dma_addr_t first_frag_addr, 2462 bool b_last_fragment, bool b_last_packet) 2463 { 2464 struct qed_iwarp_ll2_buff *buffer = cookie; 2465 struct qed_iwarp_ll2_buff *piggy; 2466 struct qed_hwfn *p_hwfn = cxt; 2467 2468 if (!buffer) /* can happen in packed mpa unaligned... */ 2469 return; 2470 2471 /* this was originally an rx packet, post it back */ 2472 piggy = buffer->piggy_buf; 2473 if (piggy) { 2474 buffer->piggy_buf = NULL; 2475 qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle); 2476 } 2477 2478 qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle); 2479 2480 if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle) 2481 qed_iwarp_process_pending_pkts(p_hwfn); 2482 2483 return; 2484 } 2485 2486 static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle, 2487 void *cookie, dma_addr_t first_frag_addr, 2488 bool b_last_fragment, bool b_last_packet) 2489 { 2490 struct qed_iwarp_ll2_buff *buffer = cookie; 2491 struct qed_hwfn *p_hwfn = cxt; 2492 2493 if (!buffer) 2494 return; 2495 2496 if (buffer->piggy_buf) { 2497 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2498 buffer->piggy_buf->buff_size, 2499 buffer->piggy_buf->data, 2500 buffer->piggy_buf->data_phys_addr); 2501 2502 kfree(buffer->piggy_buf); 2503 } 2504 2505 dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size, 2506 buffer->data, buffer->data_phys_addr); 2507 2508 kfree(buffer); 2509 } 2510 2511 /* The only slowpath for iwarp ll2 is unalign flush. When this completion 2512 * is received, need to reset the FPDU. 2513 */ 2514 static void 2515 qed_iwarp_ll2_slowpath(void *cxt, 2516 u8 connection_handle, 2517 u32 opaque_data_0, u32 opaque_data_1) 2518 { 2519 struct unaligned_opaque_data unalign_data; 2520 struct qed_hwfn *p_hwfn = cxt; 2521 struct qed_iwarp_fpdu *fpdu; 2522 2523 qed_iwarp_mpa_get_data(p_hwfn, &unalign_data, 2524 opaque_data_0, opaque_data_1); 2525 2526 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n", 2527 unalign_data.cid); 2528 2529 fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)unalign_data.cid); 2530 if (fpdu) 2531 memset(fpdu, 0, sizeof(*fpdu)); 2532 } 2533 2534 static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn) 2535 { 2536 struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; 2537 int rc = 0; 2538 2539 if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) { 2540 rc = qed_ll2_terminate_connection(p_hwfn, 2541 iwarp_info->ll2_syn_handle); 2542 if (rc) 2543 DP_INFO(p_hwfn, "Failed to terminate syn connection\n"); 2544 2545 qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle); 2546 iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL; 2547 } 2548 2549 if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) { 2550 rc = qed_ll2_terminate_connection(p_hwfn, 2551 iwarp_info->ll2_ooo_handle); 2552 if (rc) 2553 DP_INFO(p_hwfn, "Failed to terminate ooo connection\n"); 2554 2555 qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle); 2556 iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL; 2557 } 2558 2559 if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) { 2560 rc = qed_ll2_terminate_connection(p_hwfn, 2561 iwarp_info->ll2_mpa_handle); 2562 if (rc) 2563 DP_INFO(p_hwfn, "Failed to terminate mpa connection\n"); 2564 2565 qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle); 2566 iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL; 2567 } 2568 2569 qed_llh_remove_mac_filter(p_hwfn->cdev, 0, 2570 p_hwfn->p_rdma_info->iwarp.mac_addr); 2571 2572 return rc; 2573 } 2574 2575 static int 2576 qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn, 2577 int num_rx_bufs, int buff_size, u8 ll2_handle) 2578 { 2579 struct qed_iwarp_ll2_buff *buffer; 2580 int rc = 0; 2581 int i; 2582 2583 for (i = 0; i < num_rx_bufs; i++) { 2584 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); 2585 if (!buffer) { 2586 rc = -ENOMEM; 2587 break; 2588 } 2589 2590 buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 2591 buff_size, 2592 &buffer->data_phys_addr, 2593 GFP_KERNEL); 2594 if (!buffer->data) { 2595 kfree(buffer); 2596 rc = -ENOMEM; 2597 break; 2598 } 2599 2600 buffer->buff_size = buff_size; 2601 rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle); 2602 if (rc) 2603 /* buffers will be deallocated by qed_ll2 */ 2604 break; 2605 } 2606 return rc; 2607 } 2608 2609 #define QED_IWARP_MAX_BUF_SIZE(mtu) \ 2610 ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \ 2611 ETH_CACHE_LINE_SIZE) 2612 2613 static int 2614 qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, 2615 struct qed_rdma_start_in_params *params) 2616 { 2617 struct qed_iwarp_info *iwarp_info; 2618 struct qed_ll2_acquire_data data; 2619 struct qed_ll2_cbs cbs; 2620 u32 buff_size; 2621 u16 n_ooo_bufs; 2622 int rc = 0; 2623 int i; 2624 2625 iwarp_info = &p_hwfn->p_rdma_info->iwarp; 2626 iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL; 2627 iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL; 2628 iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL; 2629 2630 iwarp_info->max_mtu = params->max_mtu; 2631 2632 ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr); 2633 2634 rc = qed_llh_add_mac_filter(p_hwfn->cdev, 0, params->mac_addr); 2635 if (rc) 2636 return rc; 2637 2638 /* Start SYN connection */ 2639 cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt; 2640 cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt; 2641 cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt; 2642 cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt; 2643 cbs.slowpath_cb = NULL; 2644 cbs.cookie = p_hwfn; 2645 2646 memset(&data, 0, sizeof(data)); 2647 data.input.conn_type = QED_LL2_TYPE_IWARP; 2648 data.input.mtu = params->max_mtu; 2649 data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; 2650 data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; 2651 data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ 2652 data.input.tx_tc = PKT_LB_TC; 2653 data.input.tx_dest = QED_LL2_TX_DEST_LB; 2654 data.p_connection_handle = &iwarp_info->ll2_syn_handle; 2655 data.cbs = &cbs; 2656 2657 rc = qed_ll2_acquire_connection(p_hwfn, &data); 2658 if (rc) { 2659 DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n"); 2660 qed_llh_remove_mac_filter(p_hwfn->cdev, 0, params->mac_addr); 2661 return rc; 2662 } 2663 2664 rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle); 2665 if (rc) { 2666 DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n"); 2667 goto err; 2668 } 2669 2670 buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); 2671 rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, 2672 QED_IWARP_LL2_SYN_RX_SIZE, 2673 buff_size, 2674 iwarp_info->ll2_syn_handle); 2675 if (rc) 2676 goto err; 2677 2678 /* Start OOO connection */ 2679 data.input.conn_type = QED_LL2_TYPE_OOO; 2680 data.input.mtu = params->max_mtu; 2681 2682 n_ooo_bufs = (QED_IWARP_MAX_OOO * QED_IWARP_RCV_WND_SIZE_DEF) / 2683 iwarp_info->max_mtu; 2684 n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE); 2685 2686 data.input.rx_num_desc = n_ooo_bufs; 2687 data.input.rx_num_ooo_buffers = n_ooo_bufs; 2688 2689 data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ 2690 data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE; 2691 data.p_connection_handle = &iwarp_info->ll2_ooo_handle; 2692 2693 rc = qed_ll2_acquire_connection(p_hwfn, &data); 2694 if (rc) 2695 goto err; 2696 2697 rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle); 2698 if (rc) 2699 goto err; 2700 2701 /* Start Unaligned MPA connection */ 2702 cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt; 2703 cbs.slowpath_cb = qed_iwarp_ll2_slowpath; 2704 2705 memset(&data, 0, sizeof(data)); 2706 data.input.conn_type = QED_LL2_TYPE_IWARP; 2707 data.input.mtu = params->max_mtu; 2708 /* FW requires that once a packet arrives OOO, it must have at 2709 * least 2 rx buffers available on the unaligned connection 2710 * for handling the case that it is a partial fpdu. 2711 */ 2712 data.input.rx_num_desc = n_ooo_bufs * 2; 2713 data.input.tx_num_desc = data.input.rx_num_desc; 2714 data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU; 2715 data.input.tx_tc = PKT_LB_TC; 2716 data.input.tx_dest = QED_LL2_TX_DEST_LB; 2717 data.p_connection_handle = &iwarp_info->ll2_mpa_handle; 2718 data.input.secondary_queue = true; 2719 data.cbs = &cbs; 2720 2721 rc = qed_ll2_acquire_connection(p_hwfn, &data); 2722 if (rc) 2723 goto err; 2724 2725 rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle); 2726 if (rc) 2727 goto err; 2728 2729 rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, 2730 data.input.rx_num_desc, 2731 buff_size, 2732 iwarp_info->ll2_mpa_handle); 2733 if (rc) 2734 goto err; 2735 2736 iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps, 2737 sizeof(*iwarp_info->partial_fpdus), 2738 GFP_KERNEL); 2739 if (!iwarp_info->partial_fpdus) 2740 goto err; 2741 2742 iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; 2743 2744 iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL); 2745 if (!iwarp_info->mpa_intermediate_buf) 2746 goto err; 2747 2748 /* The mpa_bufs array serves for pending RX packets received on the 2749 * mpa ll2 that don't have place on the tx ring and require later 2750 * processing. We can't fail on allocation of such a struct therefore 2751 * we allocate enough to take care of all rx packets 2752 */ 2753 iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc, 2754 sizeof(*iwarp_info->mpa_bufs), 2755 GFP_KERNEL); 2756 if (!iwarp_info->mpa_bufs) 2757 goto err; 2758 2759 INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list); 2760 INIT_LIST_HEAD(&iwarp_info->mpa_buf_list); 2761 for (i = 0; i < data.input.rx_num_desc; i++) 2762 list_add_tail(&iwarp_info->mpa_bufs[i].list_entry, 2763 &iwarp_info->mpa_buf_list); 2764 return rc; 2765 err: 2766 qed_iwarp_ll2_stop(p_hwfn); 2767 2768 return rc; 2769 } 2770 2771 int qed_iwarp_setup(struct qed_hwfn *p_hwfn, 2772 struct qed_rdma_start_in_params *params) 2773 { 2774 struct qed_iwarp_info *iwarp_info; 2775 u32 rcv_wnd_size; 2776 2777 iwarp_info = &p_hwfn->p_rdma_info->iwarp; 2778 2779 iwarp_info->tcp_flags = QED_IWARP_TS_EN; 2780 rcv_wnd_size = QED_IWARP_RCV_WND_SIZE_DEF; 2781 2782 /* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */ 2783 iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) - 2784 ilog2(QED_IWARP_RCV_WND_SIZE_MIN); 2785 iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale; 2786 iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED; 2787 iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED; 2788 2789 iwarp_info->peer2peer = QED_IWARP_PARAM_P2P; 2790 2791 iwarp_info->rtr_type = MPA_RTR_TYPE_ZERO_SEND | 2792 MPA_RTR_TYPE_ZERO_WRITE | 2793 MPA_RTR_TYPE_ZERO_READ; 2794 2795 spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock); 2796 INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list); 2797 INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list); 2798 2799 qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP, 2800 qed_iwarp_async_event); 2801 qed_ooo_setup(p_hwfn); 2802 2803 return qed_iwarp_ll2_start(p_hwfn, params); 2804 } 2805 2806 int qed_iwarp_stop(struct qed_hwfn *p_hwfn) 2807 { 2808 int rc; 2809 2810 qed_iwarp_free_prealloc_ep(p_hwfn); 2811 rc = qed_iwarp_wait_for_all_cids(p_hwfn); 2812 if (rc) 2813 return rc; 2814 2815 qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP); 2816 2817 return qed_iwarp_ll2_stop(p_hwfn); 2818 } 2819 2820 static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn, 2821 struct qed_iwarp_ep *ep, 2822 u8 fw_return_code) 2823 { 2824 struct qed_iwarp_cm_event_params params; 2825 2826 qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true); 2827 2828 params.event = QED_IWARP_EVENT_CLOSE; 2829 params.ep_context = ep; 2830 params.cm_info = &ep->cm_info; 2831 params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ? 2832 0 : -ECONNRESET; 2833 2834 /* paired with READ_ONCE in destroy_qp */ 2835 smp_store_release(&ep->state, QED_IWARP_EP_CLOSED); 2836 2837 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 2838 list_del(&ep->list_entry); 2839 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 2840 2841 ep->event_cb(ep->cb_context, ¶ms); 2842 } 2843 2844 static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn, 2845 struct qed_iwarp_ep *ep, 2846 int fw_ret_code) 2847 { 2848 struct qed_iwarp_cm_event_params params; 2849 bool event_cb = false; 2850 2851 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n", 2852 ep->cid, fw_ret_code); 2853 2854 switch (fw_ret_code) { 2855 case IWARP_EXCEPTION_DETECTED_LLP_CLOSED: 2856 params.status = 0; 2857 params.event = QED_IWARP_EVENT_DISCONNECT; 2858 event_cb = true; 2859 break; 2860 case IWARP_EXCEPTION_DETECTED_LLP_RESET: 2861 params.status = -ECONNRESET; 2862 params.event = QED_IWARP_EVENT_DISCONNECT; 2863 event_cb = true; 2864 break; 2865 case IWARP_EXCEPTION_DETECTED_RQ_EMPTY: 2866 params.event = QED_IWARP_EVENT_RQ_EMPTY; 2867 event_cb = true; 2868 break; 2869 case IWARP_EXCEPTION_DETECTED_IRQ_FULL: 2870 params.event = QED_IWARP_EVENT_IRQ_FULL; 2871 event_cb = true; 2872 break; 2873 case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT: 2874 params.event = QED_IWARP_EVENT_LLP_TIMEOUT; 2875 event_cb = true; 2876 break; 2877 case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR: 2878 params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR; 2879 event_cb = true; 2880 break; 2881 case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW: 2882 params.event = QED_IWARP_EVENT_CQ_OVERFLOW; 2883 event_cb = true; 2884 break; 2885 case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC: 2886 params.event = QED_IWARP_EVENT_QP_CATASTROPHIC; 2887 event_cb = true; 2888 break; 2889 case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR: 2890 params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR; 2891 event_cb = true; 2892 break; 2893 case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR: 2894 params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR; 2895 event_cb = true; 2896 break; 2897 case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED: 2898 params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED; 2899 event_cb = true; 2900 break; 2901 default: 2902 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 2903 "Unhandled exception received...fw_ret_code=%d\n", 2904 fw_ret_code); 2905 break; 2906 } 2907 2908 if (event_cb) { 2909 params.ep_context = ep; 2910 params.cm_info = &ep->cm_info; 2911 ep->event_cb(ep->cb_context, ¶ms); 2912 } 2913 } 2914 2915 static void 2916 qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn, 2917 struct qed_iwarp_ep *ep, u8 fw_return_code) 2918 { 2919 struct qed_iwarp_cm_event_params params; 2920 2921 memset(¶ms, 0, sizeof(params)); 2922 params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE; 2923 params.ep_context = ep; 2924 params.cm_info = &ep->cm_info; 2925 /* paired with READ_ONCE in destroy_qp */ 2926 smp_store_release(&ep->state, QED_IWARP_EP_CLOSED); 2927 2928 switch (fw_return_code) { 2929 case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET: 2930 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 2931 "%s(0x%x) TCP connect got invalid packet\n", 2932 QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); 2933 params.status = -ECONNRESET; 2934 break; 2935 case IWARP_CONN_ERROR_TCP_CONNECTION_RST: 2936 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 2937 "%s(0x%x) TCP Connection Reset\n", 2938 QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); 2939 params.status = -ECONNRESET; 2940 break; 2941 case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT: 2942 DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n", 2943 QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); 2944 params.status = -EBUSY; 2945 break; 2946 case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER: 2947 DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n", 2948 QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); 2949 params.status = -ECONNREFUSED; 2950 break; 2951 case IWARP_CONN_ERROR_MPA_INVALID_PACKET: 2952 DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n", 2953 QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); 2954 params.status = -ECONNRESET; 2955 break; 2956 default: 2957 DP_ERR(p_hwfn, 2958 "%s(0x%x) Unexpected return code tcp connect: %d\n", 2959 QED_IWARP_CONNECT_MODE_STRING(ep), 2960 ep->tcp_cid, fw_return_code); 2961 params.status = -ECONNRESET; 2962 break; 2963 } 2964 2965 if (ep->connect_mode == TCP_CONNECT_PASSIVE) { 2966 ep->tcp_cid = QED_IWARP_INVALID_TCP_CID; 2967 qed_iwarp_return_ep(p_hwfn, ep); 2968 } else { 2969 ep->event_cb(ep->cb_context, ¶ms); 2970 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 2971 list_del(&ep->list_entry); 2972 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 2973 } 2974 } 2975 2976 static void 2977 qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn, 2978 struct qed_iwarp_ep *ep, u8 fw_return_code) 2979 { 2980 u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle; 2981 2982 if (ep->connect_mode == TCP_CONNECT_PASSIVE) { 2983 /* Done with the SYN packet, post back to ll2 rx */ 2984 qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle); 2985 2986 ep->syn = NULL; 2987 2988 /* If connect failed - upper layer doesn't know about it */ 2989 if (fw_return_code == RDMA_RETURN_OK) 2990 qed_iwarp_mpa_received(p_hwfn, ep); 2991 else 2992 qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep, 2993 fw_return_code); 2994 } else { 2995 if (fw_return_code == RDMA_RETURN_OK) 2996 qed_iwarp_mpa_offload(p_hwfn, ep); 2997 else 2998 qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep, 2999 fw_return_code); 3000 } 3001 } 3002 3003 static inline bool 3004 qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) 3005 { 3006 if (!ep || (ep->sig != QED_EP_SIG)) { 3007 DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep); 3008 return false; 3009 } 3010 3011 return true; 3012 } 3013 3014 static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, 3015 u8 fw_event_code, u16 echo, 3016 union event_ring_data *data, 3017 u8 fw_return_code) 3018 { 3019 struct qed_rdma_events events = p_hwfn->p_rdma_info->events; 3020 struct regpair *fw_handle = &data->rdma_data.async_handle; 3021 struct qed_iwarp_ep *ep = NULL; 3022 u16 srq_offset; 3023 u16 srq_id; 3024 u16 cid; 3025 3026 ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi, 3027 fw_handle->lo); 3028 3029 switch (fw_event_code) { 3030 case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE: 3031 /* Async completion after TCP 3-way handshake */ 3032 if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) 3033 return -EINVAL; 3034 DP_VERBOSE(p_hwfn, 3035 QED_MSG_RDMA, 3036 "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n", 3037 ep->tcp_cid, fw_return_code); 3038 qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code); 3039 break; 3040 case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED: 3041 if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) 3042 return -EINVAL; 3043 DP_VERBOSE(p_hwfn, 3044 QED_MSG_RDMA, 3045 "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n", 3046 ep->cid, fw_return_code); 3047 qed_iwarp_exception_received(p_hwfn, ep, fw_return_code); 3048 break; 3049 case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE: 3050 /* Async completion for Close Connection ramrod */ 3051 if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) 3052 return -EINVAL; 3053 DP_VERBOSE(p_hwfn, 3054 QED_MSG_RDMA, 3055 "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n", 3056 ep->cid, fw_return_code); 3057 qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code); 3058 break; 3059 case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED: 3060 /* Async event for active side only */ 3061 if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) 3062 return -EINVAL; 3063 DP_VERBOSE(p_hwfn, 3064 QED_MSG_RDMA, 3065 "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n", 3066 ep->cid, fw_return_code); 3067 qed_iwarp_mpa_reply_arrived(p_hwfn, ep); 3068 break; 3069 case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE: 3070 if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) 3071 return -EINVAL; 3072 DP_VERBOSE(p_hwfn, 3073 QED_MSG_RDMA, 3074 "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n", 3075 ep->cid, fw_return_code); 3076 qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code); 3077 break; 3078 case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED: 3079 cid = (u16)le32_to_cpu(fw_handle->lo); 3080 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 3081 "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid); 3082 qed_iwarp_cid_cleaned(p_hwfn, cid); 3083 3084 break; 3085 case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY: 3086 DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n"); 3087 srq_offset = p_hwfn->p_rdma_info->srq_id_offset; 3088 /* FW assigns value that is no greater than u16 */ 3089 srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset; 3090 events.affiliated_event(events.context, 3091 QED_IWARP_EVENT_SRQ_EMPTY, 3092 &srq_id); 3093 break; 3094 case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT: 3095 DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n"); 3096 srq_offset = p_hwfn->p_rdma_info->srq_id_offset; 3097 /* FW assigns value that is no greater than u16 */ 3098 srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset; 3099 events.affiliated_event(events.context, 3100 QED_IWARP_EVENT_SRQ_LIMIT, 3101 &srq_id); 3102 break; 3103 case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW: 3104 DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n"); 3105 3106 p_hwfn->p_rdma_info->events.affiliated_event( 3107 p_hwfn->p_rdma_info->events.context, 3108 QED_IWARP_EVENT_CQ_OVERFLOW, 3109 (void *)fw_handle); 3110 break; 3111 default: 3112 DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n", 3113 fw_event_code); 3114 return -EINVAL; 3115 } 3116 return 0; 3117 } 3118 3119 int 3120 qed_iwarp_create_listen(void *rdma_cxt, 3121 struct qed_iwarp_listen_in *iparams, 3122 struct qed_iwarp_listen_out *oparams) 3123 { 3124 struct qed_hwfn *p_hwfn = rdma_cxt; 3125 struct qed_iwarp_listener *listener; 3126 3127 listener = kzalloc(sizeof(*listener), GFP_KERNEL); 3128 if (!listener) 3129 return -ENOMEM; 3130 3131 listener->ip_version = iparams->ip_version; 3132 memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr)); 3133 listener->port = iparams->port; 3134 listener->vlan = iparams->vlan; 3135 3136 listener->event_cb = iparams->event_cb; 3137 listener->cb_context = iparams->cb_context; 3138 listener->max_backlog = iparams->max_backlog; 3139 oparams->handle = listener; 3140 3141 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 3142 list_add_tail(&listener->list_entry, 3143 &p_hwfn->p_rdma_info->iwarp.listen_list); 3144 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 3145 3146 DP_VERBOSE(p_hwfn, 3147 QED_MSG_RDMA, 3148 "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n", 3149 listener->event_cb, 3150 listener, 3151 listener->ip_addr[0], 3152 listener->ip_addr[1], 3153 listener->ip_addr[2], 3154 listener->ip_addr[3], listener->port, listener->vlan); 3155 3156 return 0; 3157 } 3158 3159 int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle) 3160 { 3161 struct qed_iwarp_listener *listener = handle; 3162 struct qed_hwfn *p_hwfn = rdma_cxt; 3163 3164 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle); 3165 3166 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 3167 list_del(&listener->list_entry); 3168 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); 3169 3170 kfree(listener); 3171 3172 return 0; 3173 } 3174 3175 int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams) 3176 { 3177 struct qed_hwfn *p_hwfn = rdma_cxt; 3178 struct qed_sp_init_data init_data; 3179 struct qed_spq_entry *p_ent; 3180 struct qed_iwarp_ep *ep; 3181 struct qed_rdma_qp *qp; 3182 int rc; 3183 3184 ep = iparams->ep_context; 3185 if (!ep) { 3186 DP_ERR(p_hwfn, "Ep Context receive in send_rtr is NULL\n"); 3187 return -EINVAL; 3188 } 3189 3190 qp = ep->qp; 3191 3192 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n", 3193 qp->icid, ep->tcp_cid); 3194 3195 memset(&init_data, 0, sizeof(init_data)); 3196 init_data.cid = qp->icid; 3197 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 3198 init_data.comp_mode = QED_SPQ_MODE_CB; 3199 3200 rc = qed_sp_init_request(p_hwfn, &p_ent, 3201 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR, 3202 PROTOCOLID_IWARP, &init_data); 3203 3204 if (rc) 3205 return rc; 3206 3207 rc = qed_spq_post(p_hwfn, p_ent, NULL); 3208 3209 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc); 3210 3211 return rc; 3212 } 3213 3214 void 3215 qed_iwarp_query_qp(struct qed_rdma_qp *qp, 3216 struct qed_rdma_query_qp_out_params *out_params) 3217 { 3218 out_params->state = qed_iwarp2roce_state(qp->iwarp_state); 3219 } 3220