/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_LEN		(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_LEN		(ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
				 ISCSI_ISER_MAX_CONN)

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("qp event %s (%d)\n",
		 ib_event_msg(cause->event), cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	iser_err("async event %s (%d) on device %s port %d\n",
		 ib_event_msg(event->event), event->event,
		 dev_name(&event->device->dev), event->element.port_num);
}

/*
 * iser_create_device_ib_res - allocates the Protection Domain (PD) and
 * registers the async event handler with the device associated with
 * the adapter.
 *
 * Return: 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	struct ib_device *ib_dev = device->ib_device;

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {
		iser_err("IB device does not support memory registrations\n");
		return -1;
	}

	device->pd = ib_alloc_pd(ib_dev,
		iser_always_reg ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
	if (IS_ERR(device->pd))
		goto pd_err;

	INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
			      iser_event_handler);
	ib_register_event_handler(&device->event_handler);
	return 0;

pd_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}

/*
 * iser_free_device_ib_res - unregisters the async event handler and
 * deallocates the PD created for the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	ib_unregister_event_handler(&device->event_handler);
	ib_dealloc_pd(device->pd);

	device->pd = NULL;
}

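/**
 * iser_create_fastreg_desc - allocate a single fast registration descriptor
 * @device: iser device
 * @pd: protection domain
 * @pi_enable: T10-PI support indicator; when set, an integrity MR is
 *             allocated alongside the regular registration MR
 * @size: max number of pages per map request
 *
 * Return: allocated descriptor on success, ERR_PTR otherwise
 */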
static struct iser_fr_desc *
iser_create_fastreg_desc(struct iser_device *device,
			 struct ib_pd *pd,
			 bool pi_enable,
			 unsigned int size)
{
	struct iser_fr_desc *desc;
	struct ib_device *ib_dev = device->ib_device;
	enum ib_mr_type mr_type;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	if (ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
		mr_type = IB_MR_TYPE_SG_GAPS;
	else
		mr_type = IB_MR_TYPE_MEM_REG;

	desc->rsc.mr = ib_alloc_mr(pd, mr_type, size);
	if (IS_ERR(desc->rsc.mr)) {
		ret = PTR_ERR(desc->rsc.mr);
		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
		goto err_alloc_mr;
	}

	if (pi_enable) {
		desc->rsc.sig_mr = ib_alloc_mr_integrity(pd, size, size);
		if (IS_ERR(desc->rsc.sig_mr)) {
			ret = PTR_ERR(desc->rsc.sig_mr);
			iser_err("Failed to allocate sig_mr err=%d\n", ret);
			goto err_alloc_mr_integrity;
		}
	}
	desc->rsc.mr_valid = 0;

	return desc;

err_alloc_mr_integrity:
	ib_dereg_mr(desc->rsc.mr);
err_alloc_mr:
	kfree(desc);

	return ERR_PTR(ret);
}

static void iser_destroy_fastreg_desc(struct iser_fr_desc *desc)
{
	struct iser_reg_resources *res = &desc->rsc;

	ib_dereg_mr(res->mr);
	if (res->sig_mr) {
		ib_dereg_mr(res->sig_mr);
		res->sig_mr = NULL;
	}
	kfree(desc);
}

/**
 * iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors
 * for fast registration work requests.
 * @ib_conn: connection RDMA resources
 * @cmds_max: max number of SCSI commands for this connection
 * @size: max number of pages per map request
 *
 * Return: 0 on success, or errno code on failure
 */
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
			    unsigned cmds_max,
			    unsigned int size)
{
	struct iser_device *device = ib_conn->device;
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc;
	int i, ret;

	INIT_LIST_HEAD(&fr_pool->list);
	INIT_LIST_HEAD(&fr_pool->all_list);
	spin_lock_init(&fr_pool->lock);
	fr_pool->size = 0;
	for (i = 0; i < cmds_max; i++) {
		desc = iser_create_fastreg_desc(device, device->pd,
						ib_conn->pi_support, size);
		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto err;
		}

		list_add_tail(&desc->list, &fr_pool->list);
		list_add_tail(&desc->all_list, &fr_pool->all_list);
		fr_pool->size++;
	}

	return 0;

err:
	iser_free_fastreg_pool(ib_conn);
	return ret;
}

/**
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
 * @ib_conn: connection RDMA resources
 */
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc, *tmp;
	int i = 0;

	if (list_empty(&fr_pool->all_list))
		return;

	iser_info("freeing conn %p fr pool\n", ib_conn);

	list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
		list_del(&desc->all_list);
		iser_destroy_fastreg_desc(desc);
		++i;
	}

	if (i < fr_pool->size)
		iser_warn("pool still has %d regions registered\n",
			  fr_pool->size - i);
}

/*
 * iser_create_ib_conn_res - gets a CQ from the device's CQ pool and
 * creates the Queue-Pair (QP) attached to it.
 *
 * Return: 0 on success, errno code on failure
 */
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_device *device;
	struct ib_device *ib_dev;
	struct ib_qp_init_attr init_attr;
	int ret = -ENOMEM;
	unsigned int max_send_wr, cq_size;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;
	ib_dev = device->ib_device;

	/* +1 for drain */
	if (ib_conn->pi_support)
		max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
	else
		max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
	max_send_wr = min_t(unsigned int, max_send_wr,
			    (unsigned int)ib_dev->attrs.max_qp_wr);

	cq_size = max_send_wr + ISER_QP_MAX_RECV_DTOS;
	ib_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_SOFTIRQ);
	if (IS_ERR(ib_conn->cq)) {
		ret = PTR_ERR(ib_conn->cq);
		goto cq_err;
	}
	ib_conn->cq_size = cq_size;

	memset(&init_attr, 0, sizeof(init_attr));

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context = (void *)ib_conn;
	init_attr.send_cq = ib_conn->cq;
	init_attr.recv_cq = ib_conn->cq;
	/* +1 for drain */
	init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS + 1;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr.qp_type = IB_QPT_RC;
	init_attr.cap.max_send_wr = max_send_wr;
	if (ib_conn->pi_support)
		init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
	iser_conn->max_cmds = ISER_GET_MAX_XMIT_CMDS(max_send_wr - 1);

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n", ib_conn,
		  ib_conn->cma_id, ib_conn->cma_id->qp, max_send_wr);
	return ret;

out_err:
	ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
cq_err:
	iser_err("unable to alloc mem or create resource, err %d\n", ret);

	return ret;
}

/*
 * Based on the resolved device node GUID, see if there is an already
 * allocated device for this IB device. If there is none, create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (!device)
		goto out;

	/* assign the IB device to the new iser device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}

void iser_release_work(struct work_struct *work)
{
	struct iser_conn *iser_conn;

	iser_conn = container_of(work, struct iser_conn, release_work);

	/* Wait for conn_stop to complete */
	wait_for_completion(&iser_conn->stop_completion);
	/* Wait for IB resources cleanup to complete */
	wait_for_completion(&iser_conn->ib_completion);

	mutex_lock(&iser_conn->state_mutex);
	iser_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&iser_conn->state_mutex);

	iser_conn_release(iser_conn);
}

/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
 * @destroy: indicator if we need to try to release the
 *           iser device and memory regions pool (only iscsi
 *           shutdown and DEVICE_REMOVAL will use this).
 *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is safe to
 * be invoked multiple times.
 */
static void iser_free_ib_conn_res(struct iser_conn *iser_conn, bool destroy)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_info("freeing conn %p cma_id %p qp %p\n",
		  iser_conn, ib_conn->cma_id, ib_conn->qp);

	if (ib_conn->qp) {
		rdma_destroy_qp(ib_conn->cma_id);
		ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
		ib_conn->qp = NULL;
	}

	if (destroy) {
		if (iser_conn->rx_descs)
			iser_free_rx_descriptors(iser_conn);

		if (device) {
			iser_device_try_release(device);
			ib_conn->device = NULL;
		}
	}
}

/**
 * iser_conn_release - Frees all conn objects and deallocs conn descriptor
 * @iser_conn: iSER connection context
 */
void iser_conn_release(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	mutex_lock(&ig.connlist_mutex);
	list_del(&iser_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);

	mutex_lock(&iser_conn->state_mutex);
	/* In case we end up here without ep_disconnect being invoked. */
	if (iser_conn->state != ISER_CONN_DOWN) {
		iser_warn("iser conn %p state %d, expected state down.\n",
			  iser_conn, iser_conn->state);
		iscsi_destroy_endpoint(iser_conn->ep);
		iser_conn->state = ISER_CONN_DOWN;
	}
	/*
	 * In case we never got to bind stage, we still need to
	 * release IB resources (which is safe to call more than once).
	 */
	iser_free_ib_conn_res(iser_conn, true);
	mutex_unlock(&iser_conn->state_mutex);

	if (ib_conn->cma_id) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}

	kfree(iser_conn);
}

/**
 * iser_conn_terminate - triggers start of the disconnect procedures and
 * waits for them to be done
 * @iser_conn: iSER connection context
 *
 * Called with state mutex held
 *
 * Return: 0 if the connection was not in the UP state when called,
 * 1 if termination was triggered
 */
int iser_conn_terminate(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	lockdep_assert_held(&iser_conn->state_mutex);

	/* terminate the iser conn only if the conn state is UP */
	if (iser_conn->state != ISER_CONN_UP)
		return 0;

	iser_conn->state = ISER_CONN_TERMINATING;
	iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);

	/* suspend queuing of new iscsi commands */
	if (iser_conn->iscsi_conn)
		iscsi_suspend_queue(iser_conn->iscsi_conn);

	/*
	 * In case we didn't already clean up the cma_id (peer initiated
	 * a disconnection), we need to cause the CMA to change the QP
	 * state to ERROR.
	 */
	if (ib_conn->cma_id) {
		err = rdma_disconnect(ib_conn->cma_id);
		if (err)
			iser_err("Failed to disconnect, conn: 0x%p err %d\n",
				 iser_conn, err);

		/* block until all flush errors are consumed */
		ib_drain_qp(ib_conn->qp);
	}

	return 1;
}

/*
 * Called with state mutex held
 */
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn = cma_id->context;

	lockdep_assert_held(&iser_conn->state_mutex);

	iser_conn->state = ISER_CONN_TERMINATING;
}

static void iser_calc_scsi_params(struct iser_conn *iser_conn,
				  unsigned int max_sectors)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	struct ib_device_attr *attr = &device->ib_device->attrs;
	unsigned short sg_tablesize, sup_sg_tablesize;
	unsigned short reserved_mr_pages;
	u32 max_num_sg;

	/*
	 * FRs without SG_GAPS can only map up to a (device) page per entry,
	 * but if the first entry is misaligned we'll end up using two entries
	 * (head and tail) for a single page's worth of data, so one
	 * additional entry is required.
	 */
	if (attr->kernel_cap_flags & IBK_SG_GAPS_REG)
		reserved_mr_pages = 0;
	else
		reserved_mr_pages = 1;

	if (iser_conn->ib_conn.pi_support)
		max_num_sg = attr->max_pi_fast_reg_page_list_len;
	else
		max_num_sg = attr->max_fast_reg_page_list_len;

	sg_tablesize = DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K);
	sup_sg_tablesize = min_t(uint, ISCSI_ISER_MAX_SG_TABLESIZE,
				 max_num_sg - reserved_mr_pages);
	iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
	iser_conn->pages_per_mr =
		iser_conn->scsi_sg_tablesize + reserved_mr_pages;
}

/*
 * Called with state mutex held
 */
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn = cma_id->context;
	struct iser_device *device;
	struct ib_conn *ib_conn;
	int ret;

	lockdep_assert_held(&iser_conn->state_mutex);

	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ib_conn = &iser_conn->ib_conn;
	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		iser_connect_error(cma_id);
		return;
	}

	ib_conn->device = device;

	/* connection T10-PI support */
	if (iser_pi_enable) {
		if (!(device->ib_device->attrs.kernel_cap_flags &
		      IBK_INTEGRITY_HANDOVER)) {
			iser_warn("T10-PI requested but not supported on %s, "
				  "continue without T10-PI\n",
				  dev_name(&ib_conn->device->ib_device->dev));
			ib_conn->pi_support = false;
		} else {
			ib_conn->pi_support = true;
		}
	}

	iser_calc_scsi_params(iser_conn, iser_max_sectors);

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
		return;
	}
}

/*
 * Called with state mutex held
 */
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int ret;
	struct iser_cm_hdr req_hdr;
	struct iser_conn *iser_conn = cma_id->context;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_device *ib_dev = ib_conn->device->ib_device;

	lockdep_assert_held(&iser_conn->state_mutex);

	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ret = iser_create_ib_conn_res(ib_conn);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = ib_dev->attrs.max_qp_rd_atom;
	conn_param.initiator_depth = 1;
	conn_param.retry_count = 7;
	conn_param.rnr_retry_count = 6;

	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = ISER_ZBVA_NOT_SUP;
	if (!iser_always_reg)
		req_hdr.flags |= ISER_SEND_W_INV_NOT_SUP;
	conn_param.private_data = (void *)&req_hdr;
	conn_param.private_data_len = sizeof(struct iser_cm_hdr);

	ret = rdma_connect_locked(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return;
failure:
	iser_connect_error(cma_id);
}

/*
 * Called with state mutex held
 */
static void iser_connected_handler(struct rdma_cm_id *cma_id,
				   const void *private_data)
{
	struct iser_conn *iser_conn = cma_id->context;
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	lockdep_assert_held(&iser_conn->state_mutex);

	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
	iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num,
		  cma_id->qp->qp_num);

	if (private_data) {
		u8 flags = *(u8 *)private_data;

		iser_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP);
	}

	iser_info("conn %p: negotiated %s invalidation\n",
		  iser_conn, iser_conn->snd_w_inv ? "remote" : "local");

	iser_conn->state = ISER_CONN_UP;
	complete(&iser_conn->up_completion);
}

/*
 * Called with state mutex held
 */
static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
				 bool destroy)
{
	struct iser_conn *iser_conn = cma_id->context;

	lockdep_assert_held(&iser_conn->state_mutex);
	/*
	 * We are not guaranteed that we visited disconnected_handler
	 * by now, call it here to be safe that we handle CM drep
	 * and flush errors.
	 */
	if (iser_conn_terminate(iser_conn)) {
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
		else
			iser_err("iscsi_iser connection isn't bound\n");
	}
	iser_free_ib_conn_res(iser_conn, destroy);
	complete(&iser_conn->ib_completion);
}

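/**
 * iser_cma_handler - dispatches RDMA CM events to the handlers above
 * @cma_id: rdma_cm connection id
 * @event: rdma_cm event to handle
 *
 * Takes the connection state mutex around the event handling.
 *
 * Return: 0 normally; 1 on DEVICE_REMOVAL when the connection is not
 * yet DOWN, which asks the RDMA CM core to destroy the cma_id for us.
 */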
static int iser_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct iser_conn *iser_conn;
	int ret = 0;

	iser_conn = cma_id->context;
	iser_info("%s (%d): status %d conn %p id %p\n",
		  rdma_event_msg(event->event), event->event,
		  event->status, cma_id->context, cma_id);

	mutex_lock(&iser_conn->state_mutex);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id, event->param.conn.private_data);
		break;
	case RDMA_CM_EVENT_REJECTED:
		iser_info("Connection rejected: %s\n",
			  rdma_reject_msg(cma_id, event->status));
		fallthrough;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		iser_cleanup_handler(cma_id, false);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/*
		 * We *must* destroy the device as we cannot rely
		 * on iscsid to be around to initiate error handling.
		 * Also, if we are not in state DOWN, implicitly destroy
		 * the cma_id.
		 */
		iser_cleanup_handler(cma_id, true);
		if (iser_conn->state != ISER_CONN_DOWN) {
			iser_conn->ib_conn.cma_id = NULL;
			ret = 1;
		}
		break;
	default:
		iser_err("Unexpected RDMA CM event: %s (%d)\n",
			 rdma_event_msg(event->event), event->event);
		break;
	}
	mutex_unlock(&iser_conn->state_mutex);

	return ret;
}

void iser_conn_init(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	iser_conn->state = ISER_CONN_INIT;
	init_completion(&iser_conn->stop_completion);
	init_completion(&iser_conn->ib_completion);
	init_completion(&iser_conn->up_completion);
	INIT_LIST_HEAD(&iser_conn->conn_list);
	mutex_init(&iser_conn->state_mutex);

	ib_conn->reg_cqe.done = iser_reg_comp;
}

/*
 * Starts the process of connecting to the target and sleeps until
 * the connection is established or rejected.
 */
int iser_connect(struct iser_conn *iser_conn, struct sockaddr *src_addr,
		 struct sockaddr *dst_addr, int non_blocking)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	mutex_lock(&iser_conn->state_mutex);

	sprintf(iser_conn->name, "%pISp", dst_addr);

	iser_info("connecting to: %s\n", iser_conn->name);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_conn->state = ISER_CONN_PENDING;

	ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler,
					 iser_conn, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_for_completion_interruptible(&iser_conn->up_completion);

		if (iser_conn->state != ISER_CONN_UP) {
			err = -EIO;
			goto connect_failure;
		}
	}
	mutex_unlock(&iser_conn->state_mutex);

	mutex_lock(&ig.connlist_mutex);
	list_add(&iser_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	iser_conn->state = ISER_CONN_DOWN;
connect_failure:
	mutex_unlock(&iser_conn->state_mutex);
	iser_conn_release(iser_conn);
	return err;
}

int iser_post_recvl(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_login_desc *desc = &iser_conn->login_desc;
	struct ib_recv_wr wr;
	int ret;

	desc->sge.addr = desc->rsp_dma;
	desc->sge.length = ISER_RX_LOGIN_SIZE;
	desc->sge.lkey = ib_conn->device->pd->local_dma_lkey;

	desc->cqe.done = iser_login_rsp;
	wr.wr_cqe = &desc->cqe;
	wr.sg_list = &desc->sge;
	wr.num_sge = 1;
	wr.next = NULL;

	ret = ib_post_recv(ib_conn->qp, &wr, NULL);
	if (unlikely(ret))
		iser_err("ib_post_recv login failed ret=%d\n", ret);

	return ret;
}

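/**
 * iser_post_recvm - post a receive buffer on the main RX ring
 * @iser_conn: iSER connection context
 * @rx_desc: the RX descriptor to post
 *
 * Return: 0 on success, errno code on failure
 */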
int iser_post_recvm(struct iser_conn *iser_conn, struct iser_rx_desc *rx_desc)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_recv_wr wr;
	int ret;

	rx_desc->cqe.done = iser_task_rsp;
	wr.wr_cqe = &rx_desc->cqe;
	wr.sg_list = &rx_desc->rx_sg;
	wr.num_sge = 1;
	wr.next = NULL;

	ret = ib_post_recv(ib_conn->qp, &wr, NULL);
	if (unlikely(ret))
		iser_err("ib_post_recv failed ret=%d\n", ret);

	return ret;
}

/**
 * iser_post_send - Initiate a Send DTO operation
 * @ib_conn: connection RDMA resources
 * @tx_desc: iSER TX descriptor
 *
 * Return: 0 on success, errno code on failure
 */
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_send_wr *wr = &tx_desc->send_wr;
	struct ib_send_wr *first_wr;
	int ret;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      tx_desc->dma_addr, ISER_HEADERS_LEN,
				      DMA_TO_DEVICE);

	wr->next = NULL;
	wr->wr_cqe = &tx_desc->cqe;
	wr->sg_list = tx_desc->tx_sg;
	wr->num_sge = tx_desc->num_sge;
	wr->opcode = IB_WR_SEND;
	wr->send_flags = IB_SEND_SIGNALED;

	if (tx_desc->inv_wr.next)
		first_wr = &tx_desc->inv_wr;
	else if (tx_desc->reg_wr.wr.next)
		first_wr = &tx_desc->reg_wr.wr;
	else
		first_wr = wr;

	ret = ib_post_send(ib_conn->qp, first_wr, NULL);
	if (unlikely(ret))
		iser_err("ib_post_send failed, ret:%d opcode:%d\n",
			 ret, wr->opcode);

	return ret;
}

u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	struct iser_fr_desc *desc = reg->desc;
	unsigned long sector_size = iser_task->sc->device->sector_size;
	struct ib_mr_status mr_status;
	int ret;

	if (desc && desc->sig_protected) {
		desc->sig_protected = false;
		ret = ib_check_mr_status(desc->rsc.sig_mr,
					 IB_MR_CHECK_SIG_STATUS, &mr_status);
		if (ret) {
			iser_err("ib_check_mr_status failed, ret %d\n", ret);
			/* Not a lot we can do, return ambiguous guard error */
			*sector = 0;
			return 0x1;
		}

		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
			sector_t sector_off = mr_status.sig_err.sig_err_offset;

			sector_div(sector_off, sector_size + 8);
			*sector = scsi_get_sector(iser_task->sc) + sector_off;

			iser_err("PI error found type %d at sector %llx "
				 "expected %x vs actual %x\n",
				 mr_status.sig_err.err_type,
				 (unsigned long long)*sector,
				 mr_status.sig_err.expected,
				 mr_status.sig_err.actual);

			switch (mr_status.sig_err.err_type) {
			case IB_SIG_BAD_GUARD:
				return 0x1;
			case IB_SIG_BAD_REFTAG:
				return 0x3;
			case IB_SIG_BAD_APPTAG:
				return 0x2;
			}
		}
	}

	return 0;
}

void iser_err_comp(struct ib_wc *wc, const char *type)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR) {
		struct iser_conn *iser_conn = to_iser_conn(wc->qp->qp_context);

		iser_err("%s failure: %s (%d) vend_err %#x\n", type,
			 ib_wc_status_msg(wc->status), wc->status,
			 wc->vendor_err);

		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
	} else {
		iser_dbg("%s failure: %s (%d)\n", type,
			 ib_wc_status_msg(wc->status), wc->status);
	}
}