/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_LEN		(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_LEN		(ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
				 ISCSI_ISER_MAX_CONN)

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("qp event %s (%d)\n",
		 ib_event_msg(cause->event), cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	iser_err("async event %s (%d) on device %s port %d\n",
		 ib_event_msg(event->event), event->event,
		 event->device->name, event->element.port_num);
}

/**
 * iser_create_device_ib_res - creates the Protection Domain (PD) and
 * Completion Queues (CQs) for the device associated with the adapter.
 *
 * returns 0 on success, -1 or errno code on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	int ret, i, max_cqe;

	ret = iser_assign_reg_ops(device);
	if (ret)
		return ret;

	/* one CQ per online CPU, bounded by the device's comp vectors */
	device->comps_used = min_t(int, num_online_cpus(),
				   ib_dev->num_comp_vectors);

	device->comps = kcalloc(device->comps_used, sizeof(*device->comps),
				GFP_KERNEL);
	if (!device->comps)
		goto comps_err;

	max_cqe = min(ISER_MAX_CQ_LEN, ib_dev->attrs.max_cqe);

	iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
		  device->comps_used, ib_dev->name,
		  ib_dev->num_comp_vectors, max_cqe);

	device->pd = ib_alloc_pd(ib_dev,
		iser_always_reg ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
	if (IS_ERR(device->pd))
		goto pd_err;

	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		comp->cq = ib_alloc_cq(ib_dev, comp, max_cqe, i,
				       IB_POLL_SOFTIRQ);
		if (IS_ERR(comp->cq)) {
			comp->cq = NULL;
			goto cq_err;
		}
	}

	INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
			      iser_event_handler);
	ib_register_event_handler(&device->event_handler);
	return 0;

cq_err:
	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		if (comp->cq)
			ib_free_cq(comp->cq);
	}
	ib_dealloc_pd(device->pd);
pd_err:
	kfree(device->comps);
comps_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}
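/*
 * Illustrative note (not part of the driver): the CQ array built above is
 * what iser_create_ib_conn_res() later load-balances QPs over.  For
 * example, on a host with 4 online CPUs and an HCA exposing 8 completion
 * vectors, comps_used is min(4, 8) == 4, i.e. one IB_POLL_SOFTIRQ CQ per
 * CPU, each bound to its own completion vector i.
 */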
/**
 * iser_free_device_ib_res - destroys/deallocates the CQs and PD
 * created for the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	int i;

	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		ib_free_cq(comp->cq);
		comp->cq = NULL;
	}

	ib_unregister_event_handler(&device->event_handler);
	ib_dealloc_pd(device->pd);

	kfree(device->comps);
	device->comps = NULL;
	device->pd = NULL;
}

/**
 * iser_alloc_fmr_pool - Creates FMR pool and page_vector
 *
 * returns 0 on success, or errno code on failure
 */
int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
			unsigned cmds_max,
			unsigned int size)
{
	struct iser_device *device = ib_conn->device;
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_page_vec *page_vec;
	struct iser_fr_desc *desc;
	struct ib_fmr_pool *fmr_pool;
	struct ib_fmr_pool_param params;
	int ret;

	INIT_LIST_HEAD(&fr_pool->list);
	spin_lock_init(&fr_pool->lock);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	page_vec = kmalloc(sizeof(*page_vec) + (sizeof(u64) * size),
			   GFP_KERNEL);
	if (!page_vec) {
		ret = -ENOMEM;
		goto err_frpl;
	}

	page_vec->pages = (u64 *)(page_vec + 1);

	params.page_shift = SHIFT_4K;
	params.max_pages_per_fmr = size;
	/*
	 * make the pool size twice the max number of SCSI commands
	 * the ML is expected to queue, watermark for unmap at 50%
	 */
	params.pool_size = cmds_max * 2;
	params.dirty_watermark = cmds_max;
	params.cache = 0;
	params.flush_function = NULL;
	params.access = (IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_READ);

	fmr_pool = ib_create_fmr_pool(device->pd, &params);
	if (IS_ERR(fmr_pool)) {
		ret = PTR_ERR(fmr_pool);
		iser_err("FMR allocation failed, err %d\n", ret);
		goto err_fmr;
	}

	desc->rsc.page_vec = page_vec;
	desc->rsc.fmr_pool = fmr_pool;
	list_add(&desc->list, &fr_pool->list);

	return 0;

err_fmr:
	kfree(page_vec);
err_frpl:
	kfree(desc);

	return ret;
}

/**
 * iser_free_fmr_pool - releases the FMR pool and page vec
 */
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc;

	desc = list_first_entry(&fr_pool->list,
				struct iser_fr_desc, list);
	list_del(&desc->list);

	iser_info("freeing conn %p fmr pool %p\n",
		  ib_conn, desc->rsc.fmr_pool);

	ib_destroy_fmr_pool(desc->rsc.fmr_pool);
	kfree(desc->rsc.page_vec);
	kfree(desc);
}
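/*
 * Usage sketch (illustrative only, error handling elided): a connection
 * bind path would size the FMR pool from the session's queue depth; e.g.
 * with cmds_max == 128 the pool above holds 256 FMRs and starts unmapping
 * once 128 are dirty.  The identifiers below stand in for values the real
 * caller negotiates earlier:
 *
 *	if (iser_alloc_fmr_pool(ib_conn, session->scsi_cmds_max,
 *				iser_conn->pages_per_mr))
 *		goto free_conn;
 *	...
 *	iser_free_fmr_pool(ib_conn);
 */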
static int
iser_alloc_reg_res(struct iser_device *device,
		   struct ib_pd *pd,
		   struct iser_reg_resources *res,
		   unsigned int size)
{
	struct ib_device *ib_dev = device->ib_device;
	enum ib_mr_type mr_type;
	int ret;

	if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		mr_type = IB_MR_TYPE_SG_GAPS;
	else
		mr_type = IB_MR_TYPE_MEM_REG;

	res->mr = ib_alloc_mr(pd, mr_type, size);
	if (IS_ERR(res->mr)) {
		ret = PTR_ERR(res->mr);
		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
		return ret;
	}
	res->mr_valid = 0;

	return 0;
}

static void
iser_free_reg_res(struct iser_reg_resources *rsc)
{
	ib_dereg_mr(rsc->mr);
}

static int
iser_alloc_pi_ctx(struct iser_device *device,
		  struct ib_pd *pd,
		  struct iser_fr_desc *desc,
		  unsigned int size)
{
	struct iser_pi_context *pi_ctx = NULL;
	int ret;

	desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
	if (!desc->pi_ctx)
		return -ENOMEM;

	pi_ctx = desc->pi_ctx;

	ret = iser_alloc_reg_res(device, pd, &pi_ctx->rsc, size);
	if (ret) {
		iser_err("failed to allocate reg_resources\n");
		goto alloc_reg_res_err;
	}

	pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
	if (IS_ERR(pi_ctx->sig_mr)) {
		ret = PTR_ERR(pi_ctx->sig_mr);
		goto sig_mr_failure;
	}
	pi_ctx->sig_mr_valid = 0;
	desc->pi_ctx->sig_protected = 0;

	return 0;

sig_mr_failure:
	iser_free_reg_res(&pi_ctx->rsc);
alloc_reg_res_err:
	kfree(desc->pi_ctx);

	return ret;
}

static void
iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
{
	iser_free_reg_res(&pi_ctx->rsc);
	ib_dereg_mr(pi_ctx->sig_mr);
	kfree(pi_ctx);
}

static struct iser_fr_desc *
iser_create_fastreg_desc(struct iser_device *device,
			 struct ib_pd *pd,
			 bool pi_enable,
			 unsigned int size)
{
	struct iser_fr_desc *desc;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	ret = iser_alloc_reg_res(device, pd, &desc->rsc, size);
	if (ret)
		goto reg_res_alloc_failure;

	if (pi_enable) {
		ret = iser_alloc_pi_ctx(device, pd, desc, size);
		if (ret)
			goto pi_ctx_alloc_failure;
	}

	return desc;

pi_ctx_alloc_failure:
	iser_free_reg_res(&desc->rsc);
reg_res_alloc_failure:
	kfree(desc);

	return ERR_PTR(ret);
}
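/*
 * Illustrative note: with pi_enable a descriptor ends up carrying three
 * MRs: the data MR from iser_alloc_reg_res(), the protection MR inside
 * pi_ctx->rsc, and the signature MR (IB_MR_TYPE_SIGNATURE with 2 entries,
 * one for data and one for protection).  A hypothetical T10-PI caller:
 *
 *	desc = iser_create_fastreg_desc(device, device->pd, true, size);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 */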
/**
 * iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors
 * for fast registration work requests.
 *
 * returns 0 on success, or errno code on failure
 */
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
			    unsigned cmds_max,
			    unsigned int size)
{
	struct iser_device *device = ib_conn->device;
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc;
	int i, ret;

	INIT_LIST_HEAD(&fr_pool->list);
	INIT_LIST_HEAD(&fr_pool->all_list);
	spin_lock_init(&fr_pool->lock);
	fr_pool->size = 0;
	for (i = 0; i < cmds_max; i++) {
		desc = iser_create_fastreg_desc(device, device->pd,
						ib_conn->pi_support, size);
		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto err;
		}

		list_add_tail(&desc->list, &fr_pool->list);
		list_add_tail(&desc->all_list, &fr_pool->all_list);
		fr_pool->size++;
	}

	return 0;

err:
	iser_free_fastreg_pool(ib_conn);
	return ret;
}

/**
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
 */
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc, *tmp;
	int i = 0;

	if (list_empty(&fr_pool->all_list))
		return;

	iser_info("freeing conn %p fr pool\n", ib_conn);

	list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
		list_del(&desc->all_list);
		iser_free_reg_res(&desc->rsc);
		if (desc->pi_ctx)
			iser_free_pi_ctx(desc->pi_ctx);
		kfree(desc);
		++i;
	}

	if (i < fr_pool->size)
		iser_warn("pool still has %d regions registered\n",
			  fr_pool->size - i);
}
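/*
 * Usage sketch (illustrative): iser_alloc_fastreg_pool() is the fast
 * registration counterpart of iser_alloc_fmr_pool() above, paired the
 * same way with one descriptor per outstanding SCSI command.  The driver
 * picks between the two when iser_assign_reg_ops() runs at device setup,
 * so a caller would typically dispatch through the registration ops
 * rather than call either allocator directly, along the lines of:
 *
 *	err = device->reg_ops->alloc_reg_res(ib_conn,
 *					     session->scsi_cmds_max,
 *					     iser_conn->pages_per_mr);
 */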
/**
 * iser_create_ib_conn_res - creates Queue-Pair (QP)
 *
 * returns 0 on success, or errno code on failure
 */
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_device *device;
	struct ib_device *ib_dev;
	struct ib_qp_init_attr init_attr;
	int ret = -ENOMEM;
	int index, min_index = 0;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;
	ib_dev = device->ib_device;

	memset(&init_attr, 0, sizeof init_attr);

	mutex_lock(&ig.connlist_mutex);
	/* select the CQ with the minimal number of usages */
	for (index = 0; index < device->comps_used; index++) {
		if (device->comps[index].active_qps <
		    device->comps[min_index].active_qps)
			min_index = index;
	}
	ib_conn->comp = &device->comps[min_index];
	ib_conn->comp->active_qps++;
	mutex_unlock(&ig.connlist_mutex);
	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context = (void *)ib_conn;
	init_attr.send_cq = ib_conn->comp->cq;
	init_attr.recv_cq = ib_conn->comp->cq;
	init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr.qp_type = IB_QPT_RC;
	if (ib_conn->pi_support) {
		init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
		iser_conn->max_cmds =
			ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
	} else {
		if (ib_dev->attrs.max_qp_wr > ISER_QP_MAX_REQ_DTOS) {
			init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS);
		} else {
			init_attr.cap.max_send_wr = ib_dev->attrs.max_qp_wr;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(ib_dev->attrs.max_qp_wr);
			iser_dbg("device %s supports max_send_wr %d\n",
				 device->ib_device->name,
				 ib_dev->attrs.max_qp_wr);
		}
	}

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_info("setting conn %p cma_id %p qp %p\n",
		  ib_conn, ib_conn->cma_id,
		  ib_conn->cma_id->qp);
	return ret;

out_err:
	mutex_lock(&ig.connlist_mutex);
	ib_conn->comp->active_qps--;
	mutex_unlock(&ig.connlist_mutex);
	iser_err("unable to alloc mem or create resource, err %d\n", ret);

	return ret;
}

/**
 * Based on the resolved device node GUID, see if there is already an
 * allocated device for this device. If there is no such device, create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	/* assign the IB device to the new iser device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}

/**
 * Called with state mutex held
 **/
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
				     enum iser_conn_state comp,
				     enum iser_conn_state exch)
{
	int ret;

	ret = (iser_conn->state == comp);
	if (ret)
		iser_conn->state = exch;

	return ret;
}

void iser_release_work(struct work_struct *work)
{
	struct iser_conn *iser_conn;

	iser_conn = container_of(work, struct iser_conn, release_work);

	/* Wait for conn_stop to complete */
	wait_for_completion(&iser_conn->stop_completion);
	/* Wait for IB resources cleanup to complete */
	wait_for_completion(&iser_conn->ib_completion);

	mutex_lock(&iser_conn->state_mutex);
	iser_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&iser_conn->state_mutex);

	iser_conn_release(iser_conn);
}
/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
 * @destroy: indicator if we need to try to release the
 *           iser device and memory regions pool (only iscsi
 *           shutdown and DEVICE_REMOVAL will use this).
 *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is safe to
 * be invoked multiple times.
 */
static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
				  bool destroy)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_info("freeing conn %p cma_id %p qp %p\n",
		  iser_conn, ib_conn->cma_id, ib_conn->qp);

	if (ib_conn->qp != NULL) {
		mutex_lock(&ig.connlist_mutex);
		ib_conn->comp->active_qps--;
		mutex_unlock(&ig.connlist_mutex);
		rdma_destroy_qp(ib_conn->cma_id);
		ib_conn->qp = NULL;
	}

	if (destroy) {
		if (iser_conn->rx_descs)
			iser_free_rx_descriptors(iser_conn);

		if (device != NULL) {
			iser_device_try_release(device);
			ib_conn->device = NULL;
		}
	}
}

/**
 * Frees all conn objects and deallocs conn descriptor
 */
void iser_conn_release(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	mutex_lock(&ig.connlist_mutex);
	list_del(&iser_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);

	mutex_lock(&iser_conn->state_mutex);
	/* In case we end up here without ep_disconnect being invoked. */
	if (iser_conn->state != ISER_CONN_DOWN) {
		iser_warn("iser conn %p state %d, expected state down.\n",
			  iser_conn, iser_conn->state);
		iscsi_destroy_endpoint(iser_conn->ep);
		iser_conn->state = ISER_CONN_DOWN;
	}
	/*
	 * In case we never got to bind stage, we still need to
	 * release IB resources (which is safe to call more than once).
	 */
	iser_free_ib_conn_res(iser_conn, true);
	mutex_unlock(&iser_conn->state_mutex);

	if (ib_conn->cma_id != NULL) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}

	kfree(iser_conn);
}
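/*
 * Teardown ordering sketch (illustrative): a shutdown initiated from
 * iscsid runs roughly as
 *
 *	iser_conn_terminate()  - rdma_disconnect() and drain the SQ
 *	iser_cleanup_handler() - free the QP, complete ib_completion
 *	iser_release_work()    - wait for stop/ib completions, state DOWN
 *	iser_conn_release()    - drop device ref, destroy cma_id, kfree
 */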
/**
 * Triggers the start of the disconnect procedures and waits for them
 * to be done. Called with state mutex held.
 */
int iser_conn_terminate(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	/* terminate the iser conn only if the conn state is UP */
	if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
				       ISER_CONN_TERMINATING))
		return 0;

	iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);

	/* suspend queuing of new iscsi commands */
	if (iser_conn->iscsi_conn)
		iscsi_suspend_queue(iser_conn->iscsi_conn);

	/*
	 * In case we didn't already clean up the cma_id (peer initiated
	 * a disconnection), we need to cause the CMA to change the QP
	 * state to ERROR.
	 */
	if (ib_conn->cma_id) {
		err = rdma_disconnect(ib_conn->cma_id);
		if (err)
			iser_err("Failed to disconnect, conn: 0x%p err %d\n",
				 iser_conn, err);

		/* block until all flush errors are consumed */
		ib_drain_sq(ib_conn->qp);
	}

	return 1;
}

/**
 * Called with state mutex held
 **/
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_conn->state = ISER_CONN_TERMINATING;
}

static void
iser_calc_scsi_params(struct iser_conn *iser_conn,
		      unsigned int max_sectors)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	struct ib_device_attr *attr = &device->ib_device->attrs;
	unsigned short sg_tablesize, sup_sg_tablesize;
	unsigned short reserved_mr_pages;

	/*
	 * FRs without SG_GAPS or FMRs can only map up to a (device) page per
	 * entry, but if the first entry is misaligned we'll end up using two
	 * entries (head and tail) for a single page worth of data, so one
	 * additional entry is required.
	 */
	if ((attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) &&
	    (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG))
		reserved_mr_pages = 0;
	else
		reserved_mr_pages = 1;

	sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
	if (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
		sup_sg_tablesize =
			min_t(uint, ISCSI_ISER_MAX_SG_TABLESIZE,
			      attr->max_fast_reg_page_list_len -
			      reserved_mr_pages);
	else
		sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;

	iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
	iser_conn->pages_per_mr =
		iser_conn->scsi_sg_tablesize + reserved_mr_pages;
}

/**
 * Called with state mutex held
 **/
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn *iser_conn;
	struct ib_conn *ib_conn;
	int ret;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ib_conn = &iser_conn->ib_conn;
	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		iser_connect_error(cma_id);
		return;
	}

	ib_conn->device = device;

	/* connection T10-PI support */
	if (iser_pi_enable) {
		if (!(device->ib_device->attrs.device_cap_flags &
		      IB_DEVICE_SIGNATURE_HANDOVER)) {
			iser_warn("T10-PI requested but not supported on %s, "
				  "continue without T10-PI\n",
				  ib_conn->device->ib_device->name);
			ib_conn->pi_support = false;
		} else {
			ib_conn->pi_support = true;
		}
	}

	iser_calc_scsi_params(iser_conn, iser_max_sectors);

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
		return;
	}
}
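/*
 * Worked example for iser_calc_scsi_params() (illustrative numbers):
 * with max_sectors == 1024, i.e. 512 KB IOs, sg_tablesize is
 * DIV_ROUND_UP(1024 * 512, 4096) == 128.  On a device without SG_GAPS
 * support one extra page is reserved for a misaligned head, so
 * pages_per_mr becomes scsi_sg_tablesize + 1 == 129.
 */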
/**
 * Called with state mutex held
 **/
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int ret;
	struct iser_cm_hdr req_hdr;
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ret = iser_create_ib_conn_res(ib_conn);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = device->ib_device->attrs.max_qp_rd_atom;
	conn_param.initiator_depth = 1;
	conn_param.retry_count = 7;
	conn_param.rnr_retry_count = 6;

	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = ISER_ZBVA_NOT_SUP;
	if (!device->remote_inv_sup)
		req_hdr.flags |= ISER_SEND_W_INV_NOT_SUP;
	conn_param.private_data = (void *)&req_hdr;
	conn_param.private_data_len = sizeof(struct iser_cm_hdr);

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return;
failure:
	iser_connect_error(cma_id);
}

static void iser_connected_handler(struct rdma_cm_id *cma_id,
				   const void *private_data)
{
	struct iser_conn *iser_conn;
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
	iser_info("remote qpn:%x my qpn:%x\n",
		  attr.dest_qp_num, cma_id->qp->qp_num);

	if (private_data) {
		u8 flags = *(u8 *)private_data;

		iser_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP);
	}

	iser_info("conn %p: negotiated %s invalidation\n",
		  iser_conn, iser_conn->snd_w_inv ? "remote" : "local");

	iser_conn->state = ISER_CONN_UP;
	complete(&iser_conn->up_completion);
}

static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	if (iser_conn_terminate(iser_conn)) {
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
		else
			iser_err("iscsi_iser connection isn't bound\n");
	}
}
static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
				 bool destroy)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	/*
	 * We are not guaranteed that we visited disconnected_handler
	 * by now, call it here to be safe that we handle CM drep
	 * and flush errors.
	 */
	iser_disconnected_handler(cma_id);
	iser_free_ib_conn_res(iser_conn, destroy);
	complete(&iser_conn->ib_completion);
}

static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iser_conn *iser_conn;
	int ret = 0;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_info("%s (%d): status %d conn %p id %p\n",
		  rdma_event_msg(event->event), event->event,
		  event->status, cma_id->context, cma_id);

	mutex_lock(&iser_conn->state_mutex);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id, event->param.conn.private_data);
		break;
	case RDMA_CM_EVENT_REJECTED:
		iser_info("Connection rejected: %s\n",
			  rdma_reject_msg(cma_id, event->status));
		/* FALLTHROUGH */
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		iser_cleanup_handler(cma_id, false);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/*
		 * We *must* destroy the device as we cannot rely
		 * on iscsid to be around to initiate error handling.
		 * Also, if we are not in state DOWN, implicitly destroy
		 * the cma_id.
		 */
		iser_cleanup_handler(cma_id, true);
		if (iser_conn->state != ISER_CONN_DOWN) {
			iser_conn->ib_conn.cma_id = NULL;
			ret = 1;
		}
		break;
	default:
		iser_err("Unexpected RDMA CM event: %s (%d)\n",
			 rdma_event_msg(event->event), event->event);
		break;
	}
	mutex_unlock(&iser_conn->state_mutex);

	return ret;
}

void iser_conn_init(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	iser_conn->state = ISER_CONN_INIT;
	init_completion(&iser_conn->stop_completion);
	init_completion(&iser_conn->ib_completion);
	init_completion(&iser_conn->up_completion);
	INIT_LIST_HEAD(&iser_conn->conn_list);
	mutex_init(&iser_conn->state_mutex);

	ib_conn->post_recv_buf_count = 0;
	ib_conn->reg_cqe.done = iser_reg_comp;
}
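/*
 * Usage sketch (illustrative, error handling elided): an ep_connect
 * path initializes the connection object and then connects, blocking
 * on up_completion when non_blocking is 0:
 *
 *	iser_conn_init(iser_conn);
 *	err = iser_connect(iser_conn, NULL, dst_addr, 0);
 */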
/**
 * Starts the process of connecting to the target;
 * sleeps until the connection is established or rejected.
 */
int iser_connect(struct iser_conn *iser_conn,
		 struct sockaddr *src_addr,
		 struct sockaddr *dst_addr,
		 int non_blocking)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	mutex_lock(&iser_conn->state_mutex);

	sprintf(iser_conn->name, "%pISp", dst_addr);

	iser_info("connecting to: %s\n", iser_conn->name);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_conn->state = ISER_CONN_PENDING;

	ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler,
					 (void *)iser_conn,
					 RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_for_completion_interruptible(&iser_conn->up_completion);

		if (iser_conn->state != ISER_CONN_UP) {
			err = -EIO;
			goto connect_failure;
		}
	}
	mutex_unlock(&iser_conn->state_mutex);

	mutex_lock(&ig.connlist_mutex);
	list_add(&iser_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	iser_conn->state = ISER_CONN_DOWN;
connect_failure:
	mutex_unlock(&iser_conn->state_mutex);
	iser_conn_release(iser_conn);
	return err;
}

int iser_post_recvl(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_login_desc *desc = &iser_conn->login_desc;
	struct ib_recv_wr wr, *wr_failed;
	int ib_ret;

	desc->sge.addr = desc->rsp_dma;
	desc->sge.length = ISER_RX_LOGIN_SIZE;
	desc->sge.lkey = ib_conn->device->pd->local_dma_lkey;

	desc->cqe.done = iser_login_rsp;
	wr.wr_cqe = &desc->cqe;
	wr.sg_list = &desc->sge;
	wr.num_sge = 1;
	wr.next = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret = ib_post_recv(ib_conn->qp, &wr, &wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}

	return ib_ret;
}

int iser_post_recvm(struct iser_conn *iser_conn, int count)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	unsigned int my_rx_head = iser_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;
	struct ib_recv_wr *wr, *wr_failed;
	int i, ib_ret;

	for (wr = ib_conn->rx_wr, i = 0; i < count; i++, wr++) {
		rx_desc = &iser_conn->rx_descs[my_rx_head];
		rx_desc->cqe.done = iser_task_rsp;
		wr->wr_cqe = &rx_desc->cqe;
		wr->sg_list = &rx_desc->rx_sg;
		wr->num_sge = 1;
		wr->next = wr + 1;
		my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
	}

	wr--;
	wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		iser_conn->rx_desc_head = my_rx_head;

	return ib_ret;
}
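/*
 * Ring arithmetic note (illustrative, assuming the recv ring size is a
 * power of two so that qp_max_recv_dtos_mask == size - 1): the "& mask"
 * in iser_post_recvm() is a cheap modulo.  E.g. with 512 descriptors and
 * rx_desc_head == 510, posting count == 4 chains slots 510, 511, 0, 1
 * into one ib_post_recv() call and leaves rx_desc_head at 2.
 */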
/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, or errno code on failure
 */
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
		   bool signal)
{
	struct ib_send_wr *bad_wr, *wr = iser_tx_next_wr(tx_desc);
	int ib_ret;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      tx_desc->dma_addr, ISER_HEADERS_LEN,
				      DMA_TO_DEVICE);

	wr->next = NULL;
	wr->wr_cqe = &tx_desc->cqe;
	wr->sg_list = tx_desc->tx_sg;
	wr->num_sge = tx_desc->num_sge;
	wr->opcode = IB_WR_SEND;
	wr->send_flags = signal ? IB_SEND_SIGNALED : 0;

	ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, &bad_wr);
	if (ib_ret)
		iser_err("ib_post_send failed, ret:%d opcode:%d\n",
			 ib_ret, bad_wr->opcode);

	return ib_ret;
}

u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	struct iser_fr_desc *desc = reg->mem_h;
	unsigned long sector_size = iser_task->sc->device->sector_size;
	struct ib_mr_status mr_status;
	int ret;

	if (desc && desc->pi_ctx->sig_protected) {
		desc->pi_ctx->sig_protected = 0;
		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
					 IB_MR_CHECK_SIG_STATUS, &mr_status);
		if (ret) {
			pr_err("ib_check_mr_status failed, ret %d\n", ret);
			goto err;
		}

		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
			sector_t sector_off = mr_status.sig_err.sig_err_offset;

			/* each sector carries an 8B DIF tuple with its data */
			sector_div(sector_off, sector_size + 8);
			*sector = scsi_get_lba(iser_task->sc) + sector_off;

			pr_err("PI error found type %d at sector %llx "
			       "expected %x vs actual %x\n",
			       mr_status.sig_err.err_type,
			       (unsigned long long)*sector,
			       mr_status.sig_err.expected,
			       mr_status.sig_err.actual);

			switch (mr_status.sig_err.err_type) {
			case IB_SIG_BAD_GUARD:
				return 0x1;
			case IB_SIG_BAD_REFTAG:
				return 0x3;
			case IB_SIG_BAD_APPTAG:
				return 0x2;
			}
		}
	}

	return 0;
err:
	/* Not a lot we can do here, return ambiguous guard error */
	return 0x1;
}

void iser_err_comp(struct ib_wc *wc, const char *type)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR) {
		struct iser_conn *iser_conn = to_iser_conn(wc->qp->qp_context);

		iser_err("%s failure: %s (%d) vend_err %#x\n", type,
			 ib_wc_status_msg(wc->status), wc->status,
			 wc->vendor_err);

		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
	} else {
		iser_dbg("%s failure: %s (%d)\n", type,
			 ib_wc_status_msg(wc->status), wc->status);
	}
}