1 /******************************************************************************* 2 * This file contains iSCSI extentions for RDMA (iSER) Verbs 3 * 4 * (c) Copyright 2013 Datera, Inc. 5 * 6 * Nicholas A. Bellinger <nab@linux-iscsi.org> 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 ****************************************************************************/ 18 19 #include <linux/string.h> 20 #include <linux/module.h> 21 #include <linux/scatterlist.h> 22 #include <linux/socket.h> 23 #include <linux/in.h> 24 #include <linux/in6.h> 25 #include <rdma/ib_verbs.h> 26 #include <rdma/rdma_cm.h> 27 #include <target/target_core_base.h> 28 #include <target/target_core_fabric.h> 29 #include <target/iscsi/iscsi_transport.h> 30 #include <linux/semaphore.h> 31 32 #include "ib_isert.h" 33 34 #define ISERT_MAX_CONN 8 35 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN) 36 #define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN) 37 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \ 38 ISERT_MAX_CONN) 39 40 static int isert_debug_level; 41 module_param_named(debug_level, isert_debug_level, int, 0644); 42 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)"); 43 44 static DEFINE_MUTEX(device_list_mutex); 45 static LIST_HEAD(device_list); 46 static struct workqueue_struct *isert_comp_wq; 47 static struct workqueue_struct *isert_release_wq; 48 49 static void 50 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn); 51 static int 52 isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn); 53 static void 54 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn); 55 static int 56 isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn); 57 static int 58 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd); 59 static int 60 isert_login_post_recv(struct isert_conn *isert_conn); 61 static int 62 isert_rdma_accept(struct isert_conn *isert_conn); 63 struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); 64 65 static void isert_release_work(struct work_struct *work); 66 static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc); 67 static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc); 68 static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc); 69 static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc); 70 71 static inline bool 72 isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd) 73 { 74 return (conn->pi_support && 75 cmd->prot_op != TARGET_PROT_NORMAL); 76 } 77 78 79 static void 80 isert_qp_event_callback(struct ib_event *e, void *context) 81 { 82 struct isert_conn *isert_conn = context; 83 84 isert_err("%s (%d): conn %p\n", 85 ib_event_msg(e->event), e->event, isert_conn); 86 87 switch (e->event) { 88 case IB_EVENT_COMM_EST: 89 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST); 90 break; 91 case IB_EVENT_QP_LAST_WQE_REACHED: 92 isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n"); 93 break; 94 default: 95 break; 96 } 97 } 98 99 static 
struct isert_comp * 100 isert_comp_get(struct isert_conn *isert_conn) 101 { 102 struct isert_device *device = isert_conn->device; 103 struct isert_comp *comp; 104 int i, min = 0; 105 106 mutex_lock(&device_list_mutex); 107 for (i = 0; i < device->comps_used; i++) 108 if (device->comps[i].active_qps < 109 device->comps[min].active_qps) 110 min = i; 111 comp = &device->comps[min]; 112 comp->active_qps++; 113 mutex_unlock(&device_list_mutex); 114 115 isert_info("conn %p, using comp %p min_index: %d\n", 116 isert_conn, comp, min); 117 118 return comp; 119 } 120 121 static void 122 isert_comp_put(struct isert_comp *comp) 123 { 124 mutex_lock(&device_list_mutex); 125 comp->active_qps--; 126 mutex_unlock(&device_list_mutex); 127 } 128 129 static struct ib_qp * 130 isert_create_qp(struct isert_conn *isert_conn, 131 struct isert_comp *comp, 132 struct rdma_cm_id *cma_id) 133 { 134 struct isert_device *device = isert_conn->device; 135 struct ib_qp_init_attr attr; 136 int ret; 137 138 memset(&attr, 0, sizeof(struct ib_qp_init_attr)); 139 attr.event_handler = isert_qp_event_callback; 140 attr.qp_context = isert_conn; 141 attr.send_cq = comp->cq; 142 attr.recv_cq = comp->cq; 143 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1; 144 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1; 145 attr.cap.max_send_sge = device->ib_device->attrs.max_sge; 146 isert_conn->max_sge = min(device->ib_device->attrs.max_sge, 147 device->ib_device->attrs.max_sge_rd); 148 attr.cap.max_recv_sge = 1; 149 attr.sq_sig_type = IB_SIGNAL_REQ_WR; 150 attr.qp_type = IB_QPT_RC; 151 if (device->pi_capable) 152 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; 153 154 ret = rdma_create_qp(cma_id, device->pd, &attr); 155 if (ret) { 156 isert_err("rdma_create_qp failed for cma_id %d\n", ret); 157 return ERR_PTR(ret); 158 } 159 160 return cma_id->qp; 161 } 162 163 static int 164 isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) 165 { 166 struct isert_comp *comp; 167 int ret; 168 169 comp = isert_comp_get(isert_conn); 170 isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id); 171 if (IS_ERR(isert_conn->qp)) { 172 ret = PTR_ERR(isert_conn->qp); 173 goto err; 174 } 175 176 return 0; 177 err: 178 isert_comp_put(comp); 179 return ret; 180 } 181 182 static int 183 isert_alloc_rx_descriptors(struct isert_conn *isert_conn) 184 { 185 struct isert_device *device = isert_conn->device; 186 struct ib_device *ib_dev = device->ib_device; 187 struct iser_rx_desc *rx_desc; 188 struct ib_sge *rx_sg; 189 u64 dma_addr; 190 int i, j; 191 192 isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS * 193 sizeof(struct iser_rx_desc), GFP_KERNEL); 194 if (!isert_conn->rx_descs) 195 goto fail; 196 197 rx_desc = isert_conn->rx_descs; 198 199 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 200 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc, 201 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 202 if (ib_dma_mapping_error(ib_dev, dma_addr)) 203 goto dma_map_fail; 204 205 rx_desc->dma_addr = dma_addr; 206 207 rx_sg = &rx_desc->rx_sg; 208 rx_sg->addr = rx_desc->dma_addr; 209 rx_sg->length = ISER_RX_PAYLOAD_SIZE; 210 rx_sg->lkey = device->pd->local_dma_lkey; 211 rx_desc->rx_cqe.done = isert_recv_done; 212 } 213 214 return 0; 215 216 dma_map_fail: 217 rx_desc = isert_conn->rx_descs; 218 for (j = 0; j < i; j++, rx_desc++) { 219 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 220 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 221 } 222 kfree(isert_conn->rx_descs); 223 isert_conn->rx_descs = NULL; 224 fail: 225 isert_err("conn %p failed to 
allocate rx descriptors\n", isert_conn); 226 227 return -ENOMEM; 228 } 229 230 static void 231 isert_free_rx_descriptors(struct isert_conn *isert_conn) 232 { 233 struct ib_device *ib_dev = isert_conn->device->ib_device; 234 struct iser_rx_desc *rx_desc; 235 int i; 236 237 if (!isert_conn->rx_descs) 238 return; 239 240 rx_desc = isert_conn->rx_descs; 241 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 242 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 243 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 244 } 245 246 kfree(isert_conn->rx_descs); 247 isert_conn->rx_descs = NULL; 248 } 249 250 static void 251 isert_free_comps(struct isert_device *device) 252 { 253 int i; 254 255 for (i = 0; i < device->comps_used; i++) { 256 struct isert_comp *comp = &device->comps[i]; 257 258 if (comp->cq) 259 ib_free_cq(comp->cq); 260 } 261 kfree(device->comps); 262 } 263 264 static int 265 isert_alloc_comps(struct isert_device *device) 266 { 267 int i, max_cqe, ret = 0; 268 269 device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(), 270 device->ib_device->num_comp_vectors)); 271 272 isert_info("Using %d CQs, %s supports %d vectors support " 273 "Fast registration %d pi_capable %d\n", 274 device->comps_used, device->ib_device->name, 275 device->ib_device->num_comp_vectors, device->use_fastreg, 276 device->pi_capable); 277 278 device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp), 279 GFP_KERNEL); 280 if (!device->comps) { 281 isert_err("Unable to allocate completion contexts\n"); 282 return -ENOMEM; 283 } 284 285 max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe); 286 287 for (i = 0; i < device->comps_used; i++) { 288 struct isert_comp *comp = &device->comps[i]; 289 290 comp->device = device; 291 comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i, 292 IB_POLL_WORKQUEUE); 293 if (IS_ERR(comp->cq)) { 294 isert_err("Unable to allocate cq\n"); 295 ret = PTR_ERR(comp->cq); 296 comp->cq = NULL; 297 goto out_cq; 298 } 299 } 300 301 return 0; 302 out_cq: 303 isert_free_comps(device); 304 return ret; 305 } 306 307 static int 308 isert_create_device_ib_res(struct isert_device *device) 309 { 310 struct ib_device *ib_dev = device->ib_device; 311 int ret; 312 313 isert_dbg("devattr->max_sge: %d\n", ib_dev->attrs.max_sge); 314 isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd); 315 316 /* asign function handlers */ 317 if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS && 318 ib_dev->attrs.device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) { 319 device->use_fastreg = 1; 320 device->reg_rdma_mem = isert_reg_rdma; 321 device->unreg_rdma_mem = isert_unreg_rdma; 322 } else { 323 device->use_fastreg = 0; 324 device->reg_rdma_mem = isert_map_rdma; 325 device->unreg_rdma_mem = isert_unmap_cmd; 326 } 327 328 ret = isert_alloc_comps(device); 329 if (ret) 330 goto out; 331 332 device->pd = ib_alloc_pd(ib_dev); 333 if (IS_ERR(device->pd)) { 334 ret = PTR_ERR(device->pd); 335 isert_err("failed to allocate pd, device %p, ret=%d\n", 336 device, ret); 337 goto out_cq; 338 } 339 340 /* Check signature cap */ 341 device->pi_capable = ib_dev->attrs.device_cap_flags & 342 IB_DEVICE_SIGNATURE_HANDOVER ? 
true : false; 343 344 return 0; 345 346 out_cq: 347 isert_free_comps(device); 348 out: 349 if (ret > 0) 350 ret = -EINVAL; 351 return ret; 352 } 353 354 static void 355 isert_free_device_ib_res(struct isert_device *device) 356 { 357 isert_info("device %p\n", device); 358 359 ib_dealloc_pd(device->pd); 360 isert_free_comps(device); 361 } 362 363 static void 364 isert_device_put(struct isert_device *device) 365 { 366 mutex_lock(&device_list_mutex); 367 device->refcount--; 368 isert_info("device %p refcount %d\n", device, device->refcount); 369 if (!device->refcount) { 370 isert_free_device_ib_res(device); 371 list_del(&device->dev_node); 372 kfree(device); 373 } 374 mutex_unlock(&device_list_mutex); 375 } 376 377 static struct isert_device * 378 isert_device_get(struct rdma_cm_id *cma_id) 379 { 380 struct isert_device *device; 381 int ret; 382 383 mutex_lock(&device_list_mutex); 384 list_for_each_entry(device, &device_list, dev_node) { 385 if (device->ib_device->node_guid == cma_id->device->node_guid) { 386 device->refcount++; 387 isert_info("Found iser device %p refcount %d\n", 388 device, device->refcount); 389 mutex_unlock(&device_list_mutex); 390 return device; 391 } 392 } 393 394 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL); 395 if (!device) { 396 mutex_unlock(&device_list_mutex); 397 return ERR_PTR(-ENOMEM); 398 } 399 400 INIT_LIST_HEAD(&device->dev_node); 401 402 device->ib_device = cma_id->device; 403 ret = isert_create_device_ib_res(device); 404 if (ret) { 405 kfree(device); 406 mutex_unlock(&device_list_mutex); 407 return ERR_PTR(ret); 408 } 409 410 device->refcount++; 411 list_add_tail(&device->dev_node, &device_list); 412 isert_info("Created a new iser device %p refcount %d\n", 413 device, device->refcount); 414 mutex_unlock(&device_list_mutex); 415 416 return device; 417 } 418 419 static void 420 isert_conn_free_fastreg_pool(struct isert_conn *isert_conn) 421 { 422 struct fast_reg_descriptor *fr_desc, *tmp; 423 int i = 0; 424 425 if (list_empty(&isert_conn->fr_pool)) 426 return; 427 428 isert_info("Freeing conn %p fastreg pool", isert_conn); 429 430 list_for_each_entry_safe(fr_desc, tmp, 431 &isert_conn->fr_pool, list) { 432 list_del(&fr_desc->list); 433 ib_dereg_mr(fr_desc->data_mr); 434 if (fr_desc->pi_ctx) { 435 ib_dereg_mr(fr_desc->pi_ctx->prot_mr); 436 ib_dereg_mr(fr_desc->pi_ctx->sig_mr); 437 kfree(fr_desc->pi_ctx); 438 } 439 kfree(fr_desc); 440 ++i; 441 } 442 443 if (i < isert_conn->fr_pool_size) 444 isert_warn("Pool still has %d regions registered\n", 445 isert_conn->fr_pool_size - i); 446 } 447 448 static int 449 isert_create_pi_ctx(struct fast_reg_descriptor *desc, 450 struct ib_device *device, 451 struct ib_pd *pd) 452 { 453 struct pi_context *pi_ctx; 454 int ret; 455 456 pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL); 457 if (!pi_ctx) { 458 isert_err("Failed to allocate pi context\n"); 459 return -ENOMEM; 460 } 461 462 pi_ctx->prot_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 463 ISCSI_ISER_SG_TABLESIZE); 464 if (IS_ERR(pi_ctx->prot_mr)) { 465 isert_err("Failed to allocate prot frmr err=%ld\n", 466 PTR_ERR(pi_ctx->prot_mr)); 467 ret = PTR_ERR(pi_ctx->prot_mr); 468 goto err_pi_ctx; 469 } 470 desc->ind |= ISERT_PROT_KEY_VALID; 471 472 pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2); 473 if (IS_ERR(pi_ctx->sig_mr)) { 474 isert_err("Failed to allocate signature enabled mr err=%ld\n", 475 PTR_ERR(pi_ctx->sig_mr)); 476 ret = PTR_ERR(pi_ctx->sig_mr); 477 goto err_prot_mr; 478 } 479 480 desc->pi_ctx = pi_ctx; 481 desc->ind |= ISERT_SIG_KEY_VALID; 482 
desc->ind &= ~ISERT_PROTECTED; 483 484 return 0; 485 486 err_prot_mr: 487 ib_dereg_mr(pi_ctx->prot_mr); 488 err_pi_ctx: 489 kfree(pi_ctx); 490 491 return ret; 492 } 493 494 static int 495 isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd, 496 struct fast_reg_descriptor *fr_desc) 497 { 498 fr_desc->data_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 499 ISCSI_ISER_SG_TABLESIZE); 500 if (IS_ERR(fr_desc->data_mr)) { 501 isert_err("Failed to allocate data frmr err=%ld\n", 502 PTR_ERR(fr_desc->data_mr)); 503 return PTR_ERR(fr_desc->data_mr); 504 } 505 fr_desc->ind |= ISERT_DATA_KEY_VALID; 506 507 isert_dbg("Created fr_desc %p\n", fr_desc); 508 509 return 0; 510 } 511 512 static int 513 isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) 514 { 515 struct fast_reg_descriptor *fr_desc; 516 struct isert_device *device = isert_conn->device; 517 struct se_session *se_sess = isert_conn->conn->sess->se_sess; 518 struct se_node_acl *se_nacl = se_sess->se_node_acl; 519 int i, ret, tag_num; 520 /* 521 * Setup the number of FRMRs based upon the number of tags 522 * available to session in iscsi_target_locate_portal(). 523 */ 524 tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth); 525 tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS; 526 527 isert_conn->fr_pool_size = 0; 528 for (i = 0; i < tag_num; i++) { 529 fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL); 530 if (!fr_desc) { 531 isert_err("Failed to allocate fast_reg descriptor\n"); 532 ret = -ENOMEM; 533 goto err; 534 } 535 536 ret = isert_create_fr_desc(device->ib_device, 537 device->pd, fr_desc); 538 if (ret) { 539 isert_err("Failed to create fastreg descriptor err=%d\n", 540 ret); 541 kfree(fr_desc); 542 goto err; 543 } 544 545 list_add_tail(&fr_desc->list, &isert_conn->fr_pool); 546 isert_conn->fr_pool_size++; 547 } 548 549 isert_dbg("Creating conn %p fastreg pool size=%d", 550 isert_conn, isert_conn->fr_pool_size); 551 552 return 0; 553 554 err: 555 isert_conn_free_fastreg_pool(isert_conn); 556 return ret; 557 } 558 559 static void 560 isert_init_conn(struct isert_conn *isert_conn) 561 { 562 isert_conn->state = ISER_CONN_INIT; 563 INIT_LIST_HEAD(&isert_conn->node); 564 init_completion(&isert_conn->login_comp); 565 init_completion(&isert_conn->login_req_comp); 566 kref_init(&isert_conn->kref); 567 mutex_init(&isert_conn->mutex); 568 spin_lock_init(&isert_conn->pool_lock); 569 INIT_LIST_HEAD(&isert_conn->fr_pool); 570 INIT_WORK(&isert_conn->release_work, isert_release_work); 571 } 572 573 static void 574 isert_free_login_buf(struct isert_conn *isert_conn) 575 { 576 struct ib_device *ib_dev = isert_conn->device->ib_device; 577 578 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, 579 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); 580 kfree(isert_conn->login_rsp_buf); 581 582 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 583 ISER_RX_PAYLOAD_SIZE, 584 DMA_FROM_DEVICE); 585 kfree(isert_conn->login_req_buf); 586 } 587 588 static int 589 isert_alloc_login_buf(struct isert_conn *isert_conn, 590 struct ib_device *ib_dev) 591 { 592 int ret; 593 594 isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf), 595 GFP_KERNEL); 596 if (!isert_conn->login_req_buf) { 597 isert_err("Unable to allocate isert_conn->login_buf\n"); 598 return -ENOMEM; 599 } 600 601 isert_conn->login_req_dma = ib_dma_map_single(ib_dev, 602 isert_conn->login_req_buf, 603 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 604 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); 605 if (ret) { 606 isert_err("login_req_dma mapping error: %d\n", ret); 
607 isert_conn->login_req_dma = 0; 608 goto out_free_login_req_buf; 609 } 610 611 isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL); 612 if (!isert_conn->login_rsp_buf) { 613 isert_err("Unable to allocate isert_conn->login_rspbuf\n"); 614 goto out_unmap_login_req_buf; 615 } 616 617 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, 618 isert_conn->login_rsp_buf, 619 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); 620 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); 621 if (ret) { 622 isert_err("login_rsp_dma mapping error: %d\n", ret); 623 isert_conn->login_rsp_dma = 0; 624 goto out_free_login_rsp_buf; 625 } 626 627 return 0; 628 629 out_free_login_rsp_buf: 630 kfree(isert_conn->login_rsp_buf); 631 out_unmap_login_req_buf: 632 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 633 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 634 out_free_login_req_buf: 635 kfree(isert_conn->login_req_buf); 636 return ret; 637 } 638 639 static void 640 isert_set_nego_params(struct isert_conn *isert_conn, 641 struct rdma_conn_param *param) 642 { 643 struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs; 644 645 /* Set max inflight RDMA READ requests */ 646 isert_conn->initiator_depth = min_t(u8, param->initiator_depth, 647 attr->max_qp_init_rd_atom); 648 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth); 649 650 if (param->private_data) { 651 u8 flags = *(u8 *)param->private_data; 652 653 /* 654 * use remote invalidation if the both initiator 655 * and the HCA support it 656 */ 657 isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) && 658 (attr->device_cap_flags & 659 IB_DEVICE_MEM_MGT_EXTENSIONS); 660 if (isert_conn->snd_w_inv) 661 isert_info("Using remote invalidation\n"); 662 } 663 } 664 665 static int 666 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 667 { 668 struct isert_np *isert_np = cma_id->context; 669 struct iscsi_np *np = isert_np->np; 670 struct isert_conn *isert_conn; 671 struct isert_device *device; 672 int ret = 0; 673 674 spin_lock_bh(&np->np_thread_lock); 675 if (!np->enabled) { 676 spin_unlock_bh(&np->np_thread_lock); 677 isert_dbg("iscsi_np is not enabled, reject connect request\n"); 678 return rdma_reject(cma_id, NULL, 0); 679 } 680 spin_unlock_bh(&np->np_thread_lock); 681 682 isert_dbg("cma_id: %p, portal: %p\n", 683 cma_id, cma_id->context); 684 685 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL); 686 if (!isert_conn) 687 return -ENOMEM; 688 689 isert_init_conn(isert_conn); 690 isert_conn->cm_id = cma_id; 691 692 ret = isert_alloc_login_buf(isert_conn, cma_id->device); 693 if (ret) 694 goto out; 695 696 device = isert_device_get(cma_id); 697 if (IS_ERR(device)) { 698 ret = PTR_ERR(device); 699 goto out_rsp_dma_map; 700 } 701 isert_conn->device = device; 702 703 isert_set_nego_params(isert_conn, &event->param.conn); 704 705 ret = isert_conn_setup_qp(isert_conn, cma_id); 706 if (ret) 707 goto out_conn_dev; 708 709 ret = isert_login_post_recv(isert_conn); 710 if (ret) 711 goto out_conn_dev; 712 713 ret = isert_rdma_accept(isert_conn); 714 if (ret) 715 goto out_conn_dev; 716 717 mutex_lock(&isert_np->mutex); 718 list_add_tail(&isert_conn->node, &isert_np->accepted); 719 mutex_unlock(&isert_np->mutex); 720 721 return 0; 722 723 out_conn_dev: 724 isert_device_put(device); 725 out_rsp_dma_map: 726 isert_free_login_buf(isert_conn); 727 out: 728 kfree(isert_conn); 729 rdma_reject(cma_id, NULL, 0); 730 return ret; 731 } 732 733 static void 734 isert_connect_release(struct isert_conn 
*isert_conn) 735 { 736 struct isert_device *device = isert_conn->device; 737 738 isert_dbg("conn %p\n", isert_conn); 739 740 BUG_ON(!device); 741 742 if (device->use_fastreg) 743 isert_conn_free_fastreg_pool(isert_conn); 744 745 isert_free_rx_descriptors(isert_conn); 746 if (isert_conn->cm_id) 747 rdma_destroy_id(isert_conn->cm_id); 748 749 if (isert_conn->qp) { 750 struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context; 751 752 isert_comp_put(comp); 753 ib_destroy_qp(isert_conn->qp); 754 } 755 756 if (isert_conn->login_req_buf) 757 isert_free_login_buf(isert_conn); 758 759 isert_device_put(device); 760 761 kfree(isert_conn); 762 } 763 764 static void 765 isert_connected_handler(struct rdma_cm_id *cma_id) 766 { 767 struct isert_conn *isert_conn = cma_id->qp->qp_context; 768 struct isert_np *isert_np = cma_id->context; 769 770 isert_info("conn %p\n", isert_conn); 771 772 mutex_lock(&isert_conn->mutex); 773 isert_conn->state = ISER_CONN_UP; 774 kref_get(&isert_conn->kref); 775 mutex_unlock(&isert_conn->mutex); 776 777 mutex_lock(&isert_np->mutex); 778 list_move_tail(&isert_conn->node, &isert_np->pending); 779 mutex_unlock(&isert_np->mutex); 780 781 isert_info("np %p: Allow accept_np to continue\n", isert_np); 782 up(&isert_np->sem); 783 } 784 785 static void 786 isert_release_kref(struct kref *kref) 787 { 788 struct isert_conn *isert_conn = container_of(kref, 789 struct isert_conn, kref); 790 791 isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm, 792 current->pid); 793 794 isert_connect_release(isert_conn); 795 } 796 797 static void 798 isert_put_conn(struct isert_conn *isert_conn) 799 { 800 kref_put(&isert_conn->kref, isert_release_kref); 801 } 802 803 static void 804 isert_handle_unbound_conn(struct isert_conn *isert_conn) 805 { 806 struct isert_np *isert_np = isert_conn->cm_id->context; 807 808 mutex_lock(&isert_np->mutex); 809 if (!list_empty(&isert_conn->node)) { 810 /* 811 * This means iscsi doesn't know this connection 812 * so schedule a cleanup ourselves 813 */ 814 list_del_init(&isert_conn->node); 815 isert_put_conn(isert_conn); 816 queue_work(isert_release_wq, &isert_conn->release_work); 817 } 818 mutex_unlock(&isert_np->mutex); 819 } 820 821 /** 822 * isert_conn_terminate() - Initiate connection termination 823 * @isert_conn: isert connection struct 824 * 825 * Notes: 826 * In case the connection state is BOUND, move state 827 * to TEMINATING and start teardown sequence (rdma_disconnect). 828 * In case the connection state is UP, complete flush as well. 829 * 830 * This routine must be called with mutex held. Thus it is 831 * safe to call multiple times. 
832 */ 833 static void 834 isert_conn_terminate(struct isert_conn *isert_conn) 835 { 836 int err; 837 838 if (isert_conn->state >= ISER_CONN_TERMINATING) 839 return; 840 841 isert_info("Terminating conn %p state %d\n", 842 isert_conn, isert_conn->state); 843 isert_conn->state = ISER_CONN_TERMINATING; 844 err = rdma_disconnect(isert_conn->cm_id); 845 if (err) 846 isert_warn("Failed rdma_disconnect isert_conn %p\n", 847 isert_conn); 848 } 849 850 static int 851 isert_np_cma_handler(struct isert_np *isert_np, 852 enum rdma_cm_event_type event) 853 { 854 isert_dbg("%s (%d): isert np %p\n", 855 rdma_event_msg(event), event, isert_np); 856 857 switch (event) { 858 case RDMA_CM_EVENT_DEVICE_REMOVAL: 859 isert_np->cm_id = NULL; 860 break; 861 case RDMA_CM_EVENT_ADDR_CHANGE: 862 isert_np->cm_id = isert_setup_id(isert_np); 863 if (IS_ERR(isert_np->cm_id)) { 864 isert_err("isert np %p setup id failed: %ld\n", 865 isert_np, PTR_ERR(isert_np->cm_id)); 866 isert_np->cm_id = NULL; 867 } 868 break; 869 default: 870 isert_err("isert np %p Unexpected event %d\n", 871 isert_np, event); 872 } 873 874 return -1; 875 } 876 877 static int 878 isert_disconnected_handler(struct rdma_cm_id *cma_id, 879 enum rdma_cm_event_type event) 880 { 881 struct isert_conn *isert_conn = cma_id->qp->qp_context; 882 883 mutex_lock(&isert_conn->mutex); 884 switch (isert_conn->state) { 885 case ISER_CONN_TERMINATING: 886 break; 887 case ISER_CONN_UP: 888 isert_conn_terminate(isert_conn); 889 ib_drain_qp(isert_conn->qp); 890 isert_handle_unbound_conn(isert_conn); 891 break; 892 case ISER_CONN_BOUND: 893 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */ 894 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 895 break; 896 default: 897 isert_warn("conn %p teminating in state %d\n", 898 isert_conn, isert_conn->state); 899 } 900 mutex_unlock(&isert_conn->mutex); 901 902 return 0; 903 } 904 905 static int 906 isert_connect_error(struct rdma_cm_id *cma_id) 907 { 908 struct isert_conn *isert_conn = cma_id->qp->qp_context; 909 910 list_del_init(&isert_conn->node); 911 isert_conn->cm_id = NULL; 912 isert_put_conn(isert_conn); 913 914 return -1; 915 } 916 917 static int 918 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 919 { 920 struct isert_np *isert_np = cma_id->context; 921 int ret = 0; 922 923 isert_info("%s (%d): status %d id %p np %p\n", 924 rdma_event_msg(event->event), event->event, 925 event->status, cma_id, cma_id->context); 926 927 if (isert_np->cm_id == cma_id) 928 return isert_np_cma_handler(cma_id->context, event->event); 929 930 switch (event->event) { 931 case RDMA_CM_EVENT_CONNECT_REQUEST: 932 ret = isert_connect_request(cma_id, event); 933 if (ret) 934 isert_err("failed handle connect request %d\n", ret); 935 break; 936 case RDMA_CM_EVENT_ESTABLISHED: 937 isert_connected_handler(cma_id); 938 break; 939 case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */ 940 case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */ 941 case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */ 942 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ 943 ret = isert_disconnected_handler(cma_id, event->event); 944 break; 945 case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */ 946 case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */ 947 case RDMA_CM_EVENT_CONNECT_ERROR: 948 ret = isert_connect_error(cma_id); 949 break; 950 default: 951 isert_err("Unhandled RDMA CMA event: %d\n", event->event); 952 break; 953 } 954 955 return ret; 956 } 957 958 static int 959 isert_post_recvm(struct isert_conn *isert_conn, u32 count) 960 { 961 struct ib_recv_wr *rx_wr, 
*rx_wr_failed; 962 int i, ret; 963 struct iser_rx_desc *rx_desc; 964 965 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { 966 rx_desc = &isert_conn->rx_descs[i]; 967 968 rx_wr->wr_cqe = &rx_desc->rx_cqe; 969 rx_wr->sg_list = &rx_desc->rx_sg; 970 rx_wr->num_sge = 1; 971 rx_wr->next = rx_wr + 1; 972 } 973 rx_wr--; 974 rx_wr->next = NULL; /* mark end of work requests list */ 975 976 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, 977 &rx_wr_failed); 978 if (ret) 979 isert_err("ib_post_recv() failed with ret: %d\n", ret); 980 981 return ret; 982 } 983 984 static int 985 isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc) 986 { 987 struct ib_recv_wr *rx_wr_failed, rx_wr; 988 int ret; 989 990 rx_wr.wr_cqe = &rx_desc->rx_cqe; 991 rx_wr.sg_list = &rx_desc->rx_sg; 992 rx_wr.num_sge = 1; 993 rx_wr.next = NULL; 994 995 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed); 996 if (ret) 997 isert_err("ib_post_recv() failed with ret: %d\n", ret); 998 999 return ret; 1000 } 1001 1002 static int 1003 isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) 1004 { 1005 struct ib_device *ib_dev = isert_conn->cm_id->device; 1006 struct ib_send_wr send_wr, *send_wr_failed; 1007 int ret; 1008 1009 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr, 1010 ISER_HEADERS_LEN, DMA_TO_DEVICE); 1011 1012 tx_desc->tx_cqe.done = isert_login_send_done; 1013 1014 send_wr.next = NULL; 1015 send_wr.wr_cqe = &tx_desc->tx_cqe; 1016 send_wr.sg_list = tx_desc->tx_sg; 1017 send_wr.num_sge = tx_desc->num_sge; 1018 send_wr.opcode = IB_WR_SEND; 1019 send_wr.send_flags = IB_SEND_SIGNALED; 1020 1021 ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed); 1022 if (ret) 1023 isert_err("ib_post_send() failed, ret: %d\n", ret); 1024 1025 return ret; 1026 } 1027 1028 static void 1029 isert_create_send_desc(struct isert_conn *isert_conn, 1030 struct isert_cmd *isert_cmd, 1031 struct iser_tx_desc *tx_desc) 1032 { 1033 struct isert_device *device = isert_conn->device; 1034 struct ib_device *ib_dev = device->ib_device; 1035 1036 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, 1037 ISER_HEADERS_LEN, DMA_TO_DEVICE); 1038 1039 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl)); 1040 tx_desc->iser_header.flags = ISCSI_CTRL; 1041 1042 tx_desc->num_sge = 1; 1043 1044 if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) { 1045 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; 1046 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc); 1047 } 1048 } 1049 1050 static int 1051 isert_init_tx_hdrs(struct isert_conn *isert_conn, 1052 struct iser_tx_desc *tx_desc) 1053 { 1054 struct isert_device *device = isert_conn->device; 1055 struct ib_device *ib_dev = device->ib_device; 1056 u64 dma_addr; 1057 1058 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc, 1059 ISER_HEADERS_LEN, DMA_TO_DEVICE); 1060 if (ib_dma_mapping_error(ib_dev, dma_addr)) { 1061 isert_err("ib_dma_mapping_error() failed\n"); 1062 return -ENOMEM; 1063 } 1064 1065 tx_desc->dma_addr = dma_addr; 1066 tx_desc->tx_sg[0].addr = tx_desc->dma_addr; 1067 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; 1068 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; 1069 1070 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n", 1071 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length, 1072 tx_desc->tx_sg[0].lkey); 1073 1074 return 0; 1075 } 1076 1077 static void 1078 isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1079 struct ib_send_wr *send_wr) 1080 { 
1081 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; 1082 1083 isert_cmd->iser_ib_op = ISER_IB_SEND; 1084 tx_desc->tx_cqe.done = isert_send_done; 1085 send_wr->wr_cqe = &tx_desc->tx_cqe; 1086 1087 if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) { 1088 send_wr->opcode = IB_WR_SEND_WITH_INV; 1089 send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey; 1090 } else { 1091 send_wr->opcode = IB_WR_SEND; 1092 } 1093 1094 send_wr->sg_list = &tx_desc->tx_sg[0]; 1095 send_wr->num_sge = isert_cmd->tx_desc.num_sge; 1096 send_wr->send_flags = IB_SEND_SIGNALED; 1097 } 1098 1099 static int 1100 isert_login_post_recv(struct isert_conn *isert_conn) 1101 { 1102 struct ib_recv_wr rx_wr, *rx_wr_fail; 1103 struct ib_sge sge; 1104 int ret; 1105 1106 memset(&sge, 0, sizeof(struct ib_sge)); 1107 sge.addr = isert_conn->login_req_dma; 1108 sge.length = ISER_RX_PAYLOAD_SIZE; 1109 sge.lkey = isert_conn->device->pd->local_dma_lkey; 1110 1111 isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", 1112 sge.addr, sge.length, sge.lkey); 1113 1114 isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done; 1115 1116 memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); 1117 rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe; 1118 rx_wr.sg_list = &sge; 1119 rx_wr.num_sge = 1; 1120 1121 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail); 1122 if (ret) 1123 isert_err("ib_post_recv() failed: %d\n", ret); 1124 1125 return ret; 1126 } 1127 1128 static int 1129 isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, 1130 u32 length) 1131 { 1132 struct isert_conn *isert_conn = conn->context; 1133 struct isert_device *device = isert_conn->device; 1134 struct ib_device *ib_dev = device->ib_device; 1135 struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc; 1136 int ret; 1137 1138 isert_create_send_desc(isert_conn, NULL, tx_desc); 1139 1140 memcpy(&tx_desc->iscsi_header, &login->rsp[0], 1141 sizeof(struct iscsi_hdr)); 1142 1143 isert_init_tx_hdrs(isert_conn, tx_desc); 1144 1145 if (length > 0) { 1146 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1]; 1147 1148 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma, 1149 length, DMA_TO_DEVICE); 1150 1151 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length); 1152 1153 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma, 1154 length, DMA_TO_DEVICE); 1155 1156 tx_dsg->addr = isert_conn->login_rsp_dma; 1157 tx_dsg->length = length; 1158 tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey; 1159 tx_desc->num_sge = 2; 1160 } 1161 if (!login->login_failed) { 1162 if (login->login_complete) { 1163 if (!conn->sess->sess_ops->SessionType && 1164 isert_conn->device->use_fastreg) { 1165 ret = isert_conn_create_fastreg_pool(isert_conn); 1166 if (ret) { 1167 isert_err("Conn: %p failed to create" 1168 " fastreg pool\n", isert_conn); 1169 return ret; 1170 } 1171 } 1172 1173 ret = isert_alloc_rx_descriptors(isert_conn); 1174 if (ret) 1175 return ret; 1176 1177 ret = isert_post_recvm(isert_conn, 1178 ISERT_QP_MAX_RECV_DTOS); 1179 if (ret) 1180 return ret; 1181 1182 /* Now we are in FULL_FEATURE phase */ 1183 mutex_lock(&isert_conn->mutex); 1184 isert_conn->state = ISER_CONN_FULL_FEATURE; 1185 mutex_unlock(&isert_conn->mutex); 1186 goto post_send; 1187 } 1188 1189 ret = isert_login_post_recv(isert_conn); 1190 if (ret) 1191 return ret; 1192 } 1193 post_send: 1194 ret = isert_login_post_send(isert_conn, tx_desc); 1195 if (ret) 1196 return ret; 1197 1198 return 0; 1199 } 1200 1201 static void 1202 isert_rx_login_req(struct isert_conn *isert_conn) 1203 { 1204 struct 
iser_rx_desc *rx_desc = isert_conn->login_req_buf; 1205 int rx_buflen = isert_conn->login_req_len; 1206 struct iscsi_conn *conn = isert_conn->conn; 1207 struct iscsi_login *login = conn->conn_login; 1208 int size; 1209 1210 isert_info("conn %p\n", isert_conn); 1211 1212 WARN_ON_ONCE(!login); 1213 1214 if (login->first_request) { 1215 struct iscsi_login_req *login_req = 1216 (struct iscsi_login_req *)&rx_desc->iscsi_header; 1217 /* 1218 * Setup the initial iscsi_login values from the leading 1219 * login request PDU. 1220 */ 1221 login->leading_connection = (!login_req->tsih) ? 1 : 0; 1222 login->current_stage = 1223 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) 1224 >> 2; 1225 login->version_min = login_req->min_version; 1226 login->version_max = login_req->max_version; 1227 memcpy(login->isid, login_req->isid, 6); 1228 login->cmd_sn = be32_to_cpu(login_req->cmdsn); 1229 login->init_task_tag = login_req->itt; 1230 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); 1231 login->cid = be16_to_cpu(login_req->cid); 1232 login->tsih = be16_to_cpu(login_req->tsih); 1233 } 1234 1235 memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); 1236 1237 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); 1238 isert_dbg("Using login payload size: %d, rx_buflen: %d " 1239 "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen, 1240 MAX_KEY_VALUE_PAIRS); 1241 memcpy(login->req_buf, &rx_desc->data[0], size); 1242 1243 if (login->first_request) { 1244 complete(&isert_conn->login_comp); 1245 return; 1246 } 1247 schedule_delayed_work(&conn->login_work, 0); 1248 } 1249 1250 static struct iscsi_cmd 1251 *isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc) 1252 { 1253 struct isert_conn *isert_conn = conn->context; 1254 struct isert_cmd *isert_cmd; 1255 struct iscsi_cmd *cmd; 1256 1257 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 1258 if (!cmd) { 1259 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n"); 1260 return NULL; 1261 } 1262 isert_cmd = iscsit_priv_cmd(cmd); 1263 isert_cmd->conn = isert_conn; 1264 isert_cmd->iscsi_cmd = cmd; 1265 isert_cmd->rx_desc = rx_desc; 1266 1267 return cmd; 1268 } 1269 1270 static int 1271 isert_handle_scsi_cmd(struct isert_conn *isert_conn, 1272 struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd, 1273 struct iser_rx_desc *rx_desc, unsigned char *buf) 1274 { 1275 struct iscsi_conn *conn = isert_conn->conn; 1276 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; 1277 int imm_data, imm_data_len, unsol_data, sg_nents, rc; 1278 bool dump_payload = false; 1279 unsigned int data_len; 1280 1281 rc = iscsit_setup_scsi_cmd(conn, cmd, buf); 1282 if (rc < 0) 1283 return rc; 1284 1285 imm_data = cmd->immediate_data; 1286 imm_data_len = cmd->first_burst_len; 1287 unsol_data = cmd->unsolicited_data; 1288 data_len = cmd->se_cmd.data_length; 1289 1290 if (imm_data && imm_data_len == data_len) 1291 cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1292 rc = iscsit_process_scsi_cmd(conn, cmd, hdr); 1293 if (rc < 0) { 1294 return 0; 1295 } else if (rc > 0) { 1296 dump_payload = true; 1297 goto sequence_cmd; 1298 } 1299 1300 if (!imm_data) 1301 return 0; 1302 1303 if (imm_data_len != data_len) { 1304 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); 1305 sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents, 1306 &rx_desc->data[0], imm_data_len); 1307 isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n", 1308 sg_nents, imm_data_len); 1309 } else { 1310 sg_init_table(&isert_cmd->sg, 1); 1311 cmd->se_cmd.t_data_sg = 
&isert_cmd->sg; 1312 cmd->se_cmd.t_data_nents = 1; 1313 sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len); 1314 isert_dbg("Transfer Immediate imm_data_len: %d\n", 1315 imm_data_len); 1316 } 1317 1318 cmd->write_data_done += imm_data_len; 1319 1320 if (cmd->write_data_done == cmd->se_cmd.data_length) { 1321 spin_lock_bh(&cmd->istate_lock); 1322 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1323 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1324 spin_unlock_bh(&cmd->istate_lock); 1325 } 1326 1327 sequence_cmd: 1328 rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); 1329 1330 if (!rc && dump_payload == false && unsol_data) 1331 iscsit_set_unsoliticed_dataout(cmd); 1332 else if (dump_payload && imm_data) 1333 target_put_sess_cmd(&cmd->se_cmd); 1334 1335 return 0; 1336 } 1337 1338 static int 1339 isert_handle_iscsi_dataout(struct isert_conn *isert_conn, 1340 struct iser_rx_desc *rx_desc, unsigned char *buf) 1341 { 1342 struct scatterlist *sg_start; 1343 struct iscsi_conn *conn = isert_conn->conn; 1344 struct iscsi_cmd *cmd = NULL; 1345 struct iscsi_data *hdr = (struct iscsi_data *)buf; 1346 u32 unsol_data_len = ntoh24(hdr->dlength); 1347 int rc, sg_nents, sg_off, page_off; 1348 1349 rc = iscsit_check_dataout_hdr(conn, buf, &cmd); 1350 if (rc < 0) 1351 return rc; 1352 else if (!cmd) 1353 return 0; 1354 /* 1355 * FIXME: Unexpected unsolicited_data out 1356 */ 1357 if (!cmd->unsolicited_data) { 1358 isert_err("Received unexpected solicited data payload\n"); 1359 dump_stack(); 1360 return -1; 1361 } 1362 1363 isert_dbg("Unsolicited DataOut unsol_data_len: %u, " 1364 "write_data_done: %u, data_length: %u\n", 1365 unsol_data_len, cmd->write_data_done, 1366 cmd->se_cmd.data_length); 1367 1368 sg_off = cmd->write_data_done / PAGE_SIZE; 1369 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 1370 sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE)); 1371 page_off = cmd->write_data_done % PAGE_SIZE; 1372 /* 1373 * FIXME: Non page-aligned unsolicited_data out 1374 */ 1375 if (page_off) { 1376 isert_err("unexpected non-page aligned data payload\n"); 1377 dump_stack(); 1378 return -1; 1379 } 1380 isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u " 1381 "sg_nents: %u from %p %u\n", sg_start, sg_off, 1382 sg_nents, &rx_desc->data[0], unsol_data_len); 1383 1384 sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], 1385 unsol_data_len); 1386 1387 rc = iscsit_check_dataout_payload(cmd, hdr, false); 1388 if (rc < 0) 1389 return rc; 1390 1391 /* 1392 * multiple data-outs on the same command can arrive - 1393 * so post the buffer before hand 1394 */ 1395 rc = isert_post_recv(isert_conn, rx_desc); 1396 if (rc) { 1397 isert_err("ib_post_recv failed with %d\n", rc); 1398 return rc; 1399 } 1400 return 0; 1401 } 1402 1403 static int 1404 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1405 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, 1406 unsigned char *buf) 1407 { 1408 struct iscsi_conn *conn = isert_conn->conn; 1409 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf; 1410 int rc; 1411 1412 rc = iscsit_setup_nop_out(conn, cmd, hdr); 1413 if (rc < 0) 1414 return rc; 1415 /* 1416 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload 1417 */ 1418 1419 return iscsit_process_nop_out(conn, cmd, hdr); 1420 } 1421 1422 static int 1423 isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1424 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, 1425 struct iscsi_text *hdr) 1426 { 1427 struct iscsi_conn *conn = 
isert_conn->conn; 1428 u32 payload_length = ntoh24(hdr->dlength); 1429 int rc; 1430 unsigned char *text_in = NULL; 1431 1432 rc = iscsit_setup_text_cmd(conn, cmd, hdr); 1433 if (rc < 0) 1434 return rc; 1435 1436 if (payload_length) { 1437 text_in = kzalloc(payload_length, GFP_KERNEL); 1438 if (!text_in) { 1439 isert_err("Unable to allocate text_in of payload_length: %u\n", 1440 payload_length); 1441 return -ENOMEM; 1442 } 1443 } 1444 cmd->text_in_ptr = text_in; 1445 1446 memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length); 1447 1448 return iscsit_process_text_cmd(conn, cmd, hdr); 1449 } 1450 1451 static int 1452 isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, 1453 uint32_t read_stag, uint64_t read_va, 1454 uint32_t write_stag, uint64_t write_va) 1455 { 1456 struct iscsi_hdr *hdr = &rx_desc->iscsi_header; 1457 struct iscsi_conn *conn = isert_conn->conn; 1458 struct iscsi_cmd *cmd; 1459 struct isert_cmd *isert_cmd; 1460 int ret = -EINVAL; 1461 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK); 1462 1463 if (conn->sess->sess_ops->SessionType && 1464 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { 1465 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery," 1466 " ignoring\n", opcode); 1467 return 0; 1468 } 1469 1470 switch (opcode) { 1471 case ISCSI_OP_SCSI_CMD: 1472 cmd = isert_allocate_cmd(conn, rx_desc); 1473 if (!cmd) 1474 break; 1475 1476 isert_cmd = iscsit_priv_cmd(cmd); 1477 isert_cmd->read_stag = read_stag; 1478 isert_cmd->read_va = read_va; 1479 isert_cmd->write_stag = write_stag; 1480 isert_cmd->write_va = write_va; 1481 isert_cmd->inv_rkey = read_stag ? read_stag : write_stag; 1482 1483 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd, 1484 rx_desc, (unsigned char *)hdr); 1485 break; 1486 case ISCSI_OP_NOOP_OUT: 1487 cmd = isert_allocate_cmd(conn, rx_desc); 1488 if (!cmd) 1489 break; 1490 1491 isert_cmd = iscsit_priv_cmd(cmd); 1492 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd, 1493 rx_desc, (unsigned char *)hdr); 1494 break; 1495 case ISCSI_OP_SCSI_DATA_OUT: 1496 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc, 1497 (unsigned char *)hdr); 1498 break; 1499 case ISCSI_OP_SCSI_TMFUNC: 1500 cmd = isert_allocate_cmd(conn, rx_desc); 1501 if (!cmd) 1502 break; 1503 1504 ret = iscsit_handle_task_mgt_cmd(conn, cmd, 1505 (unsigned char *)hdr); 1506 break; 1507 case ISCSI_OP_LOGOUT: 1508 cmd = isert_allocate_cmd(conn, rx_desc); 1509 if (!cmd) 1510 break; 1511 1512 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); 1513 break; 1514 case ISCSI_OP_TEXT: 1515 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) 1516 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); 1517 else 1518 cmd = isert_allocate_cmd(conn, rx_desc); 1519 1520 if (!cmd) 1521 break; 1522 1523 isert_cmd = iscsit_priv_cmd(cmd); 1524 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd, 1525 rx_desc, (struct iscsi_text *)hdr); 1526 break; 1527 default: 1528 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode); 1529 dump_stack(); 1530 break; 1531 } 1532 1533 return ret; 1534 } 1535 1536 static void 1537 isert_print_wc(struct ib_wc *wc, const char *type) 1538 { 1539 if (wc->status != IB_WC_WR_FLUSH_ERR) 1540 isert_err("%s failure: %s (%d) vend_err %x\n", type, 1541 ib_wc_status_msg(wc->status), wc->status, 1542 wc->vendor_err); 1543 else 1544 isert_dbg("%s failure: %s (%d)\n", type, 1545 ib_wc_status_msg(wc->status), wc->status); 1546 } 1547 1548 static void 1549 isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) 1550 { 1551 struct isert_conn 
*isert_conn = wc->qp->qp_context; 1552 struct ib_device *ib_dev = isert_conn->cm_id->device; 1553 struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe); 1554 struct iscsi_hdr *hdr = &rx_desc->iscsi_header; 1555 struct iser_ctrl *iser_ctrl = &rx_desc->iser_header; 1556 uint64_t read_va = 0, write_va = 0; 1557 uint32_t read_stag = 0, write_stag = 0; 1558 1559 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1560 isert_print_wc(wc, "recv"); 1561 if (wc->status != IB_WC_WR_FLUSH_ERR) 1562 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1563 return; 1564 } 1565 1566 ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr, 1567 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1568 1569 isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", 1570 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags, 1571 (int)(wc->byte_len - ISER_HEADERS_LEN)); 1572 1573 switch (iser_ctrl->flags & 0xF0) { 1574 case ISCSI_CTRL: 1575 if (iser_ctrl->flags & ISER_RSV) { 1576 read_stag = be32_to_cpu(iser_ctrl->read_stag); 1577 read_va = be64_to_cpu(iser_ctrl->read_va); 1578 isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n", 1579 read_stag, (unsigned long long)read_va); 1580 } 1581 if (iser_ctrl->flags & ISER_WSV) { 1582 write_stag = be32_to_cpu(iser_ctrl->write_stag); 1583 write_va = be64_to_cpu(iser_ctrl->write_va); 1584 isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n", 1585 write_stag, (unsigned long long)write_va); 1586 } 1587 1588 isert_dbg("ISER ISCSI_CTRL PDU\n"); 1589 break; 1590 case ISER_HELLO: 1591 isert_err("iSER Hello message\n"); 1592 break; 1593 default: 1594 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags); 1595 break; 1596 } 1597 1598 isert_rx_opcode(isert_conn, rx_desc, 1599 read_stag, read_va, write_stag, write_va); 1600 1601 ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr, 1602 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1603 } 1604 1605 static void 1606 isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) 1607 { 1608 struct isert_conn *isert_conn = wc->qp->qp_context; 1609 struct ib_device *ib_dev = isert_conn->cm_id->device; 1610 1611 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1612 isert_print_wc(wc, "login recv"); 1613 return; 1614 } 1615 1616 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma, 1617 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1618 1619 isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN; 1620 1621 if (isert_conn->conn) { 1622 struct iscsi_login *login = isert_conn->conn->conn_login; 1623 1624 if (login && !login->first_request) 1625 isert_rx_login_req(isert_conn); 1626 } 1627 1628 mutex_lock(&isert_conn->mutex); 1629 complete(&isert_conn->login_req_comp); 1630 mutex_unlock(&isert_conn->mutex); 1631 1632 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma, 1633 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1634 } 1635 1636 static int 1637 isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1638 struct scatterlist *sg, u32 nents, u32 length, u32 offset, 1639 enum iser_ib_op_code op, struct isert_data_buf *data) 1640 { 1641 struct ib_device *ib_dev = isert_conn->cm_id->device; 1642 1643 data->dma_dir = op == ISER_IB_RDMA_WRITE ? 
1644 DMA_TO_DEVICE : DMA_FROM_DEVICE; 1645 1646 data->len = length - offset; 1647 data->offset = offset; 1648 data->sg_off = data->offset / PAGE_SIZE; 1649 1650 data->sg = &sg[data->sg_off]; 1651 data->nents = min_t(unsigned int, nents - data->sg_off, 1652 ISCSI_ISER_SG_TABLESIZE); 1653 data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE * 1654 PAGE_SIZE); 1655 1656 data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents, 1657 data->dma_dir); 1658 if (unlikely(!data->dma_nents)) { 1659 isert_err("Cmd: unable to dma map SGs %p\n", sg); 1660 return -EINVAL; 1661 } 1662 1663 isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n", 1664 isert_cmd, data->dma_nents, data->sg, data->nents, data->len); 1665 1666 return 0; 1667 } 1668 1669 static void 1670 isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data) 1671 { 1672 struct ib_device *ib_dev = isert_conn->cm_id->device; 1673 1674 ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir); 1675 memset(data, 0, sizeof(*data)); 1676 } 1677 1678 1679 1680 static void 1681 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) 1682 { 1683 isert_dbg("Cmd %p\n", isert_cmd); 1684 1685 if (isert_cmd->data.sg) { 1686 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd); 1687 isert_unmap_data_buf(isert_conn, &isert_cmd->data); 1688 } 1689 1690 if (isert_cmd->rdma_wr) { 1691 isert_dbg("Cmd %p free send_wr\n", isert_cmd); 1692 kfree(isert_cmd->rdma_wr); 1693 isert_cmd->rdma_wr = NULL; 1694 } 1695 1696 if (isert_cmd->ib_sge) { 1697 isert_dbg("Cmd %p free ib_sge\n", isert_cmd); 1698 kfree(isert_cmd->ib_sge); 1699 isert_cmd->ib_sge = NULL; 1700 } 1701 } 1702 1703 static void 1704 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) 1705 { 1706 isert_dbg("Cmd %p\n", isert_cmd); 1707 1708 if (isert_cmd->fr_desc) { 1709 isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, isert_cmd->fr_desc); 1710 if (isert_cmd->fr_desc->ind & ISERT_PROTECTED) { 1711 isert_unmap_data_buf(isert_conn, &isert_cmd->prot); 1712 isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED; 1713 } 1714 spin_lock_bh(&isert_conn->pool_lock); 1715 list_add_tail(&isert_cmd->fr_desc->list, &isert_conn->fr_pool); 1716 spin_unlock_bh(&isert_conn->pool_lock); 1717 isert_cmd->fr_desc = NULL; 1718 } 1719 1720 if (isert_cmd->data.sg) { 1721 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd); 1722 isert_unmap_data_buf(isert_conn, &isert_cmd->data); 1723 } 1724 1725 isert_cmd->ib_sge = NULL; 1726 isert_cmd->rdma_wr = NULL; 1727 } 1728 1729 static void 1730 isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) 1731 { 1732 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1733 struct isert_conn *isert_conn = isert_cmd->conn; 1734 struct iscsi_conn *conn = isert_conn->conn; 1735 struct isert_device *device = isert_conn->device; 1736 struct iscsi_text_rsp *hdr; 1737 1738 isert_dbg("Cmd %p\n", isert_cmd); 1739 1740 switch (cmd->iscsi_opcode) { 1741 case ISCSI_OP_SCSI_CMD: 1742 spin_lock_bh(&conn->cmd_lock); 1743 if (!list_empty(&cmd->i_conn_node)) 1744 list_del_init(&cmd->i_conn_node); 1745 spin_unlock_bh(&conn->cmd_lock); 1746 1747 if (cmd->data_direction == DMA_TO_DEVICE) { 1748 iscsit_stop_dataout_timer(cmd); 1749 /* 1750 * Check for special case during comp_err where 1751 * WRITE_PENDING has been handed off from core, 1752 * but requires an extra target_put_sess_cmd() 1753 * before transport_generic_free_cmd() below. 
1754 */ 1755 if (comp_err && 1756 cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) { 1757 struct se_cmd *se_cmd = &cmd->se_cmd; 1758 1759 target_put_sess_cmd(se_cmd); 1760 } 1761 } 1762 1763 device->unreg_rdma_mem(isert_cmd, isert_conn); 1764 transport_generic_free_cmd(&cmd->se_cmd, 0); 1765 break; 1766 case ISCSI_OP_SCSI_TMFUNC: 1767 spin_lock_bh(&conn->cmd_lock); 1768 if (!list_empty(&cmd->i_conn_node)) 1769 list_del_init(&cmd->i_conn_node); 1770 spin_unlock_bh(&conn->cmd_lock); 1771 1772 transport_generic_free_cmd(&cmd->se_cmd, 0); 1773 break; 1774 case ISCSI_OP_REJECT: 1775 case ISCSI_OP_NOOP_OUT: 1776 case ISCSI_OP_TEXT: 1777 hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; 1778 /* If the continue bit is on, keep the command alive */ 1779 if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE) 1780 break; 1781 1782 spin_lock_bh(&conn->cmd_lock); 1783 if (!list_empty(&cmd->i_conn_node)) 1784 list_del_init(&cmd->i_conn_node); 1785 spin_unlock_bh(&conn->cmd_lock); 1786 1787 /* 1788 * Handle special case for REJECT when iscsi_add_reject*() has 1789 * overwritten the original iscsi_opcode assignment, and the 1790 * associated cmd->se_cmd needs to be released. 1791 */ 1792 if (cmd->se_cmd.se_tfo != NULL) { 1793 isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n", 1794 cmd->iscsi_opcode); 1795 transport_generic_free_cmd(&cmd->se_cmd, 0); 1796 break; 1797 } 1798 /* 1799 * Fall-through 1800 */ 1801 default: 1802 iscsit_release_cmd(cmd); 1803 break; 1804 } 1805 } 1806 1807 static void 1808 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) 1809 { 1810 if (tx_desc->dma_addr != 0) { 1811 isert_dbg("unmap single for tx_desc->dma_addr\n"); 1812 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr, 1813 ISER_HEADERS_LEN, DMA_TO_DEVICE); 1814 tx_desc->dma_addr = 0; 1815 } 1816 } 1817 1818 static void 1819 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, 1820 struct ib_device *ib_dev, bool comp_err) 1821 { 1822 if (isert_cmd->pdu_buf_dma != 0) { 1823 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n"); 1824 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma, 1825 isert_cmd->pdu_buf_len, DMA_TO_DEVICE); 1826 isert_cmd->pdu_buf_dma = 0; 1827 } 1828 1829 isert_unmap_tx_desc(tx_desc, ib_dev); 1830 isert_put_cmd(isert_cmd, comp_err); 1831 } 1832 1833 static int 1834 isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr) 1835 { 1836 struct ib_mr_status mr_status; 1837 int ret; 1838 1839 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); 1840 if (ret) { 1841 isert_err("ib_check_mr_status failed, ret %d\n", ret); 1842 goto fail_mr_status; 1843 } 1844 1845 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { 1846 u64 sec_offset_err; 1847 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8; 1848 1849 switch (mr_status.sig_err.err_type) { 1850 case IB_SIG_BAD_GUARD: 1851 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 1852 break; 1853 case IB_SIG_BAD_REFTAG: 1854 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 1855 break; 1856 case IB_SIG_BAD_APPTAG: 1857 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; 1858 break; 1859 } 1860 sec_offset_err = mr_status.sig_err.sig_err_offset; 1861 do_div(sec_offset_err, block_size); 1862 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba; 1863 1864 isert_err("PI error found type %d at sector 0x%llx " 1865 "expected 0x%x vs actual 0x%x\n", 1866 mr_status.sig_err.err_type, 1867 (unsigned long long)se_cmd->bad_sector, 1868 mr_status.sig_err.expected, 
1869 mr_status.sig_err.actual); 1870 ret = 1; 1871 } 1872 1873 fail_mr_status: 1874 return ret; 1875 } 1876 1877 static void 1878 isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc) 1879 { 1880 struct isert_conn *isert_conn = wc->qp->qp_context; 1881 struct isert_device *device = isert_conn->device; 1882 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); 1883 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc); 1884 struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd; 1885 int ret = 0; 1886 1887 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1888 isert_print_wc(wc, "rdma write"); 1889 if (wc->status != IB_WC_WR_FLUSH_ERR) 1890 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1891 isert_completion_put(desc, isert_cmd, device->ib_device, true); 1892 return; 1893 } 1894 1895 isert_dbg("Cmd %p\n", isert_cmd); 1896 1897 if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) { 1898 ret = isert_check_pi_status(cmd, 1899 isert_cmd->fr_desc->pi_ctx->sig_mr); 1900 isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED; 1901 } 1902 1903 device->unreg_rdma_mem(isert_cmd, isert_conn); 1904 isert_cmd->rdma_wr_num = 0; 1905 if (ret) 1906 transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0); 1907 else 1908 isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd); 1909 } 1910 1911 static void 1912 isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc) 1913 { 1914 struct isert_conn *isert_conn = wc->qp->qp_context; 1915 struct isert_device *device = isert_conn->device; 1916 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); 1917 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc); 1918 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1919 struct se_cmd *se_cmd = &cmd->se_cmd; 1920 int ret = 0; 1921 1922 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1923 isert_print_wc(wc, "rdma read"); 1924 if (wc->status != IB_WC_WR_FLUSH_ERR) 1925 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1926 isert_completion_put(desc, isert_cmd, device->ib_device, true); 1927 return; 1928 } 1929 1930 isert_dbg("Cmd %p\n", isert_cmd); 1931 1932 if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) { 1933 ret = isert_check_pi_status(se_cmd, 1934 isert_cmd->fr_desc->pi_ctx->sig_mr); 1935 isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED; 1936 } 1937 1938 iscsit_stop_dataout_timer(cmd); 1939 device->unreg_rdma_mem(isert_cmd, isert_conn); 1940 cmd->write_data_done = isert_cmd->data.len; 1941 isert_cmd->rdma_wr_num = 0; 1942 1943 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); 1944 spin_lock_bh(&cmd->istate_lock); 1945 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1946 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1947 spin_unlock_bh(&cmd->istate_lock); 1948 1949 if (ret) { 1950 target_put_sess_cmd(se_cmd); 1951 transport_send_check_condition_and_sense(se_cmd, 1952 se_cmd->pi_err, 0); 1953 } else { 1954 target_execute_cmd(se_cmd); 1955 } 1956 } 1957 1958 static void 1959 isert_do_control_comp(struct work_struct *work) 1960 { 1961 struct isert_cmd *isert_cmd = container_of(work, 1962 struct isert_cmd, comp_work); 1963 struct isert_conn *isert_conn = isert_cmd->conn; 1964 struct ib_device *ib_dev = isert_conn->cm_id->device; 1965 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1966 1967 isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state); 1968 1969 switch (cmd->i_state) { 1970 case ISTATE_SEND_TASKMGTRSP: 1971 iscsit_tmr_post_handler(cmd, cmd->conn); 1972 case ISTATE_SEND_REJECT: /* FALLTHRU */ 1973 case ISTATE_SEND_TEXTRSP: /* FALLTHRU */ 1974 cmd->i_state = 
ISTATE_SENT_STATUS; 1975 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, 1976 ib_dev, false); 1977 break; 1978 case ISTATE_SEND_LOGOUTRSP: 1979 iscsit_logout_post_handler(cmd, cmd->conn); 1980 break; 1981 default: 1982 isert_err("Unknown i_state %d\n", cmd->i_state); 1983 dump_stack(); 1984 break; 1985 } 1986 } 1987 1988 static void 1989 isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc) 1990 { 1991 struct isert_conn *isert_conn = wc->qp->qp_context; 1992 struct ib_device *ib_dev = isert_conn->cm_id->device; 1993 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); 1994 1995 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1996 isert_print_wc(wc, "login send"); 1997 if (wc->status != IB_WC_WR_FLUSH_ERR) 1998 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1999 } 2000 2001 isert_unmap_tx_desc(tx_desc, ib_dev); 2002 } 2003 2004 static void 2005 isert_send_done(struct ib_cq *cq, struct ib_wc *wc) 2006 { 2007 struct isert_conn *isert_conn = wc->qp->qp_context; 2008 struct ib_device *ib_dev = isert_conn->cm_id->device; 2009 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); 2010 struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc); 2011 2012 if (unlikely(wc->status != IB_WC_SUCCESS)) { 2013 isert_print_wc(wc, "send"); 2014 if (wc->status != IB_WC_WR_FLUSH_ERR) 2015 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 2016 isert_completion_put(tx_desc, isert_cmd, ib_dev, true); 2017 return; 2018 } 2019 2020 isert_dbg("Cmd %p\n", isert_cmd); 2021 2022 switch (isert_cmd->iscsi_cmd->i_state) { 2023 case ISTATE_SEND_TASKMGTRSP: 2024 case ISTATE_SEND_LOGOUTRSP: 2025 case ISTATE_SEND_REJECT: 2026 case ISTATE_SEND_TEXTRSP: 2027 isert_unmap_tx_desc(tx_desc, ib_dev); 2028 2029 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp); 2030 queue_work(isert_comp_wq, &isert_cmd->comp_work); 2031 return; 2032 default: 2033 isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS; 2034 isert_completion_put(tx_desc, isert_cmd, ib_dev, false); 2035 break; 2036 } 2037 } 2038 2039 static int 2040 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd) 2041 { 2042 struct ib_send_wr *wr_failed; 2043 int ret; 2044 2045 ret = isert_post_recv(isert_conn, isert_cmd->rx_desc); 2046 if (ret) { 2047 isert_err("ib_post_recv failed with %d\n", ret); 2048 return ret; 2049 } 2050 2051 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, 2052 &wr_failed); 2053 if (ret) { 2054 isert_err("ib_post_send failed with %d\n", ret); 2055 return ret; 2056 } 2057 return ret; 2058 } 2059 2060 static int 2061 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 2062 { 2063 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2064 struct isert_conn *isert_conn = conn->context; 2065 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2066 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *) 2067 &isert_cmd->tx_desc.iscsi_header; 2068 2069 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2070 iscsit_build_rsp_pdu(cmd, conn, true, hdr); 2071 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2072 /* 2073 * Attach SENSE DATA payload to iSCSI Response PDU 2074 */ 2075 if (cmd->se_cmd.sense_buffer && 2076 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || 2077 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { 2078 struct isert_device *device = isert_conn->device; 2079 struct ib_device *ib_dev = device->ib_device; 2080 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 2081 u32 padding, pdu_len; 2082 2083 
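		/*
		 * Per RFC 3720, sense data carried in the SCSI Response data
		 * segment is preceded by a 2-byte SenseLength field, so write
		 * the length at the front of the sense buffer, account for it
		 * in scsi_sense_length, and pad the result to the 4-byte data
		 * segment boundary before mapping it as the second SGE below.
		 */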
put_unaligned_be16(cmd->se_cmd.scsi_sense_length, 2084 cmd->sense_buffer); 2085 cmd->se_cmd.scsi_sense_length += sizeof(__be16); 2086 2087 padding = -(cmd->se_cmd.scsi_sense_length) & 3; 2088 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length); 2089 pdu_len = cmd->se_cmd.scsi_sense_length + padding; 2090 2091 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 2092 (void *)cmd->sense_buffer, pdu_len, 2093 DMA_TO_DEVICE); 2094 2095 isert_cmd->pdu_buf_len = pdu_len; 2096 tx_dsg->addr = isert_cmd->pdu_buf_dma; 2097 tx_dsg->length = pdu_len; 2098 tx_dsg->lkey = device->pd->local_dma_lkey; 2099 isert_cmd->tx_desc.num_sge = 2; 2100 } 2101 2102 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 2103 2104 isert_dbg("Posting SCSI Response\n"); 2105 2106 return isert_post_response(isert_conn, isert_cmd); 2107 } 2108 2109 static void 2110 isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 2111 { 2112 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2113 struct isert_conn *isert_conn = conn->context; 2114 struct isert_device *device = isert_conn->device; 2115 2116 spin_lock_bh(&conn->cmd_lock); 2117 if (!list_empty(&cmd->i_conn_node)) 2118 list_del_init(&cmd->i_conn_node); 2119 spin_unlock_bh(&conn->cmd_lock); 2120 2121 if (cmd->data_direction == DMA_TO_DEVICE) 2122 iscsit_stop_dataout_timer(cmd); 2123 2124 device->unreg_rdma_mem(isert_cmd, isert_conn); 2125 } 2126 2127 static enum target_prot_op 2128 isert_get_sup_prot_ops(struct iscsi_conn *conn) 2129 { 2130 struct isert_conn *isert_conn = conn->context; 2131 struct isert_device *device = isert_conn->device; 2132 2133 if (conn->tpg->tpg_attrib.t10_pi) { 2134 if (device->pi_capable) { 2135 isert_info("conn %p PI offload enabled\n", isert_conn); 2136 isert_conn->pi_support = true; 2137 return TARGET_PROT_ALL; 2138 } 2139 } 2140 2141 isert_info("conn %p PI offload disabled\n", isert_conn); 2142 isert_conn->pi_support = false; 2143 2144 return TARGET_PROT_NORMAL; 2145 } 2146 2147 static int 2148 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 2149 bool nopout_response) 2150 { 2151 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2152 struct isert_conn *isert_conn = conn->context; 2153 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2154 2155 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2156 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *) 2157 &isert_cmd->tx_desc.iscsi_header, 2158 nopout_response); 2159 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2160 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 2161 2162 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn); 2163 2164 return isert_post_response(isert_conn, isert_cmd); 2165 } 2166 2167 static int 2168 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2169 { 2170 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2171 struct isert_conn *isert_conn = conn->context; 2172 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2173 2174 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2175 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *) 2176 &isert_cmd->tx_desc.iscsi_header); 2177 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2178 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 2179 2180 isert_dbg("conn %p Posting Logout Response\n", isert_conn); 2181 2182 return isert_post_response(isert_conn, isert_cmd); 2183 } 2184 2185 static int 2186 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2187 { 2188 struct isert_cmd 
*isert_cmd = iscsit_priv_cmd(cmd); 2189 struct isert_conn *isert_conn = conn->context; 2190 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2191 2192 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2193 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *) 2194 &isert_cmd->tx_desc.iscsi_header); 2195 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2196 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 2197 2198 isert_dbg("conn %p Posting Task Management Response\n", isert_conn); 2199 2200 return isert_post_response(isert_conn, isert_cmd); 2201 } 2202 2203 static int 2204 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2205 { 2206 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2207 struct isert_conn *isert_conn = conn->context; 2208 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2209 struct isert_device *device = isert_conn->device; 2210 struct ib_device *ib_dev = device->ib_device; 2211 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 2212 struct iscsi_reject *hdr = 2213 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header; 2214 2215 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2216 iscsit_build_reject(cmd, conn, hdr); 2217 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2218 2219 hton24(hdr->dlength, ISCSI_HDR_LEN); 2220 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 2221 (void *)cmd->buf_ptr, ISCSI_HDR_LEN, 2222 DMA_TO_DEVICE); 2223 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN; 2224 tx_dsg->addr = isert_cmd->pdu_buf_dma; 2225 tx_dsg->length = ISCSI_HDR_LEN; 2226 tx_dsg->lkey = device->pd->local_dma_lkey; 2227 isert_cmd->tx_desc.num_sge = 2; 2228 2229 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 2230 2231 isert_dbg("conn %p Posting Reject\n", isert_conn); 2232 2233 return isert_post_response(isert_conn, isert_cmd); 2234 } 2235 2236 static int 2237 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2238 { 2239 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2240 struct isert_conn *isert_conn = conn->context; 2241 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2242 struct iscsi_text_rsp *hdr = 2243 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; 2244 u32 txt_rsp_len; 2245 int rc; 2246 2247 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2248 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND); 2249 if (rc < 0) 2250 return rc; 2251 2252 txt_rsp_len = rc; 2253 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2254 2255 if (txt_rsp_len) { 2256 struct isert_device *device = isert_conn->device; 2257 struct ib_device *ib_dev = device->ib_device; 2258 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 2259 void *txt_rsp_buf = cmd->buf_ptr; 2260 2261 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 2262 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE); 2263 2264 isert_cmd->pdu_buf_len = txt_rsp_len; 2265 tx_dsg->addr = isert_cmd->pdu_buf_dma; 2266 tx_dsg->length = txt_rsp_len; 2267 tx_dsg->lkey = device->pd->local_dma_lkey; 2268 isert_cmd->tx_desc.num_sge = 2; 2269 } 2270 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 2271 2272 isert_dbg("conn %p Text Response\n", isert_conn); 2273 2274 return isert_post_response(isert_conn, isert_cmd); 2275 } 2276 2277 static int 2278 isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 2279 struct ib_sge *ib_sge, struct ib_rdma_wr *rdma_wr, 2280 u32 data_left, u32 offset) 2281 { 2282 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 2283 struct 
scatterlist *sg_start, *tmp_sg; 2284 struct isert_device *device = isert_conn->device; 2285 struct ib_device *ib_dev = device->ib_device; 2286 u32 sg_off, page_off; 2287 int i = 0, sg_nents; 2288 2289 sg_off = offset / PAGE_SIZE; 2290 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 2291 sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge); 2292 page_off = offset % PAGE_SIZE; 2293 2294 rdma_wr->wr.sg_list = ib_sge; 2295 rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe; 2296 2297 /* 2298 * Perform mapping of TCM scatterlist memory ib_sge dma_addr. 2299 */ 2300 for_each_sg(sg_start, tmp_sg, sg_nents, i) { 2301 isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, " 2302 "page_off: %u\n", 2303 (unsigned long long)tmp_sg->dma_address, 2304 tmp_sg->length, page_off); 2305 2306 ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off; 2307 ib_sge->length = min_t(u32, data_left, 2308 ib_sg_dma_len(ib_dev, tmp_sg) - page_off); 2309 ib_sge->lkey = device->pd->local_dma_lkey; 2310 2311 isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n", 2312 ib_sge->addr, ib_sge->length, ib_sge->lkey); 2313 page_off = 0; 2314 data_left -= ib_sge->length; 2315 if (!data_left) 2316 break; 2317 ib_sge++; 2318 isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge); 2319 } 2320 2321 rdma_wr->wr.num_sge = ++i; 2322 isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n", 2323 rdma_wr->wr.sg_list, rdma_wr->wr.num_sge); 2324 2325 return rdma_wr->wr.num_sge; 2326 } 2327 2328 static int 2329 isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn) 2330 { 2331 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 2332 struct se_cmd *se_cmd = &cmd->se_cmd; 2333 struct isert_conn *isert_conn = conn->context; 2334 struct isert_data_buf *data = &isert_cmd->data; 2335 struct ib_rdma_wr *rdma_wr; 2336 struct ib_sge *ib_sge; 2337 u32 offset, data_len, data_left, rdma_write_max, va_offset = 0; 2338 int ret = 0, i, ib_sge_cnt; 2339 2340 offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ? 
2341 cmd->write_data_done : 0; 2342 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg, 2343 se_cmd->t_data_nents, se_cmd->data_length, 2344 offset, isert_cmd->iser_ib_op, 2345 &isert_cmd->data); 2346 if (ret) 2347 return ret; 2348 2349 data_left = data->len; 2350 offset = data->offset; 2351 2352 ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL); 2353 if (!ib_sge) { 2354 isert_warn("Unable to allocate ib_sge\n"); 2355 ret = -ENOMEM; 2356 goto unmap_cmd; 2357 } 2358 isert_cmd->ib_sge = ib_sge; 2359 2360 isert_cmd->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge); 2361 isert_cmd->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) * 2362 isert_cmd->rdma_wr_num, GFP_KERNEL); 2363 if (!isert_cmd->rdma_wr) { 2364 isert_dbg("Unable to allocate isert_cmd->rdma_wr\n"); 2365 ret = -ENOMEM; 2366 goto unmap_cmd; 2367 } 2368 2369 rdma_write_max = isert_conn->max_sge * PAGE_SIZE; 2370 2371 for (i = 0; i < isert_cmd->rdma_wr_num; i++) { 2372 rdma_wr = &isert_cmd->rdma_wr[i]; 2373 data_len = min(data_left, rdma_write_max); 2374 2375 rdma_wr->wr.send_flags = 0; 2376 if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) { 2377 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done; 2378 2379 rdma_wr->wr.opcode = IB_WR_RDMA_WRITE; 2380 rdma_wr->remote_addr = isert_cmd->read_va + offset; 2381 rdma_wr->rkey = isert_cmd->read_stag; 2382 if (i + 1 == isert_cmd->rdma_wr_num) 2383 rdma_wr->wr.next = &isert_cmd->tx_desc.send_wr; 2384 else 2385 rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr; 2386 } else { 2387 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done; 2388 2389 rdma_wr->wr.opcode = IB_WR_RDMA_READ; 2390 rdma_wr->remote_addr = isert_cmd->write_va + va_offset; 2391 rdma_wr->rkey = isert_cmd->write_stag; 2392 if (i + 1 == isert_cmd->rdma_wr_num) 2393 rdma_wr->wr.send_flags = IB_SEND_SIGNALED; 2394 else 2395 rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr; 2396 } 2397 2398 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge, 2399 rdma_wr, data_len, offset); 2400 ib_sge += ib_sge_cnt; 2401 2402 offset += data_len; 2403 va_offset += data_len; 2404 data_left -= data_len; 2405 } 2406 2407 return 0; 2408 unmap_cmd: 2409 isert_unmap_data_buf(isert_conn, data); 2410 2411 return ret; 2412 } 2413 2414 static inline void 2415 isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr) 2416 { 2417 u32 rkey; 2418 2419 memset(inv_wr, 0, sizeof(*inv_wr)); 2420 inv_wr->wr_cqe = NULL; 2421 inv_wr->opcode = IB_WR_LOCAL_INV; 2422 inv_wr->ex.invalidate_rkey = mr->rkey; 2423 2424 /* Bump the key */ 2425 rkey = ib_inc_rkey(mr->rkey); 2426 ib_update_fast_reg_key(mr, rkey); 2427 } 2428 2429 static int 2430 isert_fast_reg_mr(struct isert_conn *isert_conn, 2431 struct fast_reg_descriptor *fr_desc, 2432 struct isert_data_buf *mem, 2433 enum isert_indicator ind, 2434 struct ib_sge *sge) 2435 { 2436 struct isert_device *device = isert_conn->device; 2437 struct ib_device *ib_dev = device->ib_device; 2438 struct ib_mr *mr; 2439 struct ib_reg_wr reg_wr; 2440 struct ib_send_wr inv_wr, *bad_wr, *wr = NULL; 2441 int ret, n; 2442 2443 if (mem->dma_nents == 1) { 2444 sge->lkey = device->pd->local_dma_lkey; 2445 sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]); 2446 sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]); 2447 isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n", 2448 sge->addr, sge->length, sge->lkey); 2449 return 0; 2450 } 2451 2452 if (ind == ISERT_DATA_KEY_VALID) 2453 /* Registering data buffer */ 2454 mr = fr_desc->data_mr; 2455 else 2456 /* Registering protection buffer */ 2457 mr = 
fr_desc->pi_ctx->prot_mr;

	if (!(fr_desc->ind & ind)) {
		isert_inv_rkey(&inv_wr, mr);
		wr = &inv_wr;
	}

	n = ib_map_mr_sg(mr, mem->sg, mem->nents, PAGE_SIZE);
	if (unlikely(n != mem->nents)) {
		isert_err("failed to map mr sg (%d/%d)\n",
			  n, mem->nents);
		return n < 0 ? n : -EINVAL;
	}

	isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
		  fr_desc, mem->nents, mem->offset);

	reg_wr.wr.next = NULL;
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.wr_cqe = NULL;
	reg_wr.wr.send_flags = 0;
	reg_wr.wr.num_sge = 0;
	reg_wr.mr = mr;
	reg_wr.key = mr->lkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE;

	if (!wr)
		wr = &reg_wr.wr;
	else
		wr->next = &reg_wr.wr;

	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->ind &= ~ind;

	sge->lkey = mr->lkey;
	sge->addr = mr->iova;
	sge->length = mr->length;

	isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
		  sge->addr, sge->length, sge->lkey);

	return ret;
}

static inline void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
		     struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
	/*
	 * At the moment we hard code those, but if in the future
	 * the target core would like to use it, we will take it
	 * from se_cmd.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
		domain->sig.dif.ref_remap = true;
}

static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		break;
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	default:
		isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	return 0;
}

static inline u8
isert_set_prot_checks(u8 prot_checks)
{
	return (prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ?
0x0f : 0);
}

static int
isert_reg_sig_mr(struct isert_conn *isert_conn,
		 struct isert_cmd *isert_cmd,
		 struct fast_reg_descriptor *fr_desc)
{
	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
	struct ib_sig_handover_wr sig_wr;
	struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
	struct pi_context *pi_ctx = fr_desc->pi_ctx;
	struct ib_sig_attrs sig_attrs;
	int ret;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
	if (ret)
		goto err;

	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);

	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
		isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
		wr = &inv_wr;
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr.wr_cqe = NULL;
	sig_wr.wr.sg_list = &isert_cmd->ib_sg[DATA];
	sig_wr.wr.num_sge = 1;
	sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
	sig_wr.sig_attrs = &sig_attrs;
	sig_wr.sig_mr = pi_ctx->sig_mr;
	if (se_cmd->t_prot_sg)
		sig_wr.prot = &isert_cmd->ib_sg[PROT];

	if (!wr)
		wr = &sig_wr.wr;
	else
		wr->next = &sig_wr.wr;

	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		goto err;
	}
	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;

	isert_cmd->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
	isert_cmd->ib_sg[SIG].addr = 0;
	isert_cmd->ib_sg[SIG].length = se_cmd->data_length;
	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
		/*
		 * We have protection guards on the wire
		 * so we need to set a larger transfer
		 */
		isert_cmd->ib_sg[SIG].length += se_cmd->prot_length;

	isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
		  isert_cmd->ib_sg[SIG].addr, isert_cmd->ib_sg[SIG].length,
		  isert_cmd->ib_sg[SIG].lkey);
err:
	return ret;
}

static int
isert_handle_prot_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd)
{
	struct isert_device *device = isert_conn->device;
	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret;

	if (!isert_cmd->fr_desc->pi_ctx) {
		ret = isert_create_pi_ctx(isert_cmd->fr_desc,
					  device->ib_device,
					  device->pd);
		if (ret) {
			isert_err("conn %p failed to allocate pi_ctx\n",
				  isert_conn);
			return ret;
		}
	}

	if (se_cmd->t_prot_sg) {
		ret = isert_map_data_buf(isert_conn, isert_cmd,
					 se_cmd->t_prot_sg,
					 se_cmd->t_prot_nents,
					 se_cmd->prot_length,
					 0,
					 isert_cmd->iser_ib_op,
					 &isert_cmd->prot);
		if (ret) {
			isert_err("conn %p failed to map protection buffer\n",
				  isert_conn);
			return ret;
		}

		memset(&isert_cmd->ib_sg[PROT], 0, sizeof(isert_cmd->ib_sg[PROT]));
		ret = isert_fast_reg_mr(isert_conn, isert_cmd->fr_desc,
					&isert_cmd->prot,
					ISERT_PROT_KEY_VALID,
					&isert_cmd->ib_sg[PROT]);
		if (ret) {
			isert_err("conn %p failed to fast reg mr\n",
				  isert_conn);
			goto unmap_prot_cmd;
		}
	}

	ret = isert_reg_sig_mr(isert_conn, isert_cmd, isert_cmd->fr_desc);
	if (ret) {
		isert_err("conn %p failed to fast reg mr\n",
			  isert_conn);
		goto unmap_prot_cmd;
	}
	isert_cmd->fr_desc->ind |= ISERT_PROTECTED;

	return 0;

unmap_prot_cmd:
	if
(se_cmd->t_prot_sg) 2683 isert_unmap_data_buf(isert_conn, &isert_cmd->prot); 2684 2685 return ret; 2686 } 2687 2688 static int 2689 isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn) 2690 { 2691 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 2692 struct se_cmd *se_cmd = &cmd->se_cmd; 2693 struct isert_conn *isert_conn = conn->context; 2694 struct fast_reg_descriptor *fr_desc = NULL; 2695 struct ib_rdma_wr *rdma_wr; 2696 struct ib_sge *ib_sg; 2697 u32 offset; 2698 int ret = 0; 2699 unsigned long flags; 2700 2701 offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ? 2702 cmd->write_data_done : 0; 2703 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg, 2704 se_cmd->t_data_nents, se_cmd->data_length, 2705 offset, isert_cmd->iser_ib_op, 2706 &isert_cmd->data); 2707 if (ret) 2708 return ret; 2709 2710 if (isert_cmd->data.dma_nents != 1 || 2711 isert_prot_cmd(isert_conn, se_cmd)) { 2712 spin_lock_irqsave(&isert_conn->pool_lock, flags); 2713 fr_desc = list_first_entry(&isert_conn->fr_pool, 2714 struct fast_reg_descriptor, list); 2715 list_del(&fr_desc->list); 2716 spin_unlock_irqrestore(&isert_conn->pool_lock, flags); 2717 isert_cmd->fr_desc = fr_desc; 2718 } 2719 2720 ret = isert_fast_reg_mr(isert_conn, fr_desc, &isert_cmd->data, 2721 ISERT_DATA_KEY_VALID, &isert_cmd->ib_sg[DATA]); 2722 if (ret) 2723 goto unmap_cmd; 2724 2725 if (isert_prot_cmd(isert_conn, se_cmd)) { 2726 ret = isert_handle_prot_cmd(isert_conn, isert_cmd); 2727 if (ret) 2728 goto unmap_cmd; 2729 2730 ib_sg = &isert_cmd->ib_sg[SIG]; 2731 } else { 2732 ib_sg = &isert_cmd->ib_sg[DATA]; 2733 } 2734 2735 memcpy(&isert_cmd->s_ib_sge, ib_sg, sizeof(*ib_sg)); 2736 isert_cmd->ib_sge = &isert_cmd->s_ib_sge; 2737 isert_cmd->rdma_wr_num = 1; 2738 memset(&isert_cmd->s_rdma_wr, 0, sizeof(isert_cmd->s_rdma_wr)); 2739 isert_cmd->rdma_wr = &isert_cmd->s_rdma_wr; 2740 2741 rdma_wr = &isert_cmd->s_rdma_wr; 2742 rdma_wr->wr.sg_list = &isert_cmd->s_ib_sge; 2743 rdma_wr->wr.num_sge = 1; 2744 rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe; 2745 if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) { 2746 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done; 2747 2748 rdma_wr->wr.opcode = IB_WR_RDMA_WRITE; 2749 rdma_wr->remote_addr = isert_cmd->read_va; 2750 rdma_wr->rkey = isert_cmd->read_stag; 2751 rdma_wr->wr.send_flags = !isert_prot_cmd(isert_conn, se_cmd) ? 
2752 0 : IB_SEND_SIGNALED; 2753 } else { 2754 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done; 2755 2756 rdma_wr->wr.opcode = IB_WR_RDMA_READ; 2757 rdma_wr->remote_addr = isert_cmd->write_va; 2758 rdma_wr->rkey = isert_cmd->write_stag; 2759 rdma_wr->wr.send_flags = IB_SEND_SIGNALED; 2760 } 2761 2762 return 0; 2763 2764 unmap_cmd: 2765 if (fr_desc) { 2766 spin_lock_irqsave(&isert_conn->pool_lock, flags); 2767 list_add_tail(&fr_desc->list, &isert_conn->fr_pool); 2768 spin_unlock_irqrestore(&isert_conn->pool_lock, flags); 2769 } 2770 isert_unmap_data_buf(isert_conn, &isert_cmd->data); 2771 2772 return ret; 2773 } 2774 2775 static int 2776 isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 2777 { 2778 struct se_cmd *se_cmd = &cmd->se_cmd; 2779 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2780 struct isert_conn *isert_conn = conn->context; 2781 struct isert_device *device = isert_conn->device; 2782 struct ib_send_wr *wr_failed; 2783 int rc; 2784 2785 isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n", 2786 isert_cmd, se_cmd->data_length); 2787 2788 isert_cmd->iser_ib_op = ISER_IB_RDMA_WRITE; 2789 rc = device->reg_rdma_mem(isert_cmd, conn); 2790 if (rc) { 2791 isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); 2792 return rc; 2793 } 2794 2795 if (!isert_prot_cmd(isert_conn, se_cmd)) { 2796 /* 2797 * Build isert_conn->tx_desc for iSCSI response PDU and attach 2798 */ 2799 isert_create_send_desc(isert_conn, isert_cmd, 2800 &isert_cmd->tx_desc); 2801 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *) 2802 &isert_cmd->tx_desc.iscsi_header); 2803 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2804 isert_init_send_wr(isert_conn, isert_cmd, 2805 &isert_cmd->tx_desc.send_wr); 2806 isert_cmd->s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr; 2807 isert_cmd->rdma_wr_num += 1; 2808 2809 rc = isert_post_recv(isert_conn, isert_cmd->rx_desc); 2810 if (rc) { 2811 isert_err("ib_post_recv failed with %d\n", rc); 2812 return rc; 2813 } 2814 } 2815 2816 rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed); 2817 if (rc) 2818 isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); 2819 2820 if (!isert_prot_cmd(isert_conn, se_cmd)) 2821 isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data " 2822 "READ\n", isert_cmd); 2823 else 2824 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", 2825 isert_cmd); 2826 2827 return 1; 2828 } 2829 2830 static int 2831 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) 2832 { 2833 struct se_cmd *se_cmd = &cmd->se_cmd; 2834 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2835 struct isert_conn *isert_conn = conn->context; 2836 struct isert_device *device = isert_conn->device; 2837 struct ib_send_wr *wr_failed; 2838 int rc; 2839 2840 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", 2841 isert_cmd, se_cmd->data_length, cmd->write_data_done); 2842 isert_cmd->iser_ib_op = ISER_IB_RDMA_READ; 2843 rc = device->reg_rdma_mem(isert_cmd, conn); 2844 if (rc) { 2845 isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); 2846 return rc; 2847 } 2848 2849 rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed); 2850 if (rc) 2851 isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); 2852 2853 isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", 2854 isert_cmd); 2855 2856 return 0; 2857 } 2858 2859 static int 2860 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 2861 { 2862 struct 
isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2863 int ret = 0; 2864 2865 switch (state) { 2866 case ISTATE_REMOVE: 2867 spin_lock_bh(&conn->cmd_lock); 2868 list_del_init(&cmd->i_conn_node); 2869 spin_unlock_bh(&conn->cmd_lock); 2870 isert_put_cmd(isert_cmd, true); 2871 break; 2872 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 2873 ret = isert_put_nopin(cmd, conn, false); 2874 break; 2875 default: 2876 isert_err("Unknown immediate state: 0x%02x\n", state); 2877 ret = -EINVAL; 2878 break; 2879 } 2880 2881 return ret; 2882 } 2883 2884 static int 2885 isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 2886 { 2887 struct isert_conn *isert_conn = conn->context; 2888 int ret; 2889 2890 switch (state) { 2891 case ISTATE_SEND_LOGOUTRSP: 2892 ret = isert_put_logout_rsp(cmd, conn); 2893 if (!ret) 2894 isert_conn->logout_posted = true; 2895 break; 2896 case ISTATE_SEND_NOPIN: 2897 ret = isert_put_nopin(cmd, conn, true); 2898 break; 2899 case ISTATE_SEND_TASKMGTRSP: 2900 ret = isert_put_tm_rsp(cmd, conn); 2901 break; 2902 case ISTATE_SEND_REJECT: 2903 ret = isert_put_reject(cmd, conn); 2904 break; 2905 case ISTATE_SEND_TEXTRSP: 2906 ret = isert_put_text_rsp(cmd, conn); 2907 break; 2908 case ISTATE_SEND_STATUS: 2909 /* 2910 * Special case for sending non GOOD SCSI status from TX thread 2911 * context during pre se_cmd excecution failure. 2912 */ 2913 ret = isert_put_response(conn, cmd); 2914 break; 2915 default: 2916 isert_err("Unknown response state: 0x%02x\n", state); 2917 ret = -EINVAL; 2918 break; 2919 } 2920 2921 return ret; 2922 } 2923 2924 struct rdma_cm_id * 2925 isert_setup_id(struct isert_np *isert_np) 2926 { 2927 struct iscsi_np *np = isert_np->np; 2928 struct rdma_cm_id *id; 2929 struct sockaddr *sa; 2930 int ret; 2931 2932 sa = (struct sockaddr *)&np->np_sockaddr; 2933 isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa); 2934 2935 id = rdma_create_id(&init_net, isert_cma_handler, isert_np, 2936 RDMA_PS_TCP, IB_QPT_RC); 2937 if (IS_ERR(id)) { 2938 isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id)); 2939 ret = PTR_ERR(id); 2940 goto out; 2941 } 2942 isert_dbg("id %p context %p\n", id, id->context); 2943 2944 ret = rdma_bind_addr(id, sa); 2945 if (ret) { 2946 isert_err("rdma_bind_addr() failed: %d\n", ret); 2947 goto out_id; 2948 } 2949 2950 ret = rdma_listen(id, 0); 2951 if (ret) { 2952 isert_err("rdma_listen() failed: %d\n", ret); 2953 goto out_id; 2954 } 2955 2956 return id; 2957 out_id: 2958 rdma_destroy_id(id); 2959 out: 2960 return ERR_PTR(ret); 2961 } 2962 2963 static int 2964 isert_setup_np(struct iscsi_np *np, 2965 struct sockaddr_storage *ksockaddr) 2966 { 2967 struct isert_np *isert_np; 2968 struct rdma_cm_id *isert_lid; 2969 int ret; 2970 2971 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL); 2972 if (!isert_np) { 2973 isert_err("Unable to allocate struct isert_np\n"); 2974 return -ENOMEM; 2975 } 2976 sema_init(&isert_np->sem, 0); 2977 mutex_init(&isert_np->mutex); 2978 INIT_LIST_HEAD(&isert_np->accepted); 2979 INIT_LIST_HEAD(&isert_np->pending); 2980 isert_np->np = np; 2981 2982 /* 2983 * Setup the np->np_sockaddr from the passed sockaddr setup 2984 * in iscsi_target_configfs.c code.. 
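	 * isert_setup_id() below then uses this address to create the RDMA CM
	 * listener ID (rdma_create_id + rdma_bind_addr + rdma_listen) for the
	 * portal.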
2985 */ 2986 memcpy(&np->np_sockaddr, ksockaddr, 2987 sizeof(struct sockaddr_storage)); 2988 2989 isert_lid = isert_setup_id(isert_np); 2990 if (IS_ERR(isert_lid)) { 2991 ret = PTR_ERR(isert_lid); 2992 goto out; 2993 } 2994 2995 isert_np->cm_id = isert_lid; 2996 np->np_context = isert_np; 2997 2998 return 0; 2999 3000 out: 3001 kfree(isert_np); 3002 3003 return ret; 3004 } 3005 3006 static int 3007 isert_rdma_accept(struct isert_conn *isert_conn) 3008 { 3009 struct rdma_cm_id *cm_id = isert_conn->cm_id; 3010 struct rdma_conn_param cp; 3011 int ret; 3012 struct iser_cm_hdr rsp_hdr; 3013 3014 memset(&cp, 0, sizeof(struct rdma_conn_param)); 3015 cp.initiator_depth = isert_conn->initiator_depth; 3016 cp.retry_count = 7; 3017 cp.rnr_retry_count = 7; 3018 3019 memset(&rsp_hdr, 0, sizeof(rsp_hdr)); 3020 rsp_hdr.flags = ISERT_ZBVA_NOT_USED; 3021 if (!isert_conn->snd_w_inv) 3022 rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED; 3023 cp.private_data = (void *)&rsp_hdr; 3024 cp.private_data_len = sizeof(rsp_hdr); 3025 3026 ret = rdma_accept(cm_id, &cp); 3027 if (ret) { 3028 isert_err("rdma_accept() failed with: %d\n", ret); 3029 return ret; 3030 } 3031 3032 return 0; 3033 } 3034 3035 static int 3036 isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) 3037 { 3038 struct isert_conn *isert_conn = conn->context; 3039 int ret; 3040 3041 isert_info("before login_req comp conn: %p\n", isert_conn); 3042 ret = wait_for_completion_interruptible(&isert_conn->login_req_comp); 3043 if (ret) { 3044 isert_err("isert_conn %p interrupted before got login req\n", 3045 isert_conn); 3046 return ret; 3047 } 3048 reinit_completion(&isert_conn->login_req_comp); 3049 3050 /* 3051 * For login requests after the first PDU, isert_rx_login_req() will 3052 * kick schedule_delayed_work(&conn->login_work) as the packet is 3053 * received, which turns this callback from iscsi_target_do_login_rx() 3054 * into a NOP. 
3055 */ 3056 if (!login->first_request) 3057 return 0; 3058 3059 isert_rx_login_req(isert_conn); 3060 3061 isert_info("before login_comp conn: %p\n", conn); 3062 ret = wait_for_completion_interruptible(&isert_conn->login_comp); 3063 if (ret) 3064 return ret; 3065 3066 isert_info("processing login->req: %p\n", login->req); 3067 3068 return 0; 3069 } 3070 3071 static void 3072 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, 3073 struct isert_conn *isert_conn) 3074 { 3075 struct rdma_cm_id *cm_id = isert_conn->cm_id; 3076 struct rdma_route *cm_route = &cm_id->route; 3077 3078 conn->login_family = np->np_sockaddr.ss_family; 3079 3080 conn->login_sockaddr = cm_route->addr.dst_addr; 3081 conn->local_sockaddr = cm_route->addr.src_addr; 3082 } 3083 3084 static int 3085 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) 3086 { 3087 struct isert_np *isert_np = np->np_context; 3088 struct isert_conn *isert_conn; 3089 int ret; 3090 3091 accept_wait: 3092 ret = down_interruptible(&isert_np->sem); 3093 if (ret) 3094 return -ENODEV; 3095 3096 spin_lock_bh(&np->np_thread_lock); 3097 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { 3098 spin_unlock_bh(&np->np_thread_lock); 3099 isert_dbg("np_thread_state %d\n", 3100 np->np_thread_state); 3101 /** 3102 * No point in stalling here when np_thread 3103 * is in state RESET/SHUTDOWN/EXIT - bail 3104 **/ 3105 return -ENODEV; 3106 } 3107 spin_unlock_bh(&np->np_thread_lock); 3108 3109 mutex_lock(&isert_np->mutex); 3110 if (list_empty(&isert_np->pending)) { 3111 mutex_unlock(&isert_np->mutex); 3112 goto accept_wait; 3113 } 3114 isert_conn = list_first_entry(&isert_np->pending, 3115 struct isert_conn, node); 3116 list_del_init(&isert_conn->node); 3117 mutex_unlock(&isert_np->mutex); 3118 3119 conn->context = isert_conn; 3120 isert_conn->conn = conn; 3121 isert_conn->state = ISER_CONN_BOUND; 3122 3123 isert_set_conn_info(np, conn, isert_conn); 3124 3125 isert_dbg("Processing isert_conn: %p\n", isert_conn); 3126 3127 return 0; 3128 } 3129 3130 static void 3131 isert_free_np(struct iscsi_np *np) 3132 { 3133 struct isert_np *isert_np = np->np_context; 3134 struct isert_conn *isert_conn, *n; 3135 3136 if (isert_np->cm_id) 3137 rdma_destroy_id(isert_np->cm_id); 3138 3139 /* 3140 * FIXME: At this point we don't have a good way to insure 3141 * that at this point we don't have hanging connections that 3142 * completed RDMA establishment but didn't start iscsi login 3143 * process. So work-around this by cleaning up what ever piled 3144 * up in accepted and pending lists. 
 */
	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_np->pending)) {
		isert_info("Still have isert pending connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->pending,
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}

	if (!list_empty(&isert_np->accepted)) {
		isert_info("Still have isert accepted connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->accepted,
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}
	mutex_unlock(&isert_np->mutex);

	np->np_context = NULL;
	kfree(isert_np);
}

static void isert_release_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
						     struct isert_conn,
						     release_work);

	isert_info("Starting release conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->mutex);

	isert_info("Destroying conn %p\n", isert_conn);
	isert_put_conn(isert_conn);
}

static void
isert_wait4logout(struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;

	isert_info("conn %p\n", isert_conn);

	if (isert_conn->logout_posted) {
		isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
		wait_for_completion_timeout(&conn->conn_logout_comp,
					    SECONDS_FOR_LOGOUT_COMP * HZ);
	}
}

static void
isert_wait4cmds(struct iscsi_conn *conn)
{
	isert_info("iscsi_conn %p\n", conn);

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}
}

/**
 * isert_put_unsol_pending_cmds() - Drop commands waiting for
 *     unsolicited dataout
 * @conn: iscsi connection
 *
 * We might still have commands that are waiting for unsolicited
 * dataout messages. We must put the extra reference on those
 * before blocking in target_wait_for_sess_cmds().
 */
static void
isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
{
	struct iscsi_cmd *cmd, *tmp;
	static LIST_HEAD(drop_cmd_list);

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
		if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
		    (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
		    (cmd->write_data_done < cmd->se_cmd.data_length))
			list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
	}
	spin_unlock_bh(&conn->cmd_lock);

	list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
		list_del_init(&cmd->i_conn_node);
		if (cmd->i_state != ISTATE_REMOVE) {
			struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);

			isert_info("conn %p dropping cmd %p\n", conn, cmd);
			isert_put_cmd(isert_cmd, true);
		}
	}
}

static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_info("Starting conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	ib_drain_qp(isert_conn->qp);
	isert_put_unsol_pending_cmds(conn);
	isert_wait4cmds(conn);
	isert_wait4logout(isert_conn);

	queue_work(isert_release_wq, &isert_conn->release_work);
}

static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	ib_drain_qp(isert_conn->qp);
	isert_put_conn(isert_conn);
}

static struct iscsit_transport iser_target_transport = {
	.name = "IB/iSER",
	.transport_type = ISCSI_INFINIBAND,
	.priv_size = sizeof(struct isert_cmd),
	.owner = THIS_MODULE,
	.iscsit_setup_np = isert_setup_np,
	.iscsit_accept_np = isert_accept_np,
	.iscsit_free_np = isert_free_np,
	.iscsit_wait_conn = isert_wait_conn,
	.iscsit_free_conn = isert_free_conn,
	.iscsit_get_login_rx = isert_get_login_rx,
	.iscsit_put_login_tx = isert_put_login_tx,
	.iscsit_immediate_queue = isert_immediate_queue,
	.iscsit_response_queue = isert_response_queue,
	.iscsit_get_dataout = isert_get_dataout,
	.iscsit_queue_data_in = isert_put_datain,
	.iscsit_queue_status = isert_put_response,
	.iscsit_aborted_task = isert_aborted_task,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};

static int __init isert_init(void)
{
	int ret;

	isert_comp_wq = alloc_workqueue("isert_comp_wq",
					WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!isert_comp_wq) {
		isert_err("Unable to allocate isert_comp_wq\n");
		return -ENOMEM;
	}

	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
					   WQ_UNBOUND_MAX_ACTIVE);
	if (!isert_release_wq) {
		isert_err("Unable to allocate isert_release_wq\n");
		ret = -ENOMEM;
		goto destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");

	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);

	return ret;
}

static void __exit isert_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
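	/*
	 * All deferred completion and release work has been flushed and the
	 * dedicated workqueues destroyed above; only now is the transport
	 * unregistered from the iSCSI target core.
	 */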
iscsit_unregister_transport(&iser_target_transport); 3334 isert_info("iSER_TARGET[0] - Released iser_target_transport\n"); 3335 } 3336 3337 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); 3338 MODULE_VERSION("1.0"); 3339 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 3340 MODULE_LICENSE("GPL"); 3341 3342 module_init(isert_init); 3343 module_exit(isert_exit); 3344