1 /******************************************************************************* 2 * This file contains iSCSI extentions for RDMA (iSER) Verbs 3 * 4 * (c) Copyright 2013 Datera, Inc. 5 * 6 * Nicholas A. Bellinger <nab@linux-iscsi.org> 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 ****************************************************************************/ 18 19 #include <linux/string.h> 20 #include <linux/module.h> 21 #include <linux/scatterlist.h> 22 #include <linux/socket.h> 23 #include <linux/in.h> 24 #include <linux/in6.h> 25 #include <rdma/ib_verbs.h> 26 #include <rdma/rdma_cm.h> 27 #include <target/target_core_base.h> 28 #include <target/target_core_fabric.h> 29 #include <target/iscsi/iscsi_transport.h> 30 #include <linux/semaphore.h> 31 32 #include "ib_isert.h" 33 34 #define ISERT_MAX_CONN 8 35 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN) 36 #define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN) 37 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \ 38 ISERT_MAX_CONN) 39 40 static int isert_debug_level; 41 module_param_named(debug_level, isert_debug_level, int, 0644); 42 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)"); 43 44 static DEFINE_MUTEX(device_list_mutex); 45 static LIST_HEAD(device_list); 46 static struct workqueue_struct *isert_comp_wq; 47 static struct workqueue_struct *isert_release_wq; 48 49 static void 50 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn); 51 static int 52 isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn); 53 static void 54 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn); 55 static int 56 isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn); 57 static int 58 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd); 59 static int 60 isert_login_post_recv(struct isert_conn *isert_conn); 61 static int 62 isert_rdma_accept(struct isert_conn *isert_conn); 63 struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); 64 65 static void isert_release_work(struct work_struct *work); 66 static void isert_wait4flush(struct isert_conn *isert_conn); 67 static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc); 68 static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc); 69 static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc); 70 static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc); 71 72 static inline bool 73 isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd) 74 { 75 return (conn->pi_support && 76 cmd->prot_op != TARGET_PROT_NORMAL); 77 } 78 79 80 static void 81 isert_qp_event_callback(struct ib_event *e, void *context) 82 { 83 struct isert_conn *isert_conn = context; 84 85 isert_err("%s (%d): conn %p\n", 86 ib_event_msg(e->event), e->event, isert_conn); 87 88 switch (e->event) { 89 case IB_EVENT_COMM_EST: 90 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST); 91 break; 92 case IB_EVENT_QP_LAST_WQE_REACHED: 93 isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n"); 94 break; 95 default: 96 break; 97 } 98 } 99 100 static struct isert_comp * 101 isert_comp_get(struct isert_conn *isert_conn) 102 { 103 struct isert_device *device = isert_conn->device; 104 struct isert_comp *comp; 105 int i, min = 0; 106 107 mutex_lock(&device_list_mutex); 108 for (i = 0; i < device->comps_used; i++) 109 if (device->comps[i].active_qps < 110 device->comps[min].active_qps) 111 min = i; 112 comp = &device->comps[min]; 113 comp->active_qps++; 114 mutex_unlock(&device_list_mutex); 115 116 isert_info("conn %p, using comp %p min_index: %d\n", 117 isert_conn, comp, min); 118 119 return comp; 120 } 121 122 static void 123 isert_comp_put(struct isert_comp *comp) 124 { 125 mutex_lock(&device_list_mutex); 126 comp->active_qps--; 127 mutex_unlock(&device_list_mutex); 128 } 129 130 static struct ib_qp * 131 isert_create_qp(struct isert_conn *isert_conn, 132 struct isert_comp *comp, 133 struct rdma_cm_id *cma_id) 134 { 135 struct isert_device *device = isert_conn->device; 136 struct ib_qp_init_attr attr; 137 int ret; 138 139 memset(&attr, 0, sizeof(struct ib_qp_init_attr)); 140 attr.event_handler = isert_qp_event_callback; 141 attr.qp_context = isert_conn; 142 attr.send_cq = comp->cq; 143 attr.recv_cq = comp->cq; 144 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS; 145 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1; 146 attr.cap.max_send_sge = device->ib_device->attrs.max_sge; 147 isert_conn->max_sge = min(device->ib_device->attrs.max_sge, 148 device->ib_device->attrs.max_sge_rd); 149 attr.cap.max_recv_sge = 1; 150 attr.sq_sig_type = IB_SIGNAL_REQ_WR; 151 attr.qp_type = IB_QPT_RC; 152 if (device->pi_capable) 153 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; 154 155 ret = rdma_create_qp(cma_id, device->pd, &attr); 156 if (ret) { 157 isert_err("rdma_create_qp failed for cma_id %d\n", ret); 158 return ERR_PTR(ret); 159 } 160 161 return cma_id->qp; 162 } 163 164 static int 165 isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) 166 { 167 struct isert_comp *comp; 168 int ret; 169 170 comp = isert_comp_get(isert_conn); 171 isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id); 172 if (IS_ERR(isert_conn->qp)) { 173 ret = PTR_ERR(isert_conn->qp); 174 goto err; 175 } 176 177 return 0; 178 err: 179 isert_comp_put(comp); 180 return ret; 181 } 182 183 static int 184 isert_alloc_rx_descriptors(struct isert_conn *isert_conn) 185 { 186 struct isert_device *device = isert_conn->device; 187 struct ib_device *ib_dev = device->ib_device; 188 struct iser_rx_desc *rx_desc; 189 struct ib_sge *rx_sg; 190 u64 dma_addr; 191 int i, j; 192 193 isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS * 194 sizeof(struct iser_rx_desc), GFP_KERNEL); 195 if (!isert_conn->rx_descs) 196 goto fail; 197 198 rx_desc = isert_conn->rx_descs; 199 200 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 201 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc, 202 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 203 if (ib_dma_mapping_error(ib_dev, dma_addr)) 204 goto dma_map_fail; 205 206 rx_desc->dma_addr = dma_addr; 207 208 rx_sg = &rx_desc->rx_sg; 209 rx_sg->addr = rx_desc->dma_addr; 210 rx_sg->length = ISER_RX_PAYLOAD_SIZE; 211 rx_sg->lkey = device->pd->local_dma_lkey; 212 rx_desc->rx_cqe.done = isert_recv_done; 213 } 214 215 return 0; 216 217 dma_map_fail: 218 rx_desc = isert_conn->rx_descs; 219 for (j = 0; j < i; j++, rx_desc++) { 220 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 221 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 222 } 223 kfree(isert_conn->rx_descs); 224 isert_conn->rx_descs = NULL; 225 fail: 226 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn); 227 228 return -ENOMEM; 229 } 230 231 static void 232 isert_free_rx_descriptors(struct isert_conn *isert_conn) 233 { 234 struct ib_device *ib_dev = isert_conn->device->ib_device; 235 struct iser_rx_desc *rx_desc; 236 int i; 237 238 if (!isert_conn->rx_descs) 239 return; 240 241 rx_desc = isert_conn->rx_descs; 242 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 243 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 244 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 245 } 246 247 kfree(isert_conn->rx_descs); 248 isert_conn->rx_descs = NULL; 249 } 250 251 static void 252 isert_free_comps(struct isert_device *device) 253 { 254 int i; 255 256 for (i = 0; i < device->comps_used; i++) { 257 struct isert_comp *comp = &device->comps[i]; 258 259 if (comp->cq) 260 ib_free_cq(comp->cq); 261 } 262 kfree(device->comps); 263 } 264 265 static int 266 isert_alloc_comps(struct isert_device *device) 267 { 268 int i, max_cqe, ret = 0; 269 270 device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(), 271 device->ib_device->num_comp_vectors)); 272 273 isert_info("Using %d CQs, %s supports %d vectors support " 274 "Fast registration %d pi_capable %d\n", 275 device->comps_used, device->ib_device->name, 276 device->ib_device->num_comp_vectors, device->use_fastreg, 277 device->pi_capable); 278 279 device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp), 280 GFP_KERNEL); 281 if (!device->comps) { 282 isert_err("Unable to allocate completion contexts\n"); 283 return -ENOMEM; 284 } 285 286 max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe); 287 288 for (i = 0; i < device->comps_used; i++) { 289 struct isert_comp *comp = &device->comps[i]; 290 291 comp->device = device; 292 comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i, 293 IB_POLL_WORKQUEUE); 294 if (IS_ERR(comp->cq)) { 295 isert_err("Unable to allocate cq\n"); 296 ret = PTR_ERR(comp->cq); 297 comp->cq = NULL; 298 goto out_cq; 299 } 300 } 301 302 return 0; 303 out_cq: 304 isert_free_comps(device); 305 return ret; 306 } 307 308 static int 309 isert_create_device_ib_res(struct isert_device *device) 310 { 311 struct ib_device *ib_dev = device->ib_device; 312 int ret; 313 314 isert_dbg("devattr->max_sge: %d\n", ib_dev->attrs.max_sge); 315 isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd); 316 317 /* asign function handlers */ 318 if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS && 319 ib_dev->attrs.device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) { 320 device->use_fastreg = 1; 321 device->reg_rdma_mem = isert_reg_rdma; 322 device->unreg_rdma_mem = isert_unreg_rdma; 323 } else { 324 device->use_fastreg = 0; 325 device->reg_rdma_mem = isert_map_rdma; 326 device->unreg_rdma_mem = isert_unmap_cmd; 327 } 328 329 ret = isert_alloc_comps(device); 330 if (ret) 331 goto out; 332 333 device->pd = ib_alloc_pd(ib_dev); 334 if (IS_ERR(device->pd)) { 335 ret = PTR_ERR(device->pd); 336 isert_err("failed to allocate pd, device %p, ret=%d\n", 337 device, ret); 338 goto out_cq; 339 } 340 341 /* Check signature cap */ 342 device->pi_capable = ib_dev->attrs.device_cap_flags & 343 IB_DEVICE_SIGNATURE_HANDOVER ? true : false; 344 345 return 0; 346 347 out_cq: 348 isert_free_comps(device); 349 out: 350 if (ret > 0) 351 ret = -EINVAL; 352 return ret; 353 } 354 355 static void 356 isert_free_device_ib_res(struct isert_device *device) 357 { 358 isert_info("device %p\n", device); 359 360 ib_dealloc_pd(device->pd); 361 isert_free_comps(device); 362 } 363 364 static void 365 isert_device_put(struct isert_device *device) 366 { 367 mutex_lock(&device_list_mutex); 368 device->refcount--; 369 isert_info("device %p refcount %d\n", device, device->refcount); 370 if (!device->refcount) { 371 isert_free_device_ib_res(device); 372 list_del(&device->dev_node); 373 kfree(device); 374 } 375 mutex_unlock(&device_list_mutex); 376 } 377 378 static struct isert_device * 379 isert_device_get(struct rdma_cm_id *cma_id) 380 { 381 struct isert_device *device; 382 int ret; 383 384 mutex_lock(&device_list_mutex); 385 list_for_each_entry(device, &device_list, dev_node) { 386 if (device->ib_device->node_guid == cma_id->device->node_guid) { 387 device->refcount++; 388 isert_info("Found iser device %p refcount %d\n", 389 device, device->refcount); 390 mutex_unlock(&device_list_mutex); 391 return device; 392 } 393 } 394 395 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL); 396 if (!device) { 397 mutex_unlock(&device_list_mutex); 398 return ERR_PTR(-ENOMEM); 399 } 400 401 INIT_LIST_HEAD(&device->dev_node); 402 403 device->ib_device = cma_id->device; 404 ret = isert_create_device_ib_res(device); 405 if (ret) { 406 kfree(device); 407 mutex_unlock(&device_list_mutex); 408 return ERR_PTR(ret); 409 } 410 411 device->refcount++; 412 list_add_tail(&device->dev_node, &device_list); 413 isert_info("Created a new iser device %p refcount %d\n", 414 device, device->refcount); 415 mutex_unlock(&device_list_mutex); 416 417 return device; 418 } 419 420 static void 421 isert_conn_free_fastreg_pool(struct isert_conn *isert_conn) 422 { 423 struct fast_reg_descriptor *fr_desc, *tmp; 424 int i = 0; 425 426 if (list_empty(&isert_conn->fr_pool)) 427 return; 428 429 isert_info("Freeing conn %p fastreg pool", isert_conn); 430 431 list_for_each_entry_safe(fr_desc, tmp, 432 &isert_conn->fr_pool, list) { 433 list_del(&fr_desc->list); 434 ib_dereg_mr(fr_desc->data_mr); 435 if (fr_desc->pi_ctx) { 436 ib_dereg_mr(fr_desc->pi_ctx->prot_mr); 437 ib_dereg_mr(fr_desc->pi_ctx->sig_mr); 438 kfree(fr_desc->pi_ctx); 439 } 440 kfree(fr_desc); 441 ++i; 442 } 443 444 if (i < isert_conn->fr_pool_size) 445 isert_warn("Pool still has %d regions registered\n", 446 isert_conn->fr_pool_size - i); 447 } 448 449 static int 450 isert_create_pi_ctx(struct fast_reg_descriptor *desc, 451 struct ib_device *device, 452 struct ib_pd *pd) 453 { 454 struct pi_context *pi_ctx; 455 int ret; 456 457 pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL); 458 if (!pi_ctx) { 459 isert_err("Failed to allocate pi context\n"); 460 return -ENOMEM; 461 } 462 463 pi_ctx->prot_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 464 ISCSI_ISER_SG_TABLESIZE); 465 if (IS_ERR(pi_ctx->prot_mr)) { 466 isert_err("Failed to allocate prot frmr err=%ld\n", 467 PTR_ERR(pi_ctx->prot_mr)); 468 ret = PTR_ERR(pi_ctx->prot_mr); 469 goto err_pi_ctx; 470 } 471 desc->ind |= ISERT_PROT_KEY_VALID; 472 473 pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2); 474 if (IS_ERR(pi_ctx->sig_mr)) { 475 isert_err("Failed to allocate signature enabled mr err=%ld\n", 476 PTR_ERR(pi_ctx->sig_mr)); 477 ret = PTR_ERR(pi_ctx->sig_mr); 478 goto err_prot_mr; 479 } 480 481 desc->pi_ctx = pi_ctx; 482 desc->ind |= ISERT_SIG_KEY_VALID; 483 desc->ind &= ~ISERT_PROTECTED; 484 485 return 0; 486 487 err_prot_mr: 488 ib_dereg_mr(pi_ctx->prot_mr); 489 err_pi_ctx: 490 kfree(pi_ctx); 491 492 return ret; 493 } 494 495 static int 496 isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd, 497 struct fast_reg_descriptor *fr_desc) 498 { 499 fr_desc->data_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 500 ISCSI_ISER_SG_TABLESIZE); 501 if (IS_ERR(fr_desc->data_mr)) { 502 isert_err("Failed to allocate data frmr err=%ld\n", 503 PTR_ERR(fr_desc->data_mr)); 504 return PTR_ERR(fr_desc->data_mr); 505 } 506 fr_desc->ind |= ISERT_DATA_KEY_VALID; 507 508 isert_dbg("Created fr_desc %p\n", fr_desc); 509 510 return 0; 511 } 512 513 static int 514 isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) 515 { 516 struct fast_reg_descriptor *fr_desc; 517 struct isert_device *device = isert_conn->device; 518 struct se_session *se_sess = isert_conn->conn->sess->se_sess; 519 struct se_node_acl *se_nacl = se_sess->se_node_acl; 520 int i, ret, tag_num; 521 /* 522 * Setup the number of FRMRs based upon the number of tags 523 * available to session in iscsi_target_locate_portal(). 524 */ 525 tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth); 526 tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS; 527 528 isert_conn->fr_pool_size = 0; 529 for (i = 0; i < tag_num; i++) { 530 fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL); 531 if (!fr_desc) { 532 isert_err("Failed to allocate fast_reg descriptor\n"); 533 ret = -ENOMEM; 534 goto err; 535 } 536 537 ret = isert_create_fr_desc(device->ib_device, 538 device->pd, fr_desc); 539 if (ret) { 540 isert_err("Failed to create fastreg descriptor err=%d\n", 541 ret); 542 kfree(fr_desc); 543 goto err; 544 } 545 546 list_add_tail(&fr_desc->list, &isert_conn->fr_pool); 547 isert_conn->fr_pool_size++; 548 } 549 550 isert_dbg("Creating conn %p fastreg pool size=%d", 551 isert_conn, isert_conn->fr_pool_size); 552 553 return 0; 554 555 err: 556 isert_conn_free_fastreg_pool(isert_conn); 557 return ret; 558 } 559 560 static void 561 isert_init_conn(struct isert_conn *isert_conn) 562 { 563 isert_conn->state = ISER_CONN_INIT; 564 INIT_LIST_HEAD(&isert_conn->node); 565 init_completion(&isert_conn->login_comp); 566 init_completion(&isert_conn->login_req_comp); 567 kref_init(&isert_conn->kref); 568 mutex_init(&isert_conn->mutex); 569 spin_lock_init(&isert_conn->pool_lock); 570 INIT_LIST_HEAD(&isert_conn->fr_pool); 571 INIT_WORK(&isert_conn->release_work, isert_release_work); 572 } 573 574 static void 575 isert_free_login_buf(struct isert_conn *isert_conn) 576 { 577 struct ib_device *ib_dev = isert_conn->device->ib_device; 578 579 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, 580 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); 581 kfree(isert_conn->login_rsp_buf); 582 583 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 584 ISER_RX_PAYLOAD_SIZE, 585 DMA_FROM_DEVICE); 586 kfree(isert_conn->login_req_buf); 587 } 588 589 static int 590 isert_alloc_login_buf(struct isert_conn *isert_conn, 591 struct ib_device *ib_dev) 592 { 593 int ret; 594 595 isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf), 596 GFP_KERNEL); 597 if (!isert_conn->login_req_buf) { 598 isert_err("Unable to allocate isert_conn->login_buf\n"); 599 return -ENOMEM; 600 } 601 602 isert_conn->login_req_dma = ib_dma_map_single(ib_dev, 603 isert_conn->login_req_buf, 604 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 605 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); 606 if (ret) { 607 isert_err("login_req_dma mapping error: %d\n", ret); 608 isert_conn->login_req_dma = 0; 609 goto out_free_login_req_buf; 610 } 611 612 isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL); 613 if (!isert_conn->login_rsp_buf) { 614 isert_err("Unable to allocate isert_conn->login_rspbuf\n"); 615 goto out_unmap_login_req_buf; 616 } 617 618 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, 619 isert_conn->login_rsp_buf, 620 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); 621 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); 622 if (ret) { 623 isert_err("login_rsp_dma mapping error: %d\n", ret); 624 isert_conn->login_rsp_dma = 0; 625 goto out_free_login_rsp_buf; 626 } 627 628 return 0; 629 630 out_free_login_rsp_buf: 631 kfree(isert_conn->login_rsp_buf); 632 out_unmap_login_req_buf: 633 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 634 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 635 out_free_login_req_buf: 636 kfree(isert_conn->login_req_buf); 637 return ret; 638 } 639 640 static void 641 isert_set_nego_params(struct isert_conn *isert_conn, 642 struct rdma_conn_param *param) 643 { 644 struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs; 645 646 /* Set max inflight RDMA READ requests */ 647 isert_conn->initiator_depth = min_t(u8, param->initiator_depth, 648 attr->max_qp_init_rd_atom); 649 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth); 650 651 if (param->private_data) { 652 u8 flags = *(u8 *)param->private_data; 653 654 /* 655 * use remote invalidation if the both initiator 656 * and the HCA support it 657 */ 658 isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) && 659 (attr->device_cap_flags & 660 IB_DEVICE_MEM_MGT_EXTENSIONS); 661 if (isert_conn->snd_w_inv) 662 isert_info("Using remote invalidation\n"); 663 } 664 } 665 666 static int 667 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 668 { 669 struct isert_np *isert_np = cma_id->context; 670 struct iscsi_np *np = isert_np->np; 671 struct isert_conn *isert_conn; 672 struct isert_device *device; 673 int ret = 0; 674 675 spin_lock_bh(&np->np_thread_lock); 676 if (!np->enabled) { 677 spin_unlock_bh(&np->np_thread_lock); 678 isert_dbg("iscsi_np is not enabled, reject connect request\n"); 679 return rdma_reject(cma_id, NULL, 0); 680 } 681 spin_unlock_bh(&np->np_thread_lock); 682 683 isert_dbg("cma_id: %p, portal: %p\n", 684 cma_id, cma_id->context); 685 686 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL); 687 if (!isert_conn) 688 return -ENOMEM; 689 690 isert_init_conn(isert_conn); 691 isert_conn->cm_id = cma_id; 692 693 ret = isert_alloc_login_buf(isert_conn, cma_id->device); 694 if (ret) 695 goto out; 696 697 device = isert_device_get(cma_id); 698 if (IS_ERR(device)) { 699 ret = PTR_ERR(device); 700 goto out_rsp_dma_map; 701 } 702 isert_conn->device = device; 703 704 isert_set_nego_params(isert_conn, &event->param.conn); 705 706 ret = isert_conn_setup_qp(isert_conn, cma_id); 707 if (ret) 708 goto out_conn_dev; 709 710 ret = isert_login_post_recv(isert_conn); 711 if (ret) 712 goto out_conn_dev; 713 714 ret = isert_rdma_accept(isert_conn); 715 if (ret) 716 goto out_conn_dev; 717 718 mutex_lock(&isert_np->mutex); 719 list_add_tail(&isert_conn->node, &isert_np->accepted); 720 mutex_unlock(&isert_np->mutex); 721 722 return 0; 723 724 out_conn_dev: 725 isert_device_put(device); 726 out_rsp_dma_map: 727 isert_free_login_buf(isert_conn); 728 out: 729 kfree(isert_conn); 730 rdma_reject(cma_id, NULL, 0); 731 return ret; 732 } 733 734 static void 735 isert_connect_release(struct isert_conn *isert_conn) 736 { 737 struct isert_device *device = isert_conn->device; 738 739 isert_dbg("conn %p\n", isert_conn); 740 741 BUG_ON(!device); 742 743 if (device->use_fastreg) 744 isert_conn_free_fastreg_pool(isert_conn); 745 746 isert_free_rx_descriptors(isert_conn); 747 if (isert_conn->cm_id) 748 rdma_destroy_id(isert_conn->cm_id); 749 750 if (isert_conn->qp) { 751 struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context; 752 753 isert_comp_put(comp); 754 ib_destroy_qp(isert_conn->qp); 755 } 756 757 if (isert_conn->login_req_buf) 758 isert_free_login_buf(isert_conn); 759 760 isert_device_put(device); 761 762 kfree(isert_conn); 763 } 764 765 static void 766 isert_connected_handler(struct rdma_cm_id *cma_id) 767 { 768 struct isert_conn *isert_conn = cma_id->qp->qp_context; 769 struct isert_np *isert_np = cma_id->context; 770 771 isert_info("conn %p\n", isert_conn); 772 773 mutex_lock(&isert_conn->mutex); 774 isert_conn->state = ISER_CONN_UP; 775 kref_get(&isert_conn->kref); 776 mutex_unlock(&isert_conn->mutex); 777 778 mutex_lock(&isert_np->mutex); 779 list_move_tail(&isert_conn->node, &isert_np->pending); 780 mutex_unlock(&isert_np->mutex); 781 782 isert_info("np %p: Allow accept_np to continue\n", isert_np); 783 up(&isert_np->sem); 784 } 785 786 static void 787 isert_release_kref(struct kref *kref) 788 { 789 struct isert_conn *isert_conn = container_of(kref, 790 struct isert_conn, kref); 791 792 isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm, 793 current->pid); 794 795 isert_connect_release(isert_conn); 796 } 797 798 static void 799 isert_put_conn(struct isert_conn *isert_conn) 800 { 801 kref_put(&isert_conn->kref, isert_release_kref); 802 } 803 804 static void 805 isert_handle_unbound_conn(struct isert_conn *isert_conn) 806 { 807 struct isert_np *isert_np = isert_conn->cm_id->context; 808 809 mutex_lock(&isert_np->mutex); 810 if (!list_empty(&isert_conn->node)) { 811 /* 812 * This means iscsi doesn't know this connection 813 * so schedule a cleanup ourselves 814 */ 815 list_del_init(&isert_conn->node); 816 isert_put_conn(isert_conn); 817 queue_work(isert_release_wq, &isert_conn->release_work); 818 } 819 mutex_unlock(&isert_np->mutex); 820 } 821 822 /** 823 * isert_conn_terminate() - Initiate connection termination 824 * @isert_conn: isert connection struct 825 * 826 * Notes: 827 * In case the connection state is BOUND, move state 828 * to TEMINATING and start teardown sequence (rdma_disconnect). 829 * In case the connection state is UP, complete flush as well. 830 * 831 * This routine must be called with mutex held. Thus it is 832 * safe to call multiple times. 833 */ 834 static void 835 isert_conn_terminate(struct isert_conn *isert_conn) 836 { 837 int err; 838 839 if (isert_conn->state >= ISER_CONN_TERMINATING) 840 return; 841 842 isert_info("Terminating conn %p state %d\n", 843 isert_conn, isert_conn->state); 844 isert_conn->state = ISER_CONN_TERMINATING; 845 err = rdma_disconnect(isert_conn->cm_id); 846 if (err) 847 isert_warn("Failed rdma_disconnect isert_conn %p\n", 848 isert_conn); 849 } 850 851 static int 852 isert_np_cma_handler(struct isert_np *isert_np, 853 enum rdma_cm_event_type event) 854 { 855 isert_dbg("%s (%d): isert np %p\n", 856 rdma_event_msg(event), event, isert_np); 857 858 switch (event) { 859 case RDMA_CM_EVENT_DEVICE_REMOVAL: 860 isert_np->cm_id = NULL; 861 break; 862 case RDMA_CM_EVENT_ADDR_CHANGE: 863 isert_np->cm_id = isert_setup_id(isert_np); 864 if (IS_ERR(isert_np->cm_id)) { 865 isert_err("isert np %p setup id failed: %ld\n", 866 isert_np, PTR_ERR(isert_np->cm_id)); 867 isert_np->cm_id = NULL; 868 } 869 break; 870 default: 871 isert_err("isert np %p Unexpected event %d\n", 872 isert_np, event); 873 } 874 875 return -1; 876 } 877 878 static int 879 isert_disconnected_handler(struct rdma_cm_id *cma_id, 880 enum rdma_cm_event_type event) 881 { 882 struct isert_conn *isert_conn = cma_id->qp->qp_context; 883 884 mutex_lock(&isert_conn->mutex); 885 switch (isert_conn->state) { 886 case ISER_CONN_TERMINATING: 887 break; 888 case ISER_CONN_UP: 889 isert_conn_terminate(isert_conn); 890 isert_wait4flush(isert_conn); 891 isert_handle_unbound_conn(isert_conn); 892 break; 893 case ISER_CONN_BOUND: 894 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */ 895 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 896 break; 897 default: 898 isert_warn("conn %p teminating in state %d\n", 899 isert_conn, isert_conn->state); 900 } 901 mutex_unlock(&isert_conn->mutex); 902 903 return 0; 904 } 905 906 static int 907 isert_connect_error(struct rdma_cm_id *cma_id) 908 { 909 struct isert_conn *isert_conn = cma_id->qp->qp_context; 910 911 list_del_init(&isert_conn->node); 912 isert_conn->cm_id = NULL; 913 isert_put_conn(isert_conn); 914 915 return -1; 916 } 917 918 static int 919 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 920 { 921 struct isert_np *isert_np = cma_id->context; 922 int ret = 0; 923 924 isert_info("%s (%d): status %d id %p np %p\n", 925 rdma_event_msg(event->event), event->event, 926 event->status, cma_id, cma_id->context); 927 928 if (isert_np->cm_id == cma_id) 929 return isert_np_cma_handler(cma_id->context, event->event); 930 931 switch (event->event) { 932 case RDMA_CM_EVENT_CONNECT_REQUEST: 933 ret = isert_connect_request(cma_id, event); 934 if (ret) 935 isert_err("failed handle connect request %d\n", ret); 936 break; 937 case RDMA_CM_EVENT_ESTABLISHED: 938 isert_connected_handler(cma_id); 939 break; 940 case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */ 941 case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */ 942 case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */ 943 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ 944 ret = isert_disconnected_handler(cma_id, event->event); 945 break; 946 case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */ 947 case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */ 948 case RDMA_CM_EVENT_CONNECT_ERROR: 949 ret = isert_connect_error(cma_id); 950 break; 951 default: 952 isert_err("Unhandled RDMA CMA event: %d\n", event->event); 953 break; 954 } 955 956 return ret; 957 } 958 959 static int 960 isert_post_recvm(struct isert_conn *isert_conn, u32 count) 961 { 962 struct ib_recv_wr *rx_wr, *rx_wr_failed; 963 int i, ret; 964 struct iser_rx_desc *rx_desc; 965 966 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { 967 rx_desc = &isert_conn->rx_descs[i]; 968 969 rx_wr->wr_cqe = &rx_desc->rx_cqe; 970 rx_wr->sg_list = &rx_desc->rx_sg; 971 rx_wr->num_sge = 1; 972 rx_wr->next = rx_wr + 1; 973 } 974 rx_wr--; 975 rx_wr->next = NULL; /* mark end of work requests list */ 976 977 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, 978 &rx_wr_failed); 979 if (ret) 980 isert_err("ib_post_recv() failed with ret: %d\n", ret); 981 982 return ret; 983 } 984 985 static int 986 isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc) 987 { 988 struct ib_recv_wr *rx_wr_failed, rx_wr; 989 int ret; 990 991 rx_wr.wr_cqe = &rx_desc->rx_cqe; 992 rx_wr.sg_list = &rx_desc->rx_sg; 993 rx_wr.num_sge = 1; 994 rx_wr.next = NULL; 995 996 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed); 997 if (ret) 998 isert_err("ib_post_recv() failed with ret: %d\n", ret); 999 1000 return ret; 1001 } 1002 1003 static int 1004 isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) 1005 { 1006 struct ib_device *ib_dev = isert_conn->cm_id->device; 1007 struct ib_send_wr send_wr, *send_wr_failed; 1008 int ret; 1009 1010 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr, 1011 ISER_HEADERS_LEN, DMA_TO_DEVICE); 1012 1013 tx_desc->tx_cqe.done = isert_login_send_done; 1014 1015 send_wr.next = NULL; 1016 send_wr.wr_cqe = &tx_desc->tx_cqe; 1017 send_wr.sg_list = tx_desc->tx_sg; 1018 send_wr.num_sge = tx_desc->num_sge; 1019 send_wr.opcode = IB_WR_SEND; 1020 send_wr.send_flags = IB_SEND_SIGNALED; 1021 1022 ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed); 1023 if (ret) 1024 isert_err("ib_post_send() failed, ret: %d\n", ret); 1025 1026 return ret; 1027 } 1028 1029 static void 1030 isert_create_send_desc(struct isert_conn *isert_conn, 1031 struct isert_cmd *isert_cmd, 1032 struct iser_tx_desc *tx_desc) 1033 { 1034 struct isert_device *device = isert_conn->device; 1035 struct ib_device *ib_dev = device->ib_device; 1036 1037 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, 1038 ISER_HEADERS_LEN, DMA_TO_DEVICE); 1039 1040 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl)); 1041 tx_desc->iser_header.flags = ISCSI_CTRL; 1042 1043 tx_desc->num_sge = 1; 1044 1045 if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) { 1046 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; 1047 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc); 1048 } 1049 } 1050 1051 static int 1052 isert_init_tx_hdrs(struct isert_conn *isert_conn, 1053 struct iser_tx_desc *tx_desc) 1054 { 1055 struct isert_device *device = isert_conn->device; 1056 struct ib_device *ib_dev = device->ib_device; 1057 u64 dma_addr; 1058 1059 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc, 1060 ISER_HEADERS_LEN, DMA_TO_DEVICE); 1061 if (ib_dma_mapping_error(ib_dev, dma_addr)) { 1062 isert_err("ib_dma_mapping_error() failed\n"); 1063 return -ENOMEM; 1064 } 1065 1066 tx_desc->dma_addr = dma_addr; 1067 tx_desc->tx_sg[0].addr = tx_desc->dma_addr; 1068 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; 1069 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; 1070 1071 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n", 1072 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length, 1073 tx_desc->tx_sg[0].lkey); 1074 1075 return 0; 1076 } 1077 1078 static void 1079 isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1080 struct ib_send_wr *send_wr) 1081 { 1082 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; 1083 1084 isert_cmd->iser_ib_op = ISER_IB_SEND; 1085 tx_desc->tx_cqe.done = isert_send_done; 1086 send_wr->wr_cqe = &tx_desc->tx_cqe; 1087 1088 if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) { 1089 send_wr->opcode = IB_WR_SEND_WITH_INV; 1090 send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey; 1091 } else { 1092 send_wr->opcode = IB_WR_SEND; 1093 } 1094 1095 send_wr->sg_list = &tx_desc->tx_sg[0]; 1096 send_wr->num_sge = isert_cmd->tx_desc.num_sge; 1097 send_wr->send_flags = IB_SEND_SIGNALED; 1098 } 1099 1100 static int 1101 isert_login_post_recv(struct isert_conn *isert_conn) 1102 { 1103 struct ib_recv_wr rx_wr, *rx_wr_fail; 1104 struct ib_sge sge; 1105 int ret; 1106 1107 memset(&sge, 0, sizeof(struct ib_sge)); 1108 sge.addr = isert_conn->login_req_dma; 1109 sge.length = ISER_RX_PAYLOAD_SIZE; 1110 sge.lkey = isert_conn->device->pd->local_dma_lkey; 1111 1112 isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", 1113 sge.addr, sge.length, sge.lkey); 1114 1115 isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done; 1116 1117 memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); 1118 rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe; 1119 rx_wr.sg_list = &sge; 1120 rx_wr.num_sge = 1; 1121 1122 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail); 1123 if (ret) 1124 isert_err("ib_post_recv() failed: %d\n", ret); 1125 1126 return ret; 1127 } 1128 1129 static int 1130 isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, 1131 u32 length) 1132 { 1133 struct isert_conn *isert_conn = conn->context; 1134 struct isert_device *device = isert_conn->device; 1135 struct ib_device *ib_dev = device->ib_device; 1136 struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc; 1137 int ret; 1138 1139 isert_create_send_desc(isert_conn, NULL, tx_desc); 1140 1141 memcpy(&tx_desc->iscsi_header, &login->rsp[0], 1142 sizeof(struct iscsi_hdr)); 1143 1144 isert_init_tx_hdrs(isert_conn, tx_desc); 1145 1146 if (length > 0) { 1147 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1]; 1148 1149 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma, 1150 length, DMA_TO_DEVICE); 1151 1152 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length); 1153 1154 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma, 1155 length, DMA_TO_DEVICE); 1156 1157 tx_dsg->addr = isert_conn->login_rsp_dma; 1158 tx_dsg->length = length; 1159 tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey; 1160 tx_desc->num_sge = 2; 1161 } 1162 if (!login->login_failed) { 1163 if (login->login_complete) { 1164 if (!conn->sess->sess_ops->SessionType && 1165 isert_conn->device->use_fastreg) { 1166 ret = isert_conn_create_fastreg_pool(isert_conn); 1167 if (ret) { 1168 isert_err("Conn: %p failed to create" 1169 " fastreg pool\n", isert_conn); 1170 return ret; 1171 } 1172 } 1173 1174 ret = isert_alloc_rx_descriptors(isert_conn); 1175 if (ret) 1176 return ret; 1177 1178 ret = isert_post_recvm(isert_conn, 1179 ISERT_QP_MAX_RECV_DTOS); 1180 if (ret) 1181 return ret; 1182 1183 /* Now we are in FULL_FEATURE phase */ 1184 mutex_lock(&isert_conn->mutex); 1185 isert_conn->state = ISER_CONN_FULL_FEATURE; 1186 mutex_unlock(&isert_conn->mutex); 1187 goto post_send; 1188 } 1189 1190 ret = isert_login_post_recv(isert_conn); 1191 if (ret) 1192 return ret; 1193 } 1194 post_send: 1195 ret = isert_login_post_send(isert_conn, tx_desc); 1196 if (ret) 1197 return ret; 1198 1199 return 0; 1200 } 1201 1202 static void 1203 isert_rx_login_req(struct isert_conn *isert_conn) 1204 { 1205 struct iser_rx_desc *rx_desc = isert_conn->login_req_buf; 1206 int rx_buflen = isert_conn->login_req_len; 1207 struct iscsi_conn *conn = isert_conn->conn; 1208 struct iscsi_login *login = conn->conn_login; 1209 int size; 1210 1211 isert_info("conn %p\n", isert_conn); 1212 1213 WARN_ON_ONCE(!login); 1214 1215 if (login->first_request) { 1216 struct iscsi_login_req *login_req = 1217 (struct iscsi_login_req *)&rx_desc->iscsi_header; 1218 /* 1219 * Setup the initial iscsi_login values from the leading 1220 * login request PDU. 1221 */ 1222 login->leading_connection = (!login_req->tsih) ? 1 : 0; 1223 login->current_stage = 1224 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) 1225 >> 2; 1226 login->version_min = login_req->min_version; 1227 login->version_max = login_req->max_version; 1228 memcpy(login->isid, login_req->isid, 6); 1229 login->cmd_sn = be32_to_cpu(login_req->cmdsn); 1230 login->init_task_tag = login_req->itt; 1231 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); 1232 login->cid = be16_to_cpu(login_req->cid); 1233 login->tsih = be16_to_cpu(login_req->tsih); 1234 } 1235 1236 memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); 1237 1238 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); 1239 isert_dbg("Using login payload size: %d, rx_buflen: %d " 1240 "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen, 1241 MAX_KEY_VALUE_PAIRS); 1242 memcpy(login->req_buf, &rx_desc->data[0], size); 1243 1244 if (login->first_request) { 1245 complete(&isert_conn->login_comp); 1246 return; 1247 } 1248 schedule_delayed_work(&conn->login_work, 0); 1249 } 1250 1251 static struct iscsi_cmd 1252 *isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc) 1253 { 1254 struct isert_conn *isert_conn = conn->context; 1255 struct isert_cmd *isert_cmd; 1256 struct iscsi_cmd *cmd; 1257 1258 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 1259 if (!cmd) { 1260 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n"); 1261 return NULL; 1262 } 1263 isert_cmd = iscsit_priv_cmd(cmd); 1264 isert_cmd->conn = isert_conn; 1265 isert_cmd->iscsi_cmd = cmd; 1266 isert_cmd->rx_desc = rx_desc; 1267 1268 return cmd; 1269 } 1270 1271 static int 1272 isert_handle_scsi_cmd(struct isert_conn *isert_conn, 1273 struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd, 1274 struct iser_rx_desc *rx_desc, unsigned char *buf) 1275 { 1276 struct iscsi_conn *conn = isert_conn->conn; 1277 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; 1278 int imm_data, imm_data_len, unsol_data, sg_nents, rc; 1279 bool dump_payload = false; 1280 unsigned int data_len; 1281 1282 rc = iscsit_setup_scsi_cmd(conn, cmd, buf); 1283 if (rc < 0) 1284 return rc; 1285 1286 imm_data = cmd->immediate_data; 1287 imm_data_len = cmd->first_burst_len; 1288 unsol_data = cmd->unsolicited_data; 1289 data_len = cmd->se_cmd.data_length; 1290 1291 if (imm_data && imm_data_len == data_len) 1292 cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1293 rc = iscsit_process_scsi_cmd(conn, cmd, hdr); 1294 if (rc < 0) { 1295 return 0; 1296 } else if (rc > 0) { 1297 dump_payload = true; 1298 goto sequence_cmd; 1299 } 1300 1301 if (!imm_data) 1302 return 0; 1303 1304 if (imm_data_len != data_len) { 1305 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); 1306 sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents, 1307 &rx_desc->data[0], imm_data_len); 1308 isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n", 1309 sg_nents, imm_data_len); 1310 } else { 1311 sg_init_table(&isert_cmd->sg, 1); 1312 cmd->se_cmd.t_data_sg = &isert_cmd->sg; 1313 cmd->se_cmd.t_data_nents = 1; 1314 sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len); 1315 isert_dbg("Transfer Immediate imm_data_len: %d\n", 1316 imm_data_len); 1317 } 1318 1319 cmd->write_data_done += imm_data_len; 1320 1321 if (cmd->write_data_done == cmd->se_cmd.data_length) { 1322 spin_lock_bh(&cmd->istate_lock); 1323 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1324 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1325 spin_unlock_bh(&cmd->istate_lock); 1326 } 1327 1328 sequence_cmd: 1329 rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); 1330 1331 if (!rc && dump_payload == false && unsol_data) 1332 iscsit_set_unsoliticed_dataout(cmd); 1333 else if (dump_payload && imm_data) 1334 target_put_sess_cmd(&cmd->se_cmd); 1335 1336 return 0; 1337 } 1338 1339 static int 1340 isert_handle_iscsi_dataout(struct isert_conn *isert_conn, 1341 struct iser_rx_desc *rx_desc, unsigned char *buf) 1342 { 1343 struct scatterlist *sg_start; 1344 struct iscsi_conn *conn = isert_conn->conn; 1345 struct iscsi_cmd *cmd = NULL; 1346 struct iscsi_data *hdr = (struct iscsi_data *)buf; 1347 u32 unsol_data_len = ntoh24(hdr->dlength); 1348 int rc, sg_nents, sg_off, page_off; 1349 1350 rc = iscsit_check_dataout_hdr(conn, buf, &cmd); 1351 if (rc < 0) 1352 return rc; 1353 else if (!cmd) 1354 return 0; 1355 /* 1356 * FIXME: Unexpected unsolicited_data out 1357 */ 1358 if (!cmd->unsolicited_data) { 1359 isert_err("Received unexpected solicited data payload\n"); 1360 dump_stack(); 1361 return -1; 1362 } 1363 1364 isert_dbg("Unsolicited DataOut unsol_data_len: %u, " 1365 "write_data_done: %u, data_length: %u\n", 1366 unsol_data_len, cmd->write_data_done, 1367 cmd->se_cmd.data_length); 1368 1369 sg_off = cmd->write_data_done / PAGE_SIZE; 1370 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 1371 sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE)); 1372 page_off = cmd->write_data_done % PAGE_SIZE; 1373 /* 1374 * FIXME: Non page-aligned unsolicited_data out 1375 */ 1376 if (page_off) { 1377 isert_err("unexpected non-page aligned data payload\n"); 1378 dump_stack(); 1379 return -1; 1380 } 1381 isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u " 1382 "sg_nents: %u from %p %u\n", sg_start, sg_off, 1383 sg_nents, &rx_desc->data[0], unsol_data_len); 1384 1385 sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], 1386 unsol_data_len); 1387 1388 rc = iscsit_check_dataout_payload(cmd, hdr, false); 1389 if (rc < 0) 1390 return rc; 1391 1392 /* 1393 * multiple data-outs on the same command can arrive - 1394 * so post the buffer before hand 1395 */ 1396 rc = isert_post_recv(isert_conn, rx_desc); 1397 if (rc) { 1398 isert_err("ib_post_recv failed with %d\n", rc); 1399 return rc; 1400 } 1401 return 0; 1402 } 1403 1404 static int 1405 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1406 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, 1407 unsigned char *buf) 1408 { 1409 struct iscsi_conn *conn = isert_conn->conn; 1410 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf; 1411 int rc; 1412 1413 rc = iscsit_setup_nop_out(conn, cmd, hdr); 1414 if (rc < 0) 1415 return rc; 1416 /* 1417 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload 1418 */ 1419 1420 return iscsit_process_nop_out(conn, cmd, hdr); 1421 } 1422 1423 static int 1424 isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1425 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, 1426 struct iscsi_text *hdr) 1427 { 1428 struct iscsi_conn *conn = isert_conn->conn; 1429 u32 payload_length = ntoh24(hdr->dlength); 1430 int rc; 1431 unsigned char *text_in = NULL; 1432 1433 rc = iscsit_setup_text_cmd(conn, cmd, hdr); 1434 if (rc < 0) 1435 return rc; 1436 1437 if (payload_length) { 1438 text_in = kzalloc(payload_length, GFP_KERNEL); 1439 if (!text_in) { 1440 isert_err("Unable to allocate text_in of payload_length: %u\n", 1441 payload_length); 1442 return -ENOMEM; 1443 } 1444 } 1445 cmd->text_in_ptr = text_in; 1446 1447 memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length); 1448 1449 return iscsit_process_text_cmd(conn, cmd, hdr); 1450 } 1451 1452 static int 1453 isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, 1454 uint32_t read_stag, uint64_t read_va, 1455 uint32_t write_stag, uint64_t write_va) 1456 { 1457 struct iscsi_hdr *hdr = &rx_desc->iscsi_header; 1458 struct iscsi_conn *conn = isert_conn->conn; 1459 struct iscsi_cmd *cmd; 1460 struct isert_cmd *isert_cmd; 1461 int ret = -EINVAL; 1462 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK); 1463 1464 if (conn->sess->sess_ops->SessionType && 1465 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { 1466 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery," 1467 " ignoring\n", opcode); 1468 return 0; 1469 } 1470 1471 switch (opcode) { 1472 case ISCSI_OP_SCSI_CMD: 1473 cmd = isert_allocate_cmd(conn, rx_desc); 1474 if (!cmd) 1475 break; 1476 1477 isert_cmd = iscsit_priv_cmd(cmd); 1478 isert_cmd->read_stag = read_stag; 1479 isert_cmd->read_va = read_va; 1480 isert_cmd->write_stag = write_stag; 1481 isert_cmd->write_va = write_va; 1482 isert_cmd->inv_rkey = read_stag ? read_stag : write_stag; 1483 1484 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd, 1485 rx_desc, (unsigned char *)hdr); 1486 break; 1487 case ISCSI_OP_NOOP_OUT: 1488 cmd = isert_allocate_cmd(conn, rx_desc); 1489 if (!cmd) 1490 break; 1491 1492 isert_cmd = iscsit_priv_cmd(cmd); 1493 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd, 1494 rx_desc, (unsigned char *)hdr); 1495 break; 1496 case ISCSI_OP_SCSI_DATA_OUT: 1497 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc, 1498 (unsigned char *)hdr); 1499 break; 1500 case ISCSI_OP_SCSI_TMFUNC: 1501 cmd = isert_allocate_cmd(conn, rx_desc); 1502 if (!cmd) 1503 break; 1504 1505 ret = iscsit_handle_task_mgt_cmd(conn, cmd, 1506 (unsigned char *)hdr); 1507 break; 1508 case ISCSI_OP_LOGOUT: 1509 cmd = isert_allocate_cmd(conn, rx_desc); 1510 if (!cmd) 1511 break; 1512 1513 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); 1514 break; 1515 case ISCSI_OP_TEXT: 1516 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) 1517 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); 1518 else 1519 cmd = isert_allocate_cmd(conn, rx_desc); 1520 1521 if (!cmd) 1522 break; 1523 1524 isert_cmd = iscsit_priv_cmd(cmd); 1525 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd, 1526 rx_desc, (struct iscsi_text *)hdr); 1527 break; 1528 default: 1529 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode); 1530 dump_stack(); 1531 break; 1532 } 1533 1534 return ret; 1535 } 1536 1537 static void 1538 isert_print_wc(struct ib_wc *wc, const char *type) 1539 { 1540 if (wc->status != IB_WC_WR_FLUSH_ERR) 1541 isert_err("%s failure: %s (%d) vend_err %x\n", type, 1542 ib_wc_status_msg(wc->status), wc->status, 1543 wc->vendor_err); 1544 else 1545 isert_dbg("%s failure: %s (%d)\n", type, 1546 ib_wc_status_msg(wc->status), wc->status); 1547 } 1548 1549 static void 1550 isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) 1551 { 1552 struct isert_conn *isert_conn = wc->qp->qp_context; 1553 struct ib_device *ib_dev = isert_conn->cm_id->device; 1554 struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe); 1555 struct iscsi_hdr *hdr = &rx_desc->iscsi_header; 1556 struct iser_ctrl *iser_ctrl = &rx_desc->iser_header; 1557 uint64_t read_va = 0, write_va = 0; 1558 uint32_t read_stag = 0, write_stag = 0; 1559 1560 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1561 isert_print_wc(wc, "recv"); 1562 if (wc->status != IB_WC_WR_FLUSH_ERR) 1563 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1564 return; 1565 } 1566 1567 ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr, 1568 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1569 1570 isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", 1571 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags, 1572 (int)(wc->byte_len - ISER_HEADERS_LEN)); 1573 1574 switch (iser_ctrl->flags & 0xF0) { 1575 case ISCSI_CTRL: 1576 if (iser_ctrl->flags & ISER_RSV) { 1577 read_stag = be32_to_cpu(iser_ctrl->read_stag); 1578 read_va = be64_to_cpu(iser_ctrl->read_va); 1579 isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n", 1580 read_stag, (unsigned long long)read_va); 1581 } 1582 if (iser_ctrl->flags & ISER_WSV) { 1583 write_stag = be32_to_cpu(iser_ctrl->write_stag); 1584 write_va = be64_to_cpu(iser_ctrl->write_va); 1585 isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n", 1586 write_stag, (unsigned long long)write_va); 1587 } 1588 1589 isert_dbg("ISER ISCSI_CTRL PDU\n"); 1590 break; 1591 case ISER_HELLO: 1592 isert_err("iSER Hello message\n"); 1593 break; 1594 default: 1595 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags); 1596 break; 1597 } 1598 1599 isert_rx_opcode(isert_conn, rx_desc, 1600 read_stag, read_va, write_stag, write_va); 1601 1602 ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr, 1603 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1604 } 1605 1606 static void 1607 isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) 1608 { 1609 struct isert_conn *isert_conn = wc->qp->qp_context; 1610 struct ib_device *ib_dev = isert_conn->cm_id->device; 1611 1612 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1613 isert_print_wc(wc, "login recv"); 1614 return; 1615 } 1616 1617 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma, 1618 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1619 1620 isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN; 1621 1622 if (isert_conn->conn) { 1623 struct iscsi_login *login = isert_conn->conn->conn_login; 1624 1625 if (login && !login->first_request) 1626 isert_rx_login_req(isert_conn); 1627 } 1628 1629 mutex_lock(&isert_conn->mutex); 1630 complete(&isert_conn->login_req_comp); 1631 mutex_unlock(&isert_conn->mutex); 1632 1633 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma, 1634 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1635 } 1636 1637 static int 1638 isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1639 struct scatterlist *sg, u32 nents, u32 length, u32 offset, 1640 enum iser_ib_op_code op, struct isert_data_buf *data) 1641 { 1642 struct ib_device *ib_dev = isert_conn->cm_id->device; 1643 1644 data->dma_dir = op == ISER_IB_RDMA_WRITE ? 1645 DMA_TO_DEVICE : DMA_FROM_DEVICE; 1646 1647 data->len = length - offset; 1648 data->offset = offset; 1649 data->sg_off = data->offset / PAGE_SIZE; 1650 1651 data->sg = &sg[data->sg_off]; 1652 data->nents = min_t(unsigned int, nents - data->sg_off, 1653 ISCSI_ISER_SG_TABLESIZE); 1654 data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE * 1655 PAGE_SIZE); 1656 1657 data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents, 1658 data->dma_dir); 1659 if (unlikely(!data->dma_nents)) { 1660 isert_err("Cmd: unable to dma map SGs %p\n", sg); 1661 return -EINVAL; 1662 } 1663 1664 isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n", 1665 isert_cmd, data->dma_nents, data->sg, data->nents, data->len); 1666 1667 return 0; 1668 } 1669 1670 static void 1671 isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data) 1672 { 1673 struct ib_device *ib_dev = isert_conn->cm_id->device; 1674 1675 ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir); 1676 memset(data, 0, sizeof(*data)); 1677 } 1678 1679 1680 1681 static void 1682 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) 1683 { 1684 isert_dbg("Cmd %p\n", isert_cmd); 1685 1686 if (isert_cmd->data.sg) { 1687 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd); 1688 isert_unmap_data_buf(isert_conn, &isert_cmd->data); 1689 } 1690 1691 if (isert_cmd->rdma_wr) { 1692 isert_dbg("Cmd %p free send_wr\n", isert_cmd); 1693 kfree(isert_cmd->rdma_wr); 1694 isert_cmd->rdma_wr = NULL; 1695 } 1696 1697 if (isert_cmd->ib_sge) { 1698 isert_dbg("Cmd %p free ib_sge\n", isert_cmd); 1699 kfree(isert_cmd->ib_sge); 1700 isert_cmd->ib_sge = NULL; 1701 } 1702 } 1703 1704 static void 1705 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) 1706 { 1707 isert_dbg("Cmd %p\n", isert_cmd); 1708 1709 if (isert_cmd->fr_desc) { 1710 isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, isert_cmd->fr_desc); 1711 if (isert_cmd->fr_desc->ind & ISERT_PROTECTED) { 1712 isert_unmap_data_buf(isert_conn, &isert_cmd->prot); 1713 isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED; 1714 } 1715 spin_lock_bh(&isert_conn->pool_lock); 1716 list_add_tail(&isert_cmd->fr_desc->list, &isert_conn->fr_pool); 1717 spin_unlock_bh(&isert_conn->pool_lock); 1718 isert_cmd->fr_desc = NULL; 1719 } 1720 1721 if (isert_cmd->data.sg) { 1722 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd); 1723 isert_unmap_data_buf(isert_conn, &isert_cmd->data); 1724 } 1725 1726 isert_cmd->ib_sge = NULL; 1727 isert_cmd->rdma_wr = NULL; 1728 } 1729 1730 static void 1731 isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) 1732 { 1733 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1734 struct isert_conn *isert_conn = isert_cmd->conn; 1735 struct iscsi_conn *conn = isert_conn->conn; 1736 struct isert_device *device = isert_conn->device; 1737 struct iscsi_text_rsp *hdr; 1738 1739 isert_dbg("Cmd %p\n", isert_cmd); 1740 1741 switch (cmd->iscsi_opcode) { 1742 case ISCSI_OP_SCSI_CMD: 1743 spin_lock_bh(&conn->cmd_lock); 1744 if (!list_empty(&cmd->i_conn_node)) 1745 list_del_init(&cmd->i_conn_node); 1746 spin_unlock_bh(&conn->cmd_lock); 1747 1748 if (cmd->data_direction == DMA_TO_DEVICE) { 1749 iscsit_stop_dataout_timer(cmd); 1750 /* 1751 * Check for special case during comp_err where 1752 * WRITE_PENDING has been handed off from core, 1753 * but requires an extra target_put_sess_cmd() 1754 * before transport_generic_free_cmd() below. 1755 */ 1756 if (comp_err && 1757 cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) { 1758 struct se_cmd *se_cmd = &cmd->se_cmd; 1759 1760 target_put_sess_cmd(se_cmd); 1761 } 1762 } 1763 1764 device->unreg_rdma_mem(isert_cmd, isert_conn); 1765 transport_generic_free_cmd(&cmd->se_cmd, 0); 1766 break; 1767 case ISCSI_OP_SCSI_TMFUNC: 1768 spin_lock_bh(&conn->cmd_lock); 1769 if (!list_empty(&cmd->i_conn_node)) 1770 list_del_init(&cmd->i_conn_node); 1771 spin_unlock_bh(&conn->cmd_lock); 1772 1773 transport_generic_free_cmd(&cmd->se_cmd, 0); 1774 break; 1775 case ISCSI_OP_REJECT: 1776 case ISCSI_OP_NOOP_OUT: 1777 case ISCSI_OP_TEXT: 1778 hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; 1779 /* If the continue bit is on, keep the command alive */ 1780 if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE) 1781 break; 1782 1783 spin_lock_bh(&conn->cmd_lock); 1784 if (!list_empty(&cmd->i_conn_node)) 1785 list_del_init(&cmd->i_conn_node); 1786 spin_unlock_bh(&conn->cmd_lock); 1787 1788 /* 1789 * Handle special case for REJECT when iscsi_add_reject*() has 1790 * overwritten the original iscsi_opcode assignment, and the 1791 * associated cmd->se_cmd needs to be released. 1792 */ 1793 if (cmd->se_cmd.se_tfo != NULL) { 1794 isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n", 1795 cmd->iscsi_opcode); 1796 transport_generic_free_cmd(&cmd->se_cmd, 0); 1797 break; 1798 } 1799 /* 1800 * Fall-through 1801 */ 1802 default: 1803 iscsit_release_cmd(cmd); 1804 break; 1805 } 1806 } 1807 1808 static void 1809 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) 1810 { 1811 if (tx_desc->dma_addr != 0) { 1812 isert_dbg("unmap single for tx_desc->dma_addr\n"); 1813 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr, 1814 ISER_HEADERS_LEN, DMA_TO_DEVICE); 1815 tx_desc->dma_addr = 0; 1816 } 1817 } 1818 1819 static void 1820 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, 1821 struct ib_device *ib_dev, bool comp_err) 1822 { 1823 if (isert_cmd->pdu_buf_dma != 0) { 1824 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n"); 1825 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma, 1826 isert_cmd->pdu_buf_len, DMA_TO_DEVICE); 1827 isert_cmd->pdu_buf_dma = 0; 1828 } 1829 1830 isert_unmap_tx_desc(tx_desc, ib_dev); 1831 isert_put_cmd(isert_cmd, comp_err); 1832 } 1833 1834 static int 1835 isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr) 1836 { 1837 struct ib_mr_status mr_status; 1838 int ret; 1839 1840 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); 1841 if (ret) { 1842 isert_err("ib_check_mr_status failed, ret %d\n", ret); 1843 goto fail_mr_status; 1844 } 1845 1846 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { 1847 u64 sec_offset_err; 1848 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8; 1849 1850 switch (mr_status.sig_err.err_type) { 1851 case IB_SIG_BAD_GUARD: 1852 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 1853 break; 1854 case IB_SIG_BAD_REFTAG: 1855 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 1856 break; 1857 case IB_SIG_BAD_APPTAG: 1858 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; 1859 break; 1860 } 1861 sec_offset_err = mr_status.sig_err.sig_err_offset; 1862 do_div(sec_offset_err, block_size); 1863 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba; 1864 1865 isert_err("PI error found type %d at sector 0x%llx " 1866 "expected 0x%x vs actual 0x%x\n", 1867 mr_status.sig_err.err_type, 1868 (unsigned long long)se_cmd->bad_sector, 1869 mr_status.sig_err.expected, 1870 mr_status.sig_err.actual); 1871 ret = 1; 1872 } 1873 1874 fail_mr_status: 1875 return ret; 1876 } 1877 1878 static void 1879 isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc) 1880 { 1881 struct isert_conn *isert_conn = wc->qp->qp_context; 1882 struct isert_device *device = isert_conn->device; 1883 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); 1884 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc); 1885 struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd; 1886 int ret = 0; 1887 1888 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1889 isert_print_wc(wc, "rdma write"); 1890 if (wc->status != IB_WC_WR_FLUSH_ERR) 1891 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1892 isert_completion_put(desc, isert_cmd, device->ib_device, true); 1893 return; 1894 } 1895 1896 isert_dbg("Cmd %p\n", isert_cmd); 1897 1898 if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) { 1899 ret = isert_check_pi_status(cmd, 1900 isert_cmd->fr_desc->pi_ctx->sig_mr); 1901 isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED; 1902 } 1903 1904 device->unreg_rdma_mem(isert_cmd, isert_conn); 1905 isert_cmd->rdma_wr_num = 0; 1906 if (ret) 1907 transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0); 1908 else 1909 isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd); 1910 } 1911 1912 static void 1913 isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc) 1914 { 1915 struct isert_conn *isert_conn = wc->qp->qp_context; 1916 struct isert_device *device = isert_conn->device; 1917 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); 1918 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc); 1919 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1920 struct se_cmd *se_cmd = &cmd->se_cmd; 1921 int ret = 0; 1922 1923 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1924 isert_print_wc(wc, "rdma read"); 1925 if (wc->status != IB_WC_WR_FLUSH_ERR) 1926 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1927 isert_completion_put(desc, isert_cmd, device->ib_device, true); 1928 return; 1929 } 1930 1931 isert_dbg("Cmd %p\n", isert_cmd); 1932 1933 if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) { 1934 ret = isert_check_pi_status(se_cmd, 1935 isert_cmd->fr_desc->pi_ctx->sig_mr); 1936 isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED; 1937 } 1938 1939 iscsit_stop_dataout_timer(cmd); 1940 device->unreg_rdma_mem(isert_cmd, isert_conn); 1941 cmd->write_data_done = isert_cmd->data.len; 1942 isert_cmd->rdma_wr_num = 0; 1943 1944 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); 1945 spin_lock_bh(&cmd->istate_lock); 1946 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1947 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1948 spin_unlock_bh(&cmd->istate_lock); 1949 1950 if (ret) { 1951 target_put_sess_cmd(se_cmd); 1952 transport_send_check_condition_and_sense(se_cmd, 1953 se_cmd->pi_err, 0); 1954 } else { 1955 target_execute_cmd(se_cmd); 1956 } 1957 } 1958 1959 static void 1960 isert_do_control_comp(struct work_struct *work) 1961 { 1962 struct isert_cmd *isert_cmd = container_of(work, 1963 struct isert_cmd, comp_work); 1964 struct isert_conn *isert_conn = isert_cmd->conn; 1965 struct ib_device *ib_dev = isert_conn->cm_id->device; 1966 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1967 1968 isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state); 1969 1970 switch (cmd->i_state) { 1971 case ISTATE_SEND_TASKMGTRSP: 1972 iscsit_tmr_post_handler(cmd, cmd->conn); 1973 case ISTATE_SEND_REJECT: /* FALLTHRU */ 1974 case ISTATE_SEND_TEXTRSP: /* FALLTHRU */ 1975 cmd->i_state = ISTATE_SENT_STATUS; 1976 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, 1977 ib_dev, false); 1978 break; 1979 case ISTATE_SEND_LOGOUTRSP: 1980 iscsit_logout_post_handler(cmd, cmd->conn); 1981 break; 1982 default: 1983 isert_err("Unknown i_state %d\n", cmd->i_state); 1984 dump_stack(); 1985 break; 1986 } 1987 } 1988 1989 static void 1990 isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc) 1991 { 1992 struct isert_conn *isert_conn = wc->qp->qp_context; 1993 struct ib_device *ib_dev = isert_conn->cm_id->device; 1994 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); 1995 1996 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1997 isert_print_wc(wc, "login send"); 1998 if (wc->status != IB_WC_WR_FLUSH_ERR) 1999 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 2000 } 2001 2002 isert_unmap_tx_desc(tx_desc, ib_dev); 2003 } 2004 2005 static void 2006 isert_send_done(struct ib_cq *cq, struct ib_wc *wc) 2007 { 2008 struct isert_conn *isert_conn = wc->qp->qp_context; 2009 struct ib_device *ib_dev = isert_conn->cm_id->device; 2010 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); 2011 struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc); 2012 2013 if (unlikely(wc->status != IB_WC_SUCCESS)) { 2014 isert_print_wc(wc, "send"); 2015 if (wc->status != IB_WC_WR_FLUSH_ERR) 2016 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 2017 isert_completion_put(tx_desc, isert_cmd, ib_dev, true); 2018 return; 2019 } 2020 2021 isert_dbg("Cmd %p\n", isert_cmd); 2022 2023 switch (isert_cmd->iscsi_cmd->i_state) { 2024 case ISTATE_SEND_TASKMGTRSP: 2025 case ISTATE_SEND_LOGOUTRSP: 2026 case ISTATE_SEND_REJECT: 2027 case ISTATE_SEND_TEXTRSP: 2028 isert_unmap_tx_desc(tx_desc, ib_dev); 2029 2030 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp); 2031 queue_work(isert_comp_wq, &isert_cmd->comp_work); 2032 return; 2033 default: 2034 isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS; 2035 isert_completion_put(tx_desc, isert_cmd, ib_dev, false); 2036 break; 2037 } 2038 } 2039 2040 static int 2041 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd) 2042 { 2043 struct ib_send_wr *wr_failed; 2044 int ret; 2045 2046 ret = isert_post_recv(isert_conn, isert_cmd->rx_desc); 2047 if (ret) { 2048 isert_err("ib_post_recv failed with %d\n", ret); 2049 return ret; 2050 } 2051 2052 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, 2053 &wr_failed); 2054 if (ret) { 2055 isert_err("ib_post_send failed with %d\n", ret); 2056 return ret; 2057 } 2058 return ret; 2059 } 2060 2061 static int 2062 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 2063 { 2064 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2065 struct isert_conn *isert_conn = conn->context; 2066 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2067 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *) 2068 &isert_cmd->tx_desc.iscsi_header; 2069 2070 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2071 iscsit_build_rsp_pdu(cmd, conn, true, hdr); 2072 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2073 /* 2074 * Attach SENSE DATA payload to iSCSI Response PDU 2075 */ 2076 if (cmd->se_cmd.sense_buffer && 2077 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || 2078 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { 2079 struct isert_device *device = isert_conn->device; 2080 struct ib_device *ib_dev = device->ib_device; 2081 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 2082 u32 padding, pdu_len; 2083 2084 put_unaligned_be16(cmd->se_cmd.scsi_sense_length, 2085 cmd->sense_buffer); 2086 cmd->se_cmd.scsi_sense_length += sizeof(__be16); 2087 2088 padding = -(cmd->se_cmd.scsi_sense_length) & 3; 2089 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length); 2090 pdu_len = cmd->se_cmd.scsi_sense_length + padding; 2091 2092 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 2093 (void *)cmd->sense_buffer, pdu_len, 2094 DMA_TO_DEVICE); 2095 2096 isert_cmd->pdu_buf_len = pdu_len; 2097 tx_dsg->addr = isert_cmd->pdu_buf_dma; 2098 tx_dsg->length = pdu_len; 2099 tx_dsg->lkey = device->pd->local_dma_lkey; 2100 isert_cmd->tx_desc.num_sge = 2; 2101 } 2102 2103 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 2104 2105 isert_dbg("Posting SCSI Response\n"); 2106 2107 return isert_post_response(isert_conn, isert_cmd); 2108 } 2109 2110 static void 2111 isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 2112 { 2113 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2114 struct isert_conn *isert_conn = conn->context; 2115 struct isert_device *device = isert_conn->device; 2116 2117 spin_lock_bh(&conn->cmd_lock); 2118 if (!list_empty(&cmd->i_conn_node)) 2119 list_del_init(&cmd->i_conn_node); 2120 spin_unlock_bh(&conn->cmd_lock); 2121 2122 if (cmd->data_direction == DMA_TO_DEVICE) 2123 iscsit_stop_dataout_timer(cmd); 2124 2125 device->unreg_rdma_mem(isert_cmd, isert_conn); 2126 } 2127 2128 static enum target_prot_op 2129 isert_get_sup_prot_ops(struct iscsi_conn *conn) 2130 { 2131 struct isert_conn *isert_conn = conn->context; 2132 struct isert_device *device = isert_conn->device; 2133 2134 if (conn->tpg->tpg_attrib.t10_pi) { 2135 if (device->pi_capable) { 2136 isert_info("conn %p PI offload enabled\n", isert_conn); 2137 isert_conn->pi_support = true; 2138 return TARGET_PROT_ALL; 2139 } 2140 } 2141 2142 isert_info("conn %p PI offload disabled\n", isert_conn); 2143 isert_conn->pi_support = false; 2144 2145 return TARGET_PROT_NORMAL; 2146 } 2147 2148 static int 2149 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 2150 bool nopout_response) 2151 { 2152 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2153 struct isert_conn *isert_conn = conn->context; 2154 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2155 2156 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2157 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *) 2158 &isert_cmd->tx_desc.iscsi_header, 2159 nopout_response); 2160 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2161 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 2162 2163 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn); 2164 2165 return isert_post_response(isert_conn, isert_cmd); 2166 } 2167 2168 static int 2169 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2170 { 2171 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2172 struct isert_conn *isert_conn = conn->context; 2173 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2174 2175 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2176 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *) 2177 &isert_cmd->tx_desc.iscsi_header); 2178 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2179 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 2180 2181 isert_dbg("conn %p Posting Logout Response\n", isert_conn); 2182 2183 return isert_post_response(isert_conn, isert_cmd); 2184 } 2185 2186 static int 2187 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2188 { 2189 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2190 struct isert_conn *isert_conn = conn->context; 2191 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2192 2193 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2194 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *) 2195 &isert_cmd->tx_desc.iscsi_header); 2196 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2197 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 2198 2199 isert_dbg("conn %p Posting Task Management Response\n", isert_conn); 2200 2201 return isert_post_response(isert_conn, isert_cmd); 2202 } 2203 2204 static int 2205 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2206 { 2207 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2208 struct isert_conn *isert_conn = conn->context; 2209 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2210 struct isert_device *device = isert_conn->device; 2211 struct ib_device *ib_dev = device->ib_device; 2212 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 2213 struct iscsi_reject *hdr = 2214 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header; 2215 2216 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2217 iscsit_build_reject(cmd, conn, hdr); 2218 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2219 2220 hton24(hdr->dlength, ISCSI_HDR_LEN); 2221 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 2222 (void *)cmd->buf_ptr, ISCSI_HDR_LEN, 2223 DMA_TO_DEVICE); 2224 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN; 2225 tx_dsg->addr = isert_cmd->pdu_buf_dma; 2226 tx_dsg->length = ISCSI_HDR_LEN; 2227 tx_dsg->lkey = device->pd->local_dma_lkey; 2228 isert_cmd->tx_desc.num_sge = 2; 2229 2230 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 2231 2232 isert_dbg("conn %p Posting Reject\n", isert_conn); 2233 2234 return isert_post_response(isert_conn, isert_cmd); 2235 } 2236 2237 static int 2238 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2239 { 2240 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2241 struct isert_conn *isert_conn = conn->context; 2242 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2243 struct iscsi_text_rsp *hdr = 2244 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; 2245 u32 txt_rsp_len; 2246 int rc; 2247 2248 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2249 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND); 2250 if (rc < 0) 2251 return rc; 2252 2253 txt_rsp_len = rc; 2254 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2255 2256 if (txt_rsp_len) { 2257 struct isert_device *device = isert_conn->device; 2258 struct ib_device *ib_dev = device->ib_device; 2259 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 2260 void *txt_rsp_buf = cmd->buf_ptr; 2261 2262 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 2263 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE); 2264 2265 isert_cmd->pdu_buf_len = txt_rsp_len; 2266 tx_dsg->addr = isert_cmd->pdu_buf_dma; 2267 tx_dsg->length = txt_rsp_len; 2268 tx_dsg->lkey = device->pd->local_dma_lkey; 2269 isert_cmd->tx_desc.num_sge = 2; 2270 } 2271 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 2272 2273 isert_dbg("conn %p Text Response\n", isert_conn); 2274 2275 return isert_post_response(isert_conn, isert_cmd); 2276 } 2277 2278 static int 2279 isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 2280 struct ib_sge *ib_sge, struct ib_rdma_wr *rdma_wr, 2281 u32 data_left, u32 offset) 2282 { 2283 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 2284 struct scatterlist *sg_start, *tmp_sg; 2285 struct isert_device *device = isert_conn->device; 2286 struct ib_device *ib_dev = device->ib_device; 2287 u32 sg_off, page_off; 2288 int i = 0, sg_nents; 2289 2290 sg_off = offset / PAGE_SIZE; 2291 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 2292 sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge); 2293 page_off = offset % PAGE_SIZE; 2294 2295 rdma_wr->wr.sg_list = ib_sge; 2296 rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe; 2297 2298 /* 2299 * Perform mapping of TCM scatterlist memory ib_sge dma_addr. 2300 */ 2301 for_each_sg(sg_start, tmp_sg, sg_nents, i) { 2302 isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, " 2303 "page_off: %u\n", 2304 (unsigned long long)tmp_sg->dma_address, 2305 tmp_sg->length, page_off); 2306 2307 ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off; 2308 ib_sge->length = min_t(u32, data_left, 2309 ib_sg_dma_len(ib_dev, tmp_sg) - page_off); 2310 ib_sge->lkey = device->pd->local_dma_lkey; 2311 2312 isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n", 2313 ib_sge->addr, ib_sge->length, ib_sge->lkey); 2314 page_off = 0; 2315 data_left -= ib_sge->length; 2316 if (!data_left) 2317 break; 2318 ib_sge++; 2319 isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge); 2320 } 2321 2322 rdma_wr->wr.num_sge = ++i; 2323 isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n", 2324 rdma_wr->wr.sg_list, rdma_wr->wr.num_sge); 2325 2326 return rdma_wr->wr.num_sge; 2327 } 2328 2329 static int 2330 isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn) 2331 { 2332 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 2333 struct se_cmd *se_cmd = &cmd->se_cmd; 2334 struct isert_conn *isert_conn = conn->context; 2335 struct isert_data_buf *data = &isert_cmd->data; 2336 struct ib_rdma_wr *rdma_wr; 2337 struct ib_sge *ib_sge; 2338 u32 offset, data_len, data_left, rdma_write_max, va_offset = 0; 2339 int ret = 0, i, ib_sge_cnt; 2340 2341 offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ? 2342 cmd->write_data_done : 0; 2343 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg, 2344 se_cmd->t_data_nents, se_cmd->data_length, 2345 offset, isert_cmd->iser_ib_op, 2346 &isert_cmd->data); 2347 if (ret) 2348 return ret; 2349 2350 data_left = data->len; 2351 offset = data->offset; 2352 2353 ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL); 2354 if (!ib_sge) { 2355 isert_warn("Unable to allocate ib_sge\n"); 2356 ret = -ENOMEM; 2357 goto unmap_cmd; 2358 } 2359 isert_cmd->ib_sge = ib_sge; 2360 2361 isert_cmd->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge); 2362 isert_cmd->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) * 2363 isert_cmd->rdma_wr_num, GFP_KERNEL); 2364 if (!isert_cmd->rdma_wr) { 2365 isert_dbg("Unable to allocate isert_cmd->rdma_wr\n"); 2366 ret = -ENOMEM; 2367 goto unmap_cmd; 2368 } 2369 2370 rdma_write_max = isert_conn->max_sge * PAGE_SIZE; 2371 2372 for (i = 0; i < isert_cmd->rdma_wr_num; i++) { 2373 rdma_wr = &isert_cmd->rdma_wr[i]; 2374 data_len = min(data_left, rdma_write_max); 2375 2376 rdma_wr->wr.send_flags = 0; 2377 if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) { 2378 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done; 2379 2380 rdma_wr->wr.opcode = IB_WR_RDMA_WRITE; 2381 rdma_wr->remote_addr = isert_cmd->read_va + offset; 2382 rdma_wr->rkey = isert_cmd->read_stag; 2383 if (i + 1 == isert_cmd->rdma_wr_num) 2384 rdma_wr->wr.next = &isert_cmd->tx_desc.send_wr; 2385 else 2386 rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr; 2387 } else { 2388 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done; 2389 2390 rdma_wr->wr.opcode = IB_WR_RDMA_READ; 2391 rdma_wr->remote_addr = isert_cmd->write_va + va_offset; 2392 rdma_wr->rkey = isert_cmd->write_stag; 2393 if (i + 1 == isert_cmd->rdma_wr_num) 2394 rdma_wr->wr.send_flags = IB_SEND_SIGNALED; 2395 else 2396 rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr; 2397 } 2398 2399 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge, 2400 rdma_wr, data_len, offset); 2401 ib_sge += ib_sge_cnt; 2402 2403 offset += data_len; 2404 va_offset += data_len; 2405 data_left -= data_len; 2406 } 2407 2408 return 0; 2409 unmap_cmd: 2410 isert_unmap_data_buf(isert_conn, data); 2411 2412 return ret; 2413 } 2414 2415 static inline void 2416 isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr) 2417 { 2418 u32 rkey; 2419 2420 memset(inv_wr, 0, sizeof(*inv_wr)); 2421 inv_wr->wr_cqe = NULL; 2422 inv_wr->opcode = IB_WR_LOCAL_INV; 2423 inv_wr->ex.invalidate_rkey = mr->rkey; 2424 2425 /* Bump the key */ 2426 rkey = ib_inc_rkey(mr->rkey); 2427 ib_update_fast_reg_key(mr, rkey); 2428 } 2429 2430 static int 2431 isert_fast_reg_mr(struct isert_conn *isert_conn, 2432 struct fast_reg_descriptor *fr_desc, 2433 struct isert_data_buf *mem, 2434 enum isert_indicator ind, 2435 struct ib_sge *sge) 2436 { 2437 struct isert_device *device = isert_conn->device; 2438 struct ib_device *ib_dev = device->ib_device; 2439 struct ib_mr *mr; 2440 struct ib_reg_wr reg_wr; 2441 struct ib_send_wr inv_wr, *bad_wr, *wr = NULL; 2442 int ret, n; 2443 2444 if (mem->dma_nents == 1) { 2445 sge->lkey = device->pd->local_dma_lkey; 2446 sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]); 2447 sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]); 2448 isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n", 2449 sge->addr, sge->length, sge->lkey); 2450 return 0; 2451 } 2452 2453 if (ind == ISERT_DATA_KEY_VALID) 2454 /* Registering data buffer */ 2455 mr = fr_desc->data_mr; 2456 else 2457 /* Registering protection buffer */ 2458 mr = fr_desc->pi_ctx->prot_mr; 2459 2460 if (!(fr_desc->ind & ind)) { 2461 isert_inv_rkey(&inv_wr, mr); 2462 wr = &inv_wr; 2463 } 2464 2465 n = ib_map_mr_sg(mr, mem->sg, mem->nents, PAGE_SIZE); 2466 if (unlikely(n != mem->nents)) { 2467 isert_err("failed to map mr sg (%d/%d)\n", 2468 n, mem->nents); 2469 return n < 0 ? n : -EINVAL; 2470 } 2471 2472 isert_dbg("Use fr_desc %p sg_nents %d offset %u\n", 2473 fr_desc, mem->nents, mem->offset); 2474 2475 reg_wr.wr.next = NULL; 2476 reg_wr.wr.opcode = IB_WR_REG_MR; 2477 reg_wr.wr.wr_cqe = NULL; 2478 reg_wr.wr.send_flags = 0; 2479 reg_wr.wr.num_sge = 0; 2480 reg_wr.mr = mr; 2481 reg_wr.key = mr->lkey; 2482 reg_wr.access = IB_ACCESS_LOCAL_WRITE; 2483 2484 if (!wr) 2485 wr = ®_wr.wr; 2486 else 2487 wr->next = ®_wr.wr; 2488 2489 ret = ib_post_send(isert_conn->qp, wr, &bad_wr); 2490 if (ret) { 2491 isert_err("fast registration failed, ret:%d\n", ret); 2492 return ret; 2493 } 2494 fr_desc->ind &= ~ind; 2495 2496 sge->lkey = mr->lkey; 2497 sge->addr = mr->iova; 2498 sge->length = mr->length; 2499 2500 isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n", 2501 sge->addr, sge->length, sge->lkey); 2502 2503 return ret; 2504 } 2505 2506 static inline void 2507 isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs, 2508 struct ib_sig_domain *domain) 2509 { 2510 domain->sig_type = IB_SIG_TYPE_T10_DIF; 2511 domain->sig.dif.bg_type = IB_T10DIF_CRC; 2512 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size; 2513 domain->sig.dif.ref_tag = se_cmd->reftag_seed; 2514 /* 2515 * At the moment we hard code those, but if in the future 2516 * the target core would like to use it, we will take it 2517 * from se_cmd. 2518 */ 2519 domain->sig.dif.apptag_check_mask = 0xffff; 2520 domain->sig.dif.app_escape = true; 2521 domain->sig.dif.ref_escape = true; 2522 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT || 2523 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT) 2524 domain->sig.dif.ref_remap = true; 2525 }; 2526 2527 static int 2528 isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs) 2529 { 2530 switch (se_cmd->prot_op) { 2531 case TARGET_PROT_DIN_INSERT: 2532 case TARGET_PROT_DOUT_STRIP: 2533 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE; 2534 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire); 2535 break; 2536 case TARGET_PROT_DOUT_INSERT: 2537 case TARGET_PROT_DIN_STRIP: 2538 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE; 2539 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem); 2540 break; 2541 case TARGET_PROT_DIN_PASS: 2542 case TARGET_PROT_DOUT_PASS: 2543 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire); 2544 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem); 2545 break; 2546 default: 2547 isert_err("Unsupported PI operation %d\n", se_cmd->prot_op); 2548 return -EINVAL; 2549 } 2550 2551 return 0; 2552 } 2553 2554 static inline u8 2555 isert_set_prot_checks(u8 prot_checks) 2556 { 2557 return (prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) | 2558 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) | 2559 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0); 2560 } 2561 2562 static int 2563 isert_reg_sig_mr(struct isert_conn *isert_conn, 2564 struct isert_cmd *isert_cmd, 2565 struct fast_reg_descriptor *fr_desc) 2566 { 2567 struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd; 2568 struct ib_sig_handover_wr sig_wr; 2569 struct ib_send_wr inv_wr, *bad_wr, *wr = NULL; 2570 struct pi_context *pi_ctx = fr_desc->pi_ctx; 2571 struct ib_sig_attrs sig_attrs; 2572 int ret; 2573 2574 memset(&sig_attrs, 0, sizeof(sig_attrs)); 2575 ret = isert_set_sig_attrs(se_cmd, &sig_attrs); 2576 if (ret) 2577 goto err; 2578 2579 sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks); 2580 2581 if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) { 2582 isert_inv_rkey(&inv_wr, pi_ctx->sig_mr); 2583 wr = &inv_wr; 2584 } 2585 2586 memset(&sig_wr, 0, sizeof(sig_wr)); 2587 sig_wr.wr.opcode = IB_WR_REG_SIG_MR; 2588 sig_wr.wr.wr_cqe = NULL; 2589 sig_wr.wr.sg_list = &isert_cmd->ib_sg[DATA]; 2590 sig_wr.wr.num_sge = 1; 2591 sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE; 2592 sig_wr.sig_attrs = &sig_attrs; 2593 sig_wr.sig_mr = pi_ctx->sig_mr; 2594 if (se_cmd->t_prot_sg) 2595 sig_wr.prot = &isert_cmd->ib_sg[PROT]; 2596 2597 if (!wr) 2598 wr = &sig_wr.wr; 2599 else 2600 wr->next = &sig_wr.wr; 2601 2602 ret = ib_post_send(isert_conn->qp, wr, &bad_wr); 2603 if (ret) { 2604 isert_err("fast registration failed, ret:%d\n", ret); 2605 goto err; 2606 } 2607 fr_desc->ind &= ~ISERT_SIG_KEY_VALID; 2608 2609 isert_cmd->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey; 2610 isert_cmd->ib_sg[SIG].addr = 0; 2611 isert_cmd->ib_sg[SIG].length = se_cmd->data_length; 2612 if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP && 2613 se_cmd->prot_op != TARGET_PROT_DOUT_INSERT) 2614 /* 2615 * We have protection guards on the wire 2616 * so we need to set a larget transfer 2617 */ 2618 isert_cmd->ib_sg[SIG].length += se_cmd->prot_length; 2619 2620 isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n", 2621 isert_cmd->ib_sg[SIG].addr, isert_cmd->ib_sg[SIG].length, 2622 isert_cmd->ib_sg[SIG].lkey); 2623 err: 2624 return ret; 2625 } 2626 2627 static int 2628 isert_handle_prot_cmd(struct isert_conn *isert_conn, 2629 struct isert_cmd *isert_cmd) 2630 { 2631 struct isert_device *device = isert_conn->device; 2632 struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd; 2633 int ret; 2634 2635 if (!isert_cmd->fr_desc->pi_ctx) { 2636 ret = isert_create_pi_ctx(isert_cmd->fr_desc, 2637 device->ib_device, 2638 device->pd); 2639 if (ret) { 2640 isert_err("conn %p failed to allocate pi_ctx\n", 2641 isert_conn); 2642 return ret; 2643 } 2644 } 2645 2646 if (se_cmd->t_prot_sg) { 2647 ret = isert_map_data_buf(isert_conn, isert_cmd, 2648 se_cmd->t_prot_sg, 2649 se_cmd->t_prot_nents, 2650 se_cmd->prot_length, 2651 0, 2652 isert_cmd->iser_ib_op, 2653 &isert_cmd->prot); 2654 if (ret) { 2655 isert_err("conn %p failed to map protection buffer\n", 2656 isert_conn); 2657 return ret; 2658 } 2659 2660 memset(&isert_cmd->ib_sg[PROT], 0, sizeof(isert_cmd->ib_sg[PROT])); 2661 ret = isert_fast_reg_mr(isert_conn, isert_cmd->fr_desc, 2662 &isert_cmd->prot, 2663 ISERT_PROT_KEY_VALID, 2664 &isert_cmd->ib_sg[PROT]); 2665 if (ret) { 2666 isert_err("conn %p failed to fast reg mr\n", 2667 isert_conn); 2668 goto unmap_prot_cmd; 2669 } 2670 } 2671 2672 ret = isert_reg_sig_mr(isert_conn, isert_cmd, isert_cmd->fr_desc); 2673 if (ret) { 2674 isert_err("conn %p failed to fast reg mr\n", 2675 isert_conn); 2676 goto unmap_prot_cmd; 2677 } 2678 isert_cmd->fr_desc->ind |= ISERT_PROTECTED; 2679 2680 return 0; 2681 2682 unmap_prot_cmd: 2683 if (se_cmd->t_prot_sg) 2684 isert_unmap_data_buf(isert_conn, &isert_cmd->prot); 2685 2686 return ret; 2687 } 2688 2689 static int 2690 isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn) 2691 { 2692 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 2693 struct se_cmd *se_cmd = &cmd->se_cmd; 2694 struct isert_conn *isert_conn = conn->context; 2695 struct fast_reg_descriptor *fr_desc = NULL; 2696 struct ib_rdma_wr *rdma_wr; 2697 struct ib_sge *ib_sg; 2698 u32 offset; 2699 int ret = 0; 2700 unsigned long flags; 2701 2702 offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ? 2703 cmd->write_data_done : 0; 2704 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg, 2705 se_cmd->t_data_nents, se_cmd->data_length, 2706 offset, isert_cmd->iser_ib_op, 2707 &isert_cmd->data); 2708 if (ret) 2709 return ret; 2710 2711 if (isert_cmd->data.dma_nents != 1 || 2712 isert_prot_cmd(isert_conn, se_cmd)) { 2713 spin_lock_irqsave(&isert_conn->pool_lock, flags); 2714 fr_desc = list_first_entry(&isert_conn->fr_pool, 2715 struct fast_reg_descriptor, list); 2716 list_del(&fr_desc->list); 2717 spin_unlock_irqrestore(&isert_conn->pool_lock, flags); 2718 isert_cmd->fr_desc = fr_desc; 2719 } 2720 2721 ret = isert_fast_reg_mr(isert_conn, fr_desc, &isert_cmd->data, 2722 ISERT_DATA_KEY_VALID, &isert_cmd->ib_sg[DATA]); 2723 if (ret) 2724 goto unmap_cmd; 2725 2726 if (isert_prot_cmd(isert_conn, se_cmd)) { 2727 ret = isert_handle_prot_cmd(isert_conn, isert_cmd); 2728 if (ret) 2729 goto unmap_cmd; 2730 2731 ib_sg = &isert_cmd->ib_sg[SIG]; 2732 } else { 2733 ib_sg = &isert_cmd->ib_sg[DATA]; 2734 } 2735 2736 memcpy(&isert_cmd->s_ib_sge, ib_sg, sizeof(*ib_sg)); 2737 isert_cmd->ib_sge = &isert_cmd->s_ib_sge; 2738 isert_cmd->rdma_wr_num = 1; 2739 memset(&isert_cmd->s_rdma_wr, 0, sizeof(isert_cmd->s_rdma_wr)); 2740 isert_cmd->rdma_wr = &isert_cmd->s_rdma_wr; 2741 2742 rdma_wr = &isert_cmd->s_rdma_wr; 2743 rdma_wr->wr.sg_list = &isert_cmd->s_ib_sge; 2744 rdma_wr->wr.num_sge = 1; 2745 rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe; 2746 if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) { 2747 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done; 2748 2749 rdma_wr->wr.opcode = IB_WR_RDMA_WRITE; 2750 rdma_wr->remote_addr = isert_cmd->read_va; 2751 rdma_wr->rkey = isert_cmd->read_stag; 2752 rdma_wr->wr.send_flags = !isert_prot_cmd(isert_conn, se_cmd) ? 2753 0 : IB_SEND_SIGNALED; 2754 } else { 2755 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done; 2756 2757 rdma_wr->wr.opcode = IB_WR_RDMA_READ; 2758 rdma_wr->remote_addr = isert_cmd->write_va; 2759 rdma_wr->rkey = isert_cmd->write_stag; 2760 rdma_wr->wr.send_flags = IB_SEND_SIGNALED; 2761 } 2762 2763 return 0; 2764 2765 unmap_cmd: 2766 if (fr_desc) { 2767 spin_lock_irqsave(&isert_conn->pool_lock, flags); 2768 list_add_tail(&fr_desc->list, &isert_conn->fr_pool); 2769 spin_unlock_irqrestore(&isert_conn->pool_lock, flags); 2770 } 2771 isert_unmap_data_buf(isert_conn, &isert_cmd->data); 2772 2773 return ret; 2774 } 2775 2776 static int 2777 isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 2778 { 2779 struct se_cmd *se_cmd = &cmd->se_cmd; 2780 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2781 struct isert_conn *isert_conn = conn->context; 2782 struct isert_device *device = isert_conn->device; 2783 struct ib_send_wr *wr_failed; 2784 int rc; 2785 2786 isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n", 2787 isert_cmd, se_cmd->data_length); 2788 2789 isert_cmd->iser_ib_op = ISER_IB_RDMA_WRITE; 2790 rc = device->reg_rdma_mem(isert_cmd, conn); 2791 if (rc) { 2792 isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); 2793 return rc; 2794 } 2795 2796 if (!isert_prot_cmd(isert_conn, se_cmd)) { 2797 /* 2798 * Build isert_conn->tx_desc for iSCSI response PDU and attach 2799 */ 2800 isert_create_send_desc(isert_conn, isert_cmd, 2801 &isert_cmd->tx_desc); 2802 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *) 2803 &isert_cmd->tx_desc.iscsi_header); 2804 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2805 isert_init_send_wr(isert_conn, isert_cmd, 2806 &isert_cmd->tx_desc.send_wr); 2807 isert_cmd->s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr; 2808 isert_cmd->rdma_wr_num += 1; 2809 2810 rc = isert_post_recv(isert_conn, isert_cmd->rx_desc); 2811 if (rc) { 2812 isert_err("ib_post_recv failed with %d\n", rc); 2813 return rc; 2814 } 2815 } 2816 2817 rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed); 2818 if (rc) 2819 isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); 2820 2821 if (!isert_prot_cmd(isert_conn, se_cmd)) 2822 isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data " 2823 "READ\n", isert_cmd); 2824 else 2825 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", 2826 isert_cmd); 2827 2828 return 1; 2829 } 2830 2831 static int 2832 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) 2833 { 2834 struct se_cmd *se_cmd = &cmd->se_cmd; 2835 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2836 struct isert_conn *isert_conn = conn->context; 2837 struct isert_device *device = isert_conn->device; 2838 struct ib_send_wr *wr_failed; 2839 int rc; 2840 2841 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", 2842 isert_cmd, se_cmd->data_length, cmd->write_data_done); 2843 isert_cmd->iser_ib_op = ISER_IB_RDMA_READ; 2844 rc = device->reg_rdma_mem(isert_cmd, conn); 2845 if (rc) { 2846 isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); 2847 return rc; 2848 } 2849 2850 rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed); 2851 if (rc) 2852 isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); 2853 2854 isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", 2855 isert_cmd); 2856 2857 return 0; 2858 } 2859 2860 static int 2861 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 2862 { 2863 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2864 int ret = 0; 2865 2866 switch (state) { 2867 case ISTATE_REMOVE: 2868 spin_lock_bh(&conn->cmd_lock); 2869 list_del_init(&cmd->i_conn_node); 2870 spin_unlock_bh(&conn->cmd_lock); 2871 isert_put_cmd(isert_cmd, true); 2872 break; 2873 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 2874 ret = isert_put_nopin(cmd, conn, false); 2875 break; 2876 default: 2877 isert_err("Unknown immediate state: 0x%02x\n", state); 2878 ret = -EINVAL; 2879 break; 2880 } 2881 2882 return ret; 2883 } 2884 2885 static int 2886 isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 2887 { 2888 struct isert_conn *isert_conn = conn->context; 2889 int ret; 2890 2891 switch (state) { 2892 case ISTATE_SEND_LOGOUTRSP: 2893 ret = isert_put_logout_rsp(cmd, conn); 2894 if (!ret) 2895 isert_conn->logout_posted = true; 2896 break; 2897 case ISTATE_SEND_NOPIN: 2898 ret = isert_put_nopin(cmd, conn, true); 2899 break; 2900 case ISTATE_SEND_TASKMGTRSP: 2901 ret = isert_put_tm_rsp(cmd, conn); 2902 break; 2903 case ISTATE_SEND_REJECT: 2904 ret = isert_put_reject(cmd, conn); 2905 break; 2906 case ISTATE_SEND_TEXTRSP: 2907 ret = isert_put_text_rsp(cmd, conn); 2908 break; 2909 case ISTATE_SEND_STATUS: 2910 /* 2911 * Special case for sending non GOOD SCSI status from TX thread 2912 * context during pre se_cmd excecution failure. 2913 */ 2914 ret = isert_put_response(conn, cmd); 2915 break; 2916 default: 2917 isert_err("Unknown response state: 0x%02x\n", state); 2918 ret = -EINVAL; 2919 break; 2920 } 2921 2922 return ret; 2923 } 2924 2925 struct rdma_cm_id * 2926 isert_setup_id(struct isert_np *isert_np) 2927 { 2928 struct iscsi_np *np = isert_np->np; 2929 struct rdma_cm_id *id; 2930 struct sockaddr *sa; 2931 int ret; 2932 2933 sa = (struct sockaddr *)&np->np_sockaddr; 2934 isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa); 2935 2936 id = rdma_create_id(&init_net, isert_cma_handler, isert_np, 2937 RDMA_PS_TCP, IB_QPT_RC); 2938 if (IS_ERR(id)) { 2939 isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id)); 2940 ret = PTR_ERR(id); 2941 goto out; 2942 } 2943 isert_dbg("id %p context %p\n", id, id->context); 2944 2945 ret = rdma_bind_addr(id, sa); 2946 if (ret) { 2947 isert_err("rdma_bind_addr() failed: %d\n", ret); 2948 goto out_id; 2949 } 2950 2951 ret = rdma_listen(id, 0); 2952 if (ret) { 2953 isert_err("rdma_listen() failed: %d\n", ret); 2954 goto out_id; 2955 } 2956 2957 return id; 2958 out_id: 2959 rdma_destroy_id(id); 2960 out: 2961 return ERR_PTR(ret); 2962 } 2963 2964 static int 2965 isert_setup_np(struct iscsi_np *np, 2966 struct sockaddr_storage *ksockaddr) 2967 { 2968 struct isert_np *isert_np; 2969 struct rdma_cm_id *isert_lid; 2970 int ret; 2971 2972 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL); 2973 if (!isert_np) { 2974 isert_err("Unable to allocate struct isert_np\n"); 2975 return -ENOMEM; 2976 } 2977 sema_init(&isert_np->sem, 0); 2978 mutex_init(&isert_np->mutex); 2979 INIT_LIST_HEAD(&isert_np->accepted); 2980 INIT_LIST_HEAD(&isert_np->pending); 2981 isert_np->np = np; 2982 2983 /* 2984 * Setup the np->np_sockaddr from the passed sockaddr setup 2985 * in iscsi_target_configfs.c code.. 2986 */ 2987 memcpy(&np->np_sockaddr, ksockaddr, 2988 sizeof(struct sockaddr_storage)); 2989 2990 isert_lid = isert_setup_id(isert_np); 2991 if (IS_ERR(isert_lid)) { 2992 ret = PTR_ERR(isert_lid); 2993 goto out; 2994 } 2995 2996 isert_np->cm_id = isert_lid; 2997 np->np_context = isert_np; 2998 2999 return 0; 3000 3001 out: 3002 kfree(isert_np); 3003 3004 return ret; 3005 } 3006 3007 static int 3008 isert_rdma_accept(struct isert_conn *isert_conn) 3009 { 3010 struct rdma_cm_id *cm_id = isert_conn->cm_id; 3011 struct rdma_conn_param cp; 3012 int ret; 3013 struct iser_cm_hdr rsp_hdr; 3014 3015 memset(&cp, 0, sizeof(struct rdma_conn_param)); 3016 cp.initiator_depth = isert_conn->initiator_depth; 3017 cp.retry_count = 7; 3018 cp.rnr_retry_count = 7; 3019 3020 memset(&rsp_hdr, 0, sizeof(rsp_hdr)); 3021 rsp_hdr.flags = ISERT_ZBVA_NOT_USED; 3022 if (!isert_conn->snd_w_inv) 3023 rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED; 3024 cp.private_data = (void *)&rsp_hdr; 3025 cp.private_data_len = sizeof(rsp_hdr); 3026 3027 ret = rdma_accept(cm_id, &cp); 3028 if (ret) { 3029 isert_err("rdma_accept() failed with: %d\n", ret); 3030 return ret; 3031 } 3032 3033 return 0; 3034 } 3035 3036 static int 3037 isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) 3038 { 3039 struct isert_conn *isert_conn = conn->context; 3040 int ret; 3041 3042 isert_info("before login_req comp conn: %p\n", isert_conn); 3043 ret = wait_for_completion_interruptible(&isert_conn->login_req_comp); 3044 if (ret) { 3045 isert_err("isert_conn %p interrupted before got login req\n", 3046 isert_conn); 3047 return ret; 3048 } 3049 reinit_completion(&isert_conn->login_req_comp); 3050 3051 /* 3052 * For login requests after the first PDU, isert_rx_login_req() will 3053 * kick schedule_delayed_work(&conn->login_work) as the packet is 3054 * received, which turns this callback from iscsi_target_do_login_rx() 3055 * into a NOP. 3056 */ 3057 if (!login->first_request) 3058 return 0; 3059 3060 isert_rx_login_req(isert_conn); 3061 3062 isert_info("before login_comp conn: %p\n", conn); 3063 ret = wait_for_completion_interruptible(&isert_conn->login_comp); 3064 if (ret) 3065 return ret; 3066 3067 isert_info("processing login->req: %p\n", login->req); 3068 3069 return 0; 3070 } 3071 3072 static void 3073 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, 3074 struct isert_conn *isert_conn) 3075 { 3076 struct rdma_cm_id *cm_id = isert_conn->cm_id; 3077 struct rdma_route *cm_route = &cm_id->route; 3078 3079 conn->login_family = np->np_sockaddr.ss_family; 3080 3081 conn->login_sockaddr = cm_route->addr.dst_addr; 3082 conn->local_sockaddr = cm_route->addr.src_addr; 3083 } 3084 3085 static int 3086 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) 3087 { 3088 struct isert_np *isert_np = np->np_context; 3089 struct isert_conn *isert_conn; 3090 int ret; 3091 3092 accept_wait: 3093 ret = down_interruptible(&isert_np->sem); 3094 if (ret) 3095 return -ENODEV; 3096 3097 spin_lock_bh(&np->np_thread_lock); 3098 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { 3099 spin_unlock_bh(&np->np_thread_lock); 3100 isert_dbg("np_thread_state %d\n", 3101 np->np_thread_state); 3102 /** 3103 * No point in stalling here when np_thread 3104 * is in state RESET/SHUTDOWN/EXIT - bail 3105 **/ 3106 return -ENODEV; 3107 } 3108 spin_unlock_bh(&np->np_thread_lock); 3109 3110 mutex_lock(&isert_np->mutex); 3111 if (list_empty(&isert_np->pending)) { 3112 mutex_unlock(&isert_np->mutex); 3113 goto accept_wait; 3114 } 3115 isert_conn = list_first_entry(&isert_np->pending, 3116 struct isert_conn, node); 3117 list_del_init(&isert_conn->node); 3118 mutex_unlock(&isert_np->mutex); 3119 3120 conn->context = isert_conn; 3121 isert_conn->conn = conn; 3122 isert_conn->state = ISER_CONN_BOUND; 3123 3124 isert_set_conn_info(np, conn, isert_conn); 3125 3126 isert_dbg("Processing isert_conn: %p\n", isert_conn); 3127 3128 return 0; 3129 } 3130 3131 static void 3132 isert_free_np(struct iscsi_np *np) 3133 { 3134 struct isert_np *isert_np = np->np_context; 3135 struct isert_conn *isert_conn, *n; 3136 3137 if (isert_np->cm_id) 3138 rdma_destroy_id(isert_np->cm_id); 3139 3140 /* 3141 * FIXME: At this point we don't have a good way to insure 3142 * that at this point we don't have hanging connections that 3143 * completed RDMA establishment but didn't start iscsi login 3144 * process. So work-around this by cleaning up what ever piled 3145 * up in accepted and pending lists. 3146 */ 3147 mutex_lock(&isert_np->mutex); 3148 if (!list_empty(&isert_np->pending)) { 3149 isert_info("Still have isert pending connections\n"); 3150 list_for_each_entry_safe(isert_conn, n, 3151 &isert_np->pending, 3152 node) { 3153 isert_info("cleaning isert_conn %p state (%d)\n", 3154 isert_conn, isert_conn->state); 3155 isert_connect_release(isert_conn); 3156 } 3157 } 3158 3159 if (!list_empty(&isert_np->accepted)) { 3160 isert_info("Still have isert accepted connections\n"); 3161 list_for_each_entry_safe(isert_conn, n, 3162 &isert_np->accepted, 3163 node) { 3164 isert_info("cleaning isert_conn %p state (%d)\n", 3165 isert_conn, isert_conn->state); 3166 isert_connect_release(isert_conn); 3167 } 3168 } 3169 mutex_unlock(&isert_np->mutex); 3170 3171 np->np_context = NULL; 3172 kfree(isert_np); 3173 } 3174 3175 static void isert_release_work(struct work_struct *work) 3176 { 3177 struct isert_conn *isert_conn = container_of(work, 3178 struct isert_conn, 3179 release_work); 3180 3181 isert_info("Starting release conn %p\n", isert_conn); 3182 3183 mutex_lock(&isert_conn->mutex); 3184 isert_conn->state = ISER_CONN_DOWN; 3185 mutex_unlock(&isert_conn->mutex); 3186 3187 isert_info("Destroying conn %p\n", isert_conn); 3188 isert_put_conn(isert_conn); 3189 } 3190 3191 static void 3192 isert_wait4logout(struct isert_conn *isert_conn) 3193 { 3194 struct iscsi_conn *conn = isert_conn->conn; 3195 3196 isert_info("conn %p\n", isert_conn); 3197 3198 if (isert_conn->logout_posted) { 3199 isert_info("conn %p wait for conn_logout_comp\n", isert_conn); 3200 wait_for_completion_timeout(&conn->conn_logout_comp, 3201 SECONDS_FOR_LOGOUT_COMP * HZ); 3202 } 3203 } 3204 3205 static void 3206 isert_wait4cmds(struct iscsi_conn *conn) 3207 { 3208 isert_info("iscsi_conn %p\n", conn); 3209 3210 if (conn->sess) { 3211 target_sess_cmd_list_set_waiting(conn->sess->se_sess); 3212 target_wait_for_sess_cmds(conn->sess->se_sess); 3213 } 3214 } 3215 3216 static void 3217 isert_beacon_done(struct ib_cq *cq, struct ib_wc *wc) 3218 { 3219 struct isert_conn *isert_conn = wc->qp->qp_context; 3220 3221 isert_print_wc(wc, "beacon"); 3222 3223 isert_info("conn %p completing wait_comp_err\n", isert_conn); 3224 complete(&isert_conn->wait_comp_err); 3225 } 3226 3227 static void 3228 isert_wait4flush(struct isert_conn *isert_conn) 3229 { 3230 struct ib_recv_wr *bad_wr; 3231 static struct ib_cqe cqe = { .done = isert_beacon_done }; 3232 3233 isert_info("conn %p\n", isert_conn); 3234 3235 init_completion(&isert_conn->wait_comp_err); 3236 isert_conn->beacon.wr_cqe = &cqe; 3237 /* post an indication that all flush errors were consumed */ 3238 if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) { 3239 isert_err("conn %p failed to post beacon", isert_conn); 3240 return; 3241 } 3242 3243 wait_for_completion(&isert_conn->wait_comp_err); 3244 } 3245 3246 /** 3247 * isert_put_unsol_pending_cmds() - Drop commands waiting for 3248 * unsolicitate dataout 3249 * @conn: iscsi connection 3250 * 3251 * We might still have commands that are waiting for unsolicited 3252 * dataouts messages. We must put the extra reference on those 3253 * before blocking on the target_wait_for_session_cmds 3254 */ 3255 static void 3256 isert_put_unsol_pending_cmds(struct iscsi_conn *conn) 3257 { 3258 struct iscsi_cmd *cmd, *tmp; 3259 static LIST_HEAD(drop_cmd_list); 3260 3261 spin_lock_bh(&conn->cmd_lock); 3262 list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) { 3263 if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) && 3264 (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) && 3265 (cmd->write_data_done < cmd->se_cmd.data_length)) 3266 list_move_tail(&cmd->i_conn_node, &drop_cmd_list); 3267 } 3268 spin_unlock_bh(&conn->cmd_lock); 3269 3270 list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) { 3271 list_del_init(&cmd->i_conn_node); 3272 if (cmd->i_state != ISTATE_REMOVE) { 3273 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 3274 3275 isert_info("conn %p dropping cmd %p\n", conn, cmd); 3276 isert_put_cmd(isert_cmd, true); 3277 } 3278 } 3279 } 3280 3281 static void isert_wait_conn(struct iscsi_conn *conn) 3282 { 3283 struct isert_conn *isert_conn = conn->context; 3284 3285 isert_info("Starting conn %p\n", isert_conn); 3286 3287 mutex_lock(&isert_conn->mutex); 3288 isert_conn_terminate(isert_conn); 3289 mutex_unlock(&isert_conn->mutex); 3290 3291 isert_wait4flush(isert_conn); 3292 isert_put_unsol_pending_cmds(conn); 3293 isert_wait4cmds(conn); 3294 isert_wait4logout(isert_conn); 3295 3296 queue_work(isert_release_wq, &isert_conn->release_work); 3297 } 3298 3299 static void isert_free_conn(struct iscsi_conn *conn) 3300 { 3301 struct isert_conn *isert_conn = conn->context; 3302 3303 isert_wait4flush(isert_conn); 3304 isert_put_conn(isert_conn); 3305 } 3306 3307 static struct iscsit_transport iser_target_transport = { 3308 .name = "IB/iSER", 3309 .transport_type = ISCSI_INFINIBAND, 3310 .priv_size = sizeof(struct isert_cmd), 3311 .owner = THIS_MODULE, 3312 .iscsit_setup_np = isert_setup_np, 3313 .iscsit_accept_np = isert_accept_np, 3314 .iscsit_free_np = isert_free_np, 3315 .iscsit_wait_conn = isert_wait_conn, 3316 .iscsit_free_conn = isert_free_conn, 3317 .iscsit_get_login_rx = isert_get_login_rx, 3318 .iscsit_put_login_tx = isert_put_login_tx, 3319 .iscsit_immediate_queue = isert_immediate_queue, 3320 .iscsit_response_queue = isert_response_queue, 3321 .iscsit_get_dataout = isert_get_dataout, 3322 .iscsit_queue_data_in = isert_put_datain, 3323 .iscsit_queue_status = isert_put_response, 3324 .iscsit_aborted_task = isert_aborted_task, 3325 .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops, 3326 }; 3327 3328 static int __init isert_init(void) 3329 { 3330 int ret; 3331 3332 isert_comp_wq = alloc_workqueue("isert_comp_wq", 3333 WQ_UNBOUND | WQ_HIGHPRI, 0); 3334 if (!isert_comp_wq) { 3335 isert_err("Unable to allocate isert_comp_wq\n"); 3336 ret = -ENOMEM; 3337 return -ENOMEM; 3338 } 3339 3340 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND, 3341 WQ_UNBOUND_MAX_ACTIVE); 3342 if (!isert_release_wq) { 3343 isert_err("Unable to allocate isert_release_wq\n"); 3344 ret = -ENOMEM; 3345 goto destroy_comp_wq; 3346 } 3347 3348 iscsit_register_transport(&iser_target_transport); 3349 isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n"); 3350 3351 return 0; 3352 3353 destroy_comp_wq: 3354 destroy_workqueue(isert_comp_wq); 3355 3356 return ret; 3357 } 3358 3359 static void __exit isert_exit(void) 3360 { 3361 flush_scheduled_work(); 3362 destroy_workqueue(isert_release_wq); 3363 destroy_workqueue(isert_comp_wq); 3364 iscsit_unregister_transport(&iser_target_transport); 3365 isert_info("iSER_TARGET[0] - Released iser_target_transport\n"); 3366 } 3367 3368 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); 3369 MODULE_VERSION("1.0"); 3370 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 3371 MODULE_LICENSE("GPL"); 3372 3373 module_init(isert_init); 3374 module_exit(isert_exit); 3375