1 /******************************************************************************* 2 * This file contains iSCSI extentions for RDMA (iSER) Verbs 3 * 4 * (c) Copyright 2013 Datera, Inc. 5 * 6 * Nicholas A. Bellinger <nab@linux-iscsi.org> 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 ****************************************************************************/ 18 19 #include <linux/string.h> 20 #include <linux/module.h> 21 #include <linux/scatterlist.h> 22 #include <linux/socket.h> 23 #include <linux/in.h> 24 #include <linux/in6.h> 25 #include <rdma/ib_verbs.h> 26 #include <rdma/rdma_cm.h> 27 #include <target/target_core_base.h> 28 #include <target/target_core_fabric.h> 29 #include <target/iscsi/iscsi_transport.h> 30 #include <linux/semaphore.h> 31 32 #include "ib_isert.h" 33 34 #define ISERT_MAX_CONN 8 35 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN) 36 #define ISER_MAX_TX_CQ_LEN \ 37 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN) 38 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \ 39 ISERT_MAX_CONN) 40 41 static int isert_debug_level; 42 module_param_named(debug_level, isert_debug_level, int, 0644); 43 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)"); 44 45 static DEFINE_MUTEX(device_list_mutex); 46 static LIST_HEAD(device_list); 47 static struct workqueue_struct *isert_comp_wq; 48 static struct workqueue_struct *isert_release_wq; 49 50 static int 51 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd); 52 static int 53 isert_login_post_recv(struct isert_conn *isert_conn); 54 static int 55 isert_rdma_accept(struct isert_conn *isert_conn); 56 struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); 57 58 static void isert_release_work(struct work_struct *work); 59 static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc); 60 static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc); 61 static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc); 62 static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc); 63 64 static inline bool 65 isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd) 66 { 67 return (conn->pi_support && 68 cmd->prot_op != TARGET_PROT_NORMAL); 69 } 70 71 72 static void 73 isert_qp_event_callback(struct ib_event *e, void *context) 74 { 75 struct isert_conn *isert_conn = context; 76 77 isert_err("%s (%d): conn %p\n", 78 ib_event_msg(e->event), e->event, isert_conn); 79 80 switch (e->event) { 81 case IB_EVENT_COMM_EST: 82 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST); 83 break; 84 case IB_EVENT_QP_LAST_WQE_REACHED: 85 isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n"); 86 break; 87 default: 88 break; 89 } 90 } 91 92 static struct isert_comp * 93 isert_comp_get(struct isert_conn *isert_conn) 94 { 95 struct isert_device *device = isert_conn->device; 96 struct isert_comp *comp; 97 int i, min = 0; 98 99 mutex_lock(&device_list_mutex); 100 for (i = 0; i < device->comps_used; i++) 101 if (device->comps[i].active_qps < 102 device->comps[min].active_qps) 103 min = i; 104 comp = &device->comps[min]; 105 comp->active_qps++; 106 mutex_unlock(&device_list_mutex); 107 108 isert_info("conn %p, using comp %p min_index: %d\n", 109 isert_conn, comp, min); 110 111 return comp; 112 } 113 114 static void 115 isert_comp_put(struct isert_comp *comp) 116 { 117 mutex_lock(&device_list_mutex); 118 comp->active_qps--; 119 mutex_unlock(&device_list_mutex); 120 } 121 122 static struct ib_qp * 123 isert_create_qp(struct isert_conn *isert_conn, 124 struct isert_comp *comp, 125 struct rdma_cm_id *cma_id) 126 { 127 struct isert_device *device = isert_conn->device; 128 struct ib_qp_init_attr attr; 129 int ret; 130 131 memset(&attr, 0, sizeof(struct ib_qp_init_attr)); 132 attr.event_handler = isert_qp_event_callback; 133 attr.qp_context = isert_conn; 134 attr.send_cq = comp->cq; 135 attr.recv_cq = comp->cq; 136 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1; 137 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1; 138 attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX; 139 attr.cap.max_send_sge = device->ib_device->attrs.max_sge; 140 attr.cap.max_recv_sge = 1; 141 attr.sq_sig_type = IB_SIGNAL_REQ_WR; 142 attr.qp_type = IB_QPT_RC; 143 if (device->pi_capable) 144 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; 145 146 ret = rdma_create_qp(cma_id, device->pd, &attr); 147 if (ret) { 148 isert_err("rdma_create_qp failed for cma_id %d\n", ret); 149 return ERR_PTR(ret); 150 } 151 152 return cma_id->qp; 153 } 154 155 static int 156 isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) 157 { 158 struct isert_comp *comp; 159 int ret; 160 161 comp = isert_comp_get(isert_conn); 162 isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id); 163 if (IS_ERR(isert_conn->qp)) { 164 ret = PTR_ERR(isert_conn->qp); 165 goto err; 166 } 167 168 return 0; 169 err: 170 isert_comp_put(comp); 171 return ret; 172 } 173 174 static int 175 isert_alloc_rx_descriptors(struct isert_conn *isert_conn) 176 { 177 struct isert_device *device = isert_conn->device; 178 struct ib_device *ib_dev = device->ib_device; 179 struct iser_rx_desc *rx_desc; 180 struct ib_sge *rx_sg; 181 u64 dma_addr; 182 int i, j; 183 184 isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS * 185 sizeof(struct iser_rx_desc), GFP_KERNEL); 186 if (!isert_conn->rx_descs) 187 return -ENOMEM; 188 189 rx_desc = isert_conn->rx_descs; 190 191 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 192 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc, 193 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 194 if (ib_dma_mapping_error(ib_dev, dma_addr)) 195 goto dma_map_fail; 196 197 rx_desc->dma_addr = dma_addr; 198 199 rx_sg = &rx_desc->rx_sg; 200 rx_sg->addr = rx_desc->dma_addr; 201 rx_sg->length = ISER_RX_PAYLOAD_SIZE; 202 rx_sg->lkey = device->pd->local_dma_lkey; 203 rx_desc->rx_cqe.done = isert_recv_done; 204 } 205 206 return 0; 207 208 dma_map_fail: 209 rx_desc = isert_conn->rx_descs; 210 for (j = 0; j < i; j++, rx_desc++) { 211 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 212 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 213 } 214 kfree(isert_conn->rx_descs); 215 isert_conn->rx_descs = NULL; 216 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn); 217 return -ENOMEM; 218 } 219 220 static void 221 isert_free_rx_descriptors(struct isert_conn *isert_conn) 222 { 223 struct ib_device *ib_dev = isert_conn->device->ib_device; 224 struct iser_rx_desc *rx_desc; 225 int i; 226 227 if (!isert_conn->rx_descs) 228 return; 229 230 rx_desc = isert_conn->rx_descs; 231 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 232 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 233 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 234 } 235 236 kfree(isert_conn->rx_descs); 237 isert_conn->rx_descs = NULL; 238 } 239 240 static void 241 isert_free_comps(struct isert_device *device) 242 { 243 int i; 244 245 for (i = 0; i < device->comps_used; i++) { 246 struct isert_comp *comp = &device->comps[i]; 247 248 if (comp->cq) 249 ib_free_cq(comp->cq); 250 } 251 kfree(device->comps); 252 } 253 254 static int 255 isert_alloc_comps(struct isert_device *device) 256 { 257 int i, max_cqe, ret = 0; 258 259 device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(), 260 device->ib_device->num_comp_vectors)); 261 262 isert_info("Using %d CQs, %s supports %d vectors support " 263 "pi_capable %d\n", 264 device->comps_used, device->ib_device->name, 265 device->ib_device->num_comp_vectors, 266 device->pi_capable); 267 268 device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp), 269 GFP_KERNEL); 270 if (!device->comps) 271 return -ENOMEM; 272 273 max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe); 274 275 for (i = 0; i < device->comps_used; i++) { 276 struct isert_comp *comp = &device->comps[i]; 277 278 comp->device = device; 279 comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i, 280 IB_POLL_WORKQUEUE); 281 if (IS_ERR(comp->cq)) { 282 isert_err("Unable to allocate cq\n"); 283 ret = PTR_ERR(comp->cq); 284 comp->cq = NULL; 285 goto out_cq; 286 } 287 } 288 289 return 0; 290 out_cq: 291 isert_free_comps(device); 292 return ret; 293 } 294 295 static int 296 isert_create_device_ib_res(struct isert_device *device) 297 { 298 struct ib_device *ib_dev = device->ib_device; 299 int ret; 300 301 isert_dbg("devattr->max_sge: %d\n", ib_dev->attrs.max_sge); 302 isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd); 303 304 ret = isert_alloc_comps(device); 305 if (ret) 306 goto out; 307 308 device->pd = ib_alloc_pd(ib_dev, 0); 309 if (IS_ERR(device->pd)) { 310 ret = PTR_ERR(device->pd); 311 isert_err("failed to allocate pd, device %p, ret=%d\n", 312 device, ret); 313 goto out_cq; 314 } 315 316 /* Check signature cap */ 317 device->pi_capable = ib_dev->attrs.device_cap_flags & 318 IB_DEVICE_SIGNATURE_HANDOVER ? true : false; 319 320 return 0; 321 322 out_cq: 323 isert_free_comps(device); 324 out: 325 if (ret > 0) 326 ret = -EINVAL; 327 return ret; 328 } 329 330 static void 331 isert_free_device_ib_res(struct isert_device *device) 332 { 333 isert_info("device %p\n", device); 334 335 ib_dealloc_pd(device->pd); 336 isert_free_comps(device); 337 } 338 339 static void 340 isert_device_put(struct isert_device *device) 341 { 342 mutex_lock(&device_list_mutex); 343 device->refcount--; 344 isert_info("device %p refcount %d\n", device, device->refcount); 345 if (!device->refcount) { 346 isert_free_device_ib_res(device); 347 list_del(&device->dev_node); 348 kfree(device); 349 } 350 mutex_unlock(&device_list_mutex); 351 } 352 353 static struct isert_device * 354 isert_device_get(struct rdma_cm_id *cma_id) 355 { 356 struct isert_device *device; 357 int ret; 358 359 mutex_lock(&device_list_mutex); 360 list_for_each_entry(device, &device_list, dev_node) { 361 if (device->ib_device->node_guid == cma_id->device->node_guid) { 362 device->refcount++; 363 isert_info("Found iser device %p refcount %d\n", 364 device, device->refcount); 365 mutex_unlock(&device_list_mutex); 366 return device; 367 } 368 } 369 370 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL); 371 if (!device) { 372 mutex_unlock(&device_list_mutex); 373 return ERR_PTR(-ENOMEM); 374 } 375 376 INIT_LIST_HEAD(&device->dev_node); 377 378 device->ib_device = cma_id->device; 379 ret = isert_create_device_ib_res(device); 380 if (ret) { 381 kfree(device); 382 mutex_unlock(&device_list_mutex); 383 return ERR_PTR(ret); 384 } 385 386 device->refcount++; 387 list_add_tail(&device->dev_node, &device_list); 388 isert_info("Created a new iser device %p refcount %d\n", 389 device, device->refcount); 390 mutex_unlock(&device_list_mutex); 391 392 return device; 393 } 394 395 static void 396 isert_init_conn(struct isert_conn *isert_conn) 397 { 398 isert_conn->state = ISER_CONN_INIT; 399 INIT_LIST_HEAD(&isert_conn->node); 400 init_completion(&isert_conn->login_comp); 401 init_completion(&isert_conn->login_req_comp); 402 init_waitqueue_head(&isert_conn->rem_wait); 403 kref_init(&isert_conn->kref); 404 mutex_init(&isert_conn->mutex); 405 INIT_WORK(&isert_conn->release_work, isert_release_work); 406 } 407 408 static void 409 isert_free_login_buf(struct isert_conn *isert_conn) 410 { 411 struct ib_device *ib_dev = isert_conn->device->ib_device; 412 413 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, 414 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); 415 kfree(isert_conn->login_rsp_buf); 416 417 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 418 ISER_RX_PAYLOAD_SIZE, 419 DMA_FROM_DEVICE); 420 kfree(isert_conn->login_req_buf); 421 } 422 423 static int 424 isert_alloc_login_buf(struct isert_conn *isert_conn, 425 struct ib_device *ib_dev) 426 { 427 int ret; 428 429 isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf), 430 GFP_KERNEL); 431 if (!isert_conn->login_req_buf) 432 return -ENOMEM; 433 434 isert_conn->login_req_dma = ib_dma_map_single(ib_dev, 435 isert_conn->login_req_buf, 436 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 437 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); 438 if (ret) { 439 isert_err("login_req_dma mapping error: %d\n", ret); 440 isert_conn->login_req_dma = 0; 441 goto out_free_login_req_buf; 442 } 443 444 isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL); 445 if (!isert_conn->login_rsp_buf) { 446 ret = -ENOMEM; 447 goto out_unmap_login_req_buf; 448 } 449 450 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, 451 isert_conn->login_rsp_buf, 452 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); 453 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); 454 if (ret) { 455 isert_err("login_rsp_dma mapping error: %d\n", ret); 456 isert_conn->login_rsp_dma = 0; 457 goto out_free_login_rsp_buf; 458 } 459 460 return 0; 461 462 out_free_login_rsp_buf: 463 kfree(isert_conn->login_rsp_buf); 464 out_unmap_login_req_buf: 465 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 466 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 467 out_free_login_req_buf: 468 kfree(isert_conn->login_req_buf); 469 return ret; 470 } 471 472 static void 473 isert_set_nego_params(struct isert_conn *isert_conn, 474 struct rdma_conn_param *param) 475 { 476 struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs; 477 478 /* Set max inflight RDMA READ requests */ 479 isert_conn->initiator_depth = min_t(u8, param->initiator_depth, 480 attr->max_qp_init_rd_atom); 481 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth); 482 483 if (param->private_data) { 484 u8 flags = *(u8 *)param->private_data; 485 486 /* 487 * use remote invalidation if the both initiator 488 * and the HCA support it 489 */ 490 isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) && 491 (attr->device_cap_flags & 492 IB_DEVICE_MEM_MGT_EXTENSIONS); 493 if (isert_conn->snd_w_inv) 494 isert_info("Using remote invalidation\n"); 495 } 496 } 497 498 static int 499 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 500 { 501 struct isert_np *isert_np = cma_id->context; 502 struct iscsi_np *np = isert_np->np; 503 struct isert_conn *isert_conn; 504 struct isert_device *device; 505 int ret = 0; 506 507 spin_lock_bh(&np->np_thread_lock); 508 if (!np->enabled) { 509 spin_unlock_bh(&np->np_thread_lock); 510 isert_dbg("iscsi_np is not enabled, reject connect request\n"); 511 return rdma_reject(cma_id, NULL, 0); 512 } 513 spin_unlock_bh(&np->np_thread_lock); 514 515 isert_dbg("cma_id: %p, portal: %p\n", 516 cma_id, cma_id->context); 517 518 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL); 519 if (!isert_conn) 520 return -ENOMEM; 521 522 isert_init_conn(isert_conn); 523 isert_conn->cm_id = cma_id; 524 525 ret = isert_alloc_login_buf(isert_conn, cma_id->device); 526 if (ret) 527 goto out; 528 529 device = isert_device_get(cma_id); 530 if (IS_ERR(device)) { 531 ret = PTR_ERR(device); 532 goto out_rsp_dma_map; 533 } 534 isert_conn->device = device; 535 536 isert_set_nego_params(isert_conn, &event->param.conn); 537 538 ret = isert_conn_setup_qp(isert_conn, cma_id); 539 if (ret) 540 goto out_conn_dev; 541 542 ret = isert_login_post_recv(isert_conn); 543 if (ret) 544 goto out_conn_dev; 545 546 ret = isert_rdma_accept(isert_conn); 547 if (ret) 548 goto out_conn_dev; 549 550 mutex_lock(&isert_np->mutex); 551 list_add_tail(&isert_conn->node, &isert_np->accepted); 552 mutex_unlock(&isert_np->mutex); 553 554 return 0; 555 556 out_conn_dev: 557 isert_device_put(device); 558 out_rsp_dma_map: 559 isert_free_login_buf(isert_conn); 560 out: 561 kfree(isert_conn); 562 rdma_reject(cma_id, NULL, 0); 563 return ret; 564 } 565 566 static void 567 isert_connect_release(struct isert_conn *isert_conn) 568 { 569 struct isert_device *device = isert_conn->device; 570 571 isert_dbg("conn %p\n", isert_conn); 572 573 BUG_ON(!device); 574 575 isert_free_rx_descriptors(isert_conn); 576 if (isert_conn->cm_id && 577 !isert_conn->dev_removed) 578 rdma_destroy_id(isert_conn->cm_id); 579 580 if (isert_conn->qp) { 581 struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context; 582 583 isert_comp_put(comp); 584 ib_destroy_qp(isert_conn->qp); 585 } 586 587 if (isert_conn->login_req_buf) 588 isert_free_login_buf(isert_conn); 589 590 isert_device_put(device); 591 592 if (isert_conn->dev_removed) 593 wake_up_interruptible(&isert_conn->rem_wait); 594 else 595 kfree(isert_conn); 596 } 597 598 static void 599 isert_connected_handler(struct rdma_cm_id *cma_id) 600 { 601 struct isert_conn *isert_conn = cma_id->qp->qp_context; 602 struct isert_np *isert_np = cma_id->context; 603 604 isert_info("conn %p\n", isert_conn); 605 606 mutex_lock(&isert_conn->mutex); 607 isert_conn->state = ISER_CONN_UP; 608 kref_get(&isert_conn->kref); 609 mutex_unlock(&isert_conn->mutex); 610 611 mutex_lock(&isert_np->mutex); 612 list_move_tail(&isert_conn->node, &isert_np->pending); 613 mutex_unlock(&isert_np->mutex); 614 615 isert_info("np %p: Allow accept_np to continue\n", isert_np); 616 up(&isert_np->sem); 617 } 618 619 static void 620 isert_release_kref(struct kref *kref) 621 { 622 struct isert_conn *isert_conn = container_of(kref, 623 struct isert_conn, kref); 624 625 isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm, 626 current->pid); 627 628 isert_connect_release(isert_conn); 629 } 630 631 static void 632 isert_put_conn(struct isert_conn *isert_conn) 633 { 634 kref_put(&isert_conn->kref, isert_release_kref); 635 } 636 637 static void 638 isert_handle_unbound_conn(struct isert_conn *isert_conn) 639 { 640 struct isert_np *isert_np = isert_conn->cm_id->context; 641 642 mutex_lock(&isert_np->mutex); 643 if (!list_empty(&isert_conn->node)) { 644 /* 645 * This means iscsi doesn't know this connection 646 * so schedule a cleanup ourselves 647 */ 648 list_del_init(&isert_conn->node); 649 isert_put_conn(isert_conn); 650 queue_work(isert_release_wq, &isert_conn->release_work); 651 } 652 mutex_unlock(&isert_np->mutex); 653 } 654 655 /** 656 * isert_conn_terminate() - Initiate connection termination 657 * @isert_conn: isert connection struct 658 * 659 * Notes: 660 * In case the connection state is BOUND, move state 661 * to TEMINATING and start teardown sequence (rdma_disconnect). 662 * In case the connection state is UP, complete flush as well. 663 * 664 * This routine must be called with mutex held. Thus it is 665 * safe to call multiple times. 666 */ 667 static void 668 isert_conn_terminate(struct isert_conn *isert_conn) 669 { 670 int err; 671 672 if (isert_conn->state >= ISER_CONN_TERMINATING) 673 return; 674 675 isert_info("Terminating conn %p state %d\n", 676 isert_conn, isert_conn->state); 677 isert_conn->state = ISER_CONN_TERMINATING; 678 err = rdma_disconnect(isert_conn->cm_id); 679 if (err) 680 isert_warn("Failed rdma_disconnect isert_conn %p\n", 681 isert_conn); 682 } 683 684 static int 685 isert_np_cma_handler(struct isert_np *isert_np, 686 enum rdma_cm_event_type event) 687 { 688 isert_dbg("%s (%d): isert np %p\n", 689 rdma_event_msg(event), event, isert_np); 690 691 switch (event) { 692 case RDMA_CM_EVENT_DEVICE_REMOVAL: 693 isert_np->cm_id = NULL; 694 break; 695 case RDMA_CM_EVENT_ADDR_CHANGE: 696 isert_np->cm_id = isert_setup_id(isert_np); 697 if (IS_ERR(isert_np->cm_id)) { 698 isert_err("isert np %p setup id failed: %ld\n", 699 isert_np, PTR_ERR(isert_np->cm_id)); 700 isert_np->cm_id = NULL; 701 } 702 break; 703 default: 704 isert_err("isert np %p Unexpected event %d\n", 705 isert_np, event); 706 } 707 708 return -1; 709 } 710 711 static int 712 isert_disconnected_handler(struct rdma_cm_id *cma_id, 713 enum rdma_cm_event_type event) 714 { 715 struct isert_conn *isert_conn = cma_id->qp->qp_context; 716 717 mutex_lock(&isert_conn->mutex); 718 switch (isert_conn->state) { 719 case ISER_CONN_TERMINATING: 720 break; 721 case ISER_CONN_UP: 722 isert_conn_terminate(isert_conn); 723 ib_drain_qp(isert_conn->qp); 724 isert_handle_unbound_conn(isert_conn); 725 break; 726 case ISER_CONN_BOUND: 727 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */ 728 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 729 break; 730 default: 731 isert_warn("conn %p terminating in state %d\n", 732 isert_conn, isert_conn->state); 733 } 734 mutex_unlock(&isert_conn->mutex); 735 736 return 0; 737 } 738 739 static int 740 isert_connect_error(struct rdma_cm_id *cma_id) 741 { 742 struct isert_conn *isert_conn = cma_id->qp->qp_context; 743 744 list_del_init(&isert_conn->node); 745 isert_conn->cm_id = NULL; 746 isert_put_conn(isert_conn); 747 748 return -1; 749 } 750 751 static int 752 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 753 { 754 struct isert_np *isert_np = cma_id->context; 755 struct isert_conn *isert_conn; 756 int ret = 0; 757 758 isert_info("%s (%d): status %d id %p np %p\n", 759 rdma_event_msg(event->event), event->event, 760 event->status, cma_id, cma_id->context); 761 762 if (isert_np->cm_id == cma_id) 763 return isert_np_cma_handler(cma_id->context, event->event); 764 765 switch (event->event) { 766 case RDMA_CM_EVENT_CONNECT_REQUEST: 767 ret = isert_connect_request(cma_id, event); 768 if (ret) 769 isert_err("failed handle connect request %d\n", ret); 770 break; 771 case RDMA_CM_EVENT_ESTABLISHED: 772 isert_connected_handler(cma_id); 773 break; 774 case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */ 775 case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */ 776 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ 777 ret = isert_disconnected_handler(cma_id, event->event); 778 break; 779 case RDMA_CM_EVENT_DEVICE_REMOVAL: 780 isert_conn = cma_id->qp->qp_context; 781 isert_conn->dev_removed = true; 782 isert_disconnected_handler(cma_id, event->event); 783 wait_event_interruptible(isert_conn->rem_wait, 784 isert_conn->state == ISER_CONN_DOWN); 785 kfree(isert_conn); 786 /* 787 * return non-zero from the callback to destroy 788 * the rdma cm id 789 */ 790 return 1; 791 case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */ 792 isert_info("Connection rejected: %s\n", 793 rdma_reject_msg(cma_id, event->status)); 794 case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */ 795 case RDMA_CM_EVENT_CONNECT_ERROR: 796 ret = isert_connect_error(cma_id); 797 break; 798 default: 799 isert_err("Unhandled RDMA CMA event: %d\n", event->event); 800 break; 801 } 802 803 return ret; 804 } 805 806 static int 807 isert_post_recvm(struct isert_conn *isert_conn, u32 count) 808 { 809 struct ib_recv_wr *rx_wr, *rx_wr_failed; 810 int i, ret; 811 struct iser_rx_desc *rx_desc; 812 813 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { 814 rx_desc = &isert_conn->rx_descs[i]; 815 816 rx_wr->wr_cqe = &rx_desc->rx_cqe; 817 rx_wr->sg_list = &rx_desc->rx_sg; 818 rx_wr->num_sge = 1; 819 rx_wr->next = rx_wr + 1; 820 } 821 rx_wr--; 822 rx_wr->next = NULL; /* mark end of work requests list */ 823 824 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, 825 &rx_wr_failed); 826 if (ret) 827 isert_err("ib_post_recv() failed with ret: %d\n", ret); 828 829 return ret; 830 } 831 832 static int 833 isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc) 834 { 835 struct ib_recv_wr *rx_wr_failed, rx_wr; 836 int ret; 837 838 rx_wr.wr_cqe = &rx_desc->rx_cqe; 839 rx_wr.sg_list = &rx_desc->rx_sg; 840 rx_wr.num_sge = 1; 841 rx_wr.next = NULL; 842 843 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed); 844 if (ret) 845 isert_err("ib_post_recv() failed with ret: %d\n", ret); 846 847 return ret; 848 } 849 850 static int 851 isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) 852 { 853 struct ib_device *ib_dev = isert_conn->cm_id->device; 854 struct ib_send_wr send_wr, *send_wr_failed; 855 int ret; 856 857 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr, 858 ISER_HEADERS_LEN, DMA_TO_DEVICE); 859 860 tx_desc->tx_cqe.done = isert_login_send_done; 861 862 send_wr.next = NULL; 863 send_wr.wr_cqe = &tx_desc->tx_cqe; 864 send_wr.sg_list = tx_desc->tx_sg; 865 send_wr.num_sge = tx_desc->num_sge; 866 send_wr.opcode = IB_WR_SEND; 867 send_wr.send_flags = IB_SEND_SIGNALED; 868 869 ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed); 870 if (ret) 871 isert_err("ib_post_send() failed, ret: %d\n", ret); 872 873 return ret; 874 } 875 876 static void 877 isert_create_send_desc(struct isert_conn *isert_conn, 878 struct isert_cmd *isert_cmd, 879 struct iser_tx_desc *tx_desc) 880 { 881 struct isert_device *device = isert_conn->device; 882 struct ib_device *ib_dev = device->ib_device; 883 884 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, 885 ISER_HEADERS_LEN, DMA_TO_DEVICE); 886 887 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl)); 888 tx_desc->iser_header.flags = ISCSI_CTRL; 889 890 tx_desc->num_sge = 1; 891 892 if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) { 893 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; 894 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc); 895 } 896 } 897 898 static int 899 isert_init_tx_hdrs(struct isert_conn *isert_conn, 900 struct iser_tx_desc *tx_desc) 901 { 902 struct isert_device *device = isert_conn->device; 903 struct ib_device *ib_dev = device->ib_device; 904 u64 dma_addr; 905 906 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc, 907 ISER_HEADERS_LEN, DMA_TO_DEVICE); 908 if (ib_dma_mapping_error(ib_dev, dma_addr)) { 909 isert_err("ib_dma_mapping_error() failed\n"); 910 return -ENOMEM; 911 } 912 913 tx_desc->dma_addr = dma_addr; 914 tx_desc->tx_sg[0].addr = tx_desc->dma_addr; 915 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; 916 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; 917 918 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n", 919 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length, 920 tx_desc->tx_sg[0].lkey); 921 922 return 0; 923 } 924 925 static void 926 isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 927 struct ib_send_wr *send_wr) 928 { 929 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; 930 931 tx_desc->tx_cqe.done = isert_send_done; 932 send_wr->wr_cqe = &tx_desc->tx_cqe; 933 934 if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) { 935 send_wr->opcode = IB_WR_SEND_WITH_INV; 936 send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey; 937 } else { 938 send_wr->opcode = IB_WR_SEND; 939 } 940 941 send_wr->sg_list = &tx_desc->tx_sg[0]; 942 send_wr->num_sge = isert_cmd->tx_desc.num_sge; 943 send_wr->send_flags = IB_SEND_SIGNALED; 944 } 945 946 static int 947 isert_login_post_recv(struct isert_conn *isert_conn) 948 { 949 struct ib_recv_wr rx_wr, *rx_wr_fail; 950 struct ib_sge sge; 951 int ret; 952 953 memset(&sge, 0, sizeof(struct ib_sge)); 954 sge.addr = isert_conn->login_req_dma; 955 sge.length = ISER_RX_PAYLOAD_SIZE; 956 sge.lkey = isert_conn->device->pd->local_dma_lkey; 957 958 isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", 959 sge.addr, sge.length, sge.lkey); 960 961 isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done; 962 963 memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); 964 rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe; 965 rx_wr.sg_list = &sge; 966 rx_wr.num_sge = 1; 967 968 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail); 969 if (ret) 970 isert_err("ib_post_recv() failed: %d\n", ret); 971 972 return ret; 973 } 974 975 static int 976 isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, 977 u32 length) 978 { 979 struct isert_conn *isert_conn = conn->context; 980 struct isert_device *device = isert_conn->device; 981 struct ib_device *ib_dev = device->ib_device; 982 struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc; 983 int ret; 984 985 isert_create_send_desc(isert_conn, NULL, tx_desc); 986 987 memcpy(&tx_desc->iscsi_header, &login->rsp[0], 988 sizeof(struct iscsi_hdr)); 989 990 isert_init_tx_hdrs(isert_conn, tx_desc); 991 992 if (length > 0) { 993 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1]; 994 995 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma, 996 length, DMA_TO_DEVICE); 997 998 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length); 999 1000 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma, 1001 length, DMA_TO_DEVICE); 1002 1003 tx_dsg->addr = isert_conn->login_rsp_dma; 1004 tx_dsg->length = length; 1005 tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey; 1006 tx_desc->num_sge = 2; 1007 } 1008 if (!login->login_failed) { 1009 if (login->login_complete) { 1010 ret = isert_alloc_rx_descriptors(isert_conn); 1011 if (ret) 1012 return ret; 1013 1014 ret = isert_post_recvm(isert_conn, 1015 ISERT_QP_MAX_RECV_DTOS); 1016 if (ret) 1017 return ret; 1018 1019 /* Now we are in FULL_FEATURE phase */ 1020 mutex_lock(&isert_conn->mutex); 1021 isert_conn->state = ISER_CONN_FULL_FEATURE; 1022 mutex_unlock(&isert_conn->mutex); 1023 goto post_send; 1024 } 1025 1026 ret = isert_login_post_recv(isert_conn); 1027 if (ret) 1028 return ret; 1029 } 1030 post_send: 1031 ret = isert_login_post_send(isert_conn, tx_desc); 1032 if (ret) 1033 return ret; 1034 1035 return 0; 1036 } 1037 1038 static void 1039 isert_rx_login_req(struct isert_conn *isert_conn) 1040 { 1041 struct iser_rx_desc *rx_desc = isert_conn->login_req_buf; 1042 int rx_buflen = isert_conn->login_req_len; 1043 struct iscsi_conn *conn = isert_conn->conn; 1044 struct iscsi_login *login = conn->conn_login; 1045 int size; 1046 1047 isert_info("conn %p\n", isert_conn); 1048 1049 WARN_ON_ONCE(!login); 1050 1051 if (login->first_request) { 1052 struct iscsi_login_req *login_req = 1053 (struct iscsi_login_req *)&rx_desc->iscsi_header; 1054 /* 1055 * Setup the initial iscsi_login values from the leading 1056 * login request PDU. 1057 */ 1058 login->leading_connection = (!login_req->tsih) ? 1 : 0; 1059 login->current_stage = 1060 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) 1061 >> 2; 1062 login->version_min = login_req->min_version; 1063 login->version_max = login_req->max_version; 1064 memcpy(login->isid, login_req->isid, 6); 1065 login->cmd_sn = be32_to_cpu(login_req->cmdsn); 1066 login->init_task_tag = login_req->itt; 1067 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); 1068 login->cid = be16_to_cpu(login_req->cid); 1069 login->tsih = be16_to_cpu(login_req->tsih); 1070 } 1071 1072 memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); 1073 1074 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); 1075 isert_dbg("Using login payload size: %d, rx_buflen: %d " 1076 "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen, 1077 MAX_KEY_VALUE_PAIRS); 1078 memcpy(login->req_buf, &rx_desc->data[0], size); 1079 1080 if (login->first_request) { 1081 complete(&isert_conn->login_comp); 1082 return; 1083 } 1084 schedule_delayed_work(&conn->login_work, 0); 1085 } 1086 1087 static struct iscsi_cmd 1088 *isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc) 1089 { 1090 struct isert_conn *isert_conn = conn->context; 1091 struct isert_cmd *isert_cmd; 1092 struct iscsi_cmd *cmd; 1093 1094 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 1095 if (!cmd) { 1096 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n"); 1097 return NULL; 1098 } 1099 isert_cmd = iscsit_priv_cmd(cmd); 1100 isert_cmd->conn = isert_conn; 1101 isert_cmd->iscsi_cmd = cmd; 1102 isert_cmd->rx_desc = rx_desc; 1103 1104 return cmd; 1105 } 1106 1107 static int 1108 isert_handle_scsi_cmd(struct isert_conn *isert_conn, 1109 struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd, 1110 struct iser_rx_desc *rx_desc, unsigned char *buf) 1111 { 1112 struct iscsi_conn *conn = isert_conn->conn; 1113 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; 1114 int imm_data, imm_data_len, unsol_data, sg_nents, rc; 1115 bool dump_payload = false; 1116 unsigned int data_len; 1117 1118 rc = iscsit_setup_scsi_cmd(conn, cmd, buf); 1119 if (rc < 0) 1120 return rc; 1121 1122 imm_data = cmd->immediate_data; 1123 imm_data_len = cmd->first_burst_len; 1124 unsol_data = cmd->unsolicited_data; 1125 data_len = cmd->se_cmd.data_length; 1126 1127 if (imm_data && imm_data_len == data_len) 1128 cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1129 rc = iscsit_process_scsi_cmd(conn, cmd, hdr); 1130 if (rc < 0) { 1131 return 0; 1132 } else if (rc > 0) { 1133 dump_payload = true; 1134 goto sequence_cmd; 1135 } 1136 1137 if (!imm_data) 1138 return 0; 1139 1140 if (imm_data_len != data_len) { 1141 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); 1142 sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents, 1143 &rx_desc->data[0], imm_data_len); 1144 isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n", 1145 sg_nents, imm_data_len); 1146 } else { 1147 sg_init_table(&isert_cmd->sg, 1); 1148 cmd->se_cmd.t_data_sg = &isert_cmd->sg; 1149 cmd->se_cmd.t_data_nents = 1; 1150 sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len); 1151 isert_dbg("Transfer Immediate imm_data_len: %d\n", 1152 imm_data_len); 1153 } 1154 1155 cmd->write_data_done += imm_data_len; 1156 1157 if (cmd->write_data_done == cmd->se_cmd.data_length) { 1158 spin_lock_bh(&cmd->istate_lock); 1159 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1160 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1161 spin_unlock_bh(&cmd->istate_lock); 1162 } 1163 1164 sequence_cmd: 1165 rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); 1166 1167 if (!rc && dump_payload == false && unsol_data) 1168 iscsit_set_unsoliticed_dataout(cmd); 1169 else if (dump_payload && imm_data) 1170 target_put_sess_cmd(&cmd->se_cmd); 1171 1172 return 0; 1173 } 1174 1175 static int 1176 isert_handle_iscsi_dataout(struct isert_conn *isert_conn, 1177 struct iser_rx_desc *rx_desc, unsigned char *buf) 1178 { 1179 struct scatterlist *sg_start; 1180 struct iscsi_conn *conn = isert_conn->conn; 1181 struct iscsi_cmd *cmd = NULL; 1182 struct iscsi_data *hdr = (struct iscsi_data *)buf; 1183 u32 unsol_data_len = ntoh24(hdr->dlength); 1184 int rc, sg_nents, sg_off, page_off; 1185 1186 rc = iscsit_check_dataout_hdr(conn, buf, &cmd); 1187 if (rc < 0) 1188 return rc; 1189 else if (!cmd) 1190 return 0; 1191 /* 1192 * FIXME: Unexpected unsolicited_data out 1193 */ 1194 if (!cmd->unsolicited_data) { 1195 isert_err("Received unexpected solicited data payload\n"); 1196 dump_stack(); 1197 return -1; 1198 } 1199 1200 isert_dbg("Unsolicited DataOut unsol_data_len: %u, " 1201 "write_data_done: %u, data_length: %u\n", 1202 unsol_data_len, cmd->write_data_done, 1203 cmd->se_cmd.data_length); 1204 1205 sg_off = cmd->write_data_done / PAGE_SIZE; 1206 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 1207 sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE)); 1208 page_off = cmd->write_data_done % PAGE_SIZE; 1209 /* 1210 * FIXME: Non page-aligned unsolicited_data out 1211 */ 1212 if (page_off) { 1213 isert_err("unexpected non-page aligned data payload\n"); 1214 dump_stack(); 1215 return -1; 1216 } 1217 isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u " 1218 "sg_nents: %u from %p %u\n", sg_start, sg_off, 1219 sg_nents, &rx_desc->data[0], unsol_data_len); 1220 1221 sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], 1222 unsol_data_len); 1223 1224 rc = iscsit_check_dataout_payload(cmd, hdr, false); 1225 if (rc < 0) 1226 return rc; 1227 1228 /* 1229 * multiple data-outs on the same command can arrive - 1230 * so post the buffer before hand 1231 */ 1232 rc = isert_post_recv(isert_conn, rx_desc); 1233 if (rc) { 1234 isert_err("ib_post_recv failed with %d\n", rc); 1235 return rc; 1236 } 1237 return 0; 1238 } 1239 1240 static int 1241 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1242 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, 1243 unsigned char *buf) 1244 { 1245 struct iscsi_conn *conn = isert_conn->conn; 1246 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf; 1247 int rc; 1248 1249 rc = iscsit_setup_nop_out(conn, cmd, hdr); 1250 if (rc < 0) 1251 return rc; 1252 /* 1253 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload 1254 */ 1255 1256 return iscsit_process_nop_out(conn, cmd, hdr); 1257 } 1258 1259 static int 1260 isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1261 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, 1262 struct iscsi_text *hdr) 1263 { 1264 struct iscsi_conn *conn = isert_conn->conn; 1265 u32 payload_length = ntoh24(hdr->dlength); 1266 int rc; 1267 unsigned char *text_in = NULL; 1268 1269 rc = iscsit_setup_text_cmd(conn, cmd, hdr); 1270 if (rc < 0) 1271 return rc; 1272 1273 if (payload_length) { 1274 text_in = kzalloc(payload_length, GFP_KERNEL); 1275 if (!text_in) 1276 return -ENOMEM; 1277 } 1278 cmd->text_in_ptr = text_in; 1279 1280 memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length); 1281 1282 return iscsit_process_text_cmd(conn, cmd, hdr); 1283 } 1284 1285 static int 1286 isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, 1287 uint32_t read_stag, uint64_t read_va, 1288 uint32_t write_stag, uint64_t write_va) 1289 { 1290 struct iscsi_hdr *hdr = &rx_desc->iscsi_header; 1291 struct iscsi_conn *conn = isert_conn->conn; 1292 struct iscsi_cmd *cmd; 1293 struct isert_cmd *isert_cmd; 1294 int ret = -EINVAL; 1295 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK); 1296 1297 if (conn->sess->sess_ops->SessionType && 1298 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { 1299 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery," 1300 " ignoring\n", opcode); 1301 return 0; 1302 } 1303 1304 switch (opcode) { 1305 case ISCSI_OP_SCSI_CMD: 1306 cmd = isert_allocate_cmd(conn, rx_desc); 1307 if (!cmd) 1308 break; 1309 1310 isert_cmd = iscsit_priv_cmd(cmd); 1311 isert_cmd->read_stag = read_stag; 1312 isert_cmd->read_va = read_va; 1313 isert_cmd->write_stag = write_stag; 1314 isert_cmd->write_va = write_va; 1315 isert_cmd->inv_rkey = read_stag ? read_stag : write_stag; 1316 1317 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd, 1318 rx_desc, (unsigned char *)hdr); 1319 break; 1320 case ISCSI_OP_NOOP_OUT: 1321 cmd = isert_allocate_cmd(conn, rx_desc); 1322 if (!cmd) 1323 break; 1324 1325 isert_cmd = iscsit_priv_cmd(cmd); 1326 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd, 1327 rx_desc, (unsigned char *)hdr); 1328 break; 1329 case ISCSI_OP_SCSI_DATA_OUT: 1330 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc, 1331 (unsigned char *)hdr); 1332 break; 1333 case ISCSI_OP_SCSI_TMFUNC: 1334 cmd = isert_allocate_cmd(conn, rx_desc); 1335 if (!cmd) 1336 break; 1337 1338 ret = iscsit_handle_task_mgt_cmd(conn, cmd, 1339 (unsigned char *)hdr); 1340 break; 1341 case ISCSI_OP_LOGOUT: 1342 cmd = isert_allocate_cmd(conn, rx_desc); 1343 if (!cmd) 1344 break; 1345 1346 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); 1347 break; 1348 case ISCSI_OP_TEXT: 1349 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) 1350 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); 1351 else 1352 cmd = isert_allocate_cmd(conn, rx_desc); 1353 1354 if (!cmd) 1355 break; 1356 1357 isert_cmd = iscsit_priv_cmd(cmd); 1358 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd, 1359 rx_desc, (struct iscsi_text *)hdr); 1360 break; 1361 default: 1362 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode); 1363 dump_stack(); 1364 break; 1365 } 1366 1367 return ret; 1368 } 1369 1370 static void 1371 isert_print_wc(struct ib_wc *wc, const char *type) 1372 { 1373 if (wc->status != IB_WC_WR_FLUSH_ERR) 1374 isert_err("%s failure: %s (%d) vend_err %x\n", type, 1375 ib_wc_status_msg(wc->status), wc->status, 1376 wc->vendor_err); 1377 else 1378 isert_dbg("%s failure: %s (%d)\n", type, 1379 ib_wc_status_msg(wc->status), wc->status); 1380 } 1381 1382 static void 1383 isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) 1384 { 1385 struct isert_conn *isert_conn = wc->qp->qp_context; 1386 struct ib_device *ib_dev = isert_conn->cm_id->device; 1387 struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe); 1388 struct iscsi_hdr *hdr = &rx_desc->iscsi_header; 1389 struct iser_ctrl *iser_ctrl = &rx_desc->iser_header; 1390 uint64_t read_va = 0, write_va = 0; 1391 uint32_t read_stag = 0, write_stag = 0; 1392 1393 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1394 isert_print_wc(wc, "recv"); 1395 if (wc->status != IB_WC_WR_FLUSH_ERR) 1396 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1397 return; 1398 } 1399 1400 ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr, 1401 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1402 1403 isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", 1404 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags, 1405 (int)(wc->byte_len - ISER_HEADERS_LEN)); 1406 1407 switch (iser_ctrl->flags & 0xF0) { 1408 case ISCSI_CTRL: 1409 if (iser_ctrl->flags & ISER_RSV) { 1410 read_stag = be32_to_cpu(iser_ctrl->read_stag); 1411 read_va = be64_to_cpu(iser_ctrl->read_va); 1412 isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n", 1413 read_stag, (unsigned long long)read_va); 1414 } 1415 if (iser_ctrl->flags & ISER_WSV) { 1416 write_stag = be32_to_cpu(iser_ctrl->write_stag); 1417 write_va = be64_to_cpu(iser_ctrl->write_va); 1418 isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n", 1419 write_stag, (unsigned long long)write_va); 1420 } 1421 1422 isert_dbg("ISER ISCSI_CTRL PDU\n"); 1423 break; 1424 case ISER_HELLO: 1425 isert_err("iSER Hello message\n"); 1426 break; 1427 default: 1428 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags); 1429 break; 1430 } 1431 1432 isert_rx_opcode(isert_conn, rx_desc, 1433 read_stag, read_va, write_stag, write_va); 1434 1435 ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr, 1436 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1437 } 1438 1439 static void 1440 isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) 1441 { 1442 struct isert_conn *isert_conn = wc->qp->qp_context; 1443 struct ib_device *ib_dev = isert_conn->cm_id->device; 1444 1445 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1446 isert_print_wc(wc, "login recv"); 1447 return; 1448 } 1449 1450 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma, 1451 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1452 1453 isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN; 1454 1455 if (isert_conn->conn) { 1456 struct iscsi_login *login = isert_conn->conn->conn_login; 1457 1458 if (login && !login->first_request) 1459 isert_rx_login_req(isert_conn); 1460 } 1461 1462 mutex_lock(&isert_conn->mutex); 1463 complete(&isert_conn->login_req_comp); 1464 mutex_unlock(&isert_conn->mutex); 1465 1466 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma, 1467 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1468 } 1469 1470 static void 1471 isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn) 1472 { 1473 struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd; 1474 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd); 1475 1476 if (!cmd->rw.nr_ops) 1477 return; 1478 1479 if (isert_prot_cmd(conn, se_cmd)) { 1480 rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp, 1481 conn->cm_id->port_num, se_cmd->t_data_sg, 1482 se_cmd->t_data_nents, se_cmd->t_prot_sg, 1483 se_cmd->t_prot_nents, dir); 1484 } else { 1485 rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num, 1486 se_cmd->t_data_sg, se_cmd->t_data_nents, dir); 1487 } 1488 1489 cmd->rw.nr_ops = 0; 1490 } 1491 1492 static void 1493 isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) 1494 { 1495 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1496 struct isert_conn *isert_conn = isert_cmd->conn; 1497 struct iscsi_conn *conn = isert_conn->conn; 1498 struct iscsi_text_rsp *hdr; 1499 1500 isert_dbg("Cmd %p\n", isert_cmd); 1501 1502 switch (cmd->iscsi_opcode) { 1503 case ISCSI_OP_SCSI_CMD: 1504 spin_lock_bh(&conn->cmd_lock); 1505 if (!list_empty(&cmd->i_conn_node)) 1506 list_del_init(&cmd->i_conn_node); 1507 spin_unlock_bh(&conn->cmd_lock); 1508 1509 if (cmd->data_direction == DMA_TO_DEVICE) { 1510 iscsit_stop_dataout_timer(cmd); 1511 /* 1512 * Check for special case during comp_err where 1513 * WRITE_PENDING has been handed off from core, 1514 * but requires an extra target_put_sess_cmd() 1515 * before transport_generic_free_cmd() below. 1516 */ 1517 if (comp_err && 1518 cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) { 1519 struct se_cmd *se_cmd = &cmd->se_cmd; 1520 1521 target_put_sess_cmd(se_cmd); 1522 } 1523 } 1524 1525 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1526 transport_generic_free_cmd(&cmd->se_cmd, 0); 1527 break; 1528 case ISCSI_OP_SCSI_TMFUNC: 1529 spin_lock_bh(&conn->cmd_lock); 1530 if (!list_empty(&cmd->i_conn_node)) 1531 list_del_init(&cmd->i_conn_node); 1532 spin_unlock_bh(&conn->cmd_lock); 1533 1534 transport_generic_free_cmd(&cmd->se_cmd, 0); 1535 break; 1536 case ISCSI_OP_REJECT: 1537 case ISCSI_OP_NOOP_OUT: 1538 case ISCSI_OP_TEXT: 1539 hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; 1540 /* If the continue bit is on, keep the command alive */ 1541 if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE) 1542 break; 1543 1544 spin_lock_bh(&conn->cmd_lock); 1545 if (!list_empty(&cmd->i_conn_node)) 1546 list_del_init(&cmd->i_conn_node); 1547 spin_unlock_bh(&conn->cmd_lock); 1548 1549 /* 1550 * Handle special case for REJECT when iscsi_add_reject*() has 1551 * overwritten the original iscsi_opcode assignment, and the 1552 * associated cmd->se_cmd needs to be released. 1553 */ 1554 if (cmd->se_cmd.se_tfo != NULL) { 1555 isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n", 1556 cmd->iscsi_opcode); 1557 transport_generic_free_cmd(&cmd->se_cmd, 0); 1558 break; 1559 } 1560 /* 1561 * Fall-through 1562 */ 1563 default: 1564 iscsit_release_cmd(cmd); 1565 break; 1566 } 1567 } 1568 1569 static void 1570 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) 1571 { 1572 if (tx_desc->dma_addr != 0) { 1573 isert_dbg("unmap single for tx_desc->dma_addr\n"); 1574 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr, 1575 ISER_HEADERS_LEN, DMA_TO_DEVICE); 1576 tx_desc->dma_addr = 0; 1577 } 1578 } 1579 1580 static void 1581 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, 1582 struct ib_device *ib_dev, bool comp_err) 1583 { 1584 if (isert_cmd->pdu_buf_dma != 0) { 1585 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n"); 1586 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma, 1587 isert_cmd->pdu_buf_len, DMA_TO_DEVICE); 1588 isert_cmd->pdu_buf_dma = 0; 1589 } 1590 1591 isert_unmap_tx_desc(tx_desc, ib_dev); 1592 isert_put_cmd(isert_cmd, comp_err); 1593 } 1594 1595 static int 1596 isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr) 1597 { 1598 struct ib_mr_status mr_status; 1599 int ret; 1600 1601 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); 1602 if (ret) { 1603 isert_err("ib_check_mr_status failed, ret %d\n", ret); 1604 goto fail_mr_status; 1605 } 1606 1607 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { 1608 u64 sec_offset_err; 1609 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8; 1610 1611 switch (mr_status.sig_err.err_type) { 1612 case IB_SIG_BAD_GUARD: 1613 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 1614 break; 1615 case IB_SIG_BAD_REFTAG: 1616 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 1617 break; 1618 case IB_SIG_BAD_APPTAG: 1619 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; 1620 break; 1621 } 1622 sec_offset_err = mr_status.sig_err.sig_err_offset; 1623 do_div(sec_offset_err, block_size); 1624 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba; 1625 1626 isert_err("PI error found type %d at sector 0x%llx " 1627 "expected 0x%x vs actual 0x%x\n", 1628 mr_status.sig_err.err_type, 1629 (unsigned long long)se_cmd->bad_sector, 1630 mr_status.sig_err.expected, 1631 mr_status.sig_err.actual); 1632 ret = 1; 1633 } 1634 1635 fail_mr_status: 1636 return ret; 1637 } 1638 1639 static void 1640 isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc) 1641 { 1642 struct isert_conn *isert_conn = wc->qp->qp_context; 1643 struct isert_device *device = isert_conn->device; 1644 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); 1645 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc); 1646 struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd; 1647 int ret = 0; 1648 1649 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1650 isert_print_wc(wc, "rdma write"); 1651 if (wc->status != IB_WC_WR_FLUSH_ERR) 1652 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1653 isert_completion_put(desc, isert_cmd, device->ib_device, true); 1654 return; 1655 } 1656 1657 isert_dbg("Cmd %p\n", isert_cmd); 1658 1659 ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr); 1660 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1661 1662 if (ret) 1663 transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0); 1664 else 1665 isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd); 1666 } 1667 1668 static void 1669 isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc) 1670 { 1671 struct isert_conn *isert_conn = wc->qp->qp_context; 1672 struct isert_device *device = isert_conn->device; 1673 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); 1674 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc); 1675 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1676 struct se_cmd *se_cmd = &cmd->se_cmd; 1677 int ret = 0; 1678 1679 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1680 isert_print_wc(wc, "rdma read"); 1681 if (wc->status != IB_WC_WR_FLUSH_ERR) 1682 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1683 isert_completion_put(desc, isert_cmd, device->ib_device, true); 1684 return; 1685 } 1686 1687 isert_dbg("Cmd %p\n", isert_cmd); 1688 1689 iscsit_stop_dataout_timer(cmd); 1690 1691 if (isert_prot_cmd(isert_conn, se_cmd)) 1692 ret = isert_check_pi_status(se_cmd, isert_cmd->rw.sig->sig_mr); 1693 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1694 cmd->write_data_done = 0; 1695 1696 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); 1697 spin_lock_bh(&cmd->istate_lock); 1698 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1699 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1700 spin_unlock_bh(&cmd->istate_lock); 1701 1702 if (ret) { 1703 target_put_sess_cmd(se_cmd); 1704 transport_send_check_condition_and_sense(se_cmd, 1705 se_cmd->pi_err, 0); 1706 } else { 1707 target_execute_cmd(se_cmd); 1708 } 1709 } 1710 1711 static void 1712 isert_do_control_comp(struct work_struct *work) 1713 { 1714 struct isert_cmd *isert_cmd = container_of(work, 1715 struct isert_cmd, comp_work); 1716 struct isert_conn *isert_conn = isert_cmd->conn; 1717 struct ib_device *ib_dev = isert_conn->cm_id->device; 1718 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1719 1720 isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state); 1721 1722 switch (cmd->i_state) { 1723 case ISTATE_SEND_TASKMGTRSP: 1724 iscsit_tmr_post_handler(cmd, cmd->conn); 1725 case ISTATE_SEND_REJECT: /* FALLTHRU */ 1726 case ISTATE_SEND_TEXTRSP: /* FALLTHRU */ 1727 cmd->i_state = ISTATE_SENT_STATUS; 1728 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, 1729 ib_dev, false); 1730 break; 1731 case ISTATE_SEND_LOGOUTRSP: 1732 iscsit_logout_post_handler(cmd, cmd->conn); 1733 break; 1734 default: 1735 isert_err("Unknown i_state %d\n", cmd->i_state); 1736 dump_stack(); 1737 break; 1738 } 1739 } 1740 1741 static void 1742 isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc) 1743 { 1744 struct isert_conn *isert_conn = wc->qp->qp_context; 1745 struct ib_device *ib_dev = isert_conn->cm_id->device; 1746 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); 1747 1748 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1749 isert_print_wc(wc, "login send"); 1750 if (wc->status != IB_WC_WR_FLUSH_ERR) 1751 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1752 } 1753 1754 isert_unmap_tx_desc(tx_desc, ib_dev); 1755 } 1756 1757 static void 1758 isert_send_done(struct ib_cq *cq, struct ib_wc *wc) 1759 { 1760 struct isert_conn *isert_conn = wc->qp->qp_context; 1761 struct ib_device *ib_dev = isert_conn->cm_id->device; 1762 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); 1763 struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc); 1764 1765 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1766 isert_print_wc(wc, "send"); 1767 if (wc->status != IB_WC_WR_FLUSH_ERR) 1768 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1769 isert_completion_put(tx_desc, isert_cmd, ib_dev, true); 1770 return; 1771 } 1772 1773 isert_dbg("Cmd %p\n", isert_cmd); 1774 1775 switch (isert_cmd->iscsi_cmd->i_state) { 1776 case ISTATE_SEND_TASKMGTRSP: 1777 case ISTATE_SEND_LOGOUTRSP: 1778 case ISTATE_SEND_REJECT: 1779 case ISTATE_SEND_TEXTRSP: 1780 isert_unmap_tx_desc(tx_desc, ib_dev); 1781 1782 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp); 1783 queue_work(isert_comp_wq, &isert_cmd->comp_work); 1784 return; 1785 default: 1786 isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS; 1787 isert_completion_put(tx_desc, isert_cmd, ib_dev, false); 1788 break; 1789 } 1790 } 1791 1792 static int 1793 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd) 1794 { 1795 struct ib_send_wr *wr_failed; 1796 int ret; 1797 1798 ret = isert_post_recv(isert_conn, isert_cmd->rx_desc); 1799 if (ret) { 1800 isert_err("ib_post_recv failed with %d\n", ret); 1801 return ret; 1802 } 1803 1804 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, 1805 &wr_failed); 1806 if (ret) { 1807 isert_err("ib_post_send failed with %d\n", ret); 1808 return ret; 1809 } 1810 return ret; 1811 } 1812 1813 static int 1814 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 1815 { 1816 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1817 struct isert_conn *isert_conn = conn->context; 1818 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1819 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *) 1820 &isert_cmd->tx_desc.iscsi_header; 1821 1822 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1823 iscsit_build_rsp_pdu(cmd, conn, true, hdr); 1824 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1825 /* 1826 * Attach SENSE DATA payload to iSCSI Response PDU 1827 */ 1828 if (cmd->se_cmd.sense_buffer && 1829 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || 1830 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { 1831 struct isert_device *device = isert_conn->device; 1832 struct ib_device *ib_dev = device->ib_device; 1833 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 1834 u32 padding, pdu_len; 1835 1836 put_unaligned_be16(cmd->se_cmd.scsi_sense_length, 1837 cmd->sense_buffer); 1838 cmd->se_cmd.scsi_sense_length += sizeof(__be16); 1839 1840 padding = -(cmd->se_cmd.scsi_sense_length) & 3; 1841 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length); 1842 pdu_len = cmd->se_cmd.scsi_sense_length + padding; 1843 1844 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 1845 (void *)cmd->sense_buffer, pdu_len, 1846 DMA_TO_DEVICE); 1847 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) 1848 return -ENOMEM; 1849 1850 isert_cmd->pdu_buf_len = pdu_len; 1851 tx_dsg->addr = isert_cmd->pdu_buf_dma; 1852 tx_dsg->length = pdu_len; 1853 tx_dsg->lkey = device->pd->local_dma_lkey; 1854 isert_cmd->tx_desc.num_sge = 2; 1855 } 1856 1857 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1858 1859 isert_dbg("Posting SCSI Response\n"); 1860 1861 return isert_post_response(isert_conn, isert_cmd); 1862 } 1863 1864 static void 1865 isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 1866 { 1867 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1868 struct isert_conn *isert_conn = conn->context; 1869 1870 spin_lock_bh(&conn->cmd_lock); 1871 if (!list_empty(&cmd->i_conn_node)) 1872 list_del_init(&cmd->i_conn_node); 1873 spin_unlock_bh(&conn->cmd_lock); 1874 1875 if (cmd->data_direction == DMA_TO_DEVICE) 1876 iscsit_stop_dataout_timer(cmd); 1877 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1878 } 1879 1880 static enum target_prot_op 1881 isert_get_sup_prot_ops(struct iscsi_conn *conn) 1882 { 1883 struct isert_conn *isert_conn = conn->context; 1884 struct isert_device *device = isert_conn->device; 1885 1886 if (conn->tpg->tpg_attrib.t10_pi) { 1887 if (device->pi_capable) { 1888 isert_info("conn %p PI offload enabled\n", isert_conn); 1889 isert_conn->pi_support = true; 1890 return TARGET_PROT_ALL; 1891 } 1892 } 1893 1894 isert_info("conn %p PI offload disabled\n", isert_conn); 1895 isert_conn->pi_support = false; 1896 1897 return TARGET_PROT_NORMAL; 1898 } 1899 1900 static int 1901 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 1902 bool nopout_response) 1903 { 1904 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1905 struct isert_conn *isert_conn = conn->context; 1906 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1907 1908 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1909 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *) 1910 &isert_cmd->tx_desc.iscsi_header, 1911 nopout_response); 1912 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1913 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1914 1915 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn); 1916 1917 return isert_post_response(isert_conn, isert_cmd); 1918 } 1919 1920 static int 1921 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1922 { 1923 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1924 struct isert_conn *isert_conn = conn->context; 1925 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1926 1927 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1928 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *) 1929 &isert_cmd->tx_desc.iscsi_header); 1930 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1931 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1932 1933 isert_dbg("conn %p Posting Logout Response\n", isert_conn); 1934 1935 return isert_post_response(isert_conn, isert_cmd); 1936 } 1937 1938 static int 1939 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1940 { 1941 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1942 struct isert_conn *isert_conn = conn->context; 1943 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1944 1945 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1946 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *) 1947 &isert_cmd->tx_desc.iscsi_header); 1948 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1949 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1950 1951 isert_dbg("conn %p Posting Task Management Response\n", isert_conn); 1952 1953 return isert_post_response(isert_conn, isert_cmd); 1954 } 1955 1956 static int 1957 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1958 { 1959 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1960 struct isert_conn *isert_conn = conn->context; 1961 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1962 struct isert_device *device = isert_conn->device; 1963 struct ib_device *ib_dev = device->ib_device; 1964 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 1965 struct iscsi_reject *hdr = 1966 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header; 1967 1968 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1969 iscsit_build_reject(cmd, conn, hdr); 1970 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1971 1972 hton24(hdr->dlength, ISCSI_HDR_LEN); 1973 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 1974 (void *)cmd->buf_ptr, ISCSI_HDR_LEN, 1975 DMA_TO_DEVICE); 1976 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) 1977 return -ENOMEM; 1978 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN; 1979 tx_dsg->addr = isert_cmd->pdu_buf_dma; 1980 tx_dsg->length = ISCSI_HDR_LEN; 1981 tx_dsg->lkey = device->pd->local_dma_lkey; 1982 isert_cmd->tx_desc.num_sge = 2; 1983 1984 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1985 1986 isert_dbg("conn %p Posting Reject\n", isert_conn); 1987 1988 return isert_post_response(isert_conn, isert_cmd); 1989 } 1990 1991 static int 1992 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1993 { 1994 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1995 struct isert_conn *isert_conn = conn->context; 1996 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1997 struct iscsi_text_rsp *hdr = 1998 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; 1999 u32 txt_rsp_len; 2000 int rc; 2001 2002 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2003 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND); 2004 if (rc < 0) 2005 return rc; 2006 2007 txt_rsp_len = rc; 2008 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2009 2010 if (txt_rsp_len) { 2011 struct isert_device *device = isert_conn->device; 2012 struct ib_device *ib_dev = device->ib_device; 2013 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 2014 void *txt_rsp_buf = cmd->buf_ptr; 2015 2016 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 2017 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE); 2018 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) 2019 return -ENOMEM; 2020 2021 isert_cmd->pdu_buf_len = txt_rsp_len; 2022 tx_dsg->addr = isert_cmd->pdu_buf_dma; 2023 tx_dsg->length = txt_rsp_len; 2024 tx_dsg->lkey = device->pd->local_dma_lkey; 2025 isert_cmd->tx_desc.num_sge = 2; 2026 } 2027 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 2028 2029 isert_dbg("conn %p Text Response\n", isert_conn); 2030 2031 return isert_post_response(isert_conn, isert_cmd); 2032 } 2033 2034 static inline void 2035 isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs, 2036 struct ib_sig_domain *domain) 2037 { 2038 domain->sig_type = IB_SIG_TYPE_T10_DIF; 2039 domain->sig.dif.bg_type = IB_T10DIF_CRC; 2040 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size; 2041 domain->sig.dif.ref_tag = se_cmd->reftag_seed; 2042 /* 2043 * At the moment we hard code those, but if in the future 2044 * the target core would like to use it, we will take it 2045 * from se_cmd. 2046 */ 2047 domain->sig.dif.apptag_check_mask = 0xffff; 2048 domain->sig.dif.app_escape = true; 2049 domain->sig.dif.ref_escape = true; 2050 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT || 2051 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT) 2052 domain->sig.dif.ref_remap = true; 2053 }; 2054 2055 static int 2056 isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs) 2057 { 2058 memset(sig_attrs, 0, sizeof(*sig_attrs)); 2059 2060 switch (se_cmd->prot_op) { 2061 case TARGET_PROT_DIN_INSERT: 2062 case TARGET_PROT_DOUT_STRIP: 2063 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE; 2064 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire); 2065 break; 2066 case TARGET_PROT_DOUT_INSERT: 2067 case TARGET_PROT_DIN_STRIP: 2068 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE; 2069 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem); 2070 break; 2071 case TARGET_PROT_DIN_PASS: 2072 case TARGET_PROT_DOUT_PASS: 2073 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire); 2074 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem); 2075 break; 2076 default: 2077 isert_err("Unsupported PI operation %d\n", se_cmd->prot_op); 2078 return -EINVAL; 2079 } 2080 2081 sig_attrs->check_mask = 2082 (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) | 2083 (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) | 2084 (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0); 2085 return 0; 2086 } 2087 2088 static int 2089 isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn, 2090 struct ib_cqe *cqe, struct ib_send_wr *chain_wr) 2091 { 2092 struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd; 2093 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd); 2094 u8 port_num = conn->cm_id->port_num; 2095 u64 addr; 2096 u32 rkey, offset; 2097 int ret; 2098 2099 if (dir == DMA_FROM_DEVICE) { 2100 addr = cmd->write_va; 2101 rkey = cmd->write_stag; 2102 offset = cmd->iscsi_cmd->write_data_done; 2103 } else { 2104 addr = cmd->read_va; 2105 rkey = cmd->read_stag; 2106 offset = 0; 2107 } 2108 2109 if (isert_prot_cmd(conn, se_cmd)) { 2110 struct ib_sig_attrs sig_attrs; 2111 2112 ret = isert_set_sig_attrs(se_cmd, &sig_attrs); 2113 if (ret) 2114 return ret; 2115 2116 WARN_ON_ONCE(offset); 2117 ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num, 2118 se_cmd->t_data_sg, se_cmd->t_data_nents, 2119 se_cmd->t_prot_sg, se_cmd->t_prot_nents, 2120 &sig_attrs, addr, rkey, dir); 2121 } else { 2122 ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num, 2123 se_cmd->t_data_sg, se_cmd->t_data_nents, 2124 offset, addr, rkey, dir); 2125 } 2126 if (ret < 0) { 2127 isert_err("Cmd: %p failed to prepare RDMA res\n", cmd); 2128 return ret; 2129 } 2130 2131 ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr); 2132 if (ret < 0) 2133 isert_err("Cmd: %p failed to post RDMA res\n", cmd); 2134 return ret; 2135 } 2136 2137 static int 2138 isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 2139 { 2140 struct se_cmd *se_cmd = &cmd->se_cmd; 2141 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2142 struct isert_conn *isert_conn = conn->context; 2143 struct ib_cqe *cqe = NULL; 2144 struct ib_send_wr *chain_wr = NULL; 2145 int rc; 2146 2147 isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n", 2148 isert_cmd, se_cmd->data_length); 2149 2150 if (isert_prot_cmd(isert_conn, se_cmd)) { 2151 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done; 2152 cqe = &isert_cmd->tx_desc.tx_cqe; 2153 } else { 2154 /* 2155 * Build isert_conn->tx_desc for iSCSI response PDU and attach 2156 */ 2157 isert_create_send_desc(isert_conn, isert_cmd, 2158 &isert_cmd->tx_desc); 2159 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *) 2160 &isert_cmd->tx_desc.iscsi_header); 2161 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2162 isert_init_send_wr(isert_conn, isert_cmd, 2163 &isert_cmd->tx_desc.send_wr); 2164 2165 rc = isert_post_recv(isert_conn, isert_cmd->rx_desc); 2166 if (rc) { 2167 isert_err("ib_post_recv failed with %d\n", rc); 2168 return rc; 2169 } 2170 2171 chain_wr = &isert_cmd->tx_desc.send_wr; 2172 } 2173 2174 isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr); 2175 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd); 2176 return 1; 2177 } 2178 2179 static int 2180 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) 2181 { 2182 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2183 2184 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", 2185 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done); 2186 2187 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done; 2188 isert_rdma_rw_ctx_post(isert_cmd, conn->context, 2189 &isert_cmd->tx_desc.tx_cqe, NULL); 2190 2191 isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", 2192 isert_cmd); 2193 return 0; 2194 } 2195 2196 static int 2197 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 2198 { 2199 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2200 int ret = 0; 2201 2202 switch (state) { 2203 case ISTATE_REMOVE: 2204 spin_lock_bh(&conn->cmd_lock); 2205 list_del_init(&cmd->i_conn_node); 2206 spin_unlock_bh(&conn->cmd_lock); 2207 isert_put_cmd(isert_cmd, true); 2208 break; 2209 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 2210 ret = isert_put_nopin(cmd, conn, false); 2211 break; 2212 default: 2213 isert_err("Unknown immediate state: 0x%02x\n", state); 2214 ret = -EINVAL; 2215 break; 2216 } 2217 2218 return ret; 2219 } 2220 2221 static int 2222 isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 2223 { 2224 struct isert_conn *isert_conn = conn->context; 2225 int ret; 2226 2227 switch (state) { 2228 case ISTATE_SEND_LOGOUTRSP: 2229 ret = isert_put_logout_rsp(cmd, conn); 2230 if (!ret) 2231 isert_conn->logout_posted = true; 2232 break; 2233 case ISTATE_SEND_NOPIN: 2234 ret = isert_put_nopin(cmd, conn, true); 2235 break; 2236 case ISTATE_SEND_TASKMGTRSP: 2237 ret = isert_put_tm_rsp(cmd, conn); 2238 break; 2239 case ISTATE_SEND_REJECT: 2240 ret = isert_put_reject(cmd, conn); 2241 break; 2242 case ISTATE_SEND_TEXTRSP: 2243 ret = isert_put_text_rsp(cmd, conn); 2244 break; 2245 case ISTATE_SEND_STATUS: 2246 /* 2247 * Special case for sending non GOOD SCSI status from TX thread 2248 * context during pre se_cmd excecution failure. 2249 */ 2250 ret = isert_put_response(conn, cmd); 2251 break; 2252 default: 2253 isert_err("Unknown response state: 0x%02x\n", state); 2254 ret = -EINVAL; 2255 break; 2256 } 2257 2258 return ret; 2259 } 2260 2261 struct rdma_cm_id * 2262 isert_setup_id(struct isert_np *isert_np) 2263 { 2264 struct iscsi_np *np = isert_np->np; 2265 struct rdma_cm_id *id; 2266 struct sockaddr *sa; 2267 int ret; 2268 2269 sa = (struct sockaddr *)&np->np_sockaddr; 2270 isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa); 2271 2272 id = rdma_create_id(&init_net, isert_cma_handler, isert_np, 2273 RDMA_PS_TCP, IB_QPT_RC); 2274 if (IS_ERR(id)) { 2275 isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id)); 2276 ret = PTR_ERR(id); 2277 goto out; 2278 } 2279 isert_dbg("id %p context %p\n", id, id->context); 2280 2281 ret = rdma_bind_addr(id, sa); 2282 if (ret) { 2283 isert_err("rdma_bind_addr() failed: %d\n", ret); 2284 goto out_id; 2285 } 2286 2287 ret = rdma_listen(id, 0); 2288 if (ret) { 2289 isert_err("rdma_listen() failed: %d\n", ret); 2290 goto out_id; 2291 } 2292 2293 return id; 2294 out_id: 2295 rdma_destroy_id(id); 2296 out: 2297 return ERR_PTR(ret); 2298 } 2299 2300 static int 2301 isert_setup_np(struct iscsi_np *np, 2302 struct sockaddr_storage *ksockaddr) 2303 { 2304 struct isert_np *isert_np; 2305 struct rdma_cm_id *isert_lid; 2306 int ret; 2307 2308 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL); 2309 if (!isert_np) 2310 return -ENOMEM; 2311 2312 sema_init(&isert_np->sem, 0); 2313 mutex_init(&isert_np->mutex); 2314 INIT_LIST_HEAD(&isert_np->accepted); 2315 INIT_LIST_HEAD(&isert_np->pending); 2316 isert_np->np = np; 2317 2318 /* 2319 * Setup the np->np_sockaddr from the passed sockaddr setup 2320 * in iscsi_target_configfs.c code.. 2321 */ 2322 memcpy(&np->np_sockaddr, ksockaddr, 2323 sizeof(struct sockaddr_storage)); 2324 2325 isert_lid = isert_setup_id(isert_np); 2326 if (IS_ERR(isert_lid)) { 2327 ret = PTR_ERR(isert_lid); 2328 goto out; 2329 } 2330 2331 isert_np->cm_id = isert_lid; 2332 np->np_context = isert_np; 2333 2334 return 0; 2335 2336 out: 2337 kfree(isert_np); 2338 2339 return ret; 2340 } 2341 2342 static int 2343 isert_rdma_accept(struct isert_conn *isert_conn) 2344 { 2345 struct rdma_cm_id *cm_id = isert_conn->cm_id; 2346 struct rdma_conn_param cp; 2347 int ret; 2348 struct iser_cm_hdr rsp_hdr; 2349 2350 memset(&cp, 0, sizeof(struct rdma_conn_param)); 2351 cp.initiator_depth = isert_conn->initiator_depth; 2352 cp.retry_count = 7; 2353 cp.rnr_retry_count = 7; 2354 2355 memset(&rsp_hdr, 0, sizeof(rsp_hdr)); 2356 rsp_hdr.flags = ISERT_ZBVA_NOT_USED; 2357 if (!isert_conn->snd_w_inv) 2358 rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED; 2359 cp.private_data = (void *)&rsp_hdr; 2360 cp.private_data_len = sizeof(rsp_hdr); 2361 2362 ret = rdma_accept(cm_id, &cp); 2363 if (ret) { 2364 isert_err("rdma_accept() failed with: %d\n", ret); 2365 return ret; 2366 } 2367 2368 return 0; 2369 } 2370 2371 static int 2372 isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) 2373 { 2374 struct isert_conn *isert_conn = conn->context; 2375 int ret; 2376 2377 isert_info("before login_req comp conn: %p\n", isert_conn); 2378 ret = wait_for_completion_interruptible(&isert_conn->login_req_comp); 2379 if (ret) { 2380 isert_err("isert_conn %p interrupted before got login req\n", 2381 isert_conn); 2382 return ret; 2383 } 2384 reinit_completion(&isert_conn->login_req_comp); 2385 2386 /* 2387 * For login requests after the first PDU, isert_rx_login_req() will 2388 * kick schedule_delayed_work(&conn->login_work) as the packet is 2389 * received, which turns this callback from iscsi_target_do_login_rx() 2390 * into a NOP. 2391 */ 2392 if (!login->first_request) 2393 return 0; 2394 2395 isert_rx_login_req(isert_conn); 2396 2397 isert_info("before login_comp conn: %p\n", conn); 2398 ret = wait_for_completion_interruptible(&isert_conn->login_comp); 2399 if (ret) 2400 return ret; 2401 2402 isert_info("processing login->req: %p\n", login->req); 2403 2404 return 0; 2405 } 2406 2407 static void 2408 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, 2409 struct isert_conn *isert_conn) 2410 { 2411 struct rdma_cm_id *cm_id = isert_conn->cm_id; 2412 struct rdma_route *cm_route = &cm_id->route; 2413 2414 conn->login_family = np->np_sockaddr.ss_family; 2415 2416 conn->login_sockaddr = cm_route->addr.dst_addr; 2417 conn->local_sockaddr = cm_route->addr.src_addr; 2418 } 2419 2420 static int 2421 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) 2422 { 2423 struct isert_np *isert_np = np->np_context; 2424 struct isert_conn *isert_conn; 2425 int ret; 2426 2427 accept_wait: 2428 ret = down_interruptible(&isert_np->sem); 2429 if (ret) 2430 return -ENODEV; 2431 2432 spin_lock_bh(&np->np_thread_lock); 2433 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { 2434 spin_unlock_bh(&np->np_thread_lock); 2435 isert_dbg("np_thread_state %d\n", 2436 np->np_thread_state); 2437 /** 2438 * No point in stalling here when np_thread 2439 * is in state RESET/SHUTDOWN/EXIT - bail 2440 **/ 2441 return -ENODEV; 2442 } 2443 spin_unlock_bh(&np->np_thread_lock); 2444 2445 mutex_lock(&isert_np->mutex); 2446 if (list_empty(&isert_np->pending)) { 2447 mutex_unlock(&isert_np->mutex); 2448 goto accept_wait; 2449 } 2450 isert_conn = list_first_entry(&isert_np->pending, 2451 struct isert_conn, node); 2452 list_del_init(&isert_conn->node); 2453 mutex_unlock(&isert_np->mutex); 2454 2455 conn->context = isert_conn; 2456 isert_conn->conn = conn; 2457 isert_conn->state = ISER_CONN_BOUND; 2458 2459 isert_set_conn_info(np, conn, isert_conn); 2460 2461 isert_dbg("Processing isert_conn: %p\n", isert_conn); 2462 2463 return 0; 2464 } 2465 2466 static void 2467 isert_free_np(struct iscsi_np *np) 2468 { 2469 struct isert_np *isert_np = np->np_context; 2470 struct isert_conn *isert_conn, *n; 2471 2472 if (isert_np->cm_id) 2473 rdma_destroy_id(isert_np->cm_id); 2474 2475 /* 2476 * FIXME: At this point we don't have a good way to insure 2477 * that at this point we don't have hanging connections that 2478 * completed RDMA establishment but didn't start iscsi login 2479 * process. So work-around this by cleaning up what ever piled 2480 * up in accepted and pending lists. 2481 */ 2482 mutex_lock(&isert_np->mutex); 2483 if (!list_empty(&isert_np->pending)) { 2484 isert_info("Still have isert pending connections\n"); 2485 list_for_each_entry_safe(isert_conn, n, 2486 &isert_np->pending, 2487 node) { 2488 isert_info("cleaning isert_conn %p state (%d)\n", 2489 isert_conn, isert_conn->state); 2490 isert_connect_release(isert_conn); 2491 } 2492 } 2493 2494 if (!list_empty(&isert_np->accepted)) { 2495 isert_info("Still have isert accepted connections\n"); 2496 list_for_each_entry_safe(isert_conn, n, 2497 &isert_np->accepted, 2498 node) { 2499 isert_info("cleaning isert_conn %p state (%d)\n", 2500 isert_conn, isert_conn->state); 2501 isert_connect_release(isert_conn); 2502 } 2503 } 2504 mutex_unlock(&isert_np->mutex); 2505 2506 np->np_context = NULL; 2507 kfree(isert_np); 2508 } 2509 2510 static void isert_release_work(struct work_struct *work) 2511 { 2512 struct isert_conn *isert_conn = container_of(work, 2513 struct isert_conn, 2514 release_work); 2515 2516 isert_info("Starting release conn %p\n", isert_conn); 2517 2518 mutex_lock(&isert_conn->mutex); 2519 isert_conn->state = ISER_CONN_DOWN; 2520 mutex_unlock(&isert_conn->mutex); 2521 2522 isert_info("Destroying conn %p\n", isert_conn); 2523 isert_put_conn(isert_conn); 2524 } 2525 2526 static void 2527 isert_wait4logout(struct isert_conn *isert_conn) 2528 { 2529 struct iscsi_conn *conn = isert_conn->conn; 2530 2531 isert_info("conn %p\n", isert_conn); 2532 2533 if (isert_conn->logout_posted) { 2534 isert_info("conn %p wait for conn_logout_comp\n", isert_conn); 2535 wait_for_completion_timeout(&conn->conn_logout_comp, 2536 SECONDS_FOR_LOGOUT_COMP * HZ); 2537 } 2538 } 2539 2540 static void 2541 isert_wait4cmds(struct iscsi_conn *conn) 2542 { 2543 isert_info("iscsi_conn %p\n", conn); 2544 2545 if (conn->sess) { 2546 target_sess_cmd_list_set_waiting(conn->sess->se_sess); 2547 target_wait_for_sess_cmds(conn->sess->se_sess); 2548 } 2549 } 2550 2551 /** 2552 * isert_put_unsol_pending_cmds() - Drop commands waiting for 2553 * unsolicitate dataout 2554 * @conn: iscsi connection 2555 * 2556 * We might still have commands that are waiting for unsolicited 2557 * dataouts messages. We must put the extra reference on those 2558 * before blocking on the target_wait_for_session_cmds 2559 */ 2560 static void 2561 isert_put_unsol_pending_cmds(struct iscsi_conn *conn) 2562 { 2563 struct iscsi_cmd *cmd, *tmp; 2564 static LIST_HEAD(drop_cmd_list); 2565 2566 spin_lock_bh(&conn->cmd_lock); 2567 list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) { 2568 if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) && 2569 (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) && 2570 (cmd->write_data_done < cmd->se_cmd.data_length)) 2571 list_move_tail(&cmd->i_conn_node, &drop_cmd_list); 2572 } 2573 spin_unlock_bh(&conn->cmd_lock); 2574 2575 list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) { 2576 list_del_init(&cmd->i_conn_node); 2577 if (cmd->i_state != ISTATE_REMOVE) { 2578 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2579 2580 isert_info("conn %p dropping cmd %p\n", conn, cmd); 2581 isert_put_cmd(isert_cmd, true); 2582 } 2583 } 2584 } 2585 2586 static void isert_wait_conn(struct iscsi_conn *conn) 2587 { 2588 struct isert_conn *isert_conn = conn->context; 2589 2590 isert_info("Starting conn %p\n", isert_conn); 2591 2592 mutex_lock(&isert_conn->mutex); 2593 isert_conn_terminate(isert_conn); 2594 mutex_unlock(&isert_conn->mutex); 2595 2596 ib_drain_qp(isert_conn->qp); 2597 isert_put_unsol_pending_cmds(conn); 2598 isert_wait4cmds(conn); 2599 isert_wait4logout(isert_conn); 2600 2601 queue_work(isert_release_wq, &isert_conn->release_work); 2602 } 2603 2604 static void isert_free_conn(struct iscsi_conn *conn) 2605 { 2606 struct isert_conn *isert_conn = conn->context; 2607 2608 ib_drain_qp(isert_conn->qp); 2609 isert_put_conn(isert_conn); 2610 } 2611 2612 static void isert_get_rx_pdu(struct iscsi_conn *conn) 2613 { 2614 struct completion comp; 2615 2616 init_completion(&comp); 2617 2618 wait_for_completion_interruptible(&comp); 2619 } 2620 2621 static struct iscsit_transport iser_target_transport = { 2622 .name = "IB/iSER", 2623 .transport_type = ISCSI_INFINIBAND, 2624 .rdma_shutdown = true, 2625 .priv_size = sizeof(struct isert_cmd), 2626 .owner = THIS_MODULE, 2627 .iscsit_setup_np = isert_setup_np, 2628 .iscsit_accept_np = isert_accept_np, 2629 .iscsit_free_np = isert_free_np, 2630 .iscsit_wait_conn = isert_wait_conn, 2631 .iscsit_free_conn = isert_free_conn, 2632 .iscsit_get_login_rx = isert_get_login_rx, 2633 .iscsit_put_login_tx = isert_put_login_tx, 2634 .iscsit_immediate_queue = isert_immediate_queue, 2635 .iscsit_response_queue = isert_response_queue, 2636 .iscsit_get_dataout = isert_get_dataout, 2637 .iscsit_queue_data_in = isert_put_datain, 2638 .iscsit_queue_status = isert_put_response, 2639 .iscsit_aborted_task = isert_aborted_task, 2640 .iscsit_get_rx_pdu = isert_get_rx_pdu, 2641 .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops, 2642 }; 2643 2644 static int __init isert_init(void) 2645 { 2646 int ret; 2647 2648 isert_comp_wq = alloc_workqueue("isert_comp_wq", 2649 WQ_UNBOUND | WQ_HIGHPRI, 0); 2650 if (!isert_comp_wq) { 2651 isert_err("Unable to allocate isert_comp_wq\n"); 2652 return -ENOMEM; 2653 } 2654 2655 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND, 2656 WQ_UNBOUND_MAX_ACTIVE); 2657 if (!isert_release_wq) { 2658 isert_err("Unable to allocate isert_release_wq\n"); 2659 ret = -ENOMEM; 2660 goto destroy_comp_wq; 2661 } 2662 2663 iscsit_register_transport(&iser_target_transport); 2664 isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n"); 2665 2666 return 0; 2667 2668 destroy_comp_wq: 2669 destroy_workqueue(isert_comp_wq); 2670 2671 return ret; 2672 } 2673 2674 static void __exit isert_exit(void) 2675 { 2676 flush_scheduled_work(); 2677 destroy_workqueue(isert_release_wq); 2678 destroy_workqueue(isert_comp_wq); 2679 iscsit_unregister_transport(&iser_target_transport); 2680 isert_info("iSER_TARGET[0] - Released iser_target_transport\n"); 2681 } 2682 2683 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); 2684 MODULE_VERSION("1.0"); 2685 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 2686 MODULE_LICENSE("GPL"); 2687 2688 module_init(isert_init); 2689 module_exit(isert_exit); 2690