1 /******************************************************************************* 2 * This file contains iSCSI extentions for RDMA (iSER) Verbs 3 * 4 * (c) Copyright 2013 Datera, Inc. 5 * 6 * Nicholas A. Bellinger <nab@linux-iscsi.org> 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 ****************************************************************************/ 18 19 #include <linux/string.h> 20 #include <linux/module.h> 21 #include <linux/scatterlist.h> 22 #include <linux/socket.h> 23 #include <linux/in.h> 24 #include <linux/in6.h> 25 #include <rdma/ib_verbs.h> 26 #include <rdma/rdma_cm.h> 27 #include <target/target_core_base.h> 28 #include <target/target_core_fabric.h> 29 #include <target/iscsi/iscsi_transport.h> 30 #include <linux/semaphore.h> 31 32 #include "ib_isert.h" 33 34 #define ISERT_MAX_CONN 8 35 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN) 36 #define ISER_MAX_TX_CQ_LEN \ 37 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN) 38 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \ 39 ISERT_MAX_CONN) 40 41 static int isert_debug_level; 42 module_param_named(debug_level, isert_debug_level, int, 0644); 43 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)"); 44 45 static DEFINE_MUTEX(device_list_mutex); 46 static LIST_HEAD(device_list); 47 static struct workqueue_struct *isert_comp_wq; 48 static struct workqueue_struct *isert_release_wq; 49 50 static int 51 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd); 52 static int 53 isert_login_post_recv(struct isert_conn *isert_conn); 54 static int 55 isert_rdma_accept(struct isert_conn *isert_conn); 56 struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); 57 58 static void isert_release_work(struct work_struct *work); 59 static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc); 60 static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc); 61 static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc); 62 static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc); 63 64 static inline bool 65 isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd) 66 { 67 return (conn->pi_support && 68 cmd->prot_op != TARGET_PROT_NORMAL); 69 } 70 71 72 static void 73 isert_qp_event_callback(struct ib_event *e, void *context) 74 { 75 struct isert_conn *isert_conn = context; 76 77 isert_err("%s (%d): conn %p\n", 78 ib_event_msg(e->event), e->event, isert_conn); 79 80 switch (e->event) { 81 case IB_EVENT_COMM_EST: 82 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST); 83 break; 84 case IB_EVENT_QP_LAST_WQE_REACHED: 85 isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n"); 86 break; 87 default: 88 break; 89 } 90 } 91 92 static struct isert_comp * 93 isert_comp_get(struct isert_conn *isert_conn) 94 { 95 struct isert_device *device = isert_conn->device; 96 struct isert_comp *comp; 97 int i, min = 0; 98 99 mutex_lock(&device_list_mutex); 100 for (i = 0; i < device->comps_used; i++) 101 if (device->comps[i].active_qps < 102 device->comps[min].active_qps) 103 min = i; 104 comp = &device->comps[min]; 105 comp->active_qps++; 106 mutex_unlock(&device_list_mutex); 107 108 isert_info("conn %p, using comp %p min_index: %d\n", 109 isert_conn, comp, min); 110 111 return comp; 112 } 113 114 static void 115 isert_comp_put(struct isert_comp *comp) 116 { 117 mutex_lock(&device_list_mutex); 118 comp->active_qps--; 119 mutex_unlock(&device_list_mutex); 120 } 121 122 static struct ib_qp * 123 isert_create_qp(struct isert_conn *isert_conn, 124 struct isert_comp *comp, 125 struct rdma_cm_id *cma_id) 126 { 127 struct isert_device *device = isert_conn->device; 128 struct ib_qp_init_attr attr; 129 int ret; 130 131 memset(&attr, 0, sizeof(struct ib_qp_init_attr)); 132 attr.event_handler = isert_qp_event_callback; 133 attr.qp_context = isert_conn; 134 attr.send_cq = comp->cq; 135 attr.recv_cq = comp->cq; 136 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1; 137 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1; 138 attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX; 139 attr.cap.max_send_sge = device->ib_device->attrs.max_sge; 140 attr.cap.max_recv_sge = 1; 141 attr.sq_sig_type = IB_SIGNAL_REQ_WR; 142 attr.qp_type = IB_QPT_RC; 143 if (device->pi_capable) 144 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; 145 146 ret = rdma_create_qp(cma_id, device->pd, &attr); 147 if (ret) { 148 isert_err("rdma_create_qp failed for cma_id %d\n", ret); 149 return ERR_PTR(ret); 150 } 151 152 return cma_id->qp; 153 } 154 155 static int 156 isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) 157 { 158 struct isert_comp *comp; 159 int ret; 160 161 comp = isert_comp_get(isert_conn); 162 isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id); 163 if (IS_ERR(isert_conn->qp)) { 164 ret = PTR_ERR(isert_conn->qp); 165 goto err; 166 } 167 168 return 0; 169 err: 170 isert_comp_put(comp); 171 return ret; 172 } 173 174 static int 175 isert_alloc_rx_descriptors(struct isert_conn *isert_conn) 176 { 177 struct isert_device *device = isert_conn->device; 178 struct ib_device *ib_dev = device->ib_device; 179 struct iser_rx_desc *rx_desc; 180 struct ib_sge *rx_sg; 181 u64 dma_addr; 182 int i, j; 183 184 isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS * 185 sizeof(struct iser_rx_desc), GFP_KERNEL); 186 if (!isert_conn->rx_descs) 187 goto fail; 188 189 rx_desc = isert_conn->rx_descs; 190 191 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 192 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc, 193 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 194 if (ib_dma_mapping_error(ib_dev, dma_addr)) 195 goto dma_map_fail; 196 197 rx_desc->dma_addr = dma_addr; 198 199 rx_sg = &rx_desc->rx_sg; 200 rx_sg->addr = rx_desc->dma_addr; 201 rx_sg->length = ISER_RX_PAYLOAD_SIZE; 202 rx_sg->lkey = device->pd->local_dma_lkey; 203 rx_desc->rx_cqe.done = isert_recv_done; 204 } 205 206 return 0; 207 208 dma_map_fail: 209 rx_desc = isert_conn->rx_descs; 210 for (j = 0; j < i; j++, rx_desc++) { 211 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 212 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 213 } 214 kfree(isert_conn->rx_descs); 215 isert_conn->rx_descs = NULL; 216 fail: 217 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn); 218 219 return -ENOMEM; 220 } 221 222 static void 223 isert_free_rx_descriptors(struct isert_conn *isert_conn) 224 { 225 struct ib_device *ib_dev = isert_conn->device->ib_device; 226 struct iser_rx_desc *rx_desc; 227 int i; 228 229 if (!isert_conn->rx_descs) 230 return; 231 232 rx_desc = isert_conn->rx_descs; 233 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 234 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 235 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 236 } 237 238 kfree(isert_conn->rx_descs); 239 isert_conn->rx_descs = NULL; 240 } 241 242 static void 243 isert_free_comps(struct isert_device *device) 244 { 245 int i; 246 247 for (i = 0; i < device->comps_used; i++) { 248 struct isert_comp *comp = &device->comps[i]; 249 250 if (comp->cq) 251 ib_free_cq(comp->cq); 252 } 253 kfree(device->comps); 254 } 255 256 static int 257 isert_alloc_comps(struct isert_device *device) 258 { 259 int i, max_cqe, ret = 0; 260 261 device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(), 262 device->ib_device->num_comp_vectors)); 263 264 isert_info("Using %d CQs, %s supports %d vectors support " 265 "pi_capable %d\n", 266 device->comps_used, device->ib_device->name, 267 device->ib_device->num_comp_vectors, 268 device->pi_capable); 269 270 device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp), 271 GFP_KERNEL); 272 if (!device->comps) { 273 isert_err("Unable to allocate completion contexts\n"); 274 return -ENOMEM; 275 } 276 277 max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe); 278 279 for (i = 0; i < device->comps_used; i++) { 280 struct isert_comp *comp = &device->comps[i]; 281 282 comp->device = device; 283 comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i, 284 IB_POLL_WORKQUEUE); 285 if (IS_ERR(comp->cq)) { 286 isert_err("Unable to allocate cq\n"); 287 ret = PTR_ERR(comp->cq); 288 comp->cq = NULL; 289 goto out_cq; 290 } 291 } 292 293 return 0; 294 out_cq: 295 isert_free_comps(device); 296 return ret; 297 } 298 299 static int 300 isert_create_device_ib_res(struct isert_device *device) 301 { 302 struct ib_device *ib_dev = device->ib_device; 303 int ret; 304 305 isert_dbg("devattr->max_sge: %d\n", ib_dev->attrs.max_sge); 306 isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd); 307 308 ret = isert_alloc_comps(device); 309 if (ret) 310 goto out; 311 312 device->pd = ib_alloc_pd(ib_dev, 0); 313 if (IS_ERR(device->pd)) { 314 ret = PTR_ERR(device->pd); 315 isert_err("failed to allocate pd, device %p, ret=%d\n", 316 device, ret); 317 goto out_cq; 318 } 319 320 /* Check signature cap */ 321 device->pi_capable = ib_dev->attrs.device_cap_flags & 322 IB_DEVICE_SIGNATURE_HANDOVER ? true : false; 323 324 return 0; 325 326 out_cq: 327 isert_free_comps(device); 328 out: 329 if (ret > 0) 330 ret = -EINVAL; 331 return ret; 332 } 333 334 static void 335 isert_free_device_ib_res(struct isert_device *device) 336 { 337 isert_info("device %p\n", device); 338 339 ib_dealloc_pd(device->pd); 340 isert_free_comps(device); 341 } 342 343 static void 344 isert_device_put(struct isert_device *device) 345 { 346 mutex_lock(&device_list_mutex); 347 device->refcount--; 348 isert_info("device %p refcount %d\n", device, device->refcount); 349 if (!device->refcount) { 350 isert_free_device_ib_res(device); 351 list_del(&device->dev_node); 352 kfree(device); 353 } 354 mutex_unlock(&device_list_mutex); 355 } 356 357 static struct isert_device * 358 isert_device_get(struct rdma_cm_id *cma_id) 359 { 360 struct isert_device *device; 361 int ret; 362 363 mutex_lock(&device_list_mutex); 364 list_for_each_entry(device, &device_list, dev_node) { 365 if (device->ib_device->node_guid == cma_id->device->node_guid) { 366 device->refcount++; 367 isert_info("Found iser device %p refcount %d\n", 368 device, device->refcount); 369 mutex_unlock(&device_list_mutex); 370 return device; 371 } 372 } 373 374 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL); 375 if (!device) { 376 mutex_unlock(&device_list_mutex); 377 return ERR_PTR(-ENOMEM); 378 } 379 380 INIT_LIST_HEAD(&device->dev_node); 381 382 device->ib_device = cma_id->device; 383 ret = isert_create_device_ib_res(device); 384 if (ret) { 385 kfree(device); 386 mutex_unlock(&device_list_mutex); 387 return ERR_PTR(ret); 388 } 389 390 device->refcount++; 391 list_add_tail(&device->dev_node, &device_list); 392 isert_info("Created a new iser device %p refcount %d\n", 393 device, device->refcount); 394 mutex_unlock(&device_list_mutex); 395 396 return device; 397 } 398 399 static void 400 isert_init_conn(struct isert_conn *isert_conn) 401 { 402 isert_conn->state = ISER_CONN_INIT; 403 INIT_LIST_HEAD(&isert_conn->node); 404 init_completion(&isert_conn->login_comp); 405 init_completion(&isert_conn->login_req_comp); 406 init_waitqueue_head(&isert_conn->rem_wait); 407 kref_init(&isert_conn->kref); 408 mutex_init(&isert_conn->mutex); 409 INIT_WORK(&isert_conn->release_work, isert_release_work); 410 } 411 412 static void 413 isert_free_login_buf(struct isert_conn *isert_conn) 414 { 415 struct ib_device *ib_dev = isert_conn->device->ib_device; 416 417 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, 418 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); 419 kfree(isert_conn->login_rsp_buf); 420 421 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 422 ISER_RX_PAYLOAD_SIZE, 423 DMA_FROM_DEVICE); 424 kfree(isert_conn->login_req_buf); 425 } 426 427 static int 428 isert_alloc_login_buf(struct isert_conn *isert_conn, 429 struct ib_device *ib_dev) 430 { 431 int ret; 432 433 isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf), 434 GFP_KERNEL); 435 if (!isert_conn->login_req_buf) { 436 isert_err("Unable to allocate isert_conn->login_buf\n"); 437 return -ENOMEM; 438 } 439 440 isert_conn->login_req_dma = ib_dma_map_single(ib_dev, 441 isert_conn->login_req_buf, 442 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 443 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); 444 if (ret) { 445 isert_err("login_req_dma mapping error: %d\n", ret); 446 isert_conn->login_req_dma = 0; 447 goto out_free_login_req_buf; 448 } 449 450 isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL); 451 if (!isert_conn->login_rsp_buf) { 452 ret = -ENOMEM; 453 goto out_unmap_login_req_buf; 454 } 455 456 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, 457 isert_conn->login_rsp_buf, 458 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); 459 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); 460 if (ret) { 461 isert_err("login_rsp_dma mapping error: %d\n", ret); 462 isert_conn->login_rsp_dma = 0; 463 goto out_free_login_rsp_buf; 464 } 465 466 return 0; 467 468 out_free_login_rsp_buf: 469 kfree(isert_conn->login_rsp_buf); 470 out_unmap_login_req_buf: 471 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 472 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 473 out_free_login_req_buf: 474 kfree(isert_conn->login_req_buf); 475 return ret; 476 } 477 478 static void 479 isert_set_nego_params(struct isert_conn *isert_conn, 480 struct rdma_conn_param *param) 481 { 482 struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs; 483 484 /* Set max inflight RDMA READ requests */ 485 isert_conn->initiator_depth = min_t(u8, param->initiator_depth, 486 attr->max_qp_init_rd_atom); 487 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth); 488 489 if (param->private_data) { 490 u8 flags = *(u8 *)param->private_data; 491 492 /* 493 * use remote invalidation if the both initiator 494 * and the HCA support it 495 */ 496 isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) && 497 (attr->device_cap_flags & 498 IB_DEVICE_MEM_MGT_EXTENSIONS); 499 if (isert_conn->snd_w_inv) 500 isert_info("Using remote invalidation\n"); 501 } 502 } 503 504 static int 505 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 506 { 507 struct isert_np *isert_np = cma_id->context; 508 struct iscsi_np *np = isert_np->np; 509 struct isert_conn *isert_conn; 510 struct isert_device *device; 511 int ret = 0; 512 513 spin_lock_bh(&np->np_thread_lock); 514 if (!np->enabled) { 515 spin_unlock_bh(&np->np_thread_lock); 516 isert_dbg("iscsi_np is not enabled, reject connect request\n"); 517 return rdma_reject(cma_id, NULL, 0); 518 } 519 spin_unlock_bh(&np->np_thread_lock); 520 521 isert_dbg("cma_id: %p, portal: %p\n", 522 cma_id, cma_id->context); 523 524 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL); 525 if (!isert_conn) 526 return -ENOMEM; 527 528 isert_init_conn(isert_conn); 529 isert_conn->cm_id = cma_id; 530 531 ret = isert_alloc_login_buf(isert_conn, cma_id->device); 532 if (ret) 533 goto out; 534 535 device = isert_device_get(cma_id); 536 if (IS_ERR(device)) { 537 ret = PTR_ERR(device); 538 goto out_rsp_dma_map; 539 } 540 isert_conn->device = device; 541 542 isert_set_nego_params(isert_conn, &event->param.conn); 543 544 ret = isert_conn_setup_qp(isert_conn, cma_id); 545 if (ret) 546 goto out_conn_dev; 547 548 ret = isert_login_post_recv(isert_conn); 549 if (ret) 550 goto out_conn_dev; 551 552 ret = isert_rdma_accept(isert_conn); 553 if (ret) 554 goto out_conn_dev; 555 556 mutex_lock(&isert_np->mutex); 557 list_add_tail(&isert_conn->node, &isert_np->accepted); 558 mutex_unlock(&isert_np->mutex); 559 560 return 0; 561 562 out_conn_dev: 563 isert_device_put(device); 564 out_rsp_dma_map: 565 isert_free_login_buf(isert_conn); 566 out: 567 kfree(isert_conn); 568 rdma_reject(cma_id, NULL, 0); 569 return ret; 570 } 571 572 static void 573 isert_connect_release(struct isert_conn *isert_conn) 574 { 575 struct isert_device *device = isert_conn->device; 576 577 isert_dbg("conn %p\n", isert_conn); 578 579 BUG_ON(!device); 580 581 isert_free_rx_descriptors(isert_conn); 582 if (isert_conn->cm_id && 583 !isert_conn->dev_removed) 584 rdma_destroy_id(isert_conn->cm_id); 585 586 if (isert_conn->qp) { 587 struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context; 588 589 isert_comp_put(comp); 590 ib_destroy_qp(isert_conn->qp); 591 } 592 593 if (isert_conn->login_req_buf) 594 isert_free_login_buf(isert_conn); 595 596 isert_device_put(device); 597 598 if (isert_conn->dev_removed) 599 wake_up_interruptible(&isert_conn->rem_wait); 600 else 601 kfree(isert_conn); 602 } 603 604 static void 605 isert_connected_handler(struct rdma_cm_id *cma_id) 606 { 607 struct isert_conn *isert_conn = cma_id->qp->qp_context; 608 struct isert_np *isert_np = cma_id->context; 609 610 isert_info("conn %p\n", isert_conn); 611 612 mutex_lock(&isert_conn->mutex); 613 isert_conn->state = ISER_CONN_UP; 614 kref_get(&isert_conn->kref); 615 mutex_unlock(&isert_conn->mutex); 616 617 mutex_lock(&isert_np->mutex); 618 list_move_tail(&isert_conn->node, &isert_np->pending); 619 mutex_unlock(&isert_np->mutex); 620 621 isert_info("np %p: Allow accept_np to continue\n", isert_np); 622 up(&isert_np->sem); 623 } 624 625 static void 626 isert_release_kref(struct kref *kref) 627 { 628 struct isert_conn *isert_conn = container_of(kref, 629 struct isert_conn, kref); 630 631 isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm, 632 current->pid); 633 634 isert_connect_release(isert_conn); 635 } 636 637 static void 638 isert_put_conn(struct isert_conn *isert_conn) 639 { 640 kref_put(&isert_conn->kref, isert_release_kref); 641 } 642 643 static void 644 isert_handle_unbound_conn(struct isert_conn *isert_conn) 645 { 646 struct isert_np *isert_np = isert_conn->cm_id->context; 647 648 mutex_lock(&isert_np->mutex); 649 if (!list_empty(&isert_conn->node)) { 650 /* 651 * This means iscsi doesn't know this connection 652 * so schedule a cleanup ourselves 653 */ 654 list_del_init(&isert_conn->node); 655 isert_put_conn(isert_conn); 656 queue_work(isert_release_wq, &isert_conn->release_work); 657 } 658 mutex_unlock(&isert_np->mutex); 659 } 660 661 /** 662 * isert_conn_terminate() - Initiate connection termination 663 * @isert_conn: isert connection struct 664 * 665 * Notes: 666 * In case the connection state is BOUND, move state 667 * to TEMINATING and start teardown sequence (rdma_disconnect). 668 * In case the connection state is UP, complete flush as well. 669 * 670 * This routine must be called with mutex held. Thus it is 671 * safe to call multiple times. 672 */ 673 static void 674 isert_conn_terminate(struct isert_conn *isert_conn) 675 { 676 int err; 677 678 if (isert_conn->state >= ISER_CONN_TERMINATING) 679 return; 680 681 isert_info("Terminating conn %p state %d\n", 682 isert_conn, isert_conn->state); 683 isert_conn->state = ISER_CONN_TERMINATING; 684 err = rdma_disconnect(isert_conn->cm_id); 685 if (err) 686 isert_warn("Failed rdma_disconnect isert_conn %p\n", 687 isert_conn); 688 } 689 690 static int 691 isert_np_cma_handler(struct isert_np *isert_np, 692 enum rdma_cm_event_type event) 693 { 694 isert_dbg("%s (%d): isert np %p\n", 695 rdma_event_msg(event), event, isert_np); 696 697 switch (event) { 698 case RDMA_CM_EVENT_DEVICE_REMOVAL: 699 isert_np->cm_id = NULL; 700 break; 701 case RDMA_CM_EVENT_ADDR_CHANGE: 702 isert_np->cm_id = isert_setup_id(isert_np); 703 if (IS_ERR(isert_np->cm_id)) { 704 isert_err("isert np %p setup id failed: %ld\n", 705 isert_np, PTR_ERR(isert_np->cm_id)); 706 isert_np->cm_id = NULL; 707 } 708 break; 709 default: 710 isert_err("isert np %p Unexpected event %d\n", 711 isert_np, event); 712 } 713 714 return -1; 715 } 716 717 static int 718 isert_disconnected_handler(struct rdma_cm_id *cma_id, 719 enum rdma_cm_event_type event) 720 { 721 struct isert_conn *isert_conn = cma_id->qp->qp_context; 722 723 mutex_lock(&isert_conn->mutex); 724 switch (isert_conn->state) { 725 case ISER_CONN_TERMINATING: 726 break; 727 case ISER_CONN_UP: 728 isert_conn_terminate(isert_conn); 729 ib_drain_qp(isert_conn->qp); 730 isert_handle_unbound_conn(isert_conn); 731 break; 732 case ISER_CONN_BOUND: 733 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */ 734 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 735 break; 736 default: 737 isert_warn("conn %p teminating in state %d\n", 738 isert_conn, isert_conn->state); 739 } 740 mutex_unlock(&isert_conn->mutex); 741 742 return 0; 743 } 744 745 static int 746 isert_connect_error(struct rdma_cm_id *cma_id) 747 { 748 struct isert_conn *isert_conn = cma_id->qp->qp_context; 749 750 list_del_init(&isert_conn->node); 751 isert_conn->cm_id = NULL; 752 isert_put_conn(isert_conn); 753 754 return -1; 755 } 756 757 static int 758 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 759 { 760 struct isert_np *isert_np = cma_id->context; 761 struct isert_conn *isert_conn; 762 int ret = 0; 763 764 isert_info("%s (%d): status %d id %p np %p\n", 765 rdma_event_msg(event->event), event->event, 766 event->status, cma_id, cma_id->context); 767 768 if (isert_np->cm_id == cma_id) 769 return isert_np_cma_handler(cma_id->context, event->event); 770 771 switch (event->event) { 772 case RDMA_CM_EVENT_CONNECT_REQUEST: 773 ret = isert_connect_request(cma_id, event); 774 if (ret) 775 isert_err("failed handle connect request %d\n", ret); 776 break; 777 case RDMA_CM_EVENT_ESTABLISHED: 778 isert_connected_handler(cma_id); 779 break; 780 case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */ 781 case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */ 782 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ 783 ret = isert_disconnected_handler(cma_id, event->event); 784 break; 785 case RDMA_CM_EVENT_DEVICE_REMOVAL: 786 isert_conn = cma_id->qp->qp_context; 787 isert_conn->dev_removed = true; 788 isert_disconnected_handler(cma_id, event->event); 789 wait_event_interruptible(isert_conn->rem_wait, 790 isert_conn->state == ISER_CONN_DOWN); 791 kfree(isert_conn); 792 /* 793 * return non-zero from the callback to destroy 794 * the rdma cm id 795 */ 796 return 1; 797 case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */ 798 case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */ 799 case RDMA_CM_EVENT_CONNECT_ERROR: 800 ret = isert_connect_error(cma_id); 801 break; 802 default: 803 isert_err("Unhandled RDMA CMA event: %d\n", event->event); 804 break; 805 } 806 807 return ret; 808 } 809 810 static int 811 isert_post_recvm(struct isert_conn *isert_conn, u32 count) 812 { 813 struct ib_recv_wr *rx_wr, *rx_wr_failed; 814 int i, ret; 815 struct iser_rx_desc *rx_desc; 816 817 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { 818 rx_desc = &isert_conn->rx_descs[i]; 819 820 rx_wr->wr_cqe = &rx_desc->rx_cqe; 821 rx_wr->sg_list = &rx_desc->rx_sg; 822 rx_wr->num_sge = 1; 823 rx_wr->next = rx_wr + 1; 824 } 825 rx_wr--; 826 rx_wr->next = NULL; /* mark end of work requests list */ 827 828 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, 829 &rx_wr_failed); 830 if (ret) 831 isert_err("ib_post_recv() failed with ret: %d\n", ret); 832 833 return ret; 834 } 835 836 static int 837 isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc) 838 { 839 struct ib_recv_wr *rx_wr_failed, rx_wr; 840 int ret; 841 842 rx_wr.wr_cqe = &rx_desc->rx_cqe; 843 rx_wr.sg_list = &rx_desc->rx_sg; 844 rx_wr.num_sge = 1; 845 rx_wr.next = NULL; 846 847 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed); 848 if (ret) 849 isert_err("ib_post_recv() failed with ret: %d\n", ret); 850 851 return ret; 852 } 853 854 static int 855 isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) 856 { 857 struct ib_device *ib_dev = isert_conn->cm_id->device; 858 struct ib_send_wr send_wr, *send_wr_failed; 859 int ret; 860 861 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr, 862 ISER_HEADERS_LEN, DMA_TO_DEVICE); 863 864 tx_desc->tx_cqe.done = isert_login_send_done; 865 866 send_wr.next = NULL; 867 send_wr.wr_cqe = &tx_desc->tx_cqe; 868 send_wr.sg_list = tx_desc->tx_sg; 869 send_wr.num_sge = tx_desc->num_sge; 870 send_wr.opcode = IB_WR_SEND; 871 send_wr.send_flags = IB_SEND_SIGNALED; 872 873 ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed); 874 if (ret) 875 isert_err("ib_post_send() failed, ret: %d\n", ret); 876 877 return ret; 878 } 879 880 static void 881 isert_create_send_desc(struct isert_conn *isert_conn, 882 struct isert_cmd *isert_cmd, 883 struct iser_tx_desc *tx_desc) 884 { 885 struct isert_device *device = isert_conn->device; 886 struct ib_device *ib_dev = device->ib_device; 887 888 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, 889 ISER_HEADERS_LEN, DMA_TO_DEVICE); 890 891 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl)); 892 tx_desc->iser_header.flags = ISCSI_CTRL; 893 894 tx_desc->num_sge = 1; 895 896 if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) { 897 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; 898 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc); 899 } 900 } 901 902 static int 903 isert_init_tx_hdrs(struct isert_conn *isert_conn, 904 struct iser_tx_desc *tx_desc) 905 { 906 struct isert_device *device = isert_conn->device; 907 struct ib_device *ib_dev = device->ib_device; 908 u64 dma_addr; 909 910 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc, 911 ISER_HEADERS_LEN, DMA_TO_DEVICE); 912 if (ib_dma_mapping_error(ib_dev, dma_addr)) { 913 isert_err("ib_dma_mapping_error() failed\n"); 914 return -ENOMEM; 915 } 916 917 tx_desc->dma_addr = dma_addr; 918 tx_desc->tx_sg[0].addr = tx_desc->dma_addr; 919 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; 920 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; 921 922 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n", 923 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length, 924 tx_desc->tx_sg[0].lkey); 925 926 return 0; 927 } 928 929 static void 930 isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 931 struct ib_send_wr *send_wr) 932 { 933 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; 934 935 tx_desc->tx_cqe.done = isert_send_done; 936 send_wr->wr_cqe = &tx_desc->tx_cqe; 937 938 if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) { 939 send_wr->opcode = IB_WR_SEND_WITH_INV; 940 send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey; 941 } else { 942 send_wr->opcode = IB_WR_SEND; 943 } 944 945 send_wr->sg_list = &tx_desc->tx_sg[0]; 946 send_wr->num_sge = isert_cmd->tx_desc.num_sge; 947 send_wr->send_flags = IB_SEND_SIGNALED; 948 } 949 950 static int 951 isert_login_post_recv(struct isert_conn *isert_conn) 952 { 953 struct ib_recv_wr rx_wr, *rx_wr_fail; 954 struct ib_sge sge; 955 int ret; 956 957 memset(&sge, 0, sizeof(struct ib_sge)); 958 sge.addr = isert_conn->login_req_dma; 959 sge.length = ISER_RX_PAYLOAD_SIZE; 960 sge.lkey = isert_conn->device->pd->local_dma_lkey; 961 962 isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", 963 sge.addr, sge.length, sge.lkey); 964 965 isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done; 966 967 memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); 968 rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe; 969 rx_wr.sg_list = &sge; 970 rx_wr.num_sge = 1; 971 972 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail); 973 if (ret) 974 isert_err("ib_post_recv() failed: %d\n", ret); 975 976 return ret; 977 } 978 979 static int 980 isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, 981 u32 length) 982 { 983 struct isert_conn *isert_conn = conn->context; 984 struct isert_device *device = isert_conn->device; 985 struct ib_device *ib_dev = device->ib_device; 986 struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc; 987 int ret; 988 989 isert_create_send_desc(isert_conn, NULL, tx_desc); 990 991 memcpy(&tx_desc->iscsi_header, &login->rsp[0], 992 sizeof(struct iscsi_hdr)); 993 994 isert_init_tx_hdrs(isert_conn, tx_desc); 995 996 if (length > 0) { 997 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1]; 998 999 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma, 1000 length, DMA_TO_DEVICE); 1001 1002 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length); 1003 1004 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma, 1005 length, DMA_TO_DEVICE); 1006 1007 tx_dsg->addr = isert_conn->login_rsp_dma; 1008 tx_dsg->length = length; 1009 tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey; 1010 tx_desc->num_sge = 2; 1011 } 1012 if (!login->login_failed) { 1013 if (login->login_complete) { 1014 ret = isert_alloc_rx_descriptors(isert_conn); 1015 if (ret) 1016 return ret; 1017 1018 ret = isert_post_recvm(isert_conn, 1019 ISERT_QP_MAX_RECV_DTOS); 1020 if (ret) 1021 return ret; 1022 1023 /* Now we are in FULL_FEATURE phase */ 1024 mutex_lock(&isert_conn->mutex); 1025 isert_conn->state = ISER_CONN_FULL_FEATURE; 1026 mutex_unlock(&isert_conn->mutex); 1027 goto post_send; 1028 } 1029 1030 ret = isert_login_post_recv(isert_conn); 1031 if (ret) 1032 return ret; 1033 } 1034 post_send: 1035 ret = isert_login_post_send(isert_conn, tx_desc); 1036 if (ret) 1037 return ret; 1038 1039 return 0; 1040 } 1041 1042 static void 1043 isert_rx_login_req(struct isert_conn *isert_conn) 1044 { 1045 struct iser_rx_desc *rx_desc = isert_conn->login_req_buf; 1046 int rx_buflen = isert_conn->login_req_len; 1047 struct iscsi_conn *conn = isert_conn->conn; 1048 struct iscsi_login *login = conn->conn_login; 1049 int size; 1050 1051 isert_info("conn %p\n", isert_conn); 1052 1053 WARN_ON_ONCE(!login); 1054 1055 if (login->first_request) { 1056 struct iscsi_login_req *login_req = 1057 (struct iscsi_login_req *)&rx_desc->iscsi_header; 1058 /* 1059 * Setup the initial iscsi_login values from the leading 1060 * login request PDU. 1061 */ 1062 login->leading_connection = (!login_req->tsih) ? 1 : 0; 1063 login->current_stage = 1064 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) 1065 >> 2; 1066 login->version_min = login_req->min_version; 1067 login->version_max = login_req->max_version; 1068 memcpy(login->isid, login_req->isid, 6); 1069 login->cmd_sn = be32_to_cpu(login_req->cmdsn); 1070 login->init_task_tag = login_req->itt; 1071 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); 1072 login->cid = be16_to_cpu(login_req->cid); 1073 login->tsih = be16_to_cpu(login_req->tsih); 1074 } 1075 1076 memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); 1077 1078 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); 1079 isert_dbg("Using login payload size: %d, rx_buflen: %d " 1080 "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen, 1081 MAX_KEY_VALUE_PAIRS); 1082 memcpy(login->req_buf, &rx_desc->data[0], size); 1083 1084 if (login->first_request) { 1085 complete(&isert_conn->login_comp); 1086 return; 1087 } 1088 schedule_delayed_work(&conn->login_work, 0); 1089 } 1090 1091 static struct iscsi_cmd 1092 *isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc) 1093 { 1094 struct isert_conn *isert_conn = conn->context; 1095 struct isert_cmd *isert_cmd; 1096 struct iscsi_cmd *cmd; 1097 1098 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 1099 if (!cmd) { 1100 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n"); 1101 return NULL; 1102 } 1103 isert_cmd = iscsit_priv_cmd(cmd); 1104 isert_cmd->conn = isert_conn; 1105 isert_cmd->iscsi_cmd = cmd; 1106 isert_cmd->rx_desc = rx_desc; 1107 1108 return cmd; 1109 } 1110 1111 static int 1112 isert_handle_scsi_cmd(struct isert_conn *isert_conn, 1113 struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd, 1114 struct iser_rx_desc *rx_desc, unsigned char *buf) 1115 { 1116 struct iscsi_conn *conn = isert_conn->conn; 1117 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; 1118 int imm_data, imm_data_len, unsol_data, sg_nents, rc; 1119 bool dump_payload = false; 1120 unsigned int data_len; 1121 1122 rc = iscsit_setup_scsi_cmd(conn, cmd, buf); 1123 if (rc < 0) 1124 return rc; 1125 1126 imm_data = cmd->immediate_data; 1127 imm_data_len = cmd->first_burst_len; 1128 unsol_data = cmd->unsolicited_data; 1129 data_len = cmd->se_cmd.data_length; 1130 1131 if (imm_data && imm_data_len == data_len) 1132 cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1133 rc = iscsit_process_scsi_cmd(conn, cmd, hdr); 1134 if (rc < 0) { 1135 return 0; 1136 } else if (rc > 0) { 1137 dump_payload = true; 1138 goto sequence_cmd; 1139 } 1140 1141 if (!imm_data) 1142 return 0; 1143 1144 if (imm_data_len != data_len) { 1145 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); 1146 sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents, 1147 &rx_desc->data[0], imm_data_len); 1148 isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n", 1149 sg_nents, imm_data_len); 1150 } else { 1151 sg_init_table(&isert_cmd->sg, 1); 1152 cmd->se_cmd.t_data_sg = &isert_cmd->sg; 1153 cmd->se_cmd.t_data_nents = 1; 1154 sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len); 1155 isert_dbg("Transfer Immediate imm_data_len: %d\n", 1156 imm_data_len); 1157 } 1158 1159 cmd->write_data_done += imm_data_len; 1160 1161 if (cmd->write_data_done == cmd->se_cmd.data_length) { 1162 spin_lock_bh(&cmd->istate_lock); 1163 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1164 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1165 spin_unlock_bh(&cmd->istate_lock); 1166 } 1167 1168 sequence_cmd: 1169 rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); 1170 1171 if (!rc && dump_payload == false && unsol_data) 1172 iscsit_set_unsoliticed_dataout(cmd); 1173 else if (dump_payload && imm_data) 1174 target_put_sess_cmd(&cmd->se_cmd); 1175 1176 return 0; 1177 } 1178 1179 static int 1180 isert_handle_iscsi_dataout(struct isert_conn *isert_conn, 1181 struct iser_rx_desc *rx_desc, unsigned char *buf) 1182 { 1183 struct scatterlist *sg_start; 1184 struct iscsi_conn *conn = isert_conn->conn; 1185 struct iscsi_cmd *cmd = NULL; 1186 struct iscsi_data *hdr = (struct iscsi_data *)buf; 1187 u32 unsol_data_len = ntoh24(hdr->dlength); 1188 int rc, sg_nents, sg_off, page_off; 1189 1190 rc = iscsit_check_dataout_hdr(conn, buf, &cmd); 1191 if (rc < 0) 1192 return rc; 1193 else if (!cmd) 1194 return 0; 1195 /* 1196 * FIXME: Unexpected unsolicited_data out 1197 */ 1198 if (!cmd->unsolicited_data) { 1199 isert_err("Received unexpected solicited data payload\n"); 1200 dump_stack(); 1201 return -1; 1202 } 1203 1204 isert_dbg("Unsolicited DataOut unsol_data_len: %u, " 1205 "write_data_done: %u, data_length: %u\n", 1206 unsol_data_len, cmd->write_data_done, 1207 cmd->se_cmd.data_length); 1208 1209 sg_off = cmd->write_data_done / PAGE_SIZE; 1210 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 1211 sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE)); 1212 page_off = cmd->write_data_done % PAGE_SIZE; 1213 /* 1214 * FIXME: Non page-aligned unsolicited_data out 1215 */ 1216 if (page_off) { 1217 isert_err("unexpected non-page aligned data payload\n"); 1218 dump_stack(); 1219 return -1; 1220 } 1221 isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u " 1222 "sg_nents: %u from %p %u\n", sg_start, sg_off, 1223 sg_nents, &rx_desc->data[0], unsol_data_len); 1224 1225 sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], 1226 unsol_data_len); 1227 1228 rc = iscsit_check_dataout_payload(cmd, hdr, false); 1229 if (rc < 0) 1230 return rc; 1231 1232 /* 1233 * multiple data-outs on the same command can arrive - 1234 * so post the buffer before hand 1235 */ 1236 rc = isert_post_recv(isert_conn, rx_desc); 1237 if (rc) { 1238 isert_err("ib_post_recv failed with %d\n", rc); 1239 return rc; 1240 } 1241 return 0; 1242 } 1243 1244 static int 1245 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1246 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, 1247 unsigned char *buf) 1248 { 1249 struct iscsi_conn *conn = isert_conn->conn; 1250 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf; 1251 int rc; 1252 1253 rc = iscsit_setup_nop_out(conn, cmd, hdr); 1254 if (rc < 0) 1255 return rc; 1256 /* 1257 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload 1258 */ 1259 1260 return iscsit_process_nop_out(conn, cmd, hdr); 1261 } 1262 1263 static int 1264 isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1265 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, 1266 struct iscsi_text *hdr) 1267 { 1268 struct iscsi_conn *conn = isert_conn->conn; 1269 u32 payload_length = ntoh24(hdr->dlength); 1270 int rc; 1271 unsigned char *text_in = NULL; 1272 1273 rc = iscsit_setup_text_cmd(conn, cmd, hdr); 1274 if (rc < 0) 1275 return rc; 1276 1277 if (payload_length) { 1278 text_in = kzalloc(payload_length, GFP_KERNEL); 1279 if (!text_in) { 1280 isert_err("Unable to allocate text_in of payload_length: %u\n", 1281 payload_length); 1282 return -ENOMEM; 1283 } 1284 } 1285 cmd->text_in_ptr = text_in; 1286 1287 memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length); 1288 1289 return iscsit_process_text_cmd(conn, cmd, hdr); 1290 } 1291 1292 static int 1293 isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, 1294 uint32_t read_stag, uint64_t read_va, 1295 uint32_t write_stag, uint64_t write_va) 1296 { 1297 struct iscsi_hdr *hdr = &rx_desc->iscsi_header; 1298 struct iscsi_conn *conn = isert_conn->conn; 1299 struct iscsi_cmd *cmd; 1300 struct isert_cmd *isert_cmd; 1301 int ret = -EINVAL; 1302 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK); 1303 1304 if (conn->sess->sess_ops->SessionType && 1305 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { 1306 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery," 1307 " ignoring\n", opcode); 1308 return 0; 1309 } 1310 1311 switch (opcode) { 1312 case ISCSI_OP_SCSI_CMD: 1313 cmd = isert_allocate_cmd(conn, rx_desc); 1314 if (!cmd) 1315 break; 1316 1317 isert_cmd = iscsit_priv_cmd(cmd); 1318 isert_cmd->read_stag = read_stag; 1319 isert_cmd->read_va = read_va; 1320 isert_cmd->write_stag = write_stag; 1321 isert_cmd->write_va = write_va; 1322 isert_cmd->inv_rkey = read_stag ? read_stag : write_stag; 1323 1324 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd, 1325 rx_desc, (unsigned char *)hdr); 1326 break; 1327 case ISCSI_OP_NOOP_OUT: 1328 cmd = isert_allocate_cmd(conn, rx_desc); 1329 if (!cmd) 1330 break; 1331 1332 isert_cmd = iscsit_priv_cmd(cmd); 1333 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd, 1334 rx_desc, (unsigned char *)hdr); 1335 break; 1336 case ISCSI_OP_SCSI_DATA_OUT: 1337 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc, 1338 (unsigned char *)hdr); 1339 break; 1340 case ISCSI_OP_SCSI_TMFUNC: 1341 cmd = isert_allocate_cmd(conn, rx_desc); 1342 if (!cmd) 1343 break; 1344 1345 ret = iscsit_handle_task_mgt_cmd(conn, cmd, 1346 (unsigned char *)hdr); 1347 break; 1348 case ISCSI_OP_LOGOUT: 1349 cmd = isert_allocate_cmd(conn, rx_desc); 1350 if (!cmd) 1351 break; 1352 1353 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); 1354 break; 1355 case ISCSI_OP_TEXT: 1356 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) 1357 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); 1358 else 1359 cmd = isert_allocate_cmd(conn, rx_desc); 1360 1361 if (!cmd) 1362 break; 1363 1364 isert_cmd = iscsit_priv_cmd(cmd); 1365 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd, 1366 rx_desc, (struct iscsi_text *)hdr); 1367 break; 1368 default: 1369 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode); 1370 dump_stack(); 1371 break; 1372 } 1373 1374 return ret; 1375 } 1376 1377 static void 1378 isert_print_wc(struct ib_wc *wc, const char *type) 1379 { 1380 if (wc->status != IB_WC_WR_FLUSH_ERR) 1381 isert_err("%s failure: %s (%d) vend_err %x\n", type, 1382 ib_wc_status_msg(wc->status), wc->status, 1383 wc->vendor_err); 1384 else 1385 isert_dbg("%s failure: %s (%d)\n", type, 1386 ib_wc_status_msg(wc->status), wc->status); 1387 } 1388 1389 static void 1390 isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) 1391 { 1392 struct isert_conn *isert_conn = wc->qp->qp_context; 1393 struct ib_device *ib_dev = isert_conn->cm_id->device; 1394 struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe); 1395 struct iscsi_hdr *hdr = &rx_desc->iscsi_header; 1396 struct iser_ctrl *iser_ctrl = &rx_desc->iser_header; 1397 uint64_t read_va = 0, write_va = 0; 1398 uint32_t read_stag = 0, write_stag = 0; 1399 1400 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1401 isert_print_wc(wc, "recv"); 1402 if (wc->status != IB_WC_WR_FLUSH_ERR) 1403 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1404 return; 1405 } 1406 1407 ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr, 1408 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1409 1410 isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", 1411 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags, 1412 (int)(wc->byte_len - ISER_HEADERS_LEN)); 1413 1414 switch (iser_ctrl->flags & 0xF0) { 1415 case ISCSI_CTRL: 1416 if (iser_ctrl->flags & ISER_RSV) { 1417 read_stag = be32_to_cpu(iser_ctrl->read_stag); 1418 read_va = be64_to_cpu(iser_ctrl->read_va); 1419 isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n", 1420 read_stag, (unsigned long long)read_va); 1421 } 1422 if (iser_ctrl->flags & ISER_WSV) { 1423 write_stag = be32_to_cpu(iser_ctrl->write_stag); 1424 write_va = be64_to_cpu(iser_ctrl->write_va); 1425 isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n", 1426 write_stag, (unsigned long long)write_va); 1427 } 1428 1429 isert_dbg("ISER ISCSI_CTRL PDU\n"); 1430 break; 1431 case ISER_HELLO: 1432 isert_err("iSER Hello message\n"); 1433 break; 1434 default: 1435 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags); 1436 break; 1437 } 1438 1439 isert_rx_opcode(isert_conn, rx_desc, 1440 read_stag, read_va, write_stag, write_va); 1441 1442 ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr, 1443 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1444 } 1445 1446 static void 1447 isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) 1448 { 1449 struct isert_conn *isert_conn = wc->qp->qp_context; 1450 struct ib_device *ib_dev = isert_conn->cm_id->device; 1451 1452 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1453 isert_print_wc(wc, "login recv"); 1454 return; 1455 } 1456 1457 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma, 1458 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1459 1460 isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN; 1461 1462 if (isert_conn->conn) { 1463 struct iscsi_login *login = isert_conn->conn->conn_login; 1464 1465 if (login && !login->first_request) 1466 isert_rx_login_req(isert_conn); 1467 } 1468 1469 mutex_lock(&isert_conn->mutex); 1470 complete(&isert_conn->login_req_comp); 1471 mutex_unlock(&isert_conn->mutex); 1472 1473 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma, 1474 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1475 } 1476 1477 static void 1478 isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn) 1479 { 1480 struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd; 1481 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd); 1482 1483 if (!cmd->rw.nr_ops) 1484 return; 1485 1486 if (isert_prot_cmd(conn, se_cmd)) { 1487 rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp, 1488 conn->cm_id->port_num, se_cmd->t_data_sg, 1489 se_cmd->t_data_nents, se_cmd->t_prot_sg, 1490 se_cmd->t_prot_nents, dir); 1491 } else { 1492 rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num, 1493 se_cmd->t_data_sg, se_cmd->t_data_nents, dir); 1494 } 1495 1496 cmd->rw.nr_ops = 0; 1497 } 1498 1499 static void 1500 isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) 1501 { 1502 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1503 struct isert_conn *isert_conn = isert_cmd->conn; 1504 struct iscsi_conn *conn = isert_conn->conn; 1505 struct iscsi_text_rsp *hdr; 1506 1507 isert_dbg("Cmd %p\n", isert_cmd); 1508 1509 switch (cmd->iscsi_opcode) { 1510 case ISCSI_OP_SCSI_CMD: 1511 spin_lock_bh(&conn->cmd_lock); 1512 if (!list_empty(&cmd->i_conn_node)) 1513 list_del_init(&cmd->i_conn_node); 1514 spin_unlock_bh(&conn->cmd_lock); 1515 1516 if (cmd->data_direction == DMA_TO_DEVICE) { 1517 iscsit_stop_dataout_timer(cmd); 1518 /* 1519 * Check for special case during comp_err where 1520 * WRITE_PENDING has been handed off from core, 1521 * but requires an extra target_put_sess_cmd() 1522 * before transport_generic_free_cmd() below. 1523 */ 1524 if (comp_err && 1525 cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) { 1526 struct se_cmd *se_cmd = &cmd->se_cmd; 1527 1528 target_put_sess_cmd(se_cmd); 1529 } 1530 } 1531 1532 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1533 transport_generic_free_cmd(&cmd->se_cmd, 0); 1534 break; 1535 case ISCSI_OP_SCSI_TMFUNC: 1536 spin_lock_bh(&conn->cmd_lock); 1537 if (!list_empty(&cmd->i_conn_node)) 1538 list_del_init(&cmd->i_conn_node); 1539 spin_unlock_bh(&conn->cmd_lock); 1540 1541 transport_generic_free_cmd(&cmd->se_cmd, 0); 1542 break; 1543 case ISCSI_OP_REJECT: 1544 case ISCSI_OP_NOOP_OUT: 1545 case ISCSI_OP_TEXT: 1546 hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; 1547 /* If the continue bit is on, keep the command alive */ 1548 if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE) 1549 break; 1550 1551 spin_lock_bh(&conn->cmd_lock); 1552 if (!list_empty(&cmd->i_conn_node)) 1553 list_del_init(&cmd->i_conn_node); 1554 spin_unlock_bh(&conn->cmd_lock); 1555 1556 /* 1557 * Handle special case for REJECT when iscsi_add_reject*() has 1558 * overwritten the original iscsi_opcode assignment, and the 1559 * associated cmd->se_cmd needs to be released. 1560 */ 1561 if (cmd->se_cmd.se_tfo != NULL) { 1562 isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n", 1563 cmd->iscsi_opcode); 1564 transport_generic_free_cmd(&cmd->se_cmd, 0); 1565 break; 1566 } 1567 /* 1568 * Fall-through 1569 */ 1570 default: 1571 iscsit_release_cmd(cmd); 1572 break; 1573 } 1574 } 1575 1576 static void 1577 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) 1578 { 1579 if (tx_desc->dma_addr != 0) { 1580 isert_dbg("unmap single for tx_desc->dma_addr\n"); 1581 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr, 1582 ISER_HEADERS_LEN, DMA_TO_DEVICE); 1583 tx_desc->dma_addr = 0; 1584 } 1585 } 1586 1587 static void 1588 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, 1589 struct ib_device *ib_dev, bool comp_err) 1590 { 1591 if (isert_cmd->pdu_buf_dma != 0) { 1592 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n"); 1593 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma, 1594 isert_cmd->pdu_buf_len, DMA_TO_DEVICE); 1595 isert_cmd->pdu_buf_dma = 0; 1596 } 1597 1598 isert_unmap_tx_desc(tx_desc, ib_dev); 1599 isert_put_cmd(isert_cmd, comp_err); 1600 } 1601 1602 static int 1603 isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr) 1604 { 1605 struct ib_mr_status mr_status; 1606 int ret; 1607 1608 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); 1609 if (ret) { 1610 isert_err("ib_check_mr_status failed, ret %d\n", ret); 1611 goto fail_mr_status; 1612 } 1613 1614 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { 1615 u64 sec_offset_err; 1616 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8; 1617 1618 switch (mr_status.sig_err.err_type) { 1619 case IB_SIG_BAD_GUARD: 1620 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 1621 break; 1622 case IB_SIG_BAD_REFTAG: 1623 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 1624 break; 1625 case IB_SIG_BAD_APPTAG: 1626 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; 1627 break; 1628 } 1629 sec_offset_err = mr_status.sig_err.sig_err_offset; 1630 do_div(sec_offset_err, block_size); 1631 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba; 1632 1633 isert_err("PI error found type %d at sector 0x%llx " 1634 "expected 0x%x vs actual 0x%x\n", 1635 mr_status.sig_err.err_type, 1636 (unsigned long long)se_cmd->bad_sector, 1637 mr_status.sig_err.expected, 1638 mr_status.sig_err.actual); 1639 ret = 1; 1640 } 1641 1642 fail_mr_status: 1643 return ret; 1644 } 1645 1646 static void 1647 isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc) 1648 { 1649 struct isert_conn *isert_conn = wc->qp->qp_context; 1650 struct isert_device *device = isert_conn->device; 1651 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); 1652 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc); 1653 struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd; 1654 int ret = 0; 1655 1656 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1657 isert_print_wc(wc, "rdma write"); 1658 if (wc->status != IB_WC_WR_FLUSH_ERR) 1659 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1660 isert_completion_put(desc, isert_cmd, device->ib_device, true); 1661 return; 1662 } 1663 1664 isert_dbg("Cmd %p\n", isert_cmd); 1665 1666 ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr); 1667 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1668 1669 if (ret) 1670 transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0); 1671 else 1672 isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd); 1673 } 1674 1675 static void 1676 isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc) 1677 { 1678 struct isert_conn *isert_conn = wc->qp->qp_context; 1679 struct isert_device *device = isert_conn->device; 1680 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); 1681 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc); 1682 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1683 struct se_cmd *se_cmd = &cmd->se_cmd; 1684 int ret = 0; 1685 1686 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1687 isert_print_wc(wc, "rdma read"); 1688 if (wc->status != IB_WC_WR_FLUSH_ERR) 1689 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1690 isert_completion_put(desc, isert_cmd, device->ib_device, true); 1691 return; 1692 } 1693 1694 isert_dbg("Cmd %p\n", isert_cmd); 1695 1696 iscsit_stop_dataout_timer(cmd); 1697 1698 if (isert_prot_cmd(isert_conn, se_cmd)) 1699 ret = isert_check_pi_status(se_cmd, isert_cmd->rw.sig->sig_mr); 1700 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1701 cmd->write_data_done = 0; 1702 1703 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); 1704 spin_lock_bh(&cmd->istate_lock); 1705 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1706 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1707 spin_unlock_bh(&cmd->istate_lock); 1708 1709 if (ret) { 1710 target_put_sess_cmd(se_cmd); 1711 transport_send_check_condition_and_sense(se_cmd, 1712 se_cmd->pi_err, 0); 1713 } else { 1714 target_execute_cmd(se_cmd); 1715 } 1716 } 1717 1718 static void 1719 isert_do_control_comp(struct work_struct *work) 1720 { 1721 struct isert_cmd *isert_cmd = container_of(work, 1722 struct isert_cmd, comp_work); 1723 struct isert_conn *isert_conn = isert_cmd->conn; 1724 struct ib_device *ib_dev = isert_conn->cm_id->device; 1725 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1726 1727 isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state); 1728 1729 switch (cmd->i_state) { 1730 case ISTATE_SEND_TASKMGTRSP: 1731 iscsit_tmr_post_handler(cmd, cmd->conn); 1732 case ISTATE_SEND_REJECT: /* FALLTHRU */ 1733 case ISTATE_SEND_TEXTRSP: /* FALLTHRU */ 1734 cmd->i_state = ISTATE_SENT_STATUS; 1735 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, 1736 ib_dev, false); 1737 break; 1738 case ISTATE_SEND_LOGOUTRSP: 1739 iscsit_logout_post_handler(cmd, cmd->conn); 1740 break; 1741 default: 1742 isert_err("Unknown i_state %d\n", cmd->i_state); 1743 dump_stack(); 1744 break; 1745 } 1746 } 1747 1748 static void 1749 isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc) 1750 { 1751 struct isert_conn *isert_conn = wc->qp->qp_context; 1752 struct ib_device *ib_dev = isert_conn->cm_id->device; 1753 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); 1754 1755 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1756 isert_print_wc(wc, "login send"); 1757 if (wc->status != IB_WC_WR_FLUSH_ERR) 1758 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1759 } 1760 1761 isert_unmap_tx_desc(tx_desc, ib_dev); 1762 } 1763 1764 static void 1765 isert_send_done(struct ib_cq *cq, struct ib_wc *wc) 1766 { 1767 struct isert_conn *isert_conn = wc->qp->qp_context; 1768 struct ib_device *ib_dev = isert_conn->cm_id->device; 1769 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); 1770 struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc); 1771 1772 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1773 isert_print_wc(wc, "send"); 1774 if (wc->status != IB_WC_WR_FLUSH_ERR) 1775 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1776 isert_completion_put(tx_desc, isert_cmd, ib_dev, true); 1777 return; 1778 } 1779 1780 isert_dbg("Cmd %p\n", isert_cmd); 1781 1782 switch (isert_cmd->iscsi_cmd->i_state) { 1783 case ISTATE_SEND_TASKMGTRSP: 1784 case ISTATE_SEND_LOGOUTRSP: 1785 case ISTATE_SEND_REJECT: 1786 case ISTATE_SEND_TEXTRSP: 1787 isert_unmap_tx_desc(tx_desc, ib_dev); 1788 1789 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp); 1790 queue_work(isert_comp_wq, &isert_cmd->comp_work); 1791 return; 1792 default: 1793 isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS; 1794 isert_completion_put(tx_desc, isert_cmd, ib_dev, false); 1795 break; 1796 } 1797 } 1798 1799 static int 1800 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd) 1801 { 1802 struct ib_send_wr *wr_failed; 1803 int ret; 1804 1805 ret = isert_post_recv(isert_conn, isert_cmd->rx_desc); 1806 if (ret) { 1807 isert_err("ib_post_recv failed with %d\n", ret); 1808 return ret; 1809 } 1810 1811 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, 1812 &wr_failed); 1813 if (ret) { 1814 isert_err("ib_post_send failed with %d\n", ret); 1815 return ret; 1816 } 1817 return ret; 1818 } 1819 1820 static int 1821 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 1822 { 1823 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1824 struct isert_conn *isert_conn = conn->context; 1825 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1826 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *) 1827 &isert_cmd->tx_desc.iscsi_header; 1828 1829 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1830 iscsit_build_rsp_pdu(cmd, conn, true, hdr); 1831 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1832 /* 1833 * Attach SENSE DATA payload to iSCSI Response PDU 1834 */ 1835 if (cmd->se_cmd.sense_buffer && 1836 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || 1837 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { 1838 struct isert_device *device = isert_conn->device; 1839 struct ib_device *ib_dev = device->ib_device; 1840 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 1841 u32 padding, pdu_len; 1842 1843 put_unaligned_be16(cmd->se_cmd.scsi_sense_length, 1844 cmd->sense_buffer); 1845 cmd->se_cmd.scsi_sense_length += sizeof(__be16); 1846 1847 padding = -(cmd->se_cmd.scsi_sense_length) & 3; 1848 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length); 1849 pdu_len = cmd->se_cmd.scsi_sense_length + padding; 1850 1851 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 1852 (void *)cmd->sense_buffer, pdu_len, 1853 DMA_TO_DEVICE); 1854 1855 isert_cmd->pdu_buf_len = pdu_len; 1856 tx_dsg->addr = isert_cmd->pdu_buf_dma; 1857 tx_dsg->length = pdu_len; 1858 tx_dsg->lkey = device->pd->local_dma_lkey; 1859 isert_cmd->tx_desc.num_sge = 2; 1860 } 1861 1862 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1863 1864 isert_dbg("Posting SCSI Response\n"); 1865 1866 return isert_post_response(isert_conn, isert_cmd); 1867 } 1868 1869 static void 1870 isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 1871 { 1872 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1873 struct isert_conn *isert_conn = conn->context; 1874 1875 spin_lock_bh(&conn->cmd_lock); 1876 if (!list_empty(&cmd->i_conn_node)) 1877 list_del_init(&cmd->i_conn_node); 1878 spin_unlock_bh(&conn->cmd_lock); 1879 1880 if (cmd->data_direction == DMA_TO_DEVICE) 1881 iscsit_stop_dataout_timer(cmd); 1882 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1883 } 1884 1885 static enum target_prot_op 1886 isert_get_sup_prot_ops(struct iscsi_conn *conn) 1887 { 1888 struct isert_conn *isert_conn = conn->context; 1889 struct isert_device *device = isert_conn->device; 1890 1891 if (conn->tpg->tpg_attrib.t10_pi) { 1892 if (device->pi_capable) { 1893 isert_info("conn %p PI offload enabled\n", isert_conn); 1894 isert_conn->pi_support = true; 1895 return TARGET_PROT_ALL; 1896 } 1897 } 1898 1899 isert_info("conn %p PI offload disabled\n", isert_conn); 1900 isert_conn->pi_support = false; 1901 1902 return TARGET_PROT_NORMAL; 1903 } 1904 1905 static int 1906 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 1907 bool nopout_response) 1908 { 1909 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1910 struct isert_conn *isert_conn = conn->context; 1911 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1912 1913 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1914 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *) 1915 &isert_cmd->tx_desc.iscsi_header, 1916 nopout_response); 1917 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1918 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1919 1920 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn); 1921 1922 return isert_post_response(isert_conn, isert_cmd); 1923 } 1924 1925 static int 1926 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1927 { 1928 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1929 struct isert_conn *isert_conn = conn->context; 1930 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1931 1932 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1933 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *) 1934 &isert_cmd->tx_desc.iscsi_header); 1935 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1936 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1937 1938 isert_dbg("conn %p Posting Logout Response\n", isert_conn); 1939 1940 return isert_post_response(isert_conn, isert_cmd); 1941 } 1942 1943 static int 1944 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1945 { 1946 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1947 struct isert_conn *isert_conn = conn->context; 1948 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1949 1950 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1951 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *) 1952 &isert_cmd->tx_desc.iscsi_header); 1953 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1954 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1955 1956 isert_dbg("conn %p Posting Task Management Response\n", isert_conn); 1957 1958 return isert_post_response(isert_conn, isert_cmd); 1959 } 1960 1961 static int 1962 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1963 { 1964 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1965 struct isert_conn *isert_conn = conn->context; 1966 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1967 struct isert_device *device = isert_conn->device; 1968 struct ib_device *ib_dev = device->ib_device; 1969 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 1970 struct iscsi_reject *hdr = 1971 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header; 1972 1973 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1974 iscsit_build_reject(cmd, conn, hdr); 1975 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1976 1977 hton24(hdr->dlength, ISCSI_HDR_LEN); 1978 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 1979 (void *)cmd->buf_ptr, ISCSI_HDR_LEN, 1980 DMA_TO_DEVICE); 1981 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN; 1982 tx_dsg->addr = isert_cmd->pdu_buf_dma; 1983 tx_dsg->length = ISCSI_HDR_LEN; 1984 tx_dsg->lkey = device->pd->local_dma_lkey; 1985 isert_cmd->tx_desc.num_sge = 2; 1986 1987 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1988 1989 isert_dbg("conn %p Posting Reject\n", isert_conn); 1990 1991 return isert_post_response(isert_conn, isert_cmd); 1992 } 1993 1994 static int 1995 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1996 { 1997 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1998 struct isert_conn *isert_conn = conn->context; 1999 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2000 struct iscsi_text_rsp *hdr = 2001 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; 2002 u32 txt_rsp_len; 2003 int rc; 2004 2005 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2006 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND); 2007 if (rc < 0) 2008 return rc; 2009 2010 txt_rsp_len = rc; 2011 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2012 2013 if (txt_rsp_len) { 2014 struct isert_device *device = isert_conn->device; 2015 struct ib_device *ib_dev = device->ib_device; 2016 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 2017 void *txt_rsp_buf = cmd->buf_ptr; 2018 2019 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 2020 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE); 2021 2022 isert_cmd->pdu_buf_len = txt_rsp_len; 2023 tx_dsg->addr = isert_cmd->pdu_buf_dma; 2024 tx_dsg->length = txt_rsp_len; 2025 tx_dsg->lkey = device->pd->local_dma_lkey; 2026 isert_cmd->tx_desc.num_sge = 2; 2027 } 2028 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 2029 2030 isert_dbg("conn %p Text Response\n", isert_conn); 2031 2032 return isert_post_response(isert_conn, isert_cmd); 2033 } 2034 2035 static inline void 2036 isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs, 2037 struct ib_sig_domain *domain) 2038 { 2039 domain->sig_type = IB_SIG_TYPE_T10_DIF; 2040 domain->sig.dif.bg_type = IB_T10DIF_CRC; 2041 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size; 2042 domain->sig.dif.ref_tag = se_cmd->reftag_seed; 2043 /* 2044 * At the moment we hard code those, but if in the future 2045 * the target core would like to use it, we will take it 2046 * from se_cmd. 2047 */ 2048 domain->sig.dif.apptag_check_mask = 0xffff; 2049 domain->sig.dif.app_escape = true; 2050 domain->sig.dif.ref_escape = true; 2051 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT || 2052 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT) 2053 domain->sig.dif.ref_remap = true; 2054 }; 2055 2056 static int 2057 isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs) 2058 { 2059 memset(sig_attrs, 0, sizeof(*sig_attrs)); 2060 2061 switch (se_cmd->prot_op) { 2062 case TARGET_PROT_DIN_INSERT: 2063 case TARGET_PROT_DOUT_STRIP: 2064 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE; 2065 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire); 2066 break; 2067 case TARGET_PROT_DOUT_INSERT: 2068 case TARGET_PROT_DIN_STRIP: 2069 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE; 2070 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem); 2071 break; 2072 case TARGET_PROT_DIN_PASS: 2073 case TARGET_PROT_DOUT_PASS: 2074 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire); 2075 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem); 2076 break; 2077 default: 2078 isert_err("Unsupported PI operation %d\n", se_cmd->prot_op); 2079 return -EINVAL; 2080 } 2081 2082 sig_attrs->check_mask = 2083 (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) | 2084 (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) | 2085 (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0); 2086 return 0; 2087 } 2088 2089 static int 2090 isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn, 2091 struct ib_cqe *cqe, struct ib_send_wr *chain_wr) 2092 { 2093 struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd; 2094 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd); 2095 u8 port_num = conn->cm_id->port_num; 2096 u64 addr; 2097 u32 rkey, offset; 2098 int ret; 2099 2100 if (dir == DMA_FROM_DEVICE) { 2101 addr = cmd->write_va; 2102 rkey = cmd->write_stag; 2103 offset = cmd->iscsi_cmd->write_data_done; 2104 } else { 2105 addr = cmd->read_va; 2106 rkey = cmd->read_stag; 2107 offset = 0; 2108 } 2109 2110 if (isert_prot_cmd(conn, se_cmd)) { 2111 struct ib_sig_attrs sig_attrs; 2112 2113 ret = isert_set_sig_attrs(se_cmd, &sig_attrs); 2114 if (ret) 2115 return ret; 2116 2117 WARN_ON_ONCE(offset); 2118 ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num, 2119 se_cmd->t_data_sg, se_cmd->t_data_nents, 2120 se_cmd->t_prot_sg, se_cmd->t_prot_nents, 2121 &sig_attrs, addr, rkey, dir); 2122 } else { 2123 ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num, 2124 se_cmd->t_data_sg, se_cmd->t_data_nents, 2125 offset, addr, rkey, dir); 2126 } 2127 if (ret < 0) { 2128 isert_err("Cmd: %p failed to prepare RDMA res\n", cmd); 2129 return ret; 2130 } 2131 2132 ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr); 2133 if (ret < 0) 2134 isert_err("Cmd: %p failed to post RDMA res\n", cmd); 2135 return ret; 2136 } 2137 2138 static int 2139 isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 2140 { 2141 struct se_cmd *se_cmd = &cmd->se_cmd; 2142 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2143 struct isert_conn *isert_conn = conn->context; 2144 struct ib_cqe *cqe = NULL; 2145 struct ib_send_wr *chain_wr = NULL; 2146 int rc; 2147 2148 isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n", 2149 isert_cmd, se_cmd->data_length); 2150 2151 if (isert_prot_cmd(isert_conn, se_cmd)) { 2152 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done; 2153 cqe = &isert_cmd->tx_desc.tx_cqe; 2154 } else { 2155 /* 2156 * Build isert_conn->tx_desc for iSCSI response PDU and attach 2157 */ 2158 isert_create_send_desc(isert_conn, isert_cmd, 2159 &isert_cmd->tx_desc); 2160 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *) 2161 &isert_cmd->tx_desc.iscsi_header); 2162 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2163 isert_init_send_wr(isert_conn, isert_cmd, 2164 &isert_cmd->tx_desc.send_wr); 2165 2166 rc = isert_post_recv(isert_conn, isert_cmd->rx_desc); 2167 if (rc) { 2168 isert_err("ib_post_recv failed with %d\n", rc); 2169 return rc; 2170 } 2171 2172 chain_wr = &isert_cmd->tx_desc.send_wr; 2173 } 2174 2175 isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr); 2176 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd); 2177 return 1; 2178 } 2179 2180 static int 2181 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) 2182 { 2183 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2184 2185 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", 2186 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done); 2187 2188 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done; 2189 isert_rdma_rw_ctx_post(isert_cmd, conn->context, 2190 &isert_cmd->tx_desc.tx_cqe, NULL); 2191 2192 isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", 2193 isert_cmd); 2194 return 0; 2195 } 2196 2197 static int 2198 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 2199 { 2200 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2201 int ret = 0; 2202 2203 switch (state) { 2204 case ISTATE_REMOVE: 2205 spin_lock_bh(&conn->cmd_lock); 2206 list_del_init(&cmd->i_conn_node); 2207 spin_unlock_bh(&conn->cmd_lock); 2208 isert_put_cmd(isert_cmd, true); 2209 break; 2210 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 2211 ret = isert_put_nopin(cmd, conn, false); 2212 break; 2213 default: 2214 isert_err("Unknown immediate state: 0x%02x\n", state); 2215 ret = -EINVAL; 2216 break; 2217 } 2218 2219 return ret; 2220 } 2221 2222 static int 2223 isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 2224 { 2225 struct isert_conn *isert_conn = conn->context; 2226 int ret; 2227 2228 switch (state) { 2229 case ISTATE_SEND_LOGOUTRSP: 2230 ret = isert_put_logout_rsp(cmd, conn); 2231 if (!ret) 2232 isert_conn->logout_posted = true; 2233 break; 2234 case ISTATE_SEND_NOPIN: 2235 ret = isert_put_nopin(cmd, conn, true); 2236 break; 2237 case ISTATE_SEND_TASKMGTRSP: 2238 ret = isert_put_tm_rsp(cmd, conn); 2239 break; 2240 case ISTATE_SEND_REJECT: 2241 ret = isert_put_reject(cmd, conn); 2242 break; 2243 case ISTATE_SEND_TEXTRSP: 2244 ret = isert_put_text_rsp(cmd, conn); 2245 break; 2246 case ISTATE_SEND_STATUS: 2247 /* 2248 * Special case for sending non GOOD SCSI status from TX thread 2249 * context during pre se_cmd excecution failure. 2250 */ 2251 ret = isert_put_response(conn, cmd); 2252 break; 2253 default: 2254 isert_err("Unknown response state: 0x%02x\n", state); 2255 ret = -EINVAL; 2256 break; 2257 } 2258 2259 return ret; 2260 } 2261 2262 struct rdma_cm_id * 2263 isert_setup_id(struct isert_np *isert_np) 2264 { 2265 struct iscsi_np *np = isert_np->np; 2266 struct rdma_cm_id *id; 2267 struct sockaddr *sa; 2268 int ret; 2269 2270 sa = (struct sockaddr *)&np->np_sockaddr; 2271 isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa); 2272 2273 id = rdma_create_id(&init_net, isert_cma_handler, isert_np, 2274 RDMA_PS_TCP, IB_QPT_RC); 2275 if (IS_ERR(id)) { 2276 isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id)); 2277 ret = PTR_ERR(id); 2278 goto out; 2279 } 2280 isert_dbg("id %p context %p\n", id, id->context); 2281 2282 ret = rdma_bind_addr(id, sa); 2283 if (ret) { 2284 isert_err("rdma_bind_addr() failed: %d\n", ret); 2285 goto out_id; 2286 } 2287 2288 ret = rdma_listen(id, 0); 2289 if (ret) { 2290 isert_err("rdma_listen() failed: %d\n", ret); 2291 goto out_id; 2292 } 2293 2294 return id; 2295 out_id: 2296 rdma_destroy_id(id); 2297 out: 2298 return ERR_PTR(ret); 2299 } 2300 2301 static int 2302 isert_setup_np(struct iscsi_np *np, 2303 struct sockaddr_storage *ksockaddr) 2304 { 2305 struct isert_np *isert_np; 2306 struct rdma_cm_id *isert_lid; 2307 int ret; 2308 2309 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL); 2310 if (!isert_np) { 2311 isert_err("Unable to allocate struct isert_np\n"); 2312 return -ENOMEM; 2313 } 2314 sema_init(&isert_np->sem, 0); 2315 mutex_init(&isert_np->mutex); 2316 INIT_LIST_HEAD(&isert_np->accepted); 2317 INIT_LIST_HEAD(&isert_np->pending); 2318 isert_np->np = np; 2319 2320 /* 2321 * Setup the np->np_sockaddr from the passed sockaddr setup 2322 * in iscsi_target_configfs.c code.. 2323 */ 2324 memcpy(&np->np_sockaddr, ksockaddr, 2325 sizeof(struct sockaddr_storage)); 2326 2327 isert_lid = isert_setup_id(isert_np); 2328 if (IS_ERR(isert_lid)) { 2329 ret = PTR_ERR(isert_lid); 2330 goto out; 2331 } 2332 2333 isert_np->cm_id = isert_lid; 2334 np->np_context = isert_np; 2335 2336 return 0; 2337 2338 out: 2339 kfree(isert_np); 2340 2341 return ret; 2342 } 2343 2344 static int 2345 isert_rdma_accept(struct isert_conn *isert_conn) 2346 { 2347 struct rdma_cm_id *cm_id = isert_conn->cm_id; 2348 struct rdma_conn_param cp; 2349 int ret; 2350 struct iser_cm_hdr rsp_hdr; 2351 2352 memset(&cp, 0, sizeof(struct rdma_conn_param)); 2353 cp.initiator_depth = isert_conn->initiator_depth; 2354 cp.retry_count = 7; 2355 cp.rnr_retry_count = 7; 2356 2357 memset(&rsp_hdr, 0, sizeof(rsp_hdr)); 2358 rsp_hdr.flags = ISERT_ZBVA_NOT_USED; 2359 if (!isert_conn->snd_w_inv) 2360 rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED; 2361 cp.private_data = (void *)&rsp_hdr; 2362 cp.private_data_len = sizeof(rsp_hdr); 2363 2364 ret = rdma_accept(cm_id, &cp); 2365 if (ret) { 2366 isert_err("rdma_accept() failed with: %d\n", ret); 2367 return ret; 2368 } 2369 2370 return 0; 2371 } 2372 2373 static int 2374 isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) 2375 { 2376 struct isert_conn *isert_conn = conn->context; 2377 int ret; 2378 2379 isert_info("before login_req comp conn: %p\n", isert_conn); 2380 ret = wait_for_completion_interruptible(&isert_conn->login_req_comp); 2381 if (ret) { 2382 isert_err("isert_conn %p interrupted before got login req\n", 2383 isert_conn); 2384 return ret; 2385 } 2386 reinit_completion(&isert_conn->login_req_comp); 2387 2388 /* 2389 * For login requests after the first PDU, isert_rx_login_req() will 2390 * kick schedule_delayed_work(&conn->login_work) as the packet is 2391 * received, which turns this callback from iscsi_target_do_login_rx() 2392 * into a NOP. 2393 */ 2394 if (!login->first_request) 2395 return 0; 2396 2397 isert_rx_login_req(isert_conn); 2398 2399 isert_info("before login_comp conn: %p\n", conn); 2400 ret = wait_for_completion_interruptible(&isert_conn->login_comp); 2401 if (ret) 2402 return ret; 2403 2404 isert_info("processing login->req: %p\n", login->req); 2405 2406 return 0; 2407 } 2408 2409 static void 2410 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, 2411 struct isert_conn *isert_conn) 2412 { 2413 struct rdma_cm_id *cm_id = isert_conn->cm_id; 2414 struct rdma_route *cm_route = &cm_id->route; 2415 2416 conn->login_family = np->np_sockaddr.ss_family; 2417 2418 conn->login_sockaddr = cm_route->addr.dst_addr; 2419 conn->local_sockaddr = cm_route->addr.src_addr; 2420 } 2421 2422 static int 2423 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) 2424 { 2425 struct isert_np *isert_np = np->np_context; 2426 struct isert_conn *isert_conn; 2427 int ret; 2428 2429 accept_wait: 2430 ret = down_interruptible(&isert_np->sem); 2431 if (ret) 2432 return -ENODEV; 2433 2434 spin_lock_bh(&np->np_thread_lock); 2435 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { 2436 spin_unlock_bh(&np->np_thread_lock); 2437 isert_dbg("np_thread_state %d\n", 2438 np->np_thread_state); 2439 /** 2440 * No point in stalling here when np_thread 2441 * is in state RESET/SHUTDOWN/EXIT - bail 2442 **/ 2443 return -ENODEV; 2444 } 2445 spin_unlock_bh(&np->np_thread_lock); 2446 2447 mutex_lock(&isert_np->mutex); 2448 if (list_empty(&isert_np->pending)) { 2449 mutex_unlock(&isert_np->mutex); 2450 goto accept_wait; 2451 } 2452 isert_conn = list_first_entry(&isert_np->pending, 2453 struct isert_conn, node); 2454 list_del_init(&isert_conn->node); 2455 mutex_unlock(&isert_np->mutex); 2456 2457 conn->context = isert_conn; 2458 isert_conn->conn = conn; 2459 isert_conn->state = ISER_CONN_BOUND; 2460 2461 isert_set_conn_info(np, conn, isert_conn); 2462 2463 isert_dbg("Processing isert_conn: %p\n", isert_conn); 2464 2465 return 0; 2466 } 2467 2468 static void 2469 isert_free_np(struct iscsi_np *np) 2470 { 2471 struct isert_np *isert_np = np->np_context; 2472 struct isert_conn *isert_conn, *n; 2473 2474 if (isert_np->cm_id) 2475 rdma_destroy_id(isert_np->cm_id); 2476 2477 /* 2478 * FIXME: At this point we don't have a good way to insure 2479 * that at this point we don't have hanging connections that 2480 * completed RDMA establishment but didn't start iscsi login 2481 * process. So work-around this by cleaning up what ever piled 2482 * up in accepted and pending lists. 2483 */ 2484 mutex_lock(&isert_np->mutex); 2485 if (!list_empty(&isert_np->pending)) { 2486 isert_info("Still have isert pending connections\n"); 2487 list_for_each_entry_safe(isert_conn, n, 2488 &isert_np->pending, 2489 node) { 2490 isert_info("cleaning isert_conn %p state (%d)\n", 2491 isert_conn, isert_conn->state); 2492 isert_connect_release(isert_conn); 2493 } 2494 } 2495 2496 if (!list_empty(&isert_np->accepted)) { 2497 isert_info("Still have isert accepted connections\n"); 2498 list_for_each_entry_safe(isert_conn, n, 2499 &isert_np->accepted, 2500 node) { 2501 isert_info("cleaning isert_conn %p state (%d)\n", 2502 isert_conn, isert_conn->state); 2503 isert_connect_release(isert_conn); 2504 } 2505 } 2506 mutex_unlock(&isert_np->mutex); 2507 2508 np->np_context = NULL; 2509 kfree(isert_np); 2510 } 2511 2512 static void isert_release_work(struct work_struct *work) 2513 { 2514 struct isert_conn *isert_conn = container_of(work, 2515 struct isert_conn, 2516 release_work); 2517 2518 isert_info("Starting release conn %p\n", isert_conn); 2519 2520 mutex_lock(&isert_conn->mutex); 2521 isert_conn->state = ISER_CONN_DOWN; 2522 mutex_unlock(&isert_conn->mutex); 2523 2524 isert_info("Destroying conn %p\n", isert_conn); 2525 isert_put_conn(isert_conn); 2526 } 2527 2528 static void 2529 isert_wait4logout(struct isert_conn *isert_conn) 2530 { 2531 struct iscsi_conn *conn = isert_conn->conn; 2532 2533 isert_info("conn %p\n", isert_conn); 2534 2535 if (isert_conn->logout_posted) { 2536 isert_info("conn %p wait for conn_logout_comp\n", isert_conn); 2537 wait_for_completion_timeout(&conn->conn_logout_comp, 2538 SECONDS_FOR_LOGOUT_COMP * HZ); 2539 } 2540 } 2541 2542 static void 2543 isert_wait4cmds(struct iscsi_conn *conn) 2544 { 2545 isert_info("iscsi_conn %p\n", conn); 2546 2547 if (conn->sess) { 2548 target_sess_cmd_list_set_waiting(conn->sess->se_sess); 2549 target_wait_for_sess_cmds(conn->sess->se_sess); 2550 } 2551 } 2552 2553 /** 2554 * isert_put_unsol_pending_cmds() - Drop commands waiting for 2555 * unsolicitate dataout 2556 * @conn: iscsi connection 2557 * 2558 * We might still have commands that are waiting for unsolicited 2559 * dataouts messages. We must put the extra reference on those 2560 * before blocking on the target_wait_for_session_cmds 2561 */ 2562 static void 2563 isert_put_unsol_pending_cmds(struct iscsi_conn *conn) 2564 { 2565 struct iscsi_cmd *cmd, *tmp; 2566 static LIST_HEAD(drop_cmd_list); 2567 2568 spin_lock_bh(&conn->cmd_lock); 2569 list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) { 2570 if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) && 2571 (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) && 2572 (cmd->write_data_done < cmd->se_cmd.data_length)) 2573 list_move_tail(&cmd->i_conn_node, &drop_cmd_list); 2574 } 2575 spin_unlock_bh(&conn->cmd_lock); 2576 2577 list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) { 2578 list_del_init(&cmd->i_conn_node); 2579 if (cmd->i_state != ISTATE_REMOVE) { 2580 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2581 2582 isert_info("conn %p dropping cmd %p\n", conn, cmd); 2583 isert_put_cmd(isert_cmd, true); 2584 } 2585 } 2586 } 2587 2588 static void isert_wait_conn(struct iscsi_conn *conn) 2589 { 2590 struct isert_conn *isert_conn = conn->context; 2591 2592 isert_info("Starting conn %p\n", isert_conn); 2593 2594 mutex_lock(&isert_conn->mutex); 2595 isert_conn_terminate(isert_conn); 2596 mutex_unlock(&isert_conn->mutex); 2597 2598 ib_drain_qp(isert_conn->qp); 2599 isert_put_unsol_pending_cmds(conn); 2600 isert_wait4cmds(conn); 2601 isert_wait4logout(isert_conn); 2602 2603 queue_work(isert_release_wq, &isert_conn->release_work); 2604 } 2605 2606 static void isert_free_conn(struct iscsi_conn *conn) 2607 { 2608 struct isert_conn *isert_conn = conn->context; 2609 2610 ib_drain_qp(isert_conn->qp); 2611 isert_put_conn(isert_conn); 2612 } 2613 2614 static void isert_get_rx_pdu(struct iscsi_conn *conn) 2615 { 2616 struct completion comp; 2617 2618 init_completion(&comp); 2619 2620 wait_for_completion_interruptible(&comp); 2621 } 2622 2623 static struct iscsit_transport iser_target_transport = { 2624 .name = "IB/iSER", 2625 .transport_type = ISCSI_INFINIBAND, 2626 .rdma_shutdown = true, 2627 .priv_size = sizeof(struct isert_cmd), 2628 .owner = THIS_MODULE, 2629 .iscsit_setup_np = isert_setup_np, 2630 .iscsit_accept_np = isert_accept_np, 2631 .iscsit_free_np = isert_free_np, 2632 .iscsit_wait_conn = isert_wait_conn, 2633 .iscsit_free_conn = isert_free_conn, 2634 .iscsit_get_login_rx = isert_get_login_rx, 2635 .iscsit_put_login_tx = isert_put_login_tx, 2636 .iscsit_immediate_queue = isert_immediate_queue, 2637 .iscsit_response_queue = isert_response_queue, 2638 .iscsit_get_dataout = isert_get_dataout, 2639 .iscsit_queue_data_in = isert_put_datain, 2640 .iscsit_queue_status = isert_put_response, 2641 .iscsit_aborted_task = isert_aborted_task, 2642 .iscsit_get_rx_pdu = isert_get_rx_pdu, 2643 .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops, 2644 }; 2645 2646 static int __init isert_init(void) 2647 { 2648 int ret; 2649 2650 isert_comp_wq = alloc_workqueue("isert_comp_wq", 2651 WQ_UNBOUND | WQ_HIGHPRI, 0); 2652 if (!isert_comp_wq) { 2653 isert_err("Unable to allocate isert_comp_wq\n"); 2654 ret = -ENOMEM; 2655 return -ENOMEM; 2656 } 2657 2658 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND, 2659 WQ_UNBOUND_MAX_ACTIVE); 2660 if (!isert_release_wq) { 2661 isert_err("Unable to allocate isert_release_wq\n"); 2662 ret = -ENOMEM; 2663 goto destroy_comp_wq; 2664 } 2665 2666 iscsit_register_transport(&iser_target_transport); 2667 isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n"); 2668 2669 return 0; 2670 2671 destroy_comp_wq: 2672 destroy_workqueue(isert_comp_wq); 2673 2674 return ret; 2675 } 2676 2677 static void __exit isert_exit(void) 2678 { 2679 flush_scheduled_work(); 2680 destroy_workqueue(isert_release_wq); 2681 destroy_workqueue(isert_comp_wq); 2682 iscsit_unregister_transport(&iser_target_transport); 2683 isert_info("iSER_TARGET[0] - Released iser_target_transport\n"); 2684 } 2685 2686 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); 2687 MODULE_VERSION("1.0"); 2688 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 2689 MODULE_LICENSE("GPL"); 2690 2691 module_init(isert_init); 2692 module_exit(isert_exit); 2693