1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /******************************************************************************* 3 * This file contains iSCSI extentions for RDMA (iSER) Verbs 4 * 5 * (c) Copyright 2013 Datera, Inc. 6 * 7 * Nicholas A. Bellinger <nab@linux-iscsi.org> 8 * 9 ****************************************************************************/ 10 11 #include <linux/string.h> 12 #include <linux/module.h> 13 #include <linux/scatterlist.h> 14 #include <linux/socket.h> 15 #include <linux/in.h> 16 #include <linux/in6.h> 17 #include <rdma/ib_verbs.h> 18 #include <rdma/ib_cm.h> 19 #include <rdma/rdma_cm.h> 20 #include <target/target_core_base.h> 21 #include <target/target_core_fabric.h> 22 #include <target/iscsi/iscsi_transport.h> 23 #include <linux/semaphore.h> 24 25 #include "ib_isert.h" 26 27 #define ISERT_MAX_CONN 8 28 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN) 29 #define ISER_MAX_TX_CQ_LEN \ 30 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN) 31 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \ 32 ISERT_MAX_CONN) 33 34 static int isert_debug_level; 35 module_param_named(debug_level, isert_debug_level, int, 0644); 36 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)"); 37 38 static DEFINE_MUTEX(device_list_mutex); 39 static LIST_HEAD(device_list); 40 static struct workqueue_struct *isert_comp_wq; 41 static struct workqueue_struct *isert_release_wq; 42 43 static int 44 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd); 45 static int 46 isert_login_post_recv(struct isert_conn *isert_conn); 47 static int 48 isert_rdma_accept(struct isert_conn *isert_conn); 49 struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); 50 51 static void isert_release_work(struct work_struct *work); 52 static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc); 53 static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc); 54 static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc); 55 static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc); 56 57 static inline bool 58 isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd) 59 { 60 return (conn->pi_support && 61 cmd->prot_op != TARGET_PROT_NORMAL); 62 } 63 64 65 static void 66 isert_qp_event_callback(struct ib_event *e, void *context) 67 { 68 struct isert_conn *isert_conn = context; 69 70 isert_err("%s (%d): conn %p\n", 71 ib_event_msg(e->event), e->event, isert_conn); 72 73 switch (e->event) { 74 case IB_EVENT_COMM_EST: 75 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST); 76 break; 77 case IB_EVENT_QP_LAST_WQE_REACHED: 78 isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n"); 79 break; 80 default: 81 break; 82 } 83 } 84 85 static struct isert_comp * 86 isert_comp_get(struct isert_conn *isert_conn) 87 { 88 struct isert_device *device = isert_conn->device; 89 struct isert_comp *comp; 90 int i, min = 0; 91 92 mutex_lock(&device_list_mutex); 93 for (i = 0; i < device->comps_used; i++) 94 if (device->comps[i].active_qps < 95 device->comps[min].active_qps) 96 min = i; 97 comp = &device->comps[min]; 98 comp->active_qps++; 99 mutex_unlock(&device_list_mutex); 100 101 isert_info("conn %p, using comp %p min_index: %d\n", 102 isert_conn, comp, min); 103 104 return comp; 105 } 106 107 static void 108 isert_comp_put(struct isert_comp *comp) 109 { 110 mutex_lock(&device_list_mutex); 111 comp->active_qps--; 112 mutex_unlock(&device_list_mutex); 113 } 114 115 static struct ib_qp * 116 isert_create_qp(struct isert_conn *isert_conn, 117 struct isert_comp *comp, 118 struct rdma_cm_id *cma_id) 119 { 120 struct isert_device *device = isert_conn->device; 121 struct ib_qp_init_attr attr; 122 int ret; 123 124 memset(&attr, 0, sizeof(struct ib_qp_init_attr)); 125 attr.event_handler = isert_qp_event_callback; 126 attr.qp_context = isert_conn; 127 attr.send_cq = comp->cq; 128 attr.recv_cq = comp->cq; 129 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1; 130 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1; 131 attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX; 132 attr.cap.max_send_sge = device->ib_device->attrs.max_send_sge; 133 attr.cap.max_recv_sge = 1; 134 attr.sq_sig_type = IB_SIGNAL_REQ_WR; 135 attr.qp_type = IB_QPT_RC; 136 if (device->pi_capable) 137 attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN; 138 139 ret = rdma_create_qp(cma_id, device->pd, &attr); 140 if (ret) { 141 isert_err("rdma_create_qp failed for cma_id %d\n", ret); 142 return ERR_PTR(ret); 143 } 144 145 return cma_id->qp; 146 } 147 148 static int 149 isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) 150 { 151 struct isert_comp *comp; 152 int ret; 153 154 comp = isert_comp_get(isert_conn); 155 isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id); 156 if (IS_ERR(isert_conn->qp)) { 157 ret = PTR_ERR(isert_conn->qp); 158 goto err; 159 } 160 161 return 0; 162 err: 163 isert_comp_put(comp); 164 return ret; 165 } 166 167 static int 168 isert_alloc_rx_descriptors(struct isert_conn *isert_conn) 169 { 170 struct isert_device *device = isert_conn->device; 171 struct ib_device *ib_dev = device->ib_device; 172 struct iser_rx_desc *rx_desc; 173 struct ib_sge *rx_sg; 174 u64 dma_addr; 175 int i, j; 176 177 isert_conn->rx_descs = kcalloc(ISERT_QP_MAX_RECV_DTOS, 178 sizeof(struct iser_rx_desc), 179 GFP_KERNEL); 180 if (!isert_conn->rx_descs) 181 return -ENOMEM; 182 183 rx_desc = isert_conn->rx_descs; 184 185 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 186 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc, 187 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 188 if (ib_dma_mapping_error(ib_dev, dma_addr)) 189 goto dma_map_fail; 190 191 rx_desc->dma_addr = dma_addr; 192 193 rx_sg = &rx_desc->rx_sg; 194 rx_sg->addr = rx_desc->dma_addr; 195 rx_sg->length = ISER_RX_PAYLOAD_SIZE; 196 rx_sg->lkey = device->pd->local_dma_lkey; 197 rx_desc->rx_cqe.done = isert_recv_done; 198 } 199 200 return 0; 201 202 dma_map_fail: 203 rx_desc = isert_conn->rx_descs; 204 for (j = 0; j < i; j++, rx_desc++) { 205 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 206 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 207 } 208 kfree(isert_conn->rx_descs); 209 isert_conn->rx_descs = NULL; 210 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn); 211 return -ENOMEM; 212 } 213 214 static void 215 isert_free_rx_descriptors(struct isert_conn *isert_conn) 216 { 217 struct ib_device *ib_dev = isert_conn->device->ib_device; 218 struct iser_rx_desc *rx_desc; 219 int i; 220 221 if (!isert_conn->rx_descs) 222 return; 223 224 rx_desc = isert_conn->rx_descs; 225 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 226 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 227 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 228 } 229 230 kfree(isert_conn->rx_descs); 231 isert_conn->rx_descs = NULL; 232 } 233 234 static void 235 isert_free_comps(struct isert_device *device) 236 { 237 int i; 238 239 for (i = 0; i < device->comps_used; i++) { 240 struct isert_comp *comp = &device->comps[i]; 241 242 if (comp->cq) 243 ib_free_cq(comp->cq); 244 } 245 kfree(device->comps); 246 } 247 248 static int 249 isert_alloc_comps(struct isert_device *device) 250 { 251 int i, max_cqe, ret = 0; 252 253 device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(), 254 device->ib_device->num_comp_vectors)); 255 256 isert_info("Using %d CQs, %s supports %d vectors support " 257 "pi_capable %d\n", 258 device->comps_used, dev_name(&device->ib_device->dev), 259 device->ib_device->num_comp_vectors, 260 device->pi_capable); 261 262 device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp), 263 GFP_KERNEL); 264 if (!device->comps) 265 return -ENOMEM; 266 267 max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe); 268 269 for (i = 0; i < device->comps_used; i++) { 270 struct isert_comp *comp = &device->comps[i]; 271 272 comp->device = device; 273 comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i, 274 IB_POLL_WORKQUEUE); 275 if (IS_ERR(comp->cq)) { 276 isert_err("Unable to allocate cq\n"); 277 ret = PTR_ERR(comp->cq); 278 comp->cq = NULL; 279 goto out_cq; 280 } 281 } 282 283 return 0; 284 out_cq: 285 isert_free_comps(device); 286 return ret; 287 } 288 289 static int 290 isert_create_device_ib_res(struct isert_device *device) 291 { 292 struct ib_device *ib_dev = device->ib_device; 293 int ret; 294 295 isert_dbg("devattr->max_send_sge: %d devattr->max_recv_sge %d\n", 296 ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge); 297 isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd); 298 299 ret = isert_alloc_comps(device); 300 if (ret) 301 goto out; 302 303 device->pd = ib_alloc_pd(ib_dev, 0); 304 if (IS_ERR(device->pd)) { 305 ret = PTR_ERR(device->pd); 306 isert_err("failed to allocate pd, device %p, ret=%d\n", 307 device, ret); 308 goto out_cq; 309 } 310 311 /* Check signature cap */ 312 device->pi_capable = ib_dev->attrs.device_cap_flags & 313 IB_DEVICE_INTEGRITY_HANDOVER ? true : false; 314 315 return 0; 316 317 out_cq: 318 isert_free_comps(device); 319 out: 320 if (ret > 0) 321 ret = -EINVAL; 322 return ret; 323 } 324 325 static void 326 isert_free_device_ib_res(struct isert_device *device) 327 { 328 isert_info("device %p\n", device); 329 330 ib_dealloc_pd(device->pd); 331 isert_free_comps(device); 332 } 333 334 static void 335 isert_device_put(struct isert_device *device) 336 { 337 mutex_lock(&device_list_mutex); 338 device->refcount--; 339 isert_info("device %p refcount %d\n", device, device->refcount); 340 if (!device->refcount) { 341 isert_free_device_ib_res(device); 342 list_del(&device->dev_node); 343 kfree(device); 344 } 345 mutex_unlock(&device_list_mutex); 346 } 347 348 static struct isert_device * 349 isert_device_get(struct rdma_cm_id *cma_id) 350 { 351 struct isert_device *device; 352 int ret; 353 354 mutex_lock(&device_list_mutex); 355 list_for_each_entry(device, &device_list, dev_node) { 356 if (device->ib_device->node_guid == cma_id->device->node_guid) { 357 device->refcount++; 358 isert_info("Found iser device %p refcount %d\n", 359 device, device->refcount); 360 mutex_unlock(&device_list_mutex); 361 return device; 362 } 363 } 364 365 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL); 366 if (!device) { 367 mutex_unlock(&device_list_mutex); 368 return ERR_PTR(-ENOMEM); 369 } 370 371 INIT_LIST_HEAD(&device->dev_node); 372 373 device->ib_device = cma_id->device; 374 ret = isert_create_device_ib_res(device); 375 if (ret) { 376 kfree(device); 377 mutex_unlock(&device_list_mutex); 378 return ERR_PTR(ret); 379 } 380 381 device->refcount++; 382 list_add_tail(&device->dev_node, &device_list); 383 isert_info("Created a new iser device %p refcount %d\n", 384 device, device->refcount); 385 mutex_unlock(&device_list_mutex); 386 387 return device; 388 } 389 390 static void 391 isert_init_conn(struct isert_conn *isert_conn) 392 { 393 isert_conn->state = ISER_CONN_INIT; 394 INIT_LIST_HEAD(&isert_conn->node); 395 init_completion(&isert_conn->login_comp); 396 init_completion(&isert_conn->login_req_comp); 397 init_waitqueue_head(&isert_conn->rem_wait); 398 kref_init(&isert_conn->kref); 399 mutex_init(&isert_conn->mutex); 400 INIT_WORK(&isert_conn->release_work, isert_release_work); 401 } 402 403 static void 404 isert_free_login_buf(struct isert_conn *isert_conn) 405 { 406 struct ib_device *ib_dev = isert_conn->device->ib_device; 407 408 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, 409 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); 410 kfree(isert_conn->login_rsp_buf); 411 412 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 413 ISER_RX_PAYLOAD_SIZE, 414 DMA_FROM_DEVICE); 415 kfree(isert_conn->login_req_buf); 416 } 417 418 static int 419 isert_alloc_login_buf(struct isert_conn *isert_conn, 420 struct ib_device *ib_dev) 421 { 422 int ret; 423 424 isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf), 425 GFP_KERNEL); 426 if (!isert_conn->login_req_buf) 427 return -ENOMEM; 428 429 isert_conn->login_req_dma = ib_dma_map_single(ib_dev, 430 isert_conn->login_req_buf, 431 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 432 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); 433 if (ret) { 434 isert_err("login_req_dma mapping error: %d\n", ret); 435 isert_conn->login_req_dma = 0; 436 goto out_free_login_req_buf; 437 } 438 439 isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL); 440 if (!isert_conn->login_rsp_buf) { 441 ret = -ENOMEM; 442 goto out_unmap_login_req_buf; 443 } 444 445 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, 446 isert_conn->login_rsp_buf, 447 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); 448 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); 449 if (ret) { 450 isert_err("login_rsp_dma mapping error: %d\n", ret); 451 isert_conn->login_rsp_dma = 0; 452 goto out_free_login_rsp_buf; 453 } 454 455 return 0; 456 457 out_free_login_rsp_buf: 458 kfree(isert_conn->login_rsp_buf); 459 out_unmap_login_req_buf: 460 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 461 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 462 out_free_login_req_buf: 463 kfree(isert_conn->login_req_buf); 464 return ret; 465 } 466 467 static void 468 isert_set_nego_params(struct isert_conn *isert_conn, 469 struct rdma_conn_param *param) 470 { 471 struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs; 472 473 /* Set max inflight RDMA READ requests */ 474 isert_conn->initiator_depth = min_t(u8, param->initiator_depth, 475 attr->max_qp_init_rd_atom); 476 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth); 477 478 if (param->private_data) { 479 u8 flags = *(u8 *)param->private_data; 480 481 /* 482 * use remote invalidation if the both initiator 483 * and the HCA support it 484 */ 485 isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) && 486 (attr->device_cap_flags & 487 IB_DEVICE_MEM_MGT_EXTENSIONS); 488 if (isert_conn->snd_w_inv) 489 isert_info("Using remote invalidation\n"); 490 } 491 } 492 493 static int 494 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 495 { 496 struct isert_np *isert_np = cma_id->context; 497 struct iscsi_np *np = isert_np->np; 498 struct isert_conn *isert_conn; 499 struct isert_device *device; 500 int ret = 0; 501 502 spin_lock_bh(&np->np_thread_lock); 503 if (!np->enabled) { 504 spin_unlock_bh(&np->np_thread_lock); 505 isert_dbg("iscsi_np is not enabled, reject connect request\n"); 506 return rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED); 507 } 508 spin_unlock_bh(&np->np_thread_lock); 509 510 isert_dbg("cma_id: %p, portal: %p\n", 511 cma_id, cma_id->context); 512 513 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL); 514 if (!isert_conn) 515 return -ENOMEM; 516 517 isert_init_conn(isert_conn); 518 isert_conn->cm_id = cma_id; 519 520 ret = isert_alloc_login_buf(isert_conn, cma_id->device); 521 if (ret) 522 goto out; 523 524 device = isert_device_get(cma_id); 525 if (IS_ERR(device)) { 526 ret = PTR_ERR(device); 527 goto out_rsp_dma_map; 528 } 529 isert_conn->device = device; 530 531 isert_set_nego_params(isert_conn, &event->param.conn); 532 533 ret = isert_conn_setup_qp(isert_conn, cma_id); 534 if (ret) 535 goto out_conn_dev; 536 537 ret = isert_login_post_recv(isert_conn); 538 if (ret) 539 goto out_conn_dev; 540 541 ret = isert_rdma_accept(isert_conn); 542 if (ret) 543 goto out_conn_dev; 544 545 mutex_lock(&isert_np->mutex); 546 list_add_tail(&isert_conn->node, &isert_np->accepted); 547 mutex_unlock(&isert_np->mutex); 548 549 return 0; 550 551 out_conn_dev: 552 isert_device_put(device); 553 out_rsp_dma_map: 554 isert_free_login_buf(isert_conn); 555 out: 556 kfree(isert_conn); 557 rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED); 558 return ret; 559 } 560 561 static void 562 isert_connect_release(struct isert_conn *isert_conn) 563 { 564 struct isert_device *device = isert_conn->device; 565 566 isert_dbg("conn %p\n", isert_conn); 567 568 BUG_ON(!device); 569 570 isert_free_rx_descriptors(isert_conn); 571 if (isert_conn->cm_id && 572 !isert_conn->dev_removed) 573 rdma_destroy_id(isert_conn->cm_id); 574 575 if (isert_conn->qp) { 576 struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context; 577 578 isert_comp_put(comp); 579 ib_destroy_qp(isert_conn->qp); 580 } 581 582 if (isert_conn->login_req_buf) 583 isert_free_login_buf(isert_conn); 584 585 isert_device_put(device); 586 587 if (isert_conn->dev_removed) 588 wake_up_interruptible(&isert_conn->rem_wait); 589 else 590 kfree(isert_conn); 591 } 592 593 static void 594 isert_connected_handler(struct rdma_cm_id *cma_id) 595 { 596 struct isert_conn *isert_conn = cma_id->qp->qp_context; 597 struct isert_np *isert_np = cma_id->context; 598 599 isert_info("conn %p\n", isert_conn); 600 601 mutex_lock(&isert_conn->mutex); 602 isert_conn->state = ISER_CONN_UP; 603 kref_get(&isert_conn->kref); 604 mutex_unlock(&isert_conn->mutex); 605 606 mutex_lock(&isert_np->mutex); 607 list_move_tail(&isert_conn->node, &isert_np->pending); 608 mutex_unlock(&isert_np->mutex); 609 610 isert_info("np %p: Allow accept_np to continue\n", isert_np); 611 up(&isert_np->sem); 612 } 613 614 static void 615 isert_release_kref(struct kref *kref) 616 { 617 struct isert_conn *isert_conn = container_of(kref, 618 struct isert_conn, kref); 619 620 isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm, 621 current->pid); 622 623 isert_connect_release(isert_conn); 624 } 625 626 static void 627 isert_put_conn(struct isert_conn *isert_conn) 628 { 629 kref_put(&isert_conn->kref, isert_release_kref); 630 } 631 632 static void 633 isert_handle_unbound_conn(struct isert_conn *isert_conn) 634 { 635 struct isert_np *isert_np = isert_conn->cm_id->context; 636 637 mutex_lock(&isert_np->mutex); 638 if (!list_empty(&isert_conn->node)) { 639 /* 640 * This means iscsi doesn't know this connection 641 * so schedule a cleanup ourselves 642 */ 643 list_del_init(&isert_conn->node); 644 isert_put_conn(isert_conn); 645 queue_work(isert_release_wq, &isert_conn->release_work); 646 } 647 mutex_unlock(&isert_np->mutex); 648 } 649 650 /** 651 * isert_conn_terminate() - Initiate connection termination 652 * @isert_conn: isert connection struct 653 * 654 * Notes: 655 * In case the connection state is BOUND, move state 656 * to TEMINATING and start teardown sequence (rdma_disconnect). 657 * In case the connection state is UP, complete flush as well. 658 * 659 * This routine must be called with mutex held. Thus it is 660 * safe to call multiple times. 661 */ 662 static void 663 isert_conn_terminate(struct isert_conn *isert_conn) 664 { 665 int err; 666 667 if (isert_conn->state >= ISER_CONN_TERMINATING) 668 return; 669 670 isert_info("Terminating conn %p state %d\n", 671 isert_conn, isert_conn->state); 672 isert_conn->state = ISER_CONN_TERMINATING; 673 err = rdma_disconnect(isert_conn->cm_id); 674 if (err) 675 isert_warn("Failed rdma_disconnect isert_conn %p\n", 676 isert_conn); 677 } 678 679 static int 680 isert_np_cma_handler(struct isert_np *isert_np, 681 enum rdma_cm_event_type event) 682 { 683 isert_dbg("%s (%d): isert np %p\n", 684 rdma_event_msg(event), event, isert_np); 685 686 switch (event) { 687 case RDMA_CM_EVENT_DEVICE_REMOVAL: 688 isert_np->cm_id = NULL; 689 break; 690 case RDMA_CM_EVENT_ADDR_CHANGE: 691 isert_np->cm_id = isert_setup_id(isert_np); 692 if (IS_ERR(isert_np->cm_id)) { 693 isert_err("isert np %p setup id failed: %ld\n", 694 isert_np, PTR_ERR(isert_np->cm_id)); 695 isert_np->cm_id = NULL; 696 } 697 break; 698 default: 699 isert_err("isert np %p Unexpected event %d\n", 700 isert_np, event); 701 } 702 703 return -1; 704 } 705 706 static int 707 isert_disconnected_handler(struct rdma_cm_id *cma_id, 708 enum rdma_cm_event_type event) 709 { 710 struct isert_conn *isert_conn = cma_id->qp->qp_context; 711 712 mutex_lock(&isert_conn->mutex); 713 switch (isert_conn->state) { 714 case ISER_CONN_TERMINATING: 715 break; 716 case ISER_CONN_UP: 717 isert_conn_terminate(isert_conn); 718 ib_drain_qp(isert_conn->qp); 719 isert_handle_unbound_conn(isert_conn); 720 break; 721 case ISER_CONN_BOUND: 722 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */ 723 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 724 break; 725 default: 726 isert_warn("conn %p terminating in state %d\n", 727 isert_conn, isert_conn->state); 728 } 729 mutex_unlock(&isert_conn->mutex); 730 731 return 0; 732 } 733 734 static int 735 isert_connect_error(struct rdma_cm_id *cma_id) 736 { 737 struct isert_conn *isert_conn = cma_id->qp->qp_context; 738 739 ib_drain_qp(isert_conn->qp); 740 list_del_init(&isert_conn->node); 741 isert_conn->cm_id = NULL; 742 isert_put_conn(isert_conn); 743 744 return -1; 745 } 746 747 static int 748 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 749 { 750 struct isert_np *isert_np = cma_id->context; 751 struct isert_conn *isert_conn; 752 int ret = 0; 753 754 isert_info("%s (%d): status %d id %p np %p\n", 755 rdma_event_msg(event->event), event->event, 756 event->status, cma_id, cma_id->context); 757 758 if (isert_np->cm_id == cma_id) 759 return isert_np_cma_handler(cma_id->context, event->event); 760 761 switch (event->event) { 762 case RDMA_CM_EVENT_CONNECT_REQUEST: 763 ret = isert_connect_request(cma_id, event); 764 if (ret) 765 isert_err("failed handle connect request %d\n", ret); 766 break; 767 case RDMA_CM_EVENT_ESTABLISHED: 768 isert_connected_handler(cma_id); 769 break; 770 case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */ 771 case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */ 772 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ 773 ret = isert_disconnected_handler(cma_id, event->event); 774 break; 775 case RDMA_CM_EVENT_DEVICE_REMOVAL: 776 isert_conn = cma_id->qp->qp_context; 777 isert_conn->dev_removed = true; 778 isert_disconnected_handler(cma_id, event->event); 779 wait_event_interruptible(isert_conn->rem_wait, 780 isert_conn->state == ISER_CONN_DOWN); 781 kfree(isert_conn); 782 /* 783 * return non-zero from the callback to destroy 784 * the rdma cm id 785 */ 786 return 1; 787 case RDMA_CM_EVENT_REJECTED: 788 isert_info("Connection rejected: %s\n", 789 rdma_reject_msg(cma_id, event->status)); 790 /* fall through */ 791 case RDMA_CM_EVENT_UNREACHABLE: 792 case RDMA_CM_EVENT_CONNECT_ERROR: 793 ret = isert_connect_error(cma_id); 794 break; 795 default: 796 isert_err("Unhandled RDMA CMA event: %d\n", event->event); 797 break; 798 } 799 800 return ret; 801 } 802 803 static int 804 isert_post_recvm(struct isert_conn *isert_conn, u32 count) 805 { 806 struct ib_recv_wr *rx_wr; 807 int i, ret; 808 struct iser_rx_desc *rx_desc; 809 810 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { 811 rx_desc = &isert_conn->rx_descs[i]; 812 813 rx_wr->wr_cqe = &rx_desc->rx_cqe; 814 rx_wr->sg_list = &rx_desc->rx_sg; 815 rx_wr->num_sge = 1; 816 rx_wr->next = rx_wr + 1; 817 rx_desc->in_use = false; 818 } 819 rx_wr--; 820 rx_wr->next = NULL; /* mark end of work requests list */ 821 822 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, NULL); 823 if (ret) 824 isert_err("ib_post_recv() failed with ret: %d\n", ret); 825 826 return ret; 827 } 828 829 static int 830 isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc) 831 { 832 struct ib_recv_wr rx_wr; 833 int ret; 834 835 if (!rx_desc->in_use) { 836 /* 837 * if the descriptor is not in-use we already reposted it 838 * for recv, so just silently return 839 */ 840 return 0; 841 } 842 843 rx_desc->in_use = false; 844 rx_wr.wr_cqe = &rx_desc->rx_cqe; 845 rx_wr.sg_list = &rx_desc->rx_sg; 846 rx_wr.num_sge = 1; 847 rx_wr.next = NULL; 848 849 ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL); 850 if (ret) 851 isert_err("ib_post_recv() failed with ret: %d\n", ret); 852 853 return ret; 854 } 855 856 static int 857 isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) 858 { 859 struct ib_device *ib_dev = isert_conn->cm_id->device; 860 struct ib_send_wr send_wr; 861 int ret; 862 863 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr, 864 ISER_HEADERS_LEN, DMA_TO_DEVICE); 865 866 tx_desc->tx_cqe.done = isert_login_send_done; 867 868 send_wr.next = NULL; 869 send_wr.wr_cqe = &tx_desc->tx_cqe; 870 send_wr.sg_list = tx_desc->tx_sg; 871 send_wr.num_sge = tx_desc->num_sge; 872 send_wr.opcode = IB_WR_SEND; 873 send_wr.send_flags = IB_SEND_SIGNALED; 874 875 ret = ib_post_send(isert_conn->qp, &send_wr, NULL); 876 if (ret) 877 isert_err("ib_post_send() failed, ret: %d\n", ret); 878 879 return ret; 880 } 881 882 static void 883 __isert_create_send_desc(struct isert_device *device, 884 struct iser_tx_desc *tx_desc) 885 { 886 887 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl)); 888 tx_desc->iser_header.flags = ISCSI_CTRL; 889 890 tx_desc->num_sge = 1; 891 892 if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) { 893 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; 894 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc); 895 } 896 } 897 898 static void 899 isert_create_send_desc(struct isert_conn *isert_conn, 900 struct isert_cmd *isert_cmd, 901 struct iser_tx_desc *tx_desc) 902 { 903 struct isert_device *device = isert_conn->device; 904 struct ib_device *ib_dev = device->ib_device; 905 906 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, 907 ISER_HEADERS_LEN, DMA_TO_DEVICE); 908 909 __isert_create_send_desc(device, tx_desc); 910 } 911 912 static int 913 isert_init_tx_hdrs(struct isert_conn *isert_conn, 914 struct iser_tx_desc *tx_desc) 915 { 916 struct isert_device *device = isert_conn->device; 917 struct ib_device *ib_dev = device->ib_device; 918 u64 dma_addr; 919 920 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc, 921 ISER_HEADERS_LEN, DMA_TO_DEVICE); 922 if (ib_dma_mapping_error(ib_dev, dma_addr)) { 923 isert_err("ib_dma_mapping_error() failed\n"); 924 return -ENOMEM; 925 } 926 927 tx_desc->dma_addr = dma_addr; 928 tx_desc->tx_sg[0].addr = tx_desc->dma_addr; 929 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; 930 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; 931 932 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n", 933 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length, 934 tx_desc->tx_sg[0].lkey); 935 936 return 0; 937 } 938 939 static void 940 isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 941 struct ib_send_wr *send_wr) 942 { 943 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; 944 945 tx_desc->tx_cqe.done = isert_send_done; 946 send_wr->wr_cqe = &tx_desc->tx_cqe; 947 948 if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) { 949 send_wr->opcode = IB_WR_SEND_WITH_INV; 950 send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey; 951 } else { 952 send_wr->opcode = IB_WR_SEND; 953 } 954 955 send_wr->sg_list = &tx_desc->tx_sg[0]; 956 send_wr->num_sge = isert_cmd->tx_desc.num_sge; 957 send_wr->send_flags = IB_SEND_SIGNALED; 958 } 959 960 static int 961 isert_login_post_recv(struct isert_conn *isert_conn) 962 { 963 struct ib_recv_wr rx_wr; 964 struct ib_sge sge; 965 int ret; 966 967 memset(&sge, 0, sizeof(struct ib_sge)); 968 sge.addr = isert_conn->login_req_dma; 969 sge.length = ISER_RX_PAYLOAD_SIZE; 970 sge.lkey = isert_conn->device->pd->local_dma_lkey; 971 972 isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", 973 sge.addr, sge.length, sge.lkey); 974 975 isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done; 976 977 memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); 978 rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe; 979 rx_wr.sg_list = &sge; 980 rx_wr.num_sge = 1; 981 982 ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL); 983 if (ret) 984 isert_err("ib_post_recv() failed: %d\n", ret); 985 986 return ret; 987 } 988 989 static int 990 isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, 991 u32 length) 992 { 993 struct isert_conn *isert_conn = conn->context; 994 struct isert_device *device = isert_conn->device; 995 struct ib_device *ib_dev = device->ib_device; 996 struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc; 997 int ret; 998 999 __isert_create_send_desc(device, tx_desc); 1000 1001 memcpy(&tx_desc->iscsi_header, &login->rsp[0], 1002 sizeof(struct iscsi_hdr)); 1003 1004 isert_init_tx_hdrs(isert_conn, tx_desc); 1005 1006 if (length > 0) { 1007 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1]; 1008 1009 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma, 1010 length, DMA_TO_DEVICE); 1011 1012 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length); 1013 1014 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma, 1015 length, DMA_TO_DEVICE); 1016 1017 tx_dsg->addr = isert_conn->login_rsp_dma; 1018 tx_dsg->length = length; 1019 tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey; 1020 tx_desc->num_sge = 2; 1021 } 1022 if (!login->login_failed) { 1023 if (login->login_complete) { 1024 ret = isert_alloc_rx_descriptors(isert_conn); 1025 if (ret) 1026 return ret; 1027 1028 ret = isert_post_recvm(isert_conn, 1029 ISERT_QP_MAX_RECV_DTOS); 1030 if (ret) 1031 return ret; 1032 1033 /* Now we are in FULL_FEATURE phase */ 1034 mutex_lock(&isert_conn->mutex); 1035 isert_conn->state = ISER_CONN_FULL_FEATURE; 1036 mutex_unlock(&isert_conn->mutex); 1037 goto post_send; 1038 } 1039 1040 ret = isert_login_post_recv(isert_conn); 1041 if (ret) 1042 return ret; 1043 } 1044 post_send: 1045 ret = isert_login_post_send(isert_conn, tx_desc); 1046 if (ret) 1047 return ret; 1048 1049 return 0; 1050 } 1051 1052 static void 1053 isert_rx_login_req(struct isert_conn *isert_conn) 1054 { 1055 struct iser_rx_desc *rx_desc = isert_conn->login_req_buf; 1056 int rx_buflen = isert_conn->login_req_len; 1057 struct iscsi_conn *conn = isert_conn->conn; 1058 struct iscsi_login *login = conn->conn_login; 1059 int size; 1060 1061 isert_info("conn %p\n", isert_conn); 1062 1063 WARN_ON_ONCE(!login); 1064 1065 if (login->first_request) { 1066 struct iscsi_login_req *login_req = 1067 (struct iscsi_login_req *)&rx_desc->iscsi_header; 1068 /* 1069 * Setup the initial iscsi_login values from the leading 1070 * login request PDU. 1071 */ 1072 login->leading_connection = (!login_req->tsih) ? 1 : 0; 1073 login->current_stage = 1074 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) 1075 >> 2; 1076 login->version_min = login_req->min_version; 1077 login->version_max = login_req->max_version; 1078 memcpy(login->isid, login_req->isid, 6); 1079 login->cmd_sn = be32_to_cpu(login_req->cmdsn); 1080 login->init_task_tag = login_req->itt; 1081 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); 1082 login->cid = be16_to_cpu(login_req->cid); 1083 login->tsih = be16_to_cpu(login_req->tsih); 1084 } 1085 1086 memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); 1087 1088 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); 1089 isert_dbg("Using login payload size: %d, rx_buflen: %d " 1090 "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen, 1091 MAX_KEY_VALUE_PAIRS); 1092 memcpy(login->req_buf, &rx_desc->data[0], size); 1093 1094 if (login->first_request) { 1095 complete(&isert_conn->login_comp); 1096 return; 1097 } 1098 schedule_delayed_work(&conn->login_work, 0); 1099 } 1100 1101 static struct iscsi_cmd 1102 *isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc) 1103 { 1104 struct isert_conn *isert_conn = conn->context; 1105 struct isert_cmd *isert_cmd; 1106 struct iscsi_cmd *cmd; 1107 1108 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 1109 if (!cmd) { 1110 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n"); 1111 return NULL; 1112 } 1113 isert_cmd = iscsit_priv_cmd(cmd); 1114 isert_cmd->conn = isert_conn; 1115 isert_cmd->iscsi_cmd = cmd; 1116 isert_cmd->rx_desc = rx_desc; 1117 1118 return cmd; 1119 } 1120 1121 static int 1122 isert_handle_scsi_cmd(struct isert_conn *isert_conn, 1123 struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd, 1124 struct iser_rx_desc *rx_desc, unsigned char *buf) 1125 { 1126 struct iscsi_conn *conn = isert_conn->conn; 1127 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; 1128 int imm_data, imm_data_len, unsol_data, sg_nents, rc; 1129 bool dump_payload = false; 1130 unsigned int data_len; 1131 1132 rc = iscsit_setup_scsi_cmd(conn, cmd, buf); 1133 if (rc < 0) 1134 return rc; 1135 1136 imm_data = cmd->immediate_data; 1137 imm_data_len = cmd->first_burst_len; 1138 unsol_data = cmd->unsolicited_data; 1139 data_len = cmd->se_cmd.data_length; 1140 1141 if (imm_data && imm_data_len == data_len) 1142 cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1143 rc = iscsit_process_scsi_cmd(conn, cmd, hdr); 1144 if (rc < 0) { 1145 return 0; 1146 } else if (rc > 0) { 1147 dump_payload = true; 1148 goto sequence_cmd; 1149 } 1150 1151 if (!imm_data) 1152 return 0; 1153 1154 if (imm_data_len != data_len) { 1155 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); 1156 sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents, 1157 &rx_desc->data[0], imm_data_len); 1158 isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n", 1159 sg_nents, imm_data_len); 1160 } else { 1161 sg_init_table(&isert_cmd->sg, 1); 1162 cmd->se_cmd.t_data_sg = &isert_cmd->sg; 1163 cmd->se_cmd.t_data_nents = 1; 1164 sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len); 1165 isert_dbg("Transfer Immediate imm_data_len: %d\n", 1166 imm_data_len); 1167 } 1168 1169 cmd->write_data_done += imm_data_len; 1170 1171 if (cmd->write_data_done == cmd->se_cmd.data_length) { 1172 spin_lock_bh(&cmd->istate_lock); 1173 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1174 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1175 spin_unlock_bh(&cmd->istate_lock); 1176 } 1177 1178 sequence_cmd: 1179 rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); 1180 1181 if (!rc && dump_payload == false && unsol_data) 1182 iscsit_set_unsolicited_dataout(cmd); 1183 else if (dump_payload && imm_data) 1184 target_put_sess_cmd(&cmd->se_cmd); 1185 1186 return 0; 1187 } 1188 1189 static int 1190 isert_handle_iscsi_dataout(struct isert_conn *isert_conn, 1191 struct iser_rx_desc *rx_desc, unsigned char *buf) 1192 { 1193 struct scatterlist *sg_start; 1194 struct iscsi_conn *conn = isert_conn->conn; 1195 struct iscsi_cmd *cmd = NULL; 1196 struct iscsi_data *hdr = (struct iscsi_data *)buf; 1197 u32 unsol_data_len = ntoh24(hdr->dlength); 1198 int rc, sg_nents, sg_off, page_off; 1199 1200 rc = iscsit_check_dataout_hdr(conn, buf, &cmd); 1201 if (rc < 0) 1202 return rc; 1203 else if (!cmd) 1204 return 0; 1205 /* 1206 * FIXME: Unexpected unsolicited_data out 1207 */ 1208 if (!cmd->unsolicited_data) { 1209 isert_err("Received unexpected solicited data payload\n"); 1210 dump_stack(); 1211 return -1; 1212 } 1213 1214 isert_dbg("Unsolicited DataOut unsol_data_len: %u, " 1215 "write_data_done: %u, data_length: %u\n", 1216 unsol_data_len, cmd->write_data_done, 1217 cmd->se_cmd.data_length); 1218 1219 sg_off = cmd->write_data_done / PAGE_SIZE; 1220 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 1221 sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE)); 1222 page_off = cmd->write_data_done % PAGE_SIZE; 1223 /* 1224 * FIXME: Non page-aligned unsolicited_data out 1225 */ 1226 if (page_off) { 1227 isert_err("unexpected non-page aligned data payload\n"); 1228 dump_stack(); 1229 return -1; 1230 } 1231 isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u " 1232 "sg_nents: %u from %p %u\n", sg_start, sg_off, 1233 sg_nents, &rx_desc->data[0], unsol_data_len); 1234 1235 sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], 1236 unsol_data_len); 1237 1238 rc = iscsit_check_dataout_payload(cmd, hdr, false); 1239 if (rc < 0) 1240 return rc; 1241 1242 /* 1243 * multiple data-outs on the same command can arrive - 1244 * so post the buffer before hand 1245 */ 1246 rc = isert_post_recv(isert_conn, rx_desc); 1247 if (rc) { 1248 isert_err("ib_post_recv failed with %d\n", rc); 1249 return rc; 1250 } 1251 return 0; 1252 } 1253 1254 static int 1255 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1256 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, 1257 unsigned char *buf) 1258 { 1259 struct iscsi_conn *conn = isert_conn->conn; 1260 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf; 1261 int rc; 1262 1263 rc = iscsit_setup_nop_out(conn, cmd, hdr); 1264 if (rc < 0) 1265 return rc; 1266 /* 1267 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload 1268 */ 1269 1270 return iscsit_process_nop_out(conn, cmd, hdr); 1271 } 1272 1273 static int 1274 isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1275 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, 1276 struct iscsi_text *hdr) 1277 { 1278 struct iscsi_conn *conn = isert_conn->conn; 1279 u32 payload_length = ntoh24(hdr->dlength); 1280 int rc; 1281 unsigned char *text_in = NULL; 1282 1283 rc = iscsit_setup_text_cmd(conn, cmd, hdr); 1284 if (rc < 0) 1285 return rc; 1286 1287 if (payload_length) { 1288 text_in = kzalloc(payload_length, GFP_KERNEL); 1289 if (!text_in) 1290 return -ENOMEM; 1291 } 1292 cmd->text_in_ptr = text_in; 1293 1294 memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length); 1295 1296 return iscsit_process_text_cmd(conn, cmd, hdr); 1297 } 1298 1299 static int 1300 isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, 1301 uint32_t read_stag, uint64_t read_va, 1302 uint32_t write_stag, uint64_t write_va) 1303 { 1304 struct iscsi_hdr *hdr = &rx_desc->iscsi_header; 1305 struct iscsi_conn *conn = isert_conn->conn; 1306 struct iscsi_cmd *cmd; 1307 struct isert_cmd *isert_cmd; 1308 int ret = -EINVAL; 1309 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK); 1310 1311 if (conn->sess->sess_ops->SessionType && 1312 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { 1313 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery," 1314 " ignoring\n", opcode); 1315 return 0; 1316 } 1317 1318 switch (opcode) { 1319 case ISCSI_OP_SCSI_CMD: 1320 cmd = isert_allocate_cmd(conn, rx_desc); 1321 if (!cmd) 1322 break; 1323 1324 isert_cmd = iscsit_priv_cmd(cmd); 1325 isert_cmd->read_stag = read_stag; 1326 isert_cmd->read_va = read_va; 1327 isert_cmd->write_stag = write_stag; 1328 isert_cmd->write_va = write_va; 1329 isert_cmd->inv_rkey = read_stag ? read_stag : write_stag; 1330 1331 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd, 1332 rx_desc, (unsigned char *)hdr); 1333 break; 1334 case ISCSI_OP_NOOP_OUT: 1335 cmd = isert_allocate_cmd(conn, rx_desc); 1336 if (!cmd) 1337 break; 1338 1339 isert_cmd = iscsit_priv_cmd(cmd); 1340 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd, 1341 rx_desc, (unsigned char *)hdr); 1342 break; 1343 case ISCSI_OP_SCSI_DATA_OUT: 1344 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc, 1345 (unsigned char *)hdr); 1346 break; 1347 case ISCSI_OP_SCSI_TMFUNC: 1348 cmd = isert_allocate_cmd(conn, rx_desc); 1349 if (!cmd) 1350 break; 1351 1352 ret = iscsit_handle_task_mgt_cmd(conn, cmd, 1353 (unsigned char *)hdr); 1354 break; 1355 case ISCSI_OP_LOGOUT: 1356 cmd = isert_allocate_cmd(conn, rx_desc); 1357 if (!cmd) 1358 break; 1359 1360 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); 1361 break; 1362 case ISCSI_OP_TEXT: 1363 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) 1364 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); 1365 else 1366 cmd = isert_allocate_cmd(conn, rx_desc); 1367 1368 if (!cmd) 1369 break; 1370 1371 isert_cmd = iscsit_priv_cmd(cmd); 1372 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd, 1373 rx_desc, (struct iscsi_text *)hdr); 1374 break; 1375 default: 1376 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode); 1377 dump_stack(); 1378 break; 1379 } 1380 1381 return ret; 1382 } 1383 1384 static void 1385 isert_print_wc(struct ib_wc *wc, const char *type) 1386 { 1387 if (wc->status != IB_WC_WR_FLUSH_ERR) 1388 isert_err("%s failure: %s (%d) vend_err %x\n", type, 1389 ib_wc_status_msg(wc->status), wc->status, 1390 wc->vendor_err); 1391 else 1392 isert_dbg("%s failure: %s (%d)\n", type, 1393 ib_wc_status_msg(wc->status), wc->status); 1394 } 1395 1396 static void 1397 isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) 1398 { 1399 struct isert_conn *isert_conn = wc->qp->qp_context; 1400 struct ib_device *ib_dev = isert_conn->cm_id->device; 1401 struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe); 1402 struct iscsi_hdr *hdr = &rx_desc->iscsi_header; 1403 struct iser_ctrl *iser_ctrl = &rx_desc->iser_header; 1404 uint64_t read_va = 0, write_va = 0; 1405 uint32_t read_stag = 0, write_stag = 0; 1406 1407 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1408 isert_print_wc(wc, "recv"); 1409 if (wc->status != IB_WC_WR_FLUSH_ERR) 1410 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1411 return; 1412 } 1413 1414 rx_desc->in_use = true; 1415 1416 ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr, 1417 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1418 1419 isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", 1420 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags, 1421 (int)(wc->byte_len - ISER_HEADERS_LEN)); 1422 1423 switch (iser_ctrl->flags & 0xF0) { 1424 case ISCSI_CTRL: 1425 if (iser_ctrl->flags & ISER_RSV) { 1426 read_stag = be32_to_cpu(iser_ctrl->read_stag); 1427 read_va = be64_to_cpu(iser_ctrl->read_va); 1428 isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n", 1429 read_stag, (unsigned long long)read_va); 1430 } 1431 if (iser_ctrl->flags & ISER_WSV) { 1432 write_stag = be32_to_cpu(iser_ctrl->write_stag); 1433 write_va = be64_to_cpu(iser_ctrl->write_va); 1434 isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n", 1435 write_stag, (unsigned long long)write_va); 1436 } 1437 1438 isert_dbg("ISER ISCSI_CTRL PDU\n"); 1439 break; 1440 case ISER_HELLO: 1441 isert_err("iSER Hello message\n"); 1442 break; 1443 default: 1444 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags); 1445 break; 1446 } 1447 1448 isert_rx_opcode(isert_conn, rx_desc, 1449 read_stag, read_va, write_stag, write_va); 1450 1451 ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr, 1452 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1453 } 1454 1455 static void 1456 isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) 1457 { 1458 struct isert_conn *isert_conn = wc->qp->qp_context; 1459 struct ib_device *ib_dev = isert_conn->device->ib_device; 1460 1461 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1462 isert_print_wc(wc, "login recv"); 1463 return; 1464 } 1465 1466 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma, 1467 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1468 1469 isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN; 1470 1471 if (isert_conn->conn) { 1472 struct iscsi_login *login = isert_conn->conn->conn_login; 1473 1474 if (login && !login->first_request) 1475 isert_rx_login_req(isert_conn); 1476 } 1477 1478 mutex_lock(&isert_conn->mutex); 1479 complete(&isert_conn->login_req_comp); 1480 mutex_unlock(&isert_conn->mutex); 1481 1482 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma, 1483 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1484 } 1485 1486 static void 1487 isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn) 1488 { 1489 struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd; 1490 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd); 1491 1492 if (!cmd->rw.nr_ops) 1493 return; 1494 1495 if (isert_prot_cmd(conn, se_cmd)) { 1496 rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp, 1497 conn->cm_id->port_num, se_cmd->t_data_sg, 1498 se_cmd->t_data_nents, se_cmd->t_prot_sg, 1499 se_cmd->t_prot_nents, dir); 1500 } else { 1501 rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num, 1502 se_cmd->t_data_sg, se_cmd->t_data_nents, dir); 1503 } 1504 1505 cmd->rw.nr_ops = 0; 1506 } 1507 1508 static void 1509 isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) 1510 { 1511 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1512 struct isert_conn *isert_conn = isert_cmd->conn; 1513 struct iscsi_conn *conn = isert_conn->conn; 1514 struct iscsi_text_rsp *hdr; 1515 1516 isert_dbg("Cmd %p\n", isert_cmd); 1517 1518 switch (cmd->iscsi_opcode) { 1519 case ISCSI_OP_SCSI_CMD: 1520 spin_lock_bh(&conn->cmd_lock); 1521 if (!list_empty(&cmd->i_conn_node)) 1522 list_del_init(&cmd->i_conn_node); 1523 spin_unlock_bh(&conn->cmd_lock); 1524 1525 if (cmd->data_direction == DMA_TO_DEVICE) { 1526 iscsit_stop_dataout_timer(cmd); 1527 /* 1528 * Check for special case during comp_err where 1529 * WRITE_PENDING has been handed off from core, 1530 * but requires an extra target_put_sess_cmd() 1531 * before transport_generic_free_cmd() below. 1532 */ 1533 if (comp_err && 1534 cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) { 1535 struct se_cmd *se_cmd = &cmd->se_cmd; 1536 1537 target_put_sess_cmd(se_cmd); 1538 } 1539 } 1540 1541 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1542 transport_generic_free_cmd(&cmd->se_cmd, 0); 1543 break; 1544 case ISCSI_OP_SCSI_TMFUNC: 1545 spin_lock_bh(&conn->cmd_lock); 1546 if (!list_empty(&cmd->i_conn_node)) 1547 list_del_init(&cmd->i_conn_node); 1548 spin_unlock_bh(&conn->cmd_lock); 1549 1550 transport_generic_free_cmd(&cmd->se_cmd, 0); 1551 break; 1552 case ISCSI_OP_REJECT: 1553 case ISCSI_OP_NOOP_OUT: 1554 case ISCSI_OP_TEXT: 1555 hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; 1556 /* If the continue bit is on, keep the command alive */ 1557 if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE) 1558 break; 1559 1560 spin_lock_bh(&conn->cmd_lock); 1561 if (!list_empty(&cmd->i_conn_node)) 1562 list_del_init(&cmd->i_conn_node); 1563 spin_unlock_bh(&conn->cmd_lock); 1564 1565 /* 1566 * Handle special case for REJECT when iscsi_add_reject*() has 1567 * overwritten the original iscsi_opcode assignment, and the 1568 * associated cmd->se_cmd needs to be released. 1569 */ 1570 if (cmd->se_cmd.se_tfo != NULL) { 1571 isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n", 1572 cmd->iscsi_opcode); 1573 transport_generic_free_cmd(&cmd->se_cmd, 0); 1574 break; 1575 } 1576 /* fall through */ 1577 default: 1578 iscsit_release_cmd(cmd); 1579 break; 1580 } 1581 } 1582 1583 static void 1584 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) 1585 { 1586 if (tx_desc->dma_addr != 0) { 1587 isert_dbg("unmap single for tx_desc->dma_addr\n"); 1588 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr, 1589 ISER_HEADERS_LEN, DMA_TO_DEVICE); 1590 tx_desc->dma_addr = 0; 1591 } 1592 } 1593 1594 static void 1595 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, 1596 struct ib_device *ib_dev, bool comp_err) 1597 { 1598 if (isert_cmd->pdu_buf_dma != 0) { 1599 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n"); 1600 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma, 1601 isert_cmd->pdu_buf_len, DMA_TO_DEVICE); 1602 isert_cmd->pdu_buf_dma = 0; 1603 } 1604 1605 isert_unmap_tx_desc(tx_desc, ib_dev); 1606 isert_put_cmd(isert_cmd, comp_err); 1607 } 1608 1609 static int 1610 isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr) 1611 { 1612 struct ib_mr_status mr_status; 1613 int ret; 1614 1615 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); 1616 if (ret) { 1617 isert_err("ib_check_mr_status failed, ret %d\n", ret); 1618 goto fail_mr_status; 1619 } 1620 1621 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { 1622 u64 sec_offset_err; 1623 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8; 1624 1625 switch (mr_status.sig_err.err_type) { 1626 case IB_SIG_BAD_GUARD: 1627 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 1628 break; 1629 case IB_SIG_BAD_REFTAG: 1630 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 1631 break; 1632 case IB_SIG_BAD_APPTAG: 1633 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; 1634 break; 1635 } 1636 sec_offset_err = mr_status.sig_err.sig_err_offset; 1637 do_div(sec_offset_err, block_size); 1638 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba; 1639 1640 isert_err("PI error found type %d at sector 0x%llx " 1641 "expected 0x%x vs actual 0x%x\n", 1642 mr_status.sig_err.err_type, 1643 (unsigned long long)se_cmd->bad_sector, 1644 mr_status.sig_err.expected, 1645 mr_status.sig_err.actual); 1646 ret = 1; 1647 } 1648 1649 fail_mr_status: 1650 return ret; 1651 } 1652 1653 static void 1654 isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc) 1655 { 1656 struct isert_conn *isert_conn = wc->qp->qp_context; 1657 struct isert_device *device = isert_conn->device; 1658 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); 1659 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc); 1660 struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd; 1661 int ret = 0; 1662 1663 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1664 isert_print_wc(wc, "rdma write"); 1665 if (wc->status != IB_WC_WR_FLUSH_ERR) 1666 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1667 isert_completion_put(desc, isert_cmd, device->ib_device, true); 1668 return; 1669 } 1670 1671 isert_dbg("Cmd %p\n", isert_cmd); 1672 1673 ret = isert_check_pi_status(cmd, isert_cmd->rw.reg->mr); 1674 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1675 1676 if (ret) { 1677 /* 1678 * transport_generic_request_failure() expects to have 1679 * plus two references to handle queue-full, so re-add 1680 * one here as target-core will have already dropped 1681 * it after the first isert_put_datain() callback. 1682 */ 1683 kref_get(&cmd->cmd_kref); 1684 transport_generic_request_failure(cmd, cmd->pi_err); 1685 } else { 1686 /* 1687 * XXX: isert_put_response() failure is not retried. 1688 */ 1689 ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd); 1690 if (ret) 1691 pr_warn_ratelimited("isert_put_response() ret: %d\n", ret); 1692 } 1693 } 1694 1695 static void 1696 isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc) 1697 { 1698 struct isert_conn *isert_conn = wc->qp->qp_context; 1699 struct isert_device *device = isert_conn->device; 1700 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); 1701 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc); 1702 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1703 struct se_cmd *se_cmd = &cmd->se_cmd; 1704 int ret = 0; 1705 1706 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1707 isert_print_wc(wc, "rdma read"); 1708 if (wc->status != IB_WC_WR_FLUSH_ERR) 1709 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1710 isert_completion_put(desc, isert_cmd, device->ib_device, true); 1711 return; 1712 } 1713 1714 isert_dbg("Cmd %p\n", isert_cmd); 1715 1716 iscsit_stop_dataout_timer(cmd); 1717 1718 if (isert_prot_cmd(isert_conn, se_cmd)) 1719 ret = isert_check_pi_status(se_cmd, isert_cmd->rw.reg->mr); 1720 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1721 cmd->write_data_done = 0; 1722 1723 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); 1724 spin_lock_bh(&cmd->istate_lock); 1725 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1726 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1727 spin_unlock_bh(&cmd->istate_lock); 1728 1729 /* 1730 * transport_generic_request_failure() will drop the extra 1731 * se_cmd->cmd_kref reference after T10-PI error, and handle 1732 * any non-zero ->queue_status() callback error retries. 1733 */ 1734 if (ret) 1735 transport_generic_request_failure(se_cmd, se_cmd->pi_err); 1736 else 1737 target_execute_cmd(se_cmd); 1738 } 1739 1740 static void 1741 isert_do_control_comp(struct work_struct *work) 1742 { 1743 struct isert_cmd *isert_cmd = container_of(work, 1744 struct isert_cmd, comp_work); 1745 struct isert_conn *isert_conn = isert_cmd->conn; 1746 struct ib_device *ib_dev = isert_conn->cm_id->device; 1747 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1748 1749 isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state); 1750 1751 switch (cmd->i_state) { 1752 case ISTATE_SEND_TASKMGTRSP: 1753 iscsit_tmr_post_handler(cmd, cmd->conn); 1754 /* fall through */ 1755 case ISTATE_SEND_REJECT: 1756 case ISTATE_SEND_TEXTRSP: 1757 cmd->i_state = ISTATE_SENT_STATUS; 1758 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, 1759 ib_dev, false); 1760 break; 1761 case ISTATE_SEND_LOGOUTRSP: 1762 iscsit_logout_post_handler(cmd, cmd->conn); 1763 break; 1764 default: 1765 isert_err("Unknown i_state %d\n", cmd->i_state); 1766 dump_stack(); 1767 break; 1768 } 1769 } 1770 1771 static void 1772 isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc) 1773 { 1774 struct isert_conn *isert_conn = wc->qp->qp_context; 1775 struct ib_device *ib_dev = isert_conn->cm_id->device; 1776 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); 1777 1778 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1779 isert_print_wc(wc, "login send"); 1780 if (wc->status != IB_WC_WR_FLUSH_ERR) 1781 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1782 } 1783 1784 isert_unmap_tx_desc(tx_desc, ib_dev); 1785 } 1786 1787 static void 1788 isert_send_done(struct ib_cq *cq, struct ib_wc *wc) 1789 { 1790 struct isert_conn *isert_conn = wc->qp->qp_context; 1791 struct ib_device *ib_dev = isert_conn->cm_id->device; 1792 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); 1793 struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc); 1794 1795 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1796 isert_print_wc(wc, "send"); 1797 if (wc->status != IB_WC_WR_FLUSH_ERR) 1798 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1799 isert_completion_put(tx_desc, isert_cmd, ib_dev, true); 1800 return; 1801 } 1802 1803 isert_dbg("Cmd %p\n", isert_cmd); 1804 1805 switch (isert_cmd->iscsi_cmd->i_state) { 1806 case ISTATE_SEND_TASKMGTRSP: 1807 case ISTATE_SEND_LOGOUTRSP: 1808 case ISTATE_SEND_REJECT: 1809 case ISTATE_SEND_TEXTRSP: 1810 isert_unmap_tx_desc(tx_desc, ib_dev); 1811 1812 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp); 1813 queue_work(isert_comp_wq, &isert_cmd->comp_work); 1814 return; 1815 default: 1816 isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS; 1817 isert_completion_put(tx_desc, isert_cmd, ib_dev, false); 1818 break; 1819 } 1820 } 1821 1822 static int 1823 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd) 1824 { 1825 int ret; 1826 1827 ret = isert_post_recv(isert_conn, isert_cmd->rx_desc); 1828 if (ret) { 1829 isert_err("ib_post_recv failed with %d\n", ret); 1830 return ret; 1831 } 1832 1833 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, NULL); 1834 if (ret) { 1835 isert_err("ib_post_send failed with %d\n", ret); 1836 return ret; 1837 } 1838 return ret; 1839 } 1840 1841 static int 1842 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 1843 { 1844 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1845 struct isert_conn *isert_conn = conn->context; 1846 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1847 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *) 1848 &isert_cmd->tx_desc.iscsi_header; 1849 1850 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1851 iscsit_build_rsp_pdu(cmd, conn, true, hdr); 1852 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1853 /* 1854 * Attach SENSE DATA payload to iSCSI Response PDU 1855 */ 1856 if (cmd->se_cmd.sense_buffer && 1857 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || 1858 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { 1859 struct isert_device *device = isert_conn->device; 1860 struct ib_device *ib_dev = device->ib_device; 1861 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 1862 u32 padding, pdu_len; 1863 1864 put_unaligned_be16(cmd->se_cmd.scsi_sense_length, 1865 cmd->sense_buffer); 1866 cmd->se_cmd.scsi_sense_length += sizeof(__be16); 1867 1868 padding = -(cmd->se_cmd.scsi_sense_length) & 3; 1869 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length); 1870 pdu_len = cmd->se_cmd.scsi_sense_length + padding; 1871 1872 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 1873 (void *)cmd->sense_buffer, pdu_len, 1874 DMA_TO_DEVICE); 1875 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) 1876 return -ENOMEM; 1877 1878 isert_cmd->pdu_buf_len = pdu_len; 1879 tx_dsg->addr = isert_cmd->pdu_buf_dma; 1880 tx_dsg->length = pdu_len; 1881 tx_dsg->lkey = device->pd->local_dma_lkey; 1882 isert_cmd->tx_desc.num_sge = 2; 1883 } 1884 1885 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1886 1887 isert_dbg("Posting SCSI Response\n"); 1888 1889 return isert_post_response(isert_conn, isert_cmd); 1890 } 1891 1892 static void 1893 isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 1894 { 1895 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1896 struct isert_conn *isert_conn = conn->context; 1897 1898 spin_lock_bh(&conn->cmd_lock); 1899 if (!list_empty(&cmd->i_conn_node)) 1900 list_del_init(&cmd->i_conn_node); 1901 spin_unlock_bh(&conn->cmd_lock); 1902 1903 if (cmd->data_direction == DMA_TO_DEVICE) 1904 iscsit_stop_dataout_timer(cmd); 1905 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1906 } 1907 1908 static enum target_prot_op 1909 isert_get_sup_prot_ops(struct iscsi_conn *conn) 1910 { 1911 struct isert_conn *isert_conn = conn->context; 1912 struct isert_device *device = isert_conn->device; 1913 1914 if (conn->tpg->tpg_attrib.t10_pi) { 1915 if (device->pi_capable) { 1916 isert_info("conn %p PI offload enabled\n", isert_conn); 1917 isert_conn->pi_support = true; 1918 return TARGET_PROT_ALL; 1919 } 1920 } 1921 1922 isert_info("conn %p PI offload disabled\n", isert_conn); 1923 isert_conn->pi_support = false; 1924 1925 return TARGET_PROT_NORMAL; 1926 } 1927 1928 static int 1929 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 1930 bool nopout_response) 1931 { 1932 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1933 struct isert_conn *isert_conn = conn->context; 1934 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1935 1936 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1937 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *) 1938 &isert_cmd->tx_desc.iscsi_header, 1939 nopout_response); 1940 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1941 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1942 1943 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn); 1944 1945 return isert_post_response(isert_conn, isert_cmd); 1946 } 1947 1948 static int 1949 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1950 { 1951 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1952 struct isert_conn *isert_conn = conn->context; 1953 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1954 1955 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1956 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *) 1957 &isert_cmd->tx_desc.iscsi_header); 1958 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1959 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1960 1961 isert_dbg("conn %p Posting Logout Response\n", isert_conn); 1962 1963 return isert_post_response(isert_conn, isert_cmd); 1964 } 1965 1966 static int 1967 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1968 { 1969 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1970 struct isert_conn *isert_conn = conn->context; 1971 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1972 1973 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1974 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *) 1975 &isert_cmd->tx_desc.iscsi_header); 1976 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1977 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1978 1979 isert_dbg("conn %p Posting Task Management Response\n", isert_conn); 1980 1981 return isert_post_response(isert_conn, isert_cmd); 1982 } 1983 1984 static int 1985 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1986 { 1987 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1988 struct isert_conn *isert_conn = conn->context; 1989 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1990 struct isert_device *device = isert_conn->device; 1991 struct ib_device *ib_dev = device->ib_device; 1992 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 1993 struct iscsi_reject *hdr = 1994 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header; 1995 1996 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1997 iscsit_build_reject(cmd, conn, hdr); 1998 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1999 2000 hton24(hdr->dlength, ISCSI_HDR_LEN); 2001 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 2002 (void *)cmd->buf_ptr, ISCSI_HDR_LEN, 2003 DMA_TO_DEVICE); 2004 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) 2005 return -ENOMEM; 2006 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN; 2007 tx_dsg->addr = isert_cmd->pdu_buf_dma; 2008 tx_dsg->length = ISCSI_HDR_LEN; 2009 tx_dsg->lkey = device->pd->local_dma_lkey; 2010 isert_cmd->tx_desc.num_sge = 2; 2011 2012 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 2013 2014 isert_dbg("conn %p Posting Reject\n", isert_conn); 2015 2016 return isert_post_response(isert_conn, isert_cmd); 2017 } 2018 2019 static int 2020 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2021 { 2022 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2023 struct isert_conn *isert_conn = conn->context; 2024 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2025 struct iscsi_text_rsp *hdr = 2026 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; 2027 u32 txt_rsp_len; 2028 int rc; 2029 2030 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2031 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND); 2032 if (rc < 0) 2033 return rc; 2034 2035 txt_rsp_len = rc; 2036 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2037 2038 if (txt_rsp_len) { 2039 struct isert_device *device = isert_conn->device; 2040 struct ib_device *ib_dev = device->ib_device; 2041 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 2042 void *txt_rsp_buf = cmd->buf_ptr; 2043 2044 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 2045 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE); 2046 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) 2047 return -ENOMEM; 2048 2049 isert_cmd->pdu_buf_len = txt_rsp_len; 2050 tx_dsg->addr = isert_cmd->pdu_buf_dma; 2051 tx_dsg->length = txt_rsp_len; 2052 tx_dsg->lkey = device->pd->local_dma_lkey; 2053 isert_cmd->tx_desc.num_sge = 2; 2054 } 2055 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 2056 2057 isert_dbg("conn %p Text Response\n", isert_conn); 2058 2059 return isert_post_response(isert_conn, isert_cmd); 2060 } 2061 2062 static inline void 2063 isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_domain *domain) 2064 { 2065 domain->sig_type = IB_SIG_TYPE_T10_DIF; 2066 domain->sig.dif.bg_type = IB_T10DIF_CRC; 2067 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size; 2068 domain->sig.dif.ref_tag = se_cmd->reftag_seed; 2069 /* 2070 * At the moment we hard code those, but if in the future 2071 * the target core would like to use it, we will take it 2072 * from se_cmd. 2073 */ 2074 domain->sig.dif.apptag_check_mask = 0xffff; 2075 domain->sig.dif.app_escape = true; 2076 domain->sig.dif.ref_escape = true; 2077 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT || 2078 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT) 2079 domain->sig.dif.ref_remap = true; 2080 }; 2081 2082 static int 2083 isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs) 2084 { 2085 memset(sig_attrs, 0, sizeof(*sig_attrs)); 2086 2087 switch (se_cmd->prot_op) { 2088 case TARGET_PROT_DIN_INSERT: 2089 case TARGET_PROT_DOUT_STRIP: 2090 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE; 2091 isert_set_dif_domain(se_cmd, &sig_attrs->wire); 2092 break; 2093 case TARGET_PROT_DOUT_INSERT: 2094 case TARGET_PROT_DIN_STRIP: 2095 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE; 2096 isert_set_dif_domain(se_cmd, &sig_attrs->mem); 2097 break; 2098 case TARGET_PROT_DIN_PASS: 2099 case TARGET_PROT_DOUT_PASS: 2100 isert_set_dif_domain(se_cmd, &sig_attrs->wire); 2101 isert_set_dif_domain(se_cmd, &sig_attrs->mem); 2102 break; 2103 default: 2104 isert_err("Unsupported PI operation %d\n", se_cmd->prot_op); 2105 return -EINVAL; 2106 } 2107 2108 if (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD) 2109 sig_attrs->check_mask |= IB_SIG_CHECK_GUARD; 2110 if (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG) 2111 sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG; 2112 if (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG) 2113 sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG; 2114 2115 return 0; 2116 } 2117 2118 static int 2119 isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn, 2120 struct ib_cqe *cqe, struct ib_send_wr *chain_wr) 2121 { 2122 struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd; 2123 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd); 2124 u8 port_num = conn->cm_id->port_num; 2125 u64 addr; 2126 u32 rkey, offset; 2127 int ret; 2128 2129 if (cmd->ctx_init_done) 2130 goto rdma_ctx_post; 2131 2132 if (dir == DMA_FROM_DEVICE) { 2133 addr = cmd->write_va; 2134 rkey = cmd->write_stag; 2135 offset = cmd->iscsi_cmd->write_data_done; 2136 } else { 2137 addr = cmd->read_va; 2138 rkey = cmd->read_stag; 2139 offset = 0; 2140 } 2141 2142 if (isert_prot_cmd(conn, se_cmd)) { 2143 struct ib_sig_attrs sig_attrs; 2144 2145 ret = isert_set_sig_attrs(se_cmd, &sig_attrs); 2146 if (ret) 2147 return ret; 2148 2149 WARN_ON_ONCE(offset); 2150 ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num, 2151 se_cmd->t_data_sg, se_cmd->t_data_nents, 2152 se_cmd->t_prot_sg, se_cmd->t_prot_nents, 2153 &sig_attrs, addr, rkey, dir); 2154 } else { 2155 ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num, 2156 se_cmd->t_data_sg, se_cmd->t_data_nents, 2157 offset, addr, rkey, dir); 2158 } 2159 2160 if (ret < 0) { 2161 isert_err("Cmd: %p failed to prepare RDMA res\n", cmd); 2162 return ret; 2163 } 2164 2165 cmd->ctx_init_done = true; 2166 2167 rdma_ctx_post: 2168 ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr); 2169 if (ret < 0) 2170 isert_err("Cmd: %p failed to post RDMA res\n", cmd); 2171 return ret; 2172 } 2173 2174 static int 2175 isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 2176 { 2177 struct se_cmd *se_cmd = &cmd->se_cmd; 2178 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2179 struct isert_conn *isert_conn = conn->context; 2180 struct ib_cqe *cqe = NULL; 2181 struct ib_send_wr *chain_wr = NULL; 2182 int rc; 2183 2184 isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n", 2185 isert_cmd, se_cmd->data_length); 2186 2187 if (isert_prot_cmd(isert_conn, se_cmd)) { 2188 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done; 2189 cqe = &isert_cmd->tx_desc.tx_cqe; 2190 } else { 2191 /* 2192 * Build isert_conn->tx_desc for iSCSI response PDU and attach 2193 */ 2194 isert_create_send_desc(isert_conn, isert_cmd, 2195 &isert_cmd->tx_desc); 2196 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *) 2197 &isert_cmd->tx_desc.iscsi_header); 2198 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2199 isert_init_send_wr(isert_conn, isert_cmd, 2200 &isert_cmd->tx_desc.send_wr); 2201 2202 rc = isert_post_recv(isert_conn, isert_cmd->rx_desc); 2203 if (rc) { 2204 isert_err("ib_post_recv failed with %d\n", rc); 2205 return rc; 2206 } 2207 2208 chain_wr = &isert_cmd->tx_desc.send_wr; 2209 } 2210 2211 rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr); 2212 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n", 2213 isert_cmd, rc); 2214 return rc; 2215 } 2216 2217 static int 2218 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) 2219 { 2220 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2221 int ret; 2222 2223 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", 2224 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done); 2225 2226 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done; 2227 ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context, 2228 &isert_cmd->tx_desc.tx_cqe, NULL); 2229 2230 isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n", 2231 isert_cmd, ret); 2232 return ret; 2233 } 2234 2235 static int 2236 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 2237 { 2238 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2239 int ret = 0; 2240 2241 switch (state) { 2242 case ISTATE_REMOVE: 2243 spin_lock_bh(&conn->cmd_lock); 2244 list_del_init(&cmd->i_conn_node); 2245 spin_unlock_bh(&conn->cmd_lock); 2246 isert_put_cmd(isert_cmd, true); 2247 break; 2248 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 2249 ret = isert_put_nopin(cmd, conn, false); 2250 break; 2251 default: 2252 isert_err("Unknown immediate state: 0x%02x\n", state); 2253 ret = -EINVAL; 2254 break; 2255 } 2256 2257 return ret; 2258 } 2259 2260 static int 2261 isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 2262 { 2263 struct isert_conn *isert_conn = conn->context; 2264 int ret; 2265 2266 switch (state) { 2267 case ISTATE_SEND_LOGOUTRSP: 2268 ret = isert_put_logout_rsp(cmd, conn); 2269 if (!ret) 2270 isert_conn->logout_posted = true; 2271 break; 2272 case ISTATE_SEND_NOPIN: 2273 ret = isert_put_nopin(cmd, conn, true); 2274 break; 2275 case ISTATE_SEND_TASKMGTRSP: 2276 ret = isert_put_tm_rsp(cmd, conn); 2277 break; 2278 case ISTATE_SEND_REJECT: 2279 ret = isert_put_reject(cmd, conn); 2280 break; 2281 case ISTATE_SEND_TEXTRSP: 2282 ret = isert_put_text_rsp(cmd, conn); 2283 break; 2284 case ISTATE_SEND_STATUS: 2285 /* 2286 * Special case for sending non GOOD SCSI status from TX thread 2287 * context during pre se_cmd excecution failure. 2288 */ 2289 ret = isert_put_response(conn, cmd); 2290 break; 2291 default: 2292 isert_err("Unknown response state: 0x%02x\n", state); 2293 ret = -EINVAL; 2294 break; 2295 } 2296 2297 return ret; 2298 } 2299 2300 struct rdma_cm_id * 2301 isert_setup_id(struct isert_np *isert_np) 2302 { 2303 struct iscsi_np *np = isert_np->np; 2304 struct rdma_cm_id *id; 2305 struct sockaddr *sa; 2306 int ret; 2307 2308 sa = (struct sockaddr *)&np->np_sockaddr; 2309 isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa); 2310 2311 id = rdma_create_id(&init_net, isert_cma_handler, isert_np, 2312 RDMA_PS_TCP, IB_QPT_RC); 2313 if (IS_ERR(id)) { 2314 isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id)); 2315 ret = PTR_ERR(id); 2316 goto out; 2317 } 2318 isert_dbg("id %p context %p\n", id, id->context); 2319 2320 ret = rdma_bind_addr(id, sa); 2321 if (ret) { 2322 isert_err("rdma_bind_addr() failed: %d\n", ret); 2323 goto out_id; 2324 } 2325 2326 ret = rdma_listen(id, 0); 2327 if (ret) { 2328 isert_err("rdma_listen() failed: %d\n", ret); 2329 goto out_id; 2330 } 2331 2332 return id; 2333 out_id: 2334 rdma_destroy_id(id); 2335 out: 2336 return ERR_PTR(ret); 2337 } 2338 2339 static int 2340 isert_setup_np(struct iscsi_np *np, 2341 struct sockaddr_storage *ksockaddr) 2342 { 2343 struct isert_np *isert_np; 2344 struct rdma_cm_id *isert_lid; 2345 int ret; 2346 2347 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL); 2348 if (!isert_np) 2349 return -ENOMEM; 2350 2351 sema_init(&isert_np->sem, 0); 2352 mutex_init(&isert_np->mutex); 2353 INIT_LIST_HEAD(&isert_np->accepted); 2354 INIT_LIST_HEAD(&isert_np->pending); 2355 isert_np->np = np; 2356 2357 /* 2358 * Setup the np->np_sockaddr from the passed sockaddr setup 2359 * in iscsi_target_configfs.c code.. 2360 */ 2361 memcpy(&np->np_sockaddr, ksockaddr, 2362 sizeof(struct sockaddr_storage)); 2363 2364 isert_lid = isert_setup_id(isert_np); 2365 if (IS_ERR(isert_lid)) { 2366 ret = PTR_ERR(isert_lid); 2367 goto out; 2368 } 2369 2370 isert_np->cm_id = isert_lid; 2371 np->np_context = isert_np; 2372 2373 return 0; 2374 2375 out: 2376 kfree(isert_np); 2377 2378 return ret; 2379 } 2380 2381 static int 2382 isert_rdma_accept(struct isert_conn *isert_conn) 2383 { 2384 struct rdma_cm_id *cm_id = isert_conn->cm_id; 2385 struct rdma_conn_param cp; 2386 int ret; 2387 struct iser_cm_hdr rsp_hdr; 2388 2389 memset(&cp, 0, sizeof(struct rdma_conn_param)); 2390 cp.initiator_depth = isert_conn->initiator_depth; 2391 cp.retry_count = 7; 2392 cp.rnr_retry_count = 7; 2393 2394 memset(&rsp_hdr, 0, sizeof(rsp_hdr)); 2395 rsp_hdr.flags = ISERT_ZBVA_NOT_USED; 2396 if (!isert_conn->snd_w_inv) 2397 rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED; 2398 cp.private_data = (void *)&rsp_hdr; 2399 cp.private_data_len = sizeof(rsp_hdr); 2400 2401 ret = rdma_accept(cm_id, &cp); 2402 if (ret) { 2403 isert_err("rdma_accept() failed with: %d\n", ret); 2404 return ret; 2405 } 2406 2407 return 0; 2408 } 2409 2410 static int 2411 isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) 2412 { 2413 struct isert_conn *isert_conn = conn->context; 2414 int ret; 2415 2416 isert_info("before login_req comp conn: %p\n", isert_conn); 2417 ret = wait_for_completion_interruptible(&isert_conn->login_req_comp); 2418 if (ret) { 2419 isert_err("isert_conn %p interrupted before got login req\n", 2420 isert_conn); 2421 return ret; 2422 } 2423 reinit_completion(&isert_conn->login_req_comp); 2424 2425 /* 2426 * For login requests after the first PDU, isert_rx_login_req() will 2427 * kick schedule_delayed_work(&conn->login_work) as the packet is 2428 * received, which turns this callback from iscsi_target_do_login_rx() 2429 * into a NOP. 2430 */ 2431 if (!login->first_request) 2432 return 0; 2433 2434 isert_rx_login_req(isert_conn); 2435 2436 isert_info("before login_comp conn: %p\n", conn); 2437 ret = wait_for_completion_interruptible(&isert_conn->login_comp); 2438 if (ret) 2439 return ret; 2440 2441 isert_info("processing login->req: %p\n", login->req); 2442 2443 return 0; 2444 } 2445 2446 static void 2447 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, 2448 struct isert_conn *isert_conn) 2449 { 2450 struct rdma_cm_id *cm_id = isert_conn->cm_id; 2451 struct rdma_route *cm_route = &cm_id->route; 2452 2453 conn->login_family = np->np_sockaddr.ss_family; 2454 2455 conn->login_sockaddr = cm_route->addr.dst_addr; 2456 conn->local_sockaddr = cm_route->addr.src_addr; 2457 } 2458 2459 static int 2460 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) 2461 { 2462 struct isert_np *isert_np = np->np_context; 2463 struct isert_conn *isert_conn; 2464 int ret; 2465 2466 accept_wait: 2467 ret = down_interruptible(&isert_np->sem); 2468 if (ret) 2469 return -ENODEV; 2470 2471 spin_lock_bh(&np->np_thread_lock); 2472 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { 2473 spin_unlock_bh(&np->np_thread_lock); 2474 isert_dbg("np_thread_state %d\n", 2475 np->np_thread_state); 2476 /** 2477 * No point in stalling here when np_thread 2478 * is in state RESET/SHUTDOWN/EXIT - bail 2479 **/ 2480 return -ENODEV; 2481 } 2482 spin_unlock_bh(&np->np_thread_lock); 2483 2484 mutex_lock(&isert_np->mutex); 2485 if (list_empty(&isert_np->pending)) { 2486 mutex_unlock(&isert_np->mutex); 2487 goto accept_wait; 2488 } 2489 isert_conn = list_first_entry(&isert_np->pending, 2490 struct isert_conn, node); 2491 list_del_init(&isert_conn->node); 2492 mutex_unlock(&isert_np->mutex); 2493 2494 conn->context = isert_conn; 2495 isert_conn->conn = conn; 2496 isert_conn->state = ISER_CONN_BOUND; 2497 2498 isert_set_conn_info(np, conn, isert_conn); 2499 2500 isert_dbg("Processing isert_conn: %p\n", isert_conn); 2501 2502 return 0; 2503 } 2504 2505 static void 2506 isert_free_np(struct iscsi_np *np) 2507 { 2508 struct isert_np *isert_np = np->np_context; 2509 struct isert_conn *isert_conn, *n; 2510 2511 if (isert_np->cm_id) 2512 rdma_destroy_id(isert_np->cm_id); 2513 2514 /* 2515 * FIXME: At this point we don't have a good way to insure 2516 * that at this point we don't have hanging connections that 2517 * completed RDMA establishment but didn't start iscsi login 2518 * process. So work-around this by cleaning up what ever piled 2519 * up in accepted and pending lists. 2520 */ 2521 mutex_lock(&isert_np->mutex); 2522 if (!list_empty(&isert_np->pending)) { 2523 isert_info("Still have isert pending connections\n"); 2524 list_for_each_entry_safe(isert_conn, n, 2525 &isert_np->pending, 2526 node) { 2527 isert_info("cleaning isert_conn %p state (%d)\n", 2528 isert_conn, isert_conn->state); 2529 isert_connect_release(isert_conn); 2530 } 2531 } 2532 2533 if (!list_empty(&isert_np->accepted)) { 2534 isert_info("Still have isert accepted connections\n"); 2535 list_for_each_entry_safe(isert_conn, n, 2536 &isert_np->accepted, 2537 node) { 2538 isert_info("cleaning isert_conn %p state (%d)\n", 2539 isert_conn, isert_conn->state); 2540 isert_connect_release(isert_conn); 2541 } 2542 } 2543 mutex_unlock(&isert_np->mutex); 2544 2545 np->np_context = NULL; 2546 kfree(isert_np); 2547 } 2548 2549 static void isert_release_work(struct work_struct *work) 2550 { 2551 struct isert_conn *isert_conn = container_of(work, 2552 struct isert_conn, 2553 release_work); 2554 2555 isert_info("Starting release conn %p\n", isert_conn); 2556 2557 mutex_lock(&isert_conn->mutex); 2558 isert_conn->state = ISER_CONN_DOWN; 2559 mutex_unlock(&isert_conn->mutex); 2560 2561 isert_info("Destroying conn %p\n", isert_conn); 2562 isert_put_conn(isert_conn); 2563 } 2564 2565 static void 2566 isert_wait4logout(struct isert_conn *isert_conn) 2567 { 2568 struct iscsi_conn *conn = isert_conn->conn; 2569 2570 isert_info("conn %p\n", isert_conn); 2571 2572 if (isert_conn->logout_posted) { 2573 isert_info("conn %p wait for conn_logout_comp\n", isert_conn); 2574 wait_for_completion_timeout(&conn->conn_logout_comp, 2575 SECONDS_FOR_LOGOUT_COMP * HZ); 2576 } 2577 } 2578 2579 static void 2580 isert_wait4cmds(struct iscsi_conn *conn) 2581 { 2582 isert_info("iscsi_conn %p\n", conn); 2583 2584 if (conn->sess) { 2585 target_sess_cmd_list_set_waiting(conn->sess->se_sess); 2586 target_wait_for_sess_cmds(conn->sess->se_sess); 2587 } 2588 } 2589 2590 /** 2591 * isert_put_unsol_pending_cmds() - Drop commands waiting for 2592 * unsolicitate dataout 2593 * @conn: iscsi connection 2594 * 2595 * We might still have commands that are waiting for unsolicited 2596 * dataouts messages. We must put the extra reference on those 2597 * before blocking on the target_wait_for_session_cmds 2598 */ 2599 static void 2600 isert_put_unsol_pending_cmds(struct iscsi_conn *conn) 2601 { 2602 struct iscsi_cmd *cmd, *tmp; 2603 static LIST_HEAD(drop_cmd_list); 2604 2605 spin_lock_bh(&conn->cmd_lock); 2606 list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) { 2607 if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) && 2608 (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) && 2609 (cmd->write_data_done < cmd->se_cmd.data_length)) 2610 list_move_tail(&cmd->i_conn_node, &drop_cmd_list); 2611 } 2612 spin_unlock_bh(&conn->cmd_lock); 2613 2614 list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) { 2615 list_del_init(&cmd->i_conn_node); 2616 if (cmd->i_state != ISTATE_REMOVE) { 2617 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2618 2619 isert_info("conn %p dropping cmd %p\n", conn, cmd); 2620 isert_put_cmd(isert_cmd, true); 2621 } 2622 } 2623 } 2624 2625 static void isert_wait_conn(struct iscsi_conn *conn) 2626 { 2627 struct isert_conn *isert_conn = conn->context; 2628 2629 isert_info("Starting conn %p\n", isert_conn); 2630 2631 mutex_lock(&isert_conn->mutex); 2632 isert_conn_terminate(isert_conn); 2633 mutex_unlock(&isert_conn->mutex); 2634 2635 ib_drain_qp(isert_conn->qp); 2636 isert_put_unsol_pending_cmds(conn); 2637 isert_wait4cmds(conn); 2638 isert_wait4logout(isert_conn); 2639 2640 queue_work(isert_release_wq, &isert_conn->release_work); 2641 } 2642 2643 static void isert_free_conn(struct iscsi_conn *conn) 2644 { 2645 struct isert_conn *isert_conn = conn->context; 2646 2647 ib_drain_qp(isert_conn->qp); 2648 isert_put_conn(isert_conn); 2649 } 2650 2651 static void isert_get_rx_pdu(struct iscsi_conn *conn) 2652 { 2653 struct completion comp; 2654 2655 init_completion(&comp); 2656 2657 wait_for_completion_interruptible(&comp); 2658 } 2659 2660 static struct iscsit_transport iser_target_transport = { 2661 .name = "IB/iSER", 2662 .transport_type = ISCSI_INFINIBAND, 2663 .rdma_shutdown = true, 2664 .priv_size = sizeof(struct isert_cmd), 2665 .owner = THIS_MODULE, 2666 .iscsit_setup_np = isert_setup_np, 2667 .iscsit_accept_np = isert_accept_np, 2668 .iscsit_free_np = isert_free_np, 2669 .iscsit_wait_conn = isert_wait_conn, 2670 .iscsit_free_conn = isert_free_conn, 2671 .iscsit_get_login_rx = isert_get_login_rx, 2672 .iscsit_put_login_tx = isert_put_login_tx, 2673 .iscsit_immediate_queue = isert_immediate_queue, 2674 .iscsit_response_queue = isert_response_queue, 2675 .iscsit_get_dataout = isert_get_dataout, 2676 .iscsit_queue_data_in = isert_put_datain, 2677 .iscsit_queue_status = isert_put_response, 2678 .iscsit_aborted_task = isert_aborted_task, 2679 .iscsit_get_rx_pdu = isert_get_rx_pdu, 2680 .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops, 2681 }; 2682 2683 static int __init isert_init(void) 2684 { 2685 int ret; 2686 2687 isert_comp_wq = alloc_workqueue("isert_comp_wq", 2688 WQ_UNBOUND | WQ_HIGHPRI, 0); 2689 if (!isert_comp_wq) { 2690 isert_err("Unable to allocate isert_comp_wq\n"); 2691 return -ENOMEM; 2692 } 2693 2694 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND, 2695 WQ_UNBOUND_MAX_ACTIVE); 2696 if (!isert_release_wq) { 2697 isert_err("Unable to allocate isert_release_wq\n"); 2698 ret = -ENOMEM; 2699 goto destroy_comp_wq; 2700 } 2701 2702 iscsit_register_transport(&iser_target_transport); 2703 isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n"); 2704 2705 return 0; 2706 2707 destroy_comp_wq: 2708 destroy_workqueue(isert_comp_wq); 2709 2710 return ret; 2711 } 2712 2713 static void __exit isert_exit(void) 2714 { 2715 flush_scheduled_work(); 2716 destroy_workqueue(isert_release_wq); 2717 destroy_workqueue(isert_comp_wq); 2718 iscsit_unregister_transport(&iser_target_transport); 2719 isert_info("iSER_TARGET[0] - Released iser_target_transport\n"); 2720 } 2721 2722 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); 2723 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 2724 MODULE_LICENSE("GPL"); 2725 2726 module_init(isert_init); 2727 module_exit(isert_exit); 2728