1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /******************************************************************************* 3 * This file contains iSCSI extentions for RDMA (iSER) Verbs 4 * 5 * (c) Copyright 2013 Datera, Inc. 6 * 7 * Nicholas A. Bellinger <nab@linux-iscsi.org> 8 * 9 ****************************************************************************/ 10 11 #include <linux/string.h> 12 #include <linux/module.h> 13 #include <linux/scatterlist.h> 14 #include <linux/socket.h> 15 #include <linux/in.h> 16 #include <linux/in6.h> 17 #include <rdma/ib_verbs.h> 18 #include <rdma/ib_cm.h> 19 #include <rdma/rdma_cm.h> 20 #include <target/target_core_base.h> 21 #include <target/target_core_fabric.h> 22 #include <target/iscsi/iscsi_transport.h> 23 #include <linux/semaphore.h> 24 25 #include "ib_isert.h" 26 27 static int isert_debug_level; 28 module_param_named(debug_level, isert_debug_level, int, 0644); 29 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)"); 30 31 static DEFINE_MUTEX(device_list_mutex); 32 static LIST_HEAD(device_list); 33 static struct workqueue_struct *isert_comp_wq; 34 static struct workqueue_struct *isert_release_wq; 35 36 static int 37 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd); 38 static int 39 isert_login_post_recv(struct isert_conn *isert_conn); 40 static int 41 isert_rdma_accept(struct isert_conn *isert_conn); 42 struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); 43 44 static void isert_release_work(struct work_struct *work); 45 static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc); 46 static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc); 47 static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc); 48 static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc); 49 50 static inline bool 51 isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd) 52 { 53 return (conn->pi_support && 54 cmd->prot_op != TARGET_PROT_NORMAL); 55 } 56 57 58 static void 59 isert_qp_event_callback(struct ib_event *e, void *context) 60 { 61 struct isert_conn *isert_conn = context; 62 63 isert_err("%s (%d): conn %p\n", 64 ib_event_msg(e->event), e->event, isert_conn); 65 66 switch (e->event) { 67 case IB_EVENT_COMM_EST: 68 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST); 69 break; 70 case IB_EVENT_QP_LAST_WQE_REACHED: 71 isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n"); 72 break; 73 default: 74 break; 75 } 76 } 77 78 static struct ib_qp * 79 isert_create_qp(struct isert_conn *isert_conn, 80 struct rdma_cm_id *cma_id) 81 { 82 u32 cq_size = ISERT_QP_MAX_REQ_DTOS + ISERT_QP_MAX_RECV_DTOS + 2; 83 struct isert_device *device = isert_conn->device; 84 struct ib_device *ib_dev = device->ib_device; 85 struct ib_qp_init_attr attr; 86 int ret, factor; 87 88 isert_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_WORKQUEUE); 89 if (IS_ERR(isert_conn->cq)) { 90 isert_err("Unable to allocate cq\n"); 91 ret = PTR_ERR(isert_conn->cq); 92 return ERR_PTR(ret); 93 } 94 isert_conn->cq_size = cq_size; 95 96 memset(&attr, 0, sizeof(struct ib_qp_init_attr)); 97 attr.event_handler = isert_qp_event_callback; 98 attr.qp_context = isert_conn; 99 attr.send_cq = isert_conn->cq; 100 attr.recv_cq = isert_conn->cq; 101 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1; 102 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1; 103 factor = rdma_rw_mr_factor(device->ib_device, cma_id->port_num, 104 ISCSI_ISER_MAX_SG_TABLESIZE); 105 attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX * factor; 106 attr.cap.max_send_sge = device->ib_device->attrs.max_send_sge; 107 attr.cap.max_recv_sge = 1; 108 attr.sq_sig_type = IB_SIGNAL_REQ_WR; 109 attr.qp_type = IB_QPT_RC; 110 if (device->pi_capable) 111 attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN; 112 113 ret = rdma_create_qp(cma_id, device->pd, &attr); 114 if (ret) { 115 isert_err("rdma_create_qp failed for cma_id %d\n", ret); 116 ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size); 117 118 return ERR_PTR(ret); 119 } 120 121 return cma_id->qp; 122 } 123 124 static int 125 isert_alloc_rx_descriptors(struct isert_conn *isert_conn) 126 { 127 struct isert_device *device = isert_conn->device; 128 struct ib_device *ib_dev = device->ib_device; 129 struct iser_rx_desc *rx_desc; 130 struct ib_sge *rx_sg; 131 u64 dma_addr; 132 int i, j; 133 134 isert_conn->rx_descs = kcalloc(ISERT_QP_MAX_RECV_DTOS, 135 sizeof(struct iser_rx_desc), 136 GFP_KERNEL); 137 if (!isert_conn->rx_descs) 138 return -ENOMEM; 139 140 rx_desc = isert_conn->rx_descs; 141 142 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 143 dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf, 144 ISER_RX_SIZE, DMA_FROM_DEVICE); 145 if (ib_dma_mapping_error(ib_dev, dma_addr)) 146 goto dma_map_fail; 147 148 rx_desc->dma_addr = dma_addr; 149 150 rx_sg = &rx_desc->rx_sg; 151 rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc); 152 rx_sg->length = ISER_RX_PAYLOAD_SIZE; 153 rx_sg->lkey = device->pd->local_dma_lkey; 154 rx_desc->rx_cqe.done = isert_recv_done; 155 } 156 157 return 0; 158 159 dma_map_fail: 160 rx_desc = isert_conn->rx_descs; 161 for (j = 0; j < i; j++, rx_desc++) { 162 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 163 ISER_RX_SIZE, DMA_FROM_DEVICE); 164 } 165 kfree(isert_conn->rx_descs); 166 isert_conn->rx_descs = NULL; 167 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn); 168 return -ENOMEM; 169 } 170 171 static void 172 isert_free_rx_descriptors(struct isert_conn *isert_conn) 173 { 174 struct ib_device *ib_dev = isert_conn->device->ib_device; 175 struct iser_rx_desc *rx_desc; 176 int i; 177 178 if (!isert_conn->rx_descs) 179 return; 180 181 rx_desc = isert_conn->rx_descs; 182 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 183 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 184 ISER_RX_SIZE, DMA_FROM_DEVICE); 185 } 186 187 kfree(isert_conn->rx_descs); 188 isert_conn->rx_descs = NULL; 189 } 190 191 static int 192 isert_create_device_ib_res(struct isert_device *device) 193 { 194 struct ib_device *ib_dev = device->ib_device; 195 int ret; 196 197 isert_dbg("devattr->max_send_sge: %d devattr->max_recv_sge %d\n", 198 ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge); 199 isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd); 200 201 device->pd = ib_alloc_pd(ib_dev, 0); 202 if (IS_ERR(device->pd)) { 203 ret = PTR_ERR(device->pd); 204 isert_err("failed to allocate pd, device %p, ret=%d\n", 205 device, ret); 206 return ret; 207 } 208 209 /* Check signature cap */ 210 device->pi_capable = ib_dev->attrs.device_cap_flags & 211 IB_DEVICE_INTEGRITY_HANDOVER ? true : false; 212 213 return 0; 214 } 215 216 static void 217 isert_free_device_ib_res(struct isert_device *device) 218 { 219 isert_info("device %p\n", device); 220 221 ib_dealloc_pd(device->pd); 222 } 223 224 static void 225 isert_device_put(struct isert_device *device) 226 { 227 mutex_lock(&device_list_mutex); 228 device->refcount--; 229 isert_info("device %p refcount %d\n", device, device->refcount); 230 if (!device->refcount) { 231 isert_free_device_ib_res(device); 232 list_del(&device->dev_node); 233 kfree(device); 234 } 235 mutex_unlock(&device_list_mutex); 236 } 237 238 static struct isert_device * 239 isert_device_get(struct rdma_cm_id *cma_id) 240 { 241 struct isert_device *device; 242 int ret; 243 244 mutex_lock(&device_list_mutex); 245 list_for_each_entry(device, &device_list, dev_node) { 246 if (device->ib_device->node_guid == cma_id->device->node_guid) { 247 device->refcount++; 248 isert_info("Found iser device %p refcount %d\n", 249 device, device->refcount); 250 mutex_unlock(&device_list_mutex); 251 return device; 252 } 253 } 254 255 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL); 256 if (!device) { 257 mutex_unlock(&device_list_mutex); 258 return ERR_PTR(-ENOMEM); 259 } 260 261 INIT_LIST_HEAD(&device->dev_node); 262 263 device->ib_device = cma_id->device; 264 ret = isert_create_device_ib_res(device); 265 if (ret) { 266 kfree(device); 267 mutex_unlock(&device_list_mutex); 268 return ERR_PTR(ret); 269 } 270 271 device->refcount++; 272 list_add_tail(&device->dev_node, &device_list); 273 isert_info("Created a new iser device %p refcount %d\n", 274 device, device->refcount); 275 mutex_unlock(&device_list_mutex); 276 277 return device; 278 } 279 280 static void 281 isert_init_conn(struct isert_conn *isert_conn) 282 { 283 isert_conn->state = ISER_CONN_INIT; 284 INIT_LIST_HEAD(&isert_conn->node); 285 init_completion(&isert_conn->login_comp); 286 init_completion(&isert_conn->login_req_comp); 287 init_waitqueue_head(&isert_conn->rem_wait); 288 kref_init(&isert_conn->kref); 289 mutex_init(&isert_conn->mutex); 290 INIT_WORK(&isert_conn->release_work, isert_release_work); 291 } 292 293 static void 294 isert_free_login_buf(struct isert_conn *isert_conn) 295 { 296 struct ib_device *ib_dev = isert_conn->device->ib_device; 297 298 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, 299 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); 300 kfree(isert_conn->login_rsp_buf); 301 302 ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr, 303 ISER_RX_SIZE, DMA_FROM_DEVICE); 304 kfree(isert_conn->login_desc); 305 } 306 307 static int 308 isert_alloc_login_buf(struct isert_conn *isert_conn, 309 struct ib_device *ib_dev) 310 { 311 int ret; 312 313 isert_conn->login_desc = kzalloc(sizeof(*isert_conn->login_desc), 314 GFP_KERNEL); 315 if (!isert_conn->login_desc) 316 return -ENOMEM; 317 318 isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev, 319 isert_conn->login_desc->buf, 320 ISER_RX_SIZE, DMA_FROM_DEVICE); 321 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr); 322 if (ret) { 323 isert_err("login_desc dma mapping error: %d\n", ret); 324 isert_conn->login_desc->dma_addr = 0; 325 goto out_free_login_desc; 326 } 327 328 isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL); 329 if (!isert_conn->login_rsp_buf) { 330 ret = -ENOMEM; 331 goto out_unmap_login_desc; 332 } 333 334 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, 335 isert_conn->login_rsp_buf, 336 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); 337 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); 338 if (ret) { 339 isert_err("login_rsp_dma mapping error: %d\n", ret); 340 isert_conn->login_rsp_dma = 0; 341 goto out_free_login_rsp_buf; 342 } 343 344 return 0; 345 346 out_free_login_rsp_buf: 347 kfree(isert_conn->login_rsp_buf); 348 out_unmap_login_desc: 349 ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr, 350 ISER_RX_SIZE, DMA_FROM_DEVICE); 351 out_free_login_desc: 352 kfree(isert_conn->login_desc); 353 return ret; 354 } 355 356 static void 357 isert_set_nego_params(struct isert_conn *isert_conn, 358 struct rdma_conn_param *param) 359 { 360 struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs; 361 362 /* Set max inflight RDMA READ requests */ 363 isert_conn->initiator_depth = min_t(u8, param->initiator_depth, 364 attr->max_qp_init_rd_atom); 365 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth); 366 367 if (param->private_data) { 368 u8 flags = *(u8 *)param->private_data; 369 370 /* 371 * use remote invalidation if the both initiator 372 * and the HCA support it 373 */ 374 isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) && 375 (attr->device_cap_flags & 376 IB_DEVICE_MEM_MGT_EXTENSIONS); 377 if (isert_conn->snd_w_inv) 378 isert_info("Using remote invalidation\n"); 379 } 380 } 381 382 static void 383 isert_destroy_qp(struct isert_conn *isert_conn) 384 { 385 ib_destroy_qp(isert_conn->qp); 386 ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size); 387 } 388 389 static int 390 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 391 { 392 struct isert_np *isert_np = cma_id->context; 393 struct iscsi_np *np = isert_np->np; 394 struct isert_conn *isert_conn; 395 struct isert_device *device; 396 int ret = 0; 397 398 spin_lock_bh(&np->np_thread_lock); 399 if (!np->enabled) { 400 spin_unlock_bh(&np->np_thread_lock); 401 isert_dbg("iscsi_np is not enabled, reject connect request\n"); 402 return rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED); 403 } 404 spin_unlock_bh(&np->np_thread_lock); 405 406 isert_dbg("cma_id: %p, portal: %p\n", 407 cma_id, cma_id->context); 408 409 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL); 410 if (!isert_conn) 411 return -ENOMEM; 412 413 isert_init_conn(isert_conn); 414 isert_conn->cm_id = cma_id; 415 416 ret = isert_alloc_login_buf(isert_conn, cma_id->device); 417 if (ret) 418 goto out; 419 420 device = isert_device_get(cma_id); 421 if (IS_ERR(device)) { 422 ret = PTR_ERR(device); 423 goto out_rsp_dma_map; 424 } 425 isert_conn->device = device; 426 427 isert_set_nego_params(isert_conn, &event->param.conn); 428 429 isert_conn->qp = isert_create_qp(isert_conn, cma_id); 430 if (IS_ERR(isert_conn->qp)) { 431 ret = PTR_ERR(isert_conn->qp); 432 goto out_conn_dev; 433 } 434 435 ret = isert_login_post_recv(isert_conn); 436 if (ret) 437 goto out_destroy_qp; 438 439 ret = isert_rdma_accept(isert_conn); 440 if (ret) 441 goto out_destroy_qp; 442 443 mutex_lock(&isert_np->mutex); 444 list_add_tail(&isert_conn->node, &isert_np->accepted); 445 mutex_unlock(&isert_np->mutex); 446 447 return 0; 448 449 out_destroy_qp: 450 isert_destroy_qp(isert_conn); 451 out_conn_dev: 452 isert_device_put(device); 453 out_rsp_dma_map: 454 isert_free_login_buf(isert_conn); 455 out: 456 kfree(isert_conn); 457 rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED); 458 return ret; 459 } 460 461 static void 462 isert_connect_release(struct isert_conn *isert_conn) 463 { 464 struct isert_device *device = isert_conn->device; 465 466 isert_dbg("conn %p\n", isert_conn); 467 468 BUG_ON(!device); 469 470 isert_free_rx_descriptors(isert_conn); 471 if (isert_conn->cm_id && 472 !isert_conn->dev_removed) 473 rdma_destroy_id(isert_conn->cm_id); 474 475 if (isert_conn->qp) 476 isert_destroy_qp(isert_conn); 477 478 if (isert_conn->login_desc) 479 isert_free_login_buf(isert_conn); 480 481 isert_device_put(device); 482 483 if (isert_conn->dev_removed) 484 wake_up_interruptible(&isert_conn->rem_wait); 485 else 486 kfree(isert_conn); 487 } 488 489 static void 490 isert_connected_handler(struct rdma_cm_id *cma_id) 491 { 492 struct isert_conn *isert_conn = cma_id->qp->qp_context; 493 struct isert_np *isert_np = cma_id->context; 494 495 isert_info("conn %p\n", isert_conn); 496 497 mutex_lock(&isert_conn->mutex); 498 isert_conn->state = ISER_CONN_UP; 499 kref_get(&isert_conn->kref); 500 mutex_unlock(&isert_conn->mutex); 501 502 mutex_lock(&isert_np->mutex); 503 list_move_tail(&isert_conn->node, &isert_np->pending); 504 mutex_unlock(&isert_np->mutex); 505 506 isert_info("np %p: Allow accept_np to continue\n", isert_np); 507 up(&isert_np->sem); 508 } 509 510 static void 511 isert_release_kref(struct kref *kref) 512 { 513 struct isert_conn *isert_conn = container_of(kref, 514 struct isert_conn, kref); 515 516 isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm, 517 current->pid); 518 519 isert_connect_release(isert_conn); 520 } 521 522 static void 523 isert_put_conn(struct isert_conn *isert_conn) 524 { 525 kref_put(&isert_conn->kref, isert_release_kref); 526 } 527 528 static void 529 isert_handle_unbound_conn(struct isert_conn *isert_conn) 530 { 531 struct isert_np *isert_np = isert_conn->cm_id->context; 532 533 mutex_lock(&isert_np->mutex); 534 if (!list_empty(&isert_conn->node)) { 535 /* 536 * This means iscsi doesn't know this connection 537 * so schedule a cleanup ourselves 538 */ 539 list_del_init(&isert_conn->node); 540 isert_put_conn(isert_conn); 541 queue_work(isert_release_wq, &isert_conn->release_work); 542 } 543 mutex_unlock(&isert_np->mutex); 544 } 545 546 /** 547 * isert_conn_terminate() - Initiate connection termination 548 * @isert_conn: isert connection struct 549 * 550 * Notes: 551 * In case the connection state is BOUND, move state 552 * to TEMINATING and start teardown sequence (rdma_disconnect). 553 * In case the connection state is UP, complete flush as well. 554 * 555 * This routine must be called with mutex held. Thus it is 556 * safe to call multiple times. 557 */ 558 static void 559 isert_conn_terminate(struct isert_conn *isert_conn) 560 { 561 int err; 562 563 if (isert_conn->state >= ISER_CONN_TERMINATING) 564 return; 565 566 isert_info("Terminating conn %p state %d\n", 567 isert_conn, isert_conn->state); 568 isert_conn->state = ISER_CONN_TERMINATING; 569 err = rdma_disconnect(isert_conn->cm_id); 570 if (err) 571 isert_warn("Failed rdma_disconnect isert_conn %p\n", 572 isert_conn); 573 } 574 575 static int 576 isert_np_cma_handler(struct isert_np *isert_np, 577 enum rdma_cm_event_type event) 578 { 579 isert_dbg("%s (%d): isert np %p\n", 580 rdma_event_msg(event), event, isert_np); 581 582 switch (event) { 583 case RDMA_CM_EVENT_DEVICE_REMOVAL: 584 isert_np->cm_id = NULL; 585 break; 586 case RDMA_CM_EVENT_ADDR_CHANGE: 587 isert_np->cm_id = isert_setup_id(isert_np); 588 if (IS_ERR(isert_np->cm_id)) { 589 isert_err("isert np %p setup id failed: %ld\n", 590 isert_np, PTR_ERR(isert_np->cm_id)); 591 isert_np->cm_id = NULL; 592 } 593 break; 594 default: 595 isert_err("isert np %p Unexpected event %d\n", 596 isert_np, event); 597 } 598 599 return -1; 600 } 601 602 static int 603 isert_disconnected_handler(struct rdma_cm_id *cma_id, 604 enum rdma_cm_event_type event) 605 { 606 struct isert_conn *isert_conn = cma_id->qp->qp_context; 607 608 mutex_lock(&isert_conn->mutex); 609 switch (isert_conn->state) { 610 case ISER_CONN_TERMINATING: 611 break; 612 case ISER_CONN_UP: 613 isert_conn_terminate(isert_conn); 614 ib_drain_qp(isert_conn->qp); 615 isert_handle_unbound_conn(isert_conn); 616 break; 617 case ISER_CONN_BOUND: 618 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */ 619 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 620 break; 621 default: 622 isert_warn("conn %p terminating in state %d\n", 623 isert_conn, isert_conn->state); 624 } 625 mutex_unlock(&isert_conn->mutex); 626 627 return 0; 628 } 629 630 static int 631 isert_connect_error(struct rdma_cm_id *cma_id) 632 { 633 struct isert_conn *isert_conn = cma_id->qp->qp_context; 634 635 ib_drain_qp(isert_conn->qp); 636 list_del_init(&isert_conn->node); 637 isert_conn->cm_id = NULL; 638 isert_put_conn(isert_conn); 639 640 return -1; 641 } 642 643 static int 644 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 645 { 646 struct isert_np *isert_np = cma_id->context; 647 struct isert_conn *isert_conn; 648 int ret = 0; 649 650 isert_info("%s (%d): status %d id %p np %p\n", 651 rdma_event_msg(event->event), event->event, 652 event->status, cma_id, cma_id->context); 653 654 if (isert_np->cm_id == cma_id) 655 return isert_np_cma_handler(cma_id->context, event->event); 656 657 switch (event->event) { 658 case RDMA_CM_EVENT_CONNECT_REQUEST: 659 ret = isert_connect_request(cma_id, event); 660 if (ret) 661 isert_err("failed handle connect request %d\n", ret); 662 break; 663 case RDMA_CM_EVENT_ESTABLISHED: 664 isert_connected_handler(cma_id); 665 break; 666 case RDMA_CM_EVENT_ADDR_CHANGE: 667 case RDMA_CM_EVENT_DISCONNECTED: 668 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ 669 ret = isert_disconnected_handler(cma_id, event->event); 670 break; 671 case RDMA_CM_EVENT_DEVICE_REMOVAL: 672 isert_conn = cma_id->qp->qp_context; 673 isert_conn->dev_removed = true; 674 isert_disconnected_handler(cma_id, event->event); 675 wait_event_interruptible(isert_conn->rem_wait, 676 isert_conn->state == ISER_CONN_DOWN); 677 kfree(isert_conn); 678 /* 679 * return non-zero from the callback to destroy 680 * the rdma cm id 681 */ 682 return 1; 683 case RDMA_CM_EVENT_REJECTED: 684 isert_info("Connection rejected: %s\n", 685 rdma_reject_msg(cma_id, event->status)); 686 fallthrough; 687 case RDMA_CM_EVENT_UNREACHABLE: 688 case RDMA_CM_EVENT_CONNECT_ERROR: 689 ret = isert_connect_error(cma_id); 690 break; 691 default: 692 isert_err("Unhandled RDMA CMA event: %d\n", event->event); 693 break; 694 } 695 696 return ret; 697 } 698 699 static int 700 isert_post_recvm(struct isert_conn *isert_conn, u32 count) 701 { 702 struct ib_recv_wr *rx_wr; 703 int i, ret; 704 struct iser_rx_desc *rx_desc; 705 706 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { 707 rx_desc = &isert_conn->rx_descs[i]; 708 709 rx_wr->wr_cqe = &rx_desc->rx_cqe; 710 rx_wr->sg_list = &rx_desc->rx_sg; 711 rx_wr->num_sge = 1; 712 rx_wr->next = rx_wr + 1; 713 rx_desc->in_use = false; 714 } 715 rx_wr--; 716 rx_wr->next = NULL; /* mark end of work requests list */ 717 718 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, NULL); 719 if (ret) 720 isert_err("ib_post_recv() failed with ret: %d\n", ret); 721 722 return ret; 723 } 724 725 static int 726 isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc) 727 { 728 struct ib_recv_wr rx_wr; 729 int ret; 730 731 if (!rx_desc->in_use) { 732 /* 733 * if the descriptor is not in-use we already reposted it 734 * for recv, so just silently return 735 */ 736 return 0; 737 } 738 739 rx_desc->in_use = false; 740 rx_wr.wr_cqe = &rx_desc->rx_cqe; 741 rx_wr.sg_list = &rx_desc->rx_sg; 742 rx_wr.num_sge = 1; 743 rx_wr.next = NULL; 744 745 ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL); 746 if (ret) 747 isert_err("ib_post_recv() failed with ret: %d\n", ret); 748 749 return ret; 750 } 751 752 static int 753 isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) 754 { 755 struct ib_device *ib_dev = isert_conn->cm_id->device; 756 struct ib_send_wr send_wr; 757 int ret; 758 759 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr, 760 ISER_HEADERS_LEN, DMA_TO_DEVICE); 761 762 tx_desc->tx_cqe.done = isert_login_send_done; 763 764 send_wr.next = NULL; 765 send_wr.wr_cqe = &tx_desc->tx_cqe; 766 send_wr.sg_list = tx_desc->tx_sg; 767 send_wr.num_sge = tx_desc->num_sge; 768 send_wr.opcode = IB_WR_SEND; 769 send_wr.send_flags = IB_SEND_SIGNALED; 770 771 ret = ib_post_send(isert_conn->qp, &send_wr, NULL); 772 if (ret) 773 isert_err("ib_post_send() failed, ret: %d\n", ret); 774 775 return ret; 776 } 777 778 static void 779 __isert_create_send_desc(struct isert_device *device, 780 struct iser_tx_desc *tx_desc) 781 { 782 783 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl)); 784 tx_desc->iser_header.flags = ISCSI_CTRL; 785 786 tx_desc->num_sge = 1; 787 788 if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) { 789 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; 790 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc); 791 } 792 } 793 794 static void 795 isert_create_send_desc(struct isert_conn *isert_conn, 796 struct isert_cmd *isert_cmd, 797 struct iser_tx_desc *tx_desc) 798 { 799 struct isert_device *device = isert_conn->device; 800 struct ib_device *ib_dev = device->ib_device; 801 802 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, 803 ISER_HEADERS_LEN, DMA_TO_DEVICE); 804 805 __isert_create_send_desc(device, tx_desc); 806 } 807 808 static int 809 isert_init_tx_hdrs(struct isert_conn *isert_conn, 810 struct iser_tx_desc *tx_desc) 811 { 812 struct isert_device *device = isert_conn->device; 813 struct ib_device *ib_dev = device->ib_device; 814 u64 dma_addr; 815 816 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc, 817 ISER_HEADERS_LEN, DMA_TO_DEVICE); 818 if (ib_dma_mapping_error(ib_dev, dma_addr)) { 819 isert_err("ib_dma_mapping_error() failed\n"); 820 return -ENOMEM; 821 } 822 823 tx_desc->dma_addr = dma_addr; 824 tx_desc->tx_sg[0].addr = tx_desc->dma_addr; 825 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; 826 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; 827 828 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n", 829 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length, 830 tx_desc->tx_sg[0].lkey); 831 832 return 0; 833 } 834 835 static void 836 isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 837 struct ib_send_wr *send_wr) 838 { 839 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; 840 841 tx_desc->tx_cqe.done = isert_send_done; 842 send_wr->wr_cqe = &tx_desc->tx_cqe; 843 844 if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) { 845 send_wr->opcode = IB_WR_SEND_WITH_INV; 846 send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey; 847 } else { 848 send_wr->opcode = IB_WR_SEND; 849 } 850 851 send_wr->sg_list = &tx_desc->tx_sg[0]; 852 send_wr->num_sge = isert_cmd->tx_desc.num_sge; 853 send_wr->send_flags = IB_SEND_SIGNALED; 854 } 855 856 static int 857 isert_login_post_recv(struct isert_conn *isert_conn) 858 { 859 struct ib_recv_wr rx_wr; 860 struct ib_sge sge; 861 int ret; 862 863 memset(&sge, 0, sizeof(struct ib_sge)); 864 sge.addr = isert_conn->login_desc->dma_addr + 865 isert_get_hdr_offset(isert_conn->login_desc); 866 sge.length = ISER_RX_PAYLOAD_SIZE; 867 sge.lkey = isert_conn->device->pd->local_dma_lkey; 868 869 isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", 870 sge.addr, sge.length, sge.lkey); 871 872 isert_conn->login_desc->rx_cqe.done = isert_login_recv_done; 873 874 memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); 875 rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe; 876 rx_wr.sg_list = &sge; 877 rx_wr.num_sge = 1; 878 879 ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL); 880 if (ret) 881 isert_err("ib_post_recv() failed: %d\n", ret); 882 883 return ret; 884 } 885 886 static int 887 isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, 888 u32 length) 889 { 890 struct isert_conn *isert_conn = conn->context; 891 struct isert_device *device = isert_conn->device; 892 struct ib_device *ib_dev = device->ib_device; 893 struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc; 894 int ret; 895 896 __isert_create_send_desc(device, tx_desc); 897 898 memcpy(&tx_desc->iscsi_header, &login->rsp[0], 899 sizeof(struct iscsi_hdr)); 900 901 isert_init_tx_hdrs(isert_conn, tx_desc); 902 903 if (length > 0) { 904 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1]; 905 906 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma, 907 length, DMA_TO_DEVICE); 908 909 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length); 910 911 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma, 912 length, DMA_TO_DEVICE); 913 914 tx_dsg->addr = isert_conn->login_rsp_dma; 915 tx_dsg->length = length; 916 tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey; 917 tx_desc->num_sge = 2; 918 } 919 if (!login->login_failed) { 920 if (login->login_complete) { 921 ret = isert_alloc_rx_descriptors(isert_conn); 922 if (ret) 923 return ret; 924 925 ret = isert_post_recvm(isert_conn, 926 ISERT_QP_MAX_RECV_DTOS); 927 if (ret) 928 return ret; 929 930 /* Now we are in FULL_FEATURE phase */ 931 mutex_lock(&isert_conn->mutex); 932 isert_conn->state = ISER_CONN_FULL_FEATURE; 933 mutex_unlock(&isert_conn->mutex); 934 goto post_send; 935 } 936 937 ret = isert_login_post_recv(isert_conn); 938 if (ret) 939 return ret; 940 } 941 post_send: 942 ret = isert_login_post_send(isert_conn, tx_desc); 943 if (ret) 944 return ret; 945 946 return 0; 947 } 948 949 static void 950 isert_rx_login_req(struct isert_conn *isert_conn) 951 { 952 struct iser_rx_desc *rx_desc = isert_conn->login_desc; 953 int rx_buflen = isert_conn->login_req_len; 954 struct iscsi_conn *conn = isert_conn->conn; 955 struct iscsi_login *login = conn->conn_login; 956 int size; 957 958 isert_info("conn %p\n", isert_conn); 959 960 WARN_ON_ONCE(!login); 961 962 if (login->first_request) { 963 struct iscsi_login_req *login_req = 964 (struct iscsi_login_req *)isert_get_iscsi_hdr(rx_desc); 965 /* 966 * Setup the initial iscsi_login values from the leading 967 * login request PDU. 968 */ 969 login->leading_connection = (!login_req->tsih) ? 1 : 0; 970 login->current_stage = 971 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) 972 >> 2; 973 login->version_min = login_req->min_version; 974 login->version_max = login_req->max_version; 975 memcpy(login->isid, login_req->isid, 6); 976 login->cmd_sn = be32_to_cpu(login_req->cmdsn); 977 login->init_task_tag = login_req->itt; 978 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); 979 login->cid = be16_to_cpu(login_req->cid); 980 login->tsih = be16_to_cpu(login_req->tsih); 981 } 982 983 memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN); 984 985 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); 986 isert_dbg("Using login payload size: %d, rx_buflen: %d " 987 "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen, 988 MAX_KEY_VALUE_PAIRS); 989 memcpy(login->req_buf, isert_get_data(rx_desc), size); 990 991 if (login->first_request) { 992 complete(&isert_conn->login_comp); 993 return; 994 } 995 schedule_delayed_work(&conn->login_work, 0); 996 } 997 998 static struct iscsi_cmd 999 *isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc) 1000 { 1001 struct isert_conn *isert_conn = conn->context; 1002 struct isert_cmd *isert_cmd; 1003 struct iscsi_cmd *cmd; 1004 1005 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 1006 if (!cmd) { 1007 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n"); 1008 return NULL; 1009 } 1010 isert_cmd = iscsit_priv_cmd(cmd); 1011 isert_cmd->conn = isert_conn; 1012 isert_cmd->iscsi_cmd = cmd; 1013 isert_cmd->rx_desc = rx_desc; 1014 1015 return cmd; 1016 } 1017 1018 static int 1019 isert_handle_scsi_cmd(struct isert_conn *isert_conn, 1020 struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd, 1021 struct iser_rx_desc *rx_desc, unsigned char *buf) 1022 { 1023 struct iscsi_conn *conn = isert_conn->conn; 1024 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; 1025 int imm_data, imm_data_len, unsol_data, sg_nents, rc; 1026 bool dump_payload = false; 1027 unsigned int data_len; 1028 1029 rc = iscsit_setup_scsi_cmd(conn, cmd, buf); 1030 if (rc < 0) 1031 return rc; 1032 1033 imm_data = cmd->immediate_data; 1034 imm_data_len = cmd->first_burst_len; 1035 unsol_data = cmd->unsolicited_data; 1036 data_len = cmd->se_cmd.data_length; 1037 1038 if (imm_data && imm_data_len == data_len) 1039 cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1040 rc = iscsit_process_scsi_cmd(conn, cmd, hdr); 1041 if (rc < 0) { 1042 return 0; 1043 } else if (rc > 0) { 1044 dump_payload = true; 1045 goto sequence_cmd; 1046 } 1047 1048 if (!imm_data) 1049 return 0; 1050 1051 if (imm_data_len != data_len) { 1052 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); 1053 sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents, 1054 isert_get_data(rx_desc), imm_data_len); 1055 isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n", 1056 sg_nents, imm_data_len); 1057 } else { 1058 sg_init_table(&isert_cmd->sg, 1); 1059 cmd->se_cmd.t_data_sg = &isert_cmd->sg; 1060 cmd->se_cmd.t_data_nents = 1; 1061 sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc), 1062 imm_data_len); 1063 isert_dbg("Transfer Immediate imm_data_len: %d\n", 1064 imm_data_len); 1065 } 1066 1067 cmd->write_data_done += imm_data_len; 1068 1069 if (cmd->write_data_done == cmd->se_cmd.data_length) { 1070 spin_lock_bh(&cmd->istate_lock); 1071 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1072 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1073 spin_unlock_bh(&cmd->istate_lock); 1074 } 1075 1076 sequence_cmd: 1077 rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); 1078 1079 if (!rc && dump_payload == false && unsol_data) 1080 iscsit_set_unsolicited_dataout(cmd); 1081 else if (dump_payload && imm_data) 1082 target_put_sess_cmd(&cmd->se_cmd); 1083 1084 return 0; 1085 } 1086 1087 static int 1088 isert_handle_iscsi_dataout(struct isert_conn *isert_conn, 1089 struct iser_rx_desc *rx_desc, unsigned char *buf) 1090 { 1091 struct scatterlist *sg_start; 1092 struct iscsi_conn *conn = isert_conn->conn; 1093 struct iscsi_cmd *cmd = NULL; 1094 struct iscsi_data *hdr = (struct iscsi_data *)buf; 1095 u32 unsol_data_len = ntoh24(hdr->dlength); 1096 int rc, sg_nents, sg_off, page_off; 1097 1098 rc = iscsit_check_dataout_hdr(conn, buf, &cmd); 1099 if (rc < 0) 1100 return rc; 1101 else if (!cmd) 1102 return 0; 1103 /* 1104 * FIXME: Unexpected unsolicited_data out 1105 */ 1106 if (!cmd->unsolicited_data) { 1107 isert_err("Received unexpected solicited data payload\n"); 1108 dump_stack(); 1109 return -1; 1110 } 1111 1112 isert_dbg("Unsolicited DataOut unsol_data_len: %u, " 1113 "write_data_done: %u, data_length: %u\n", 1114 unsol_data_len, cmd->write_data_done, 1115 cmd->se_cmd.data_length); 1116 1117 sg_off = cmd->write_data_done / PAGE_SIZE; 1118 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 1119 sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE)); 1120 page_off = cmd->write_data_done % PAGE_SIZE; 1121 /* 1122 * FIXME: Non page-aligned unsolicited_data out 1123 */ 1124 if (page_off) { 1125 isert_err("unexpected non-page aligned data payload\n"); 1126 dump_stack(); 1127 return -1; 1128 } 1129 isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u " 1130 "sg_nents: %u from %p %u\n", sg_start, sg_off, 1131 sg_nents, isert_get_data(rx_desc), unsol_data_len); 1132 1133 sg_copy_from_buffer(sg_start, sg_nents, isert_get_data(rx_desc), 1134 unsol_data_len); 1135 1136 rc = iscsit_check_dataout_payload(cmd, hdr, false); 1137 if (rc < 0) 1138 return rc; 1139 1140 /* 1141 * multiple data-outs on the same command can arrive - 1142 * so post the buffer before hand 1143 */ 1144 return isert_post_recv(isert_conn, rx_desc); 1145 } 1146 1147 static int 1148 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1149 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, 1150 unsigned char *buf) 1151 { 1152 struct iscsi_conn *conn = isert_conn->conn; 1153 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf; 1154 int rc; 1155 1156 rc = iscsit_setup_nop_out(conn, cmd, hdr); 1157 if (rc < 0) 1158 return rc; 1159 /* 1160 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload 1161 */ 1162 1163 return iscsit_process_nop_out(conn, cmd, hdr); 1164 } 1165 1166 static int 1167 isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1168 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, 1169 struct iscsi_text *hdr) 1170 { 1171 struct iscsi_conn *conn = isert_conn->conn; 1172 u32 payload_length = ntoh24(hdr->dlength); 1173 int rc; 1174 unsigned char *text_in = NULL; 1175 1176 rc = iscsit_setup_text_cmd(conn, cmd, hdr); 1177 if (rc < 0) 1178 return rc; 1179 1180 if (payload_length) { 1181 text_in = kzalloc(payload_length, GFP_KERNEL); 1182 if (!text_in) 1183 return -ENOMEM; 1184 } 1185 cmd->text_in_ptr = text_in; 1186 1187 memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length); 1188 1189 return iscsit_process_text_cmd(conn, cmd, hdr); 1190 } 1191 1192 static int 1193 isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, 1194 uint32_t read_stag, uint64_t read_va, 1195 uint32_t write_stag, uint64_t write_va) 1196 { 1197 struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc); 1198 struct iscsi_conn *conn = isert_conn->conn; 1199 struct iscsi_cmd *cmd; 1200 struct isert_cmd *isert_cmd; 1201 int ret = -EINVAL; 1202 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK); 1203 1204 if (conn->sess->sess_ops->SessionType && 1205 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { 1206 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery," 1207 " ignoring\n", opcode); 1208 return 0; 1209 } 1210 1211 switch (opcode) { 1212 case ISCSI_OP_SCSI_CMD: 1213 cmd = isert_allocate_cmd(conn, rx_desc); 1214 if (!cmd) 1215 break; 1216 1217 isert_cmd = iscsit_priv_cmd(cmd); 1218 isert_cmd->read_stag = read_stag; 1219 isert_cmd->read_va = read_va; 1220 isert_cmd->write_stag = write_stag; 1221 isert_cmd->write_va = write_va; 1222 isert_cmd->inv_rkey = read_stag ? read_stag : write_stag; 1223 1224 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd, 1225 rx_desc, (unsigned char *)hdr); 1226 break; 1227 case ISCSI_OP_NOOP_OUT: 1228 cmd = isert_allocate_cmd(conn, rx_desc); 1229 if (!cmd) 1230 break; 1231 1232 isert_cmd = iscsit_priv_cmd(cmd); 1233 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd, 1234 rx_desc, (unsigned char *)hdr); 1235 break; 1236 case ISCSI_OP_SCSI_DATA_OUT: 1237 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc, 1238 (unsigned char *)hdr); 1239 break; 1240 case ISCSI_OP_SCSI_TMFUNC: 1241 cmd = isert_allocate_cmd(conn, rx_desc); 1242 if (!cmd) 1243 break; 1244 1245 ret = iscsit_handle_task_mgt_cmd(conn, cmd, 1246 (unsigned char *)hdr); 1247 break; 1248 case ISCSI_OP_LOGOUT: 1249 cmd = isert_allocate_cmd(conn, rx_desc); 1250 if (!cmd) 1251 break; 1252 1253 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); 1254 break; 1255 case ISCSI_OP_TEXT: 1256 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) 1257 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); 1258 else 1259 cmd = isert_allocate_cmd(conn, rx_desc); 1260 1261 if (!cmd) 1262 break; 1263 1264 isert_cmd = iscsit_priv_cmd(cmd); 1265 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd, 1266 rx_desc, (struct iscsi_text *)hdr); 1267 break; 1268 default: 1269 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode); 1270 dump_stack(); 1271 break; 1272 } 1273 1274 return ret; 1275 } 1276 1277 static void 1278 isert_print_wc(struct ib_wc *wc, const char *type) 1279 { 1280 if (wc->status != IB_WC_WR_FLUSH_ERR) 1281 isert_err("%s failure: %s (%d) vend_err %x\n", type, 1282 ib_wc_status_msg(wc->status), wc->status, 1283 wc->vendor_err); 1284 else 1285 isert_dbg("%s failure: %s (%d)\n", type, 1286 ib_wc_status_msg(wc->status), wc->status); 1287 } 1288 1289 static void 1290 isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) 1291 { 1292 struct isert_conn *isert_conn = wc->qp->qp_context; 1293 struct ib_device *ib_dev = isert_conn->cm_id->device; 1294 struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe); 1295 struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc); 1296 struct iser_ctrl *iser_ctrl = isert_get_iser_hdr(rx_desc); 1297 uint64_t read_va = 0, write_va = 0; 1298 uint32_t read_stag = 0, write_stag = 0; 1299 1300 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1301 isert_print_wc(wc, "recv"); 1302 if (wc->status != IB_WC_WR_FLUSH_ERR) 1303 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1304 return; 1305 } 1306 1307 rx_desc->in_use = true; 1308 1309 ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr, 1310 ISER_RX_SIZE, DMA_FROM_DEVICE); 1311 1312 isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", 1313 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags, 1314 (int)(wc->byte_len - ISER_HEADERS_LEN)); 1315 1316 switch (iser_ctrl->flags & 0xF0) { 1317 case ISCSI_CTRL: 1318 if (iser_ctrl->flags & ISER_RSV) { 1319 read_stag = be32_to_cpu(iser_ctrl->read_stag); 1320 read_va = be64_to_cpu(iser_ctrl->read_va); 1321 isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n", 1322 read_stag, (unsigned long long)read_va); 1323 } 1324 if (iser_ctrl->flags & ISER_WSV) { 1325 write_stag = be32_to_cpu(iser_ctrl->write_stag); 1326 write_va = be64_to_cpu(iser_ctrl->write_va); 1327 isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n", 1328 write_stag, (unsigned long long)write_va); 1329 } 1330 1331 isert_dbg("ISER ISCSI_CTRL PDU\n"); 1332 break; 1333 case ISER_HELLO: 1334 isert_err("iSER Hello message\n"); 1335 break; 1336 default: 1337 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags); 1338 break; 1339 } 1340 1341 isert_rx_opcode(isert_conn, rx_desc, 1342 read_stag, read_va, write_stag, write_va); 1343 1344 ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr, 1345 ISER_RX_SIZE, DMA_FROM_DEVICE); 1346 } 1347 1348 static void 1349 isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) 1350 { 1351 struct isert_conn *isert_conn = wc->qp->qp_context; 1352 struct ib_device *ib_dev = isert_conn->device->ib_device; 1353 1354 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1355 isert_print_wc(wc, "login recv"); 1356 return; 1357 } 1358 1359 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr, 1360 ISER_RX_SIZE, DMA_FROM_DEVICE); 1361 1362 isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN; 1363 1364 if (isert_conn->conn) { 1365 struct iscsi_login *login = isert_conn->conn->conn_login; 1366 1367 if (login && !login->first_request) 1368 isert_rx_login_req(isert_conn); 1369 } 1370 1371 mutex_lock(&isert_conn->mutex); 1372 complete(&isert_conn->login_req_comp); 1373 mutex_unlock(&isert_conn->mutex); 1374 1375 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr, 1376 ISER_RX_SIZE, DMA_FROM_DEVICE); 1377 } 1378 1379 static void 1380 isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn) 1381 { 1382 struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd; 1383 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd); 1384 1385 if (!cmd->rw.nr_ops) 1386 return; 1387 1388 if (isert_prot_cmd(conn, se_cmd)) { 1389 rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp, 1390 conn->cm_id->port_num, se_cmd->t_data_sg, 1391 se_cmd->t_data_nents, se_cmd->t_prot_sg, 1392 se_cmd->t_prot_nents, dir); 1393 } else { 1394 rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num, 1395 se_cmd->t_data_sg, se_cmd->t_data_nents, dir); 1396 } 1397 1398 cmd->rw.nr_ops = 0; 1399 } 1400 1401 static void 1402 isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) 1403 { 1404 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1405 struct isert_conn *isert_conn = isert_cmd->conn; 1406 struct iscsi_conn *conn = isert_conn->conn; 1407 struct iscsi_text_rsp *hdr; 1408 1409 isert_dbg("Cmd %p\n", isert_cmd); 1410 1411 switch (cmd->iscsi_opcode) { 1412 case ISCSI_OP_SCSI_CMD: 1413 spin_lock_bh(&conn->cmd_lock); 1414 if (!list_empty(&cmd->i_conn_node)) 1415 list_del_init(&cmd->i_conn_node); 1416 spin_unlock_bh(&conn->cmd_lock); 1417 1418 if (cmd->data_direction == DMA_TO_DEVICE) { 1419 iscsit_stop_dataout_timer(cmd); 1420 /* 1421 * Check for special case during comp_err where 1422 * WRITE_PENDING has been handed off from core, 1423 * but requires an extra target_put_sess_cmd() 1424 * before transport_generic_free_cmd() below. 1425 */ 1426 if (comp_err && 1427 cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) { 1428 struct se_cmd *se_cmd = &cmd->se_cmd; 1429 1430 target_put_sess_cmd(se_cmd); 1431 } 1432 } 1433 1434 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1435 transport_generic_free_cmd(&cmd->se_cmd, 0); 1436 break; 1437 case ISCSI_OP_SCSI_TMFUNC: 1438 spin_lock_bh(&conn->cmd_lock); 1439 if (!list_empty(&cmd->i_conn_node)) 1440 list_del_init(&cmd->i_conn_node); 1441 spin_unlock_bh(&conn->cmd_lock); 1442 1443 transport_generic_free_cmd(&cmd->se_cmd, 0); 1444 break; 1445 case ISCSI_OP_REJECT: 1446 case ISCSI_OP_NOOP_OUT: 1447 case ISCSI_OP_TEXT: 1448 hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; 1449 /* If the continue bit is on, keep the command alive */ 1450 if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE) 1451 break; 1452 1453 spin_lock_bh(&conn->cmd_lock); 1454 if (!list_empty(&cmd->i_conn_node)) 1455 list_del_init(&cmd->i_conn_node); 1456 spin_unlock_bh(&conn->cmd_lock); 1457 1458 /* 1459 * Handle special case for REJECT when iscsi_add_reject*() has 1460 * overwritten the original iscsi_opcode assignment, and the 1461 * associated cmd->se_cmd needs to be released. 1462 */ 1463 if (cmd->se_cmd.se_tfo != NULL) { 1464 isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n", 1465 cmd->iscsi_opcode); 1466 transport_generic_free_cmd(&cmd->se_cmd, 0); 1467 break; 1468 } 1469 fallthrough; 1470 default: 1471 iscsit_release_cmd(cmd); 1472 break; 1473 } 1474 } 1475 1476 static void 1477 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) 1478 { 1479 if (tx_desc->dma_addr != 0) { 1480 isert_dbg("unmap single for tx_desc->dma_addr\n"); 1481 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr, 1482 ISER_HEADERS_LEN, DMA_TO_DEVICE); 1483 tx_desc->dma_addr = 0; 1484 } 1485 } 1486 1487 static void 1488 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, 1489 struct ib_device *ib_dev, bool comp_err) 1490 { 1491 if (isert_cmd->pdu_buf_dma != 0) { 1492 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n"); 1493 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma, 1494 isert_cmd->pdu_buf_len, DMA_TO_DEVICE); 1495 isert_cmd->pdu_buf_dma = 0; 1496 } 1497 1498 isert_unmap_tx_desc(tx_desc, ib_dev); 1499 isert_put_cmd(isert_cmd, comp_err); 1500 } 1501 1502 static int 1503 isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr) 1504 { 1505 struct ib_mr_status mr_status; 1506 int ret; 1507 1508 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); 1509 if (ret) { 1510 isert_err("ib_check_mr_status failed, ret %d\n", ret); 1511 goto fail_mr_status; 1512 } 1513 1514 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { 1515 u64 sec_offset_err; 1516 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8; 1517 1518 switch (mr_status.sig_err.err_type) { 1519 case IB_SIG_BAD_GUARD: 1520 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 1521 break; 1522 case IB_SIG_BAD_REFTAG: 1523 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 1524 break; 1525 case IB_SIG_BAD_APPTAG: 1526 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; 1527 break; 1528 } 1529 sec_offset_err = mr_status.sig_err.sig_err_offset; 1530 do_div(sec_offset_err, block_size); 1531 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba; 1532 1533 isert_err("PI error found type %d at sector 0x%llx " 1534 "expected 0x%x vs actual 0x%x\n", 1535 mr_status.sig_err.err_type, 1536 (unsigned long long)se_cmd->bad_sector, 1537 mr_status.sig_err.expected, 1538 mr_status.sig_err.actual); 1539 ret = 1; 1540 } 1541 1542 fail_mr_status: 1543 return ret; 1544 } 1545 1546 static void 1547 isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc) 1548 { 1549 struct isert_conn *isert_conn = wc->qp->qp_context; 1550 struct isert_device *device = isert_conn->device; 1551 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); 1552 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc); 1553 struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd; 1554 int ret = 0; 1555 1556 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1557 isert_print_wc(wc, "rdma write"); 1558 if (wc->status != IB_WC_WR_FLUSH_ERR) 1559 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1560 isert_completion_put(desc, isert_cmd, device->ib_device, true); 1561 return; 1562 } 1563 1564 isert_dbg("Cmd %p\n", isert_cmd); 1565 1566 ret = isert_check_pi_status(cmd, isert_cmd->rw.reg->mr); 1567 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1568 1569 if (ret) { 1570 /* 1571 * transport_generic_request_failure() expects to have 1572 * plus two references to handle queue-full, so re-add 1573 * one here as target-core will have already dropped 1574 * it after the first isert_put_datain() callback. 1575 */ 1576 kref_get(&cmd->cmd_kref); 1577 transport_generic_request_failure(cmd, cmd->pi_err); 1578 } else { 1579 /* 1580 * XXX: isert_put_response() failure is not retried. 1581 */ 1582 ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd); 1583 if (ret) 1584 pr_warn_ratelimited("isert_put_response() ret: %d\n", ret); 1585 } 1586 } 1587 1588 static void 1589 isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc) 1590 { 1591 struct isert_conn *isert_conn = wc->qp->qp_context; 1592 struct isert_device *device = isert_conn->device; 1593 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); 1594 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc); 1595 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1596 struct se_cmd *se_cmd = &cmd->se_cmd; 1597 int ret = 0; 1598 1599 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1600 isert_print_wc(wc, "rdma read"); 1601 if (wc->status != IB_WC_WR_FLUSH_ERR) 1602 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1603 isert_completion_put(desc, isert_cmd, device->ib_device, true); 1604 return; 1605 } 1606 1607 isert_dbg("Cmd %p\n", isert_cmd); 1608 1609 iscsit_stop_dataout_timer(cmd); 1610 1611 if (isert_prot_cmd(isert_conn, se_cmd)) 1612 ret = isert_check_pi_status(se_cmd, isert_cmd->rw.reg->mr); 1613 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1614 cmd->write_data_done = 0; 1615 1616 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); 1617 spin_lock_bh(&cmd->istate_lock); 1618 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1619 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1620 spin_unlock_bh(&cmd->istate_lock); 1621 1622 /* 1623 * transport_generic_request_failure() will drop the extra 1624 * se_cmd->cmd_kref reference after T10-PI error, and handle 1625 * any non-zero ->queue_status() callback error retries. 1626 */ 1627 if (ret) 1628 transport_generic_request_failure(se_cmd, se_cmd->pi_err); 1629 else 1630 target_execute_cmd(se_cmd); 1631 } 1632 1633 static void 1634 isert_do_control_comp(struct work_struct *work) 1635 { 1636 struct isert_cmd *isert_cmd = container_of(work, 1637 struct isert_cmd, comp_work); 1638 struct isert_conn *isert_conn = isert_cmd->conn; 1639 struct ib_device *ib_dev = isert_conn->cm_id->device; 1640 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1641 1642 isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state); 1643 1644 switch (cmd->i_state) { 1645 case ISTATE_SEND_TASKMGTRSP: 1646 iscsit_tmr_post_handler(cmd, cmd->conn); 1647 fallthrough; 1648 case ISTATE_SEND_REJECT: 1649 case ISTATE_SEND_TEXTRSP: 1650 cmd->i_state = ISTATE_SENT_STATUS; 1651 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, 1652 ib_dev, false); 1653 break; 1654 case ISTATE_SEND_LOGOUTRSP: 1655 iscsit_logout_post_handler(cmd, cmd->conn); 1656 break; 1657 default: 1658 isert_err("Unknown i_state %d\n", cmd->i_state); 1659 dump_stack(); 1660 break; 1661 } 1662 } 1663 1664 static void 1665 isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc) 1666 { 1667 struct isert_conn *isert_conn = wc->qp->qp_context; 1668 struct ib_device *ib_dev = isert_conn->cm_id->device; 1669 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); 1670 1671 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1672 isert_print_wc(wc, "login send"); 1673 if (wc->status != IB_WC_WR_FLUSH_ERR) 1674 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1675 } 1676 1677 isert_unmap_tx_desc(tx_desc, ib_dev); 1678 } 1679 1680 static void 1681 isert_send_done(struct ib_cq *cq, struct ib_wc *wc) 1682 { 1683 struct isert_conn *isert_conn = wc->qp->qp_context; 1684 struct ib_device *ib_dev = isert_conn->cm_id->device; 1685 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); 1686 struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc); 1687 1688 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1689 isert_print_wc(wc, "send"); 1690 if (wc->status != IB_WC_WR_FLUSH_ERR) 1691 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1692 isert_completion_put(tx_desc, isert_cmd, ib_dev, true); 1693 return; 1694 } 1695 1696 isert_dbg("Cmd %p\n", isert_cmd); 1697 1698 switch (isert_cmd->iscsi_cmd->i_state) { 1699 case ISTATE_SEND_TASKMGTRSP: 1700 case ISTATE_SEND_LOGOUTRSP: 1701 case ISTATE_SEND_REJECT: 1702 case ISTATE_SEND_TEXTRSP: 1703 isert_unmap_tx_desc(tx_desc, ib_dev); 1704 1705 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp); 1706 queue_work(isert_comp_wq, &isert_cmd->comp_work); 1707 return; 1708 default: 1709 isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS; 1710 isert_completion_put(tx_desc, isert_cmd, ib_dev, false); 1711 break; 1712 } 1713 } 1714 1715 static int 1716 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd) 1717 { 1718 int ret; 1719 1720 ret = isert_post_recv(isert_conn, isert_cmd->rx_desc); 1721 if (ret) 1722 return ret; 1723 1724 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, NULL); 1725 if (ret) { 1726 isert_err("ib_post_send failed with %d\n", ret); 1727 return ret; 1728 } 1729 return ret; 1730 } 1731 1732 static int 1733 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 1734 { 1735 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1736 struct isert_conn *isert_conn = conn->context; 1737 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1738 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *) 1739 &isert_cmd->tx_desc.iscsi_header; 1740 1741 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1742 iscsit_build_rsp_pdu(cmd, conn, true, hdr); 1743 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1744 /* 1745 * Attach SENSE DATA payload to iSCSI Response PDU 1746 */ 1747 if (cmd->se_cmd.sense_buffer && 1748 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || 1749 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { 1750 struct isert_device *device = isert_conn->device; 1751 struct ib_device *ib_dev = device->ib_device; 1752 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 1753 u32 padding, pdu_len; 1754 1755 put_unaligned_be16(cmd->se_cmd.scsi_sense_length, 1756 cmd->sense_buffer); 1757 cmd->se_cmd.scsi_sense_length += sizeof(__be16); 1758 1759 padding = -(cmd->se_cmd.scsi_sense_length) & 3; 1760 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length); 1761 pdu_len = cmd->se_cmd.scsi_sense_length + padding; 1762 1763 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 1764 (void *)cmd->sense_buffer, pdu_len, 1765 DMA_TO_DEVICE); 1766 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) 1767 return -ENOMEM; 1768 1769 isert_cmd->pdu_buf_len = pdu_len; 1770 tx_dsg->addr = isert_cmd->pdu_buf_dma; 1771 tx_dsg->length = pdu_len; 1772 tx_dsg->lkey = device->pd->local_dma_lkey; 1773 isert_cmd->tx_desc.num_sge = 2; 1774 } 1775 1776 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1777 1778 isert_dbg("Posting SCSI Response\n"); 1779 1780 return isert_post_response(isert_conn, isert_cmd); 1781 } 1782 1783 static void 1784 isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 1785 { 1786 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1787 struct isert_conn *isert_conn = conn->context; 1788 1789 spin_lock_bh(&conn->cmd_lock); 1790 if (!list_empty(&cmd->i_conn_node)) 1791 list_del_init(&cmd->i_conn_node); 1792 spin_unlock_bh(&conn->cmd_lock); 1793 1794 if (cmd->data_direction == DMA_TO_DEVICE) 1795 iscsit_stop_dataout_timer(cmd); 1796 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1797 } 1798 1799 static enum target_prot_op 1800 isert_get_sup_prot_ops(struct iscsi_conn *conn) 1801 { 1802 struct isert_conn *isert_conn = conn->context; 1803 struct isert_device *device = isert_conn->device; 1804 1805 if (conn->tpg->tpg_attrib.t10_pi) { 1806 if (device->pi_capable) { 1807 isert_info("conn %p PI offload enabled\n", isert_conn); 1808 isert_conn->pi_support = true; 1809 return TARGET_PROT_ALL; 1810 } 1811 } 1812 1813 isert_info("conn %p PI offload disabled\n", isert_conn); 1814 isert_conn->pi_support = false; 1815 1816 return TARGET_PROT_NORMAL; 1817 } 1818 1819 static int 1820 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 1821 bool nopout_response) 1822 { 1823 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1824 struct isert_conn *isert_conn = conn->context; 1825 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1826 1827 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1828 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *) 1829 &isert_cmd->tx_desc.iscsi_header, 1830 nopout_response); 1831 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1832 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1833 1834 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn); 1835 1836 return isert_post_response(isert_conn, isert_cmd); 1837 } 1838 1839 static int 1840 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1841 { 1842 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1843 struct isert_conn *isert_conn = conn->context; 1844 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1845 1846 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1847 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *) 1848 &isert_cmd->tx_desc.iscsi_header); 1849 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1850 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1851 1852 isert_dbg("conn %p Posting Logout Response\n", isert_conn); 1853 1854 return isert_post_response(isert_conn, isert_cmd); 1855 } 1856 1857 static int 1858 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1859 { 1860 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1861 struct isert_conn *isert_conn = conn->context; 1862 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1863 1864 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1865 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *) 1866 &isert_cmd->tx_desc.iscsi_header); 1867 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1868 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1869 1870 isert_dbg("conn %p Posting Task Management Response\n", isert_conn); 1871 1872 return isert_post_response(isert_conn, isert_cmd); 1873 } 1874 1875 static int 1876 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1877 { 1878 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1879 struct isert_conn *isert_conn = conn->context; 1880 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1881 struct isert_device *device = isert_conn->device; 1882 struct ib_device *ib_dev = device->ib_device; 1883 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 1884 struct iscsi_reject *hdr = 1885 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header; 1886 1887 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1888 iscsit_build_reject(cmd, conn, hdr); 1889 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1890 1891 hton24(hdr->dlength, ISCSI_HDR_LEN); 1892 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 1893 (void *)cmd->buf_ptr, ISCSI_HDR_LEN, 1894 DMA_TO_DEVICE); 1895 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) 1896 return -ENOMEM; 1897 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN; 1898 tx_dsg->addr = isert_cmd->pdu_buf_dma; 1899 tx_dsg->length = ISCSI_HDR_LEN; 1900 tx_dsg->lkey = device->pd->local_dma_lkey; 1901 isert_cmd->tx_desc.num_sge = 2; 1902 1903 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1904 1905 isert_dbg("conn %p Posting Reject\n", isert_conn); 1906 1907 return isert_post_response(isert_conn, isert_cmd); 1908 } 1909 1910 static int 1911 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1912 { 1913 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1914 struct isert_conn *isert_conn = conn->context; 1915 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1916 struct iscsi_text_rsp *hdr = 1917 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; 1918 u32 txt_rsp_len; 1919 int rc; 1920 1921 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1922 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND); 1923 if (rc < 0) 1924 return rc; 1925 1926 txt_rsp_len = rc; 1927 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1928 1929 if (txt_rsp_len) { 1930 struct isert_device *device = isert_conn->device; 1931 struct ib_device *ib_dev = device->ib_device; 1932 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 1933 void *txt_rsp_buf = cmd->buf_ptr; 1934 1935 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 1936 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE); 1937 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) 1938 return -ENOMEM; 1939 1940 isert_cmd->pdu_buf_len = txt_rsp_len; 1941 tx_dsg->addr = isert_cmd->pdu_buf_dma; 1942 tx_dsg->length = txt_rsp_len; 1943 tx_dsg->lkey = device->pd->local_dma_lkey; 1944 isert_cmd->tx_desc.num_sge = 2; 1945 } 1946 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1947 1948 isert_dbg("conn %p Text Response\n", isert_conn); 1949 1950 return isert_post_response(isert_conn, isert_cmd); 1951 } 1952 1953 static inline void 1954 isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_domain *domain) 1955 { 1956 domain->sig_type = IB_SIG_TYPE_T10_DIF; 1957 domain->sig.dif.bg_type = IB_T10DIF_CRC; 1958 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size; 1959 domain->sig.dif.ref_tag = se_cmd->reftag_seed; 1960 /* 1961 * At the moment we hard code those, but if in the future 1962 * the target core would like to use it, we will take it 1963 * from se_cmd. 1964 */ 1965 domain->sig.dif.apptag_check_mask = 0xffff; 1966 domain->sig.dif.app_escape = true; 1967 domain->sig.dif.ref_escape = true; 1968 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT || 1969 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT) 1970 domain->sig.dif.ref_remap = true; 1971 }; 1972 1973 static int 1974 isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs) 1975 { 1976 memset(sig_attrs, 0, sizeof(*sig_attrs)); 1977 1978 switch (se_cmd->prot_op) { 1979 case TARGET_PROT_DIN_INSERT: 1980 case TARGET_PROT_DOUT_STRIP: 1981 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE; 1982 isert_set_dif_domain(se_cmd, &sig_attrs->wire); 1983 break; 1984 case TARGET_PROT_DOUT_INSERT: 1985 case TARGET_PROT_DIN_STRIP: 1986 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE; 1987 isert_set_dif_domain(se_cmd, &sig_attrs->mem); 1988 break; 1989 case TARGET_PROT_DIN_PASS: 1990 case TARGET_PROT_DOUT_PASS: 1991 isert_set_dif_domain(se_cmd, &sig_attrs->wire); 1992 isert_set_dif_domain(se_cmd, &sig_attrs->mem); 1993 break; 1994 default: 1995 isert_err("Unsupported PI operation %d\n", se_cmd->prot_op); 1996 return -EINVAL; 1997 } 1998 1999 if (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD) 2000 sig_attrs->check_mask |= IB_SIG_CHECK_GUARD; 2001 if (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG) 2002 sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG; 2003 if (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG) 2004 sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG; 2005 2006 return 0; 2007 } 2008 2009 static int 2010 isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn, 2011 struct ib_cqe *cqe, struct ib_send_wr *chain_wr) 2012 { 2013 struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd; 2014 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd); 2015 u8 port_num = conn->cm_id->port_num; 2016 u64 addr; 2017 u32 rkey, offset; 2018 int ret; 2019 2020 if (cmd->ctx_init_done) 2021 goto rdma_ctx_post; 2022 2023 if (dir == DMA_FROM_DEVICE) { 2024 addr = cmd->write_va; 2025 rkey = cmd->write_stag; 2026 offset = cmd->iscsi_cmd->write_data_done; 2027 } else { 2028 addr = cmd->read_va; 2029 rkey = cmd->read_stag; 2030 offset = 0; 2031 } 2032 2033 if (isert_prot_cmd(conn, se_cmd)) { 2034 struct ib_sig_attrs sig_attrs; 2035 2036 ret = isert_set_sig_attrs(se_cmd, &sig_attrs); 2037 if (ret) 2038 return ret; 2039 2040 WARN_ON_ONCE(offset); 2041 ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num, 2042 se_cmd->t_data_sg, se_cmd->t_data_nents, 2043 se_cmd->t_prot_sg, se_cmd->t_prot_nents, 2044 &sig_attrs, addr, rkey, dir); 2045 } else { 2046 ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num, 2047 se_cmd->t_data_sg, se_cmd->t_data_nents, 2048 offset, addr, rkey, dir); 2049 } 2050 2051 if (ret < 0) { 2052 isert_err("Cmd: %p failed to prepare RDMA res\n", cmd); 2053 return ret; 2054 } 2055 2056 cmd->ctx_init_done = true; 2057 2058 rdma_ctx_post: 2059 ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr); 2060 if (ret < 0) 2061 isert_err("Cmd: %p failed to post RDMA res\n", cmd); 2062 return ret; 2063 } 2064 2065 static int 2066 isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 2067 { 2068 struct se_cmd *se_cmd = &cmd->se_cmd; 2069 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2070 struct isert_conn *isert_conn = conn->context; 2071 struct ib_cqe *cqe = NULL; 2072 struct ib_send_wr *chain_wr = NULL; 2073 int rc; 2074 2075 isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n", 2076 isert_cmd, se_cmd->data_length); 2077 2078 if (isert_prot_cmd(isert_conn, se_cmd)) { 2079 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done; 2080 cqe = &isert_cmd->tx_desc.tx_cqe; 2081 } else { 2082 /* 2083 * Build isert_conn->tx_desc for iSCSI response PDU and attach 2084 */ 2085 isert_create_send_desc(isert_conn, isert_cmd, 2086 &isert_cmd->tx_desc); 2087 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *) 2088 &isert_cmd->tx_desc.iscsi_header); 2089 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2090 isert_init_send_wr(isert_conn, isert_cmd, 2091 &isert_cmd->tx_desc.send_wr); 2092 2093 rc = isert_post_recv(isert_conn, isert_cmd->rx_desc); 2094 if (rc) 2095 return rc; 2096 2097 chain_wr = &isert_cmd->tx_desc.send_wr; 2098 } 2099 2100 rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr); 2101 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n", 2102 isert_cmd, rc); 2103 return rc; 2104 } 2105 2106 static int 2107 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) 2108 { 2109 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2110 int ret; 2111 2112 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", 2113 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done); 2114 2115 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done; 2116 ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context, 2117 &isert_cmd->tx_desc.tx_cqe, NULL); 2118 2119 isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n", 2120 isert_cmd, ret); 2121 return ret; 2122 } 2123 2124 static int 2125 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 2126 { 2127 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2128 int ret = 0; 2129 2130 switch (state) { 2131 case ISTATE_REMOVE: 2132 spin_lock_bh(&conn->cmd_lock); 2133 list_del_init(&cmd->i_conn_node); 2134 spin_unlock_bh(&conn->cmd_lock); 2135 isert_put_cmd(isert_cmd, true); 2136 break; 2137 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 2138 ret = isert_put_nopin(cmd, conn, false); 2139 break; 2140 default: 2141 isert_err("Unknown immediate state: 0x%02x\n", state); 2142 ret = -EINVAL; 2143 break; 2144 } 2145 2146 return ret; 2147 } 2148 2149 static int 2150 isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 2151 { 2152 struct isert_conn *isert_conn = conn->context; 2153 int ret; 2154 2155 switch (state) { 2156 case ISTATE_SEND_LOGOUTRSP: 2157 ret = isert_put_logout_rsp(cmd, conn); 2158 if (!ret) 2159 isert_conn->logout_posted = true; 2160 break; 2161 case ISTATE_SEND_NOPIN: 2162 ret = isert_put_nopin(cmd, conn, true); 2163 break; 2164 case ISTATE_SEND_TASKMGTRSP: 2165 ret = isert_put_tm_rsp(cmd, conn); 2166 break; 2167 case ISTATE_SEND_REJECT: 2168 ret = isert_put_reject(cmd, conn); 2169 break; 2170 case ISTATE_SEND_TEXTRSP: 2171 ret = isert_put_text_rsp(cmd, conn); 2172 break; 2173 case ISTATE_SEND_STATUS: 2174 /* 2175 * Special case for sending non GOOD SCSI status from TX thread 2176 * context during pre se_cmd excecution failure. 2177 */ 2178 ret = isert_put_response(conn, cmd); 2179 break; 2180 default: 2181 isert_err("Unknown response state: 0x%02x\n", state); 2182 ret = -EINVAL; 2183 break; 2184 } 2185 2186 return ret; 2187 } 2188 2189 struct rdma_cm_id * 2190 isert_setup_id(struct isert_np *isert_np) 2191 { 2192 struct iscsi_np *np = isert_np->np; 2193 struct rdma_cm_id *id; 2194 struct sockaddr *sa; 2195 int ret; 2196 2197 sa = (struct sockaddr *)&np->np_sockaddr; 2198 isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa); 2199 2200 id = rdma_create_id(&init_net, isert_cma_handler, isert_np, 2201 RDMA_PS_TCP, IB_QPT_RC); 2202 if (IS_ERR(id)) { 2203 isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id)); 2204 ret = PTR_ERR(id); 2205 goto out; 2206 } 2207 isert_dbg("id %p context %p\n", id, id->context); 2208 2209 ret = rdma_bind_addr(id, sa); 2210 if (ret) { 2211 isert_err("rdma_bind_addr() failed: %d\n", ret); 2212 goto out_id; 2213 } 2214 2215 ret = rdma_listen(id, 0); 2216 if (ret) { 2217 isert_err("rdma_listen() failed: %d\n", ret); 2218 goto out_id; 2219 } 2220 2221 return id; 2222 out_id: 2223 rdma_destroy_id(id); 2224 out: 2225 return ERR_PTR(ret); 2226 } 2227 2228 static int 2229 isert_setup_np(struct iscsi_np *np, 2230 struct sockaddr_storage *ksockaddr) 2231 { 2232 struct isert_np *isert_np; 2233 struct rdma_cm_id *isert_lid; 2234 int ret; 2235 2236 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL); 2237 if (!isert_np) 2238 return -ENOMEM; 2239 2240 sema_init(&isert_np->sem, 0); 2241 mutex_init(&isert_np->mutex); 2242 INIT_LIST_HEAD(&isert_np->accepted); 2243 INIT_LIST_HEAD(&isert_np->pending); 2244 isert_np->np = np; 2245 2246 /* 2247 * Setup the np->np_sockaddr from the passed sockaddr setup 2248 * in iscsi_target_configfs.c code.. 2249 */ 2250 memcpy(&np->np_sockaddr, ksockaddr, 2251 sizeof(struct sockaddr_storage)); 2252 2253 isert_lid = isert_setup_id(isert_np); 2254 if (IS_ERR(isert_lid)) { 2255 ret = PTR_ERR(isert_lid); 2256 goto out; 2257 } 2258 2259 isert_np->cm_id = isert_lid; 2260 np->np_context = isert_np; 2261 2262 return 0; 2263 2264 out: 2265 kfree(isert_np); 2266 2267 return ret; 2268 } 2269 2270 static int 2271 isert_rdma_accept(struct isert_conn *isert_conn) 2272 { 2273 struct rdma_cm_id *cm_id = isert_conn->cm_id; 2274 struct rdma_conn_param cp; 2275 int ret; 2276 struct iser_cm_hdr rsp_hdr; 2277 2278 memset(&cp, 0, sizeof(struct rdma_conn_param)); 2279 cp.initiator_depth = isert_conn->initiator_depth; 2280 cp.retry_count = 7; 2281 cp.rnr_retry_count = 7; 2282 2283 memset(&rsp_hdr, 0, sizeof(rsp_hdr)); 2284 rsp_hdr.flags = ISERT_ZBVA_NOT_USED; 2285 if (!isert_conn->snd_w_inv) 2286 rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED; 2287 cp.private_data = (void *)&rsp_hdr; 2288 cp.private_data_len = sizeof(rsp_hdr); 2289 2290 ret = rdma_accept(cm_id, &cp); 2291 if (ret) { 2292 isert_err("rdma_accept() failed with: %d\n", ret); 2293 return ret; 2294 } 2295 2296 return 0; 2297 } 2298 2299 static int 2300 isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) 2301 { 2302 struct isert_conn *isert_conn = conn->context; 2303 int ret; 2304 2305 isert_info("before login_req comp conn: %p\n", isert_conn); 2306 ret = wait_for_completion_interruptible(&isert_conn->login_req_comp); 2307 if (ret) { 2308 isert_err("isert_conn %p interrupted before got login req\n", 2309 isert_conn); 2310 return ret; 2311 } 2312 reinit_completion(&isert_conn->login_req_comp); 2313 2314 /* 2315 * For login requests after the first PDU, isert_rx_login_req() will 2316 * kick schedule_delayed_work(&conn->login_work) as the packet is 2317 * received, which turns this callback from iscsi_target_do_login_rx() 2318 * into a NOP. 2319 */ 2320 if (!login->first_request) 2321 return 0; 2322 2323 isert_rx_login_req(isert_conn); 2324 2325 isert_info("before login_comp conn: %p\n", conn); 2326 ret = wait_for_completion_interruptible(&isert_conn->login_comp); 2327 if (ret) 2328 return ret; 2329 2330 isert_info("processing login->req: %p\n", login->req); 2331 2332 return 0; 2333 } 2334 2335 static void 2336 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, 2337 struct isert_conn *isert_conn) 2338 { 2339 struct rdma_cm_id *cm_id = isert_conn->cm_id; 2340 struct rdma_route *cm_route = &cm_id->route; 2341 2342 conn->login_family = np->np_sockaddr.ss_family; 2343 2344 conn->login_sockaddr = cm_route->addr.dst_addr; 2345 conn->local_sockaddr = cm_route->addr.src_addr; 2346 } 2347 2348 static int 2349 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) 2350 { 2351 struct isert_np *isert_np = np->np_context; 2352 struct isert_conn *isert_conn; 2353 int ret; 2354 2355 accept_wait: 2356 ret = down_interruptible(&isert_np->sem); 2357 if (ret) 2358 return -ENODEV; 2359 2360 spin_lock_bh(&np->np_thread_lock); 2361 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { 2362 spin_unlock_bh(&np->np_thread_lock); 2363 isert_dbg("np_thread_state %d\n", 2364 np->np_thread_state); 2365 /** 2366 * No point in stalling here when np_thread 2367 * is in state RESET/SHUTDOWN/EXIT - bail 2368 **/ 2369 return -ENODEV; 2370 } 2371 spin_unlock_bh(&np->np_thread_lock); 2372 2373 mutex_lock(&isert_np->mutex); 2374 if (list_empty(&isert_np->pending)) { 2375 mutex_unlock(&isert_np->mutex); 2376 goto accept_wait; 2377 } 2378 isert_conn = list_first_entry(&isert_np->pending, 2379 struct isert_conn, node); 2380 list_del_init(&isert_conn->node); 2381 mutex_unlock(&isert_np->mutex); 2382 2383 conn->context = isert_conn; 2384 isert_conn->conn = conn; 2385 isert_conn->state = ISER_CONN_BOUND; 2386 2387 isert_set_conn_info(np, conn, isert_conn); 2388 2389 isert_dbg("Processing isert_conn: %p\n", isert_conn); 2390 2391 return 0; 2392 } 2393 2394 static void 2395 isert_free_np(struct iscsi_np *np) 2396 { 2397 struct isert_np *isert_np = np->np_context; 2398 struct isert_conn *isert_conn, *n; 2399 2400 if (isert_np->cm_id) 2401 rdma_destroy_id(isert_np->cm_id); 2402 2403 /* 2404 * FIXME: At this point we don't have a good way to insure 2405 * that at this point we don't have hanging connections that 2406 * completed RDMA establishment but didn't start iscsi login 2407 * process. So work-around this by cleaning up what ever piled 2408 * up in accepted and pending lists. 2409 */ 2410 mutex_lock(&isert_np->mutex); 2411 if (!list_empty(&isert_np->pending)) { 2412 isert_info("Still have isert pending connections\n"); 2413 list_for_each_entry_safe(isert_conn, n, 2414 &isert_np->pending, 2415 node) { 2416 isert_info("cleaning isert_conn %p state (%d)\n", 2417 isert_conn, isert_conn->state); 2418 isert_connect_release(isert_conn); 2419 } 2420 } 2421 2422 if (!list_empty(&isert_np->accepted)) { 2423 isert_info("Still have isert accepted connections\n"); 2424 list_for_each_entry_safe(isert_conn, n, 2425 &isert_np->accepted, 2426 node) { 2427 isert_info("cleaning isert_conn %p state (%d)\n", 2428 isert_conn, isert_conn->state); 2429 isert_connect_release(isert_conn); 2430 } 2431 } 2432 mutex_unlock(&isert_np->mutex); 2433 2434 np->np_context = NULL; 2435 kfree(isert_np); 2436 } 2437 2438 static void isert_release_work(struct work_struct *work) 2439 { 2440 struct isert_conn *isert_conn = container_of(work, 2441 struct isert_conn, 2442 release_work); 2443 2444 isert_info("Starting release conn %p\n", isert_conn); 2445 2446 mutex_lock(&isert_conn->mutex); 2447 isert_conn->state = ISER_CONN_DOWN; 2448 mutex_unlock(&isert_conn->mutex); 2449 2450 isert_info("Destroying conn %p\n", isert_conn); 2451 isert_put_conn(isert_conn); 2452 } 2453 2454 static void 2455 isert_wait4logout(struct isert_conn *isert_conn) 2456 { 2457 struct iscsi_conn *conn = isert_conn->conn; 2458 2459 isert_info("conn %p\n", isert_conn); 2460 2461 if (isert_conn->logout_posted) { 2462 isert_info("conn %p wait for conn_logout_comp\n", isert_conn); 2463 wait_for_completion_timeout(&conn->conn_logout_comp, 2464 SECONDS_FOR_LOGOUT_COMP * HZ); 2465 } 2466 } 2467 2468 static void 2469 isert_wait4cmds(struct iscsi_conn *conn) 2470 { 2471 isert_info("iscsi_conn %p\n", conn); 2472 2473 if (conn->sess) { 2474 target_sess_cmd_list_set_waiting(conn->sess->se_sess); 2475 target_wait_for_sess_cmds(conn->sess->se_sess); 2476 } 2477 } 2478 2479 /** 2480 * isert_put_unsol_pending_cmds() - Drop commands waiting for 2481 * unsolicitate dataout 2482 * @conn: iscsi connection 2483 * 2484 * We might still have commands that are waiting for unsolicited 2485 * dataouts messages. We must put the extra reference on those 2486 * before blocking on the target_wait_for_session_cmds 2487 */ 2488 static void 2489 isert_put_unsol_pending_cmds(struct iscsi_conn *conn) 2490 { 2491 struct iscsi_cmd *cmd, *tmp; 2492 static LIST_HEAD(drop_cmd_list); 2493 2494 spin_lock_bh(&conn->cmd_lock); 2495 list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) { 2496 if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) && 2497 (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) && 2498 (cmd->write_data_done < cmd->se_cmd.data_length)) 2499 list_move_tail(&cmd->i_conn_node, &drop_cmd_list); 2500 } 2501 spin_unlock_bh(&conn->cmd_lock); 2502 2503 list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) { 2504 list_del_init(&cmd->i_conn_node); 2505 if (cmd->i_state != ISTATE_REMOVE) { 2506 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2507 2508 isert_info("conn %p dropping cmd %p\n", conn, cmd); 2509 isert_put_cmd(isert_cmd, true); 2510 } 2511 } 2512 } 2513 2514 static void isert_wait_conn(struct iscsi_conn *conn) 2515 { 2516 struct isert_conn *isert_conn = conn->context; 2517 2518 isert_info("Starting conn %p\n", isert_conn); 2519 2520 mutex_lock(&isert_conn->mutex); 2521 isert_conn_terminate(isert_conn); 2522 mutex_unlock(&isert_conn->mutex); 2523 2524 ib_drain_qp(isert_conn->qp); 2525 isert_put_unsol_pending_cmds(conn); 2526 isert_wait4cmds(conn); 2527 isert_wait4logout(isert_conn); 2528 2529 queue_work(isert_release_wq, &isert_conn->release_work); 2530 } 2531 2532 static void isert_free_conn(struct iscsi_conn *conn) 2533 { 2534 struct isert_conn *isert_conn = conn->context; 2535 2536 ib_drain_qp(isert_conn->qp); 2537 isert_put_conn(isert_conn); 2538 } 2539 2540 static void isert_get_rx_pdu(struct iscsi_conn *conn) 2541 { 2542 struct completion comp; 2543 2544 init_completion(&comp); 2545 2546 wait_for_completion_interruptible(&comp); 2547 } 2548 2549 static struct iscsit_transport iser_target_transport = { 2550 .name = "IB/iSER", 2551 .transport_type = ISCSI_INFINIBAND, 2552 .rdma_shutdown = true, 2553 .priv_size = sizeof(struct isert_cmd), 2554 .owner = THIS_MODULE, 2555 .iscsit_setup_np = isert_setup_np, 2556 .iscsit_accept_np = isert_accept_np, 2557 .iscsit_free_np = isert_free_np, 2558 .iscsit_wait_conn = isert_wait_conn, 2559 .iscsit_free_conn = isert_free_conn, 2560 .iscsit_get_login_rx = isert_get_login_rx, 2561 .iscsit_put_login_tx = isert_put_login_tx, 2562 .iscsit_immediate_queue = isert_immediate_queue, 2563 .iscsit_response_queue = isert_response_queue, 2564 .iscsit_get_dataout = isert_get_dataout, 2565 .iscsit_queue_data_in = isert_put_datain, 2566 .iscsit_queue_status = isert_put_response, 2567 .iscsit_aborted_task = isert_aborted_task, 2568 .iscsit_get_rx_pdu = isert_get_rx_pdu, 2569 .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops, 2570 }; 2571 2572 static int __init isert_init(void) 2573 { 2574 int ret; 2575 2576 isert_comp_wq = alloc_workqueue("isert_comp_wq", 2577 WQ_UNBOUND | WQ_HIGHPRI, 0); 2578 if (!isert_comp_wq) { 2579 isert_err("Unable to allocate isert_comp_wq\n"); 2580 return -ENOMEM; 2581 } 2582 2583 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND, 2584 WQ_UNBOUND_MAX_ACTIVE); 2585 if (!isert_release_wq) { 2586 isert_err("Unable to allocate isert_release_wq\n"); 2587 ret = -ENOMEM; 2588 goto destroy_comp_wq; 2589 } 2590 2591 iscsit_register_transport(&iser_target_transport); 2592 isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n"); 2593 2594 return 0; 2595 2596 destroy_comp_wq: 2597 destroy_workqueue(isert_comp_wq); 2598 2599 return ret; 2600 } 2601 2602 static void __exit isert_exit(void) 2603 { 2604 flush_scheduled_work(); 2605 destroy_workqueue(isert_release_wq); 2606 destroy_workqueue(isert_comp_wq); 2607 iscsit_unregister_transport(&iser_target_transport); 2608 isert_info("iSER_TARGET[0] - Released iser_target_transport\n"); 2609 } 2610 2611 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); 2612 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 2613 MODULE_LICENSE("GPL"); 2614 2615 module_init(isert_init); 2616 module_exit(isert_exit); 2617