1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /******************************************************************************* 3 * This file contains iSCSI extentions for RDMA (iSER) Verbs 4 * 5 * (c) Copyright 2013 Datera, Inc. 6 * 7 * Nicholas A. Bellinger <nab@linux-iscsi.org> 8 * 9 ****************************************************************************/ 10 11 #include <linux/string.h> 12 #include <linux/module.h> 13 #include <linux/scatterlist.h> 14 #include <linux/socket.h> 15 #include <linux/in.h> 16 #include <linux/in6.h> 17 #include <rdma/ib_verbs.h> 18 #include <rdma/ib_cm.h> 19 #include <rdma/rdma_cm.h> 20 #include <target/target_core_base.h> 21 #include <target/target_core_fabric.h> 22 #include <target/iscsi/iscsi_transport.h> 23 #include <linux/semaphore.h> 24 25 #include "ib_isert.h" 26 27 static int isert_debug_level; 28 module_param_named(debug_level, isert_debug_level, int, 0644); 29 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)"); 30 31 static DEFINE_MUTEX(device_list_mutex); 32 static LIST_HEAD(device_list); 33 static struct workqueue_struct *isert_comp_wq; 34 static struct workqueue_struct *isert_release_wq; 35 36 static int 37 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd); 38 static int 39 isert_login_post_recv(struct isert_conn *isert_conn); 40 static int 41 isert_rdma_accept(struct isert_conn *isert_conn); 42 struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); 43 44 static void isert_release_work(struct work_struct *work); 45 static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc); 46 static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc); 47 static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc); 48 static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc); 49 50 static inline bool 51 isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd) 52 { 53 return (conn->pi_support && 54 cmd->prot_op != TARGET_PROT_NORMAL); 55 } 56 57 58 static void 59 isert_qp_event_callback(struct ib_event *e, void *context) 60 { 61 struct isert_conn *isert_conn = context; 62 63 isert_err("%s (%d): conn %p\n", 64 ib_event_msg(e->event), e->event, isert_conn); 65 66 switch (e->event) { 67 case IB_EVENT_COMM_EST: 68 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST); 69 break; 70 case IB_EVENT_QP_LAST_WQE_REACHED: 71 isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n"); 72 break; 73 default: 74 break; 75 } 76 } 77 78 static struct ib_qp * 79 isert_create_qp(struct isert_conn *isert_conn, 80 struct rdma_cm_id *cma_id) 81 { 82 u32 cq_size = ISERT_QP_MAX_REQ_DTOS + ISERT_QP_MAX_RECV_DTOS + 2; 83 struct isert_device *device = isert_conn->device; 84 struct ib_device *ib_dev = device->ib_device; 85 struct ib_qp_init_attr attr; 86 int ret, factor; 87 88 isert_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_WORKQUEUE); 89 if (IS_ERR(isert_conn->cq)) { 90 isert_err("Unable to allocate cq\n"); 91 ret = PTR_ERR(isert_conn->cq); 92 return ERR_PTR(ret); 93 } 94 isert_conn->cq_size = cq_size; 95 96 memset(&attr, 0, sizeof(struct ib_qp_init_attr)); 97 attr.event_handler = isert_qp_event_callback; 98 attr.qp_context = isert_conn; 99 attr.send_cq = isert_conn->cq; 100 attr.recv_cq = isert_conn->cq; 101 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1; 102 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1; 103 factor = rdma_rw_mr_factor(device->ib_device, cma_id->port_num, 104 ISCSI_ISER_MAX_SG_TABLESIZE); 105 attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX * factor; 106 attr.cap.max_send_sge = device->ib_device->attrs.max_send_sge; 107 attr.cap.max_recv_sge = 1; 108 attr.sq_sig_type = IB_SIGNAL_REQ_WR; 109 attr.qp_type = IB_QPT_RC; 110 if (device->pi_capable) 111 attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN; 112 113 ret = rdma_create_qp(cma_id, device->pd, &attr); 114 if (ret) { 115 isert_err("rdma_create_qp failed for cma_id %d\n", ret); 116 ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size); 117 118 return ERR_PTR(ret); 119 } 120 121 return cma_id->qp; 122 } 123 124 static int 125 isert_alloc_rx_descriptors(struct isert_conn *isert_conn) 126 { 127 struct isert_device *device = isert_conn->device; 128 struct ib_device *ib_dev = device->ib_device; 129 struct iser_rx_desc *rx_desc; 130 struct ib_sge *rx_sg; 131 u64 dma_addr; 132 int i, j; 133 134 isert_conn->rx_descs = kcalloc(ISERT_QP_MAX_RECV_DTOS, 135 sizeof(struct iser_rx_desc), 136 GFP_KERNEL); 137 if (!isert_conn->rx_descs) 138 return -ENOMEM; 139 140 rx_desc = isert_conn->rx_descs; 141 142 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 143 dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf, 144 ISER_RX_SIZE, DMA_FROM_DEVICE); 145 if (ib_dma_mapping_error(ib_dev, dma_addr)) 146 goto dma_map_fail; 147 148 rx_desc->dma_addr = dma_addr; 149 150 rx_sg = &rx_desc->rx_sg; 151 rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc); 152 rx_sg->length = ISER_RX_PAYLOAD_SIZE; 153 rx_sg->lkey = device->pd->local_dma_lkey; 154 rx_desc->rx_cqe.done = isert_recv_done; 155 } 156 157 return 0; 158 159 dma_map_fail: 160 rx_desc = isert_conn->rx_descs; 161 for (j = 0; j < i; j++, rx_desc++) { 162 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 163 ISER_RX_SIZE, DMA_FROM_DEVICE); 164 } 165 kfree(isert_conn->rx_descs); 166 isert_conn->rx_descs = NULL; 167 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn); 168 return -ENOMEM; 169 } 170 171 static void 172 isert_free_rx_descriptors(struct isert_conn *isert_conn) 173 { 174 struct ib_device *ib_dev = isert_conn->device->ib_device; 175 struct iser_rx_desc *rx_desc; 176 int i; 177 178 if (!isert_conn->rx_descs) 179 return; 180 181 rx_desc = isert_conn->rx_descs; 182 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { 183 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, 184 ISER_RX_SIZE, DMA_FROM_DEVICE); 185 } 186 187 kfree(isert_conn->rx_descs); 188 isert_conn->rx_descs = NULL; 189 } 190 191 static int 192 isert_create_device_ib_res(struct isert_device *device) 193 { 194 struct ib_device *ib_dev = device->ib_device; 195 int ret; 196 197 isert_dbg("devattr->max_send_sge: %d devattr->max_recv_sge %d\n", 198 ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge); 199 isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd); 200 201 device->pd = ib_alloc_pd(ib_dev, 0); 202 if (IS_ERR(device->pd)) { 203 ret = PTR_ERR(device->pd); 204 isert_err("failed to allocate pd, device %p, ret=%d\n", 205 device, ret); 206 return ret; 207 } 208 209 /* Check signature cap */ 210 device->pi_capable = ib_dev->attrs.device_cap_flags & 211 IB_DEVICE_INTEGRITY_HANDOVER ? true : false; 212 213 return 0; 214 } 215 216 static void 217 isert_free_device_ib_res(struct isert_device *device) 218 { 219 isert_info("device %p\n", device); 220 221 ib_dealloc_pd(device->pd); 222 } 223 224 static void 225 isert_device_put(struct isert_device *device) 226 { 227 mutex_lock(&device_list_mutex); 228 device->refcount--; 229 isert_info("device %p refcount %d\n", device, device->refcount); 230 if (!device->refcount) { 231 isert_free_device_ib_res(device); 232 list_del(&device->dev_node); 233 kfree(device); 234 } 235 mutex_unlock(&device_list_mutex); 236 } 237 238 static struct isert_device * 239 isert_device_get(struct rdma_cm_id *cma_id) 240 { 241 struct isert_device *device; 242 int ret; 243 244 mutex_lock(&device_list_mutex); 245 list_for_each_entry(device, &device_list, dev_node) { 246 if (device->ib_device->node_guid == cma_id->device->node_guid) { 247 device->refcount++; 248 isert_info("Found iser device %p refcount %d\n", 249 device, device->refcount); 250 mutex_unlock(&device_list_mutex); 251 return device; 252 } 253 } 254 255 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL); 256 if (!device) { 257 mutex_unlock(&device_list_mutex); 258 return ERR_PTR(-ENOMEM); 259 } 260 261 INIT_LIST_HEAD(&device->dev_node); 262 263 device->ib_device = cma_id->device; 264 ret = isert_create_device_ib_res(device); 265 if (ret) { 266 kfree(device); 267 mutex_unlock(&device_list_mutex); 268 return ERR_PTR(ret); 269 } 270 271 device->refcount++; 272 list_add_tail(&device->dev_node, &device_list); 273 isert_info("Created a new iser device %p refcount %d\n", 274 device, device->refcount); 275 mutex_unlock(&device_list_mutex); 276 277 return device; 278 } 279 280 static void 281 isert_init_conn(struct isert_conn *isert_conn) 282 { 283 isert_conn->state = ISER_CONN_INIT; 284 INIT_LIST_HEAD(&isert_conn->node); 285 init_completion(&isert_conn->login_comp); 286 init_completion(&isert_conn->login_req_comp); 287 init_waitqueue_head(&isert_conn->rem_wait); 288 kref_init(&isert_conn->kref); 289 mutex_init(&isert_conn->mutex); 290 INIT_WORK(&isert_conn->release_work, isert_release_work); 291 } 292 293 static void 294 isert_free_login_buf(struct isert_conn *isert_conn) 295 { 296 struct ib_device *ib_dev = isert_conn->device->ib_device; 297 298 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, 299 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); 300 kfree(isert_conn->login_rsp_buf); 301 302 ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr, 303 ISER_RX_SIZE, DMA_FROM_DEVICE); 304 kfree(isert_conn->login_desc); 305 } 306 307 static int 308 isert_alloc_login_buf(struct isert_conn *isert_conn, 309 struct ib_device *ib_dev) 310 { 311 int ret; 312 313 isert_conn->login_desc = kzalloc(sizeof(*isert_conn->login_desc), 314 GFP_KERNEL); 315 if (!isert_conn->login_desc) 316 return -ENOMEM; 317 318 isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev, 319 isert_conn->login_desc->buf, 320 ISER_RX_SIZE, DMA_FROM_DEVICE); 321 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr); 322 if (ret) { 323 isert_err("login_desc dma mapping error: %d\n", ret); 324 isert_conn->login_desc->dma_addr = 0; 325 goto out_free_login_desc; 326 } 327 328 isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL); 329 if (!isert_conn->login_rsp_buf) { 330 ret = -ENOMEM; 331 goto out_unmap_login_desc; 332 } 333 334 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, 335 isert_conn->login_rsp_buf, 336 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); 337 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); 338 if (ret) { 339 isert_err("login_rsp_dma mapping error: %d\n", ret); 340 isert_conn->login_rsp_dma = 0; 341 goto out_free_login_rsp_buf; 342 } 343 344 return 0; 345 346 out_free_login_rsp_buf: 347 kfree(isert_conn->login_rsp_buf); 348 out_unmap_login_desc: 349 ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr, 350 ISER_RX_SIZE, DMA_FROM_DEVICE); 351 out_free_login_desc: 352 kfree(isert_conn->login_desc); 353 return ret; 354 } 355 356 static void 357 isert_set_nego_params(struct isert_conn *isert_conn, 358 struct rdma_conn_param *param) 359 { 360 struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs; 361 362 /* Set max inflight RDMA READ requests */ 363 isert_conn->initiator_depth = min_t(u8, param->initiator_depth, 364 attr->max_qp_init_rd_atom); 365 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth); 366 367 if (param->private_data) { 368 u8 flags = *(u8 *)param->private_data; 369 370 /* 371 * use remote invalidation if the both initiator 372 * and the HCA support it 373 */ 374 isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) && 375 (attr->device_cap_flags & 376 IB_DEVICE_MEM_MGT_EXTENSIONS); 377 if (isert_conn->snd_w_inv) 378 isert_info("Using remote invalidation\n"); 379 } 380 } 381 382 static void 383 isert_destroy_qp(struct isert_conn *isert_conn) 384 { 385 ib_destroy_qp(isert_conn->qp); 386 ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size); 387 } 388 389 static int 390 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 391 { 392 struct isert_np *isert_np = cma_id->context; 393 struct iscsi_np *np = isert_np->np; 394 struct isert_conn *isert_conn; 395 struct isert_device *device; 396 int ret = 0; 397 398 spin_lock_bh(&np->np_thread_lock); 399 if (!np->enabled) { 400 spin_unlock_bh(&np->np_thread_lock); 401 isert_dbg("iscsi_np is not enabled, reject connect request\n"); 402 return rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED); 403 } 404 spin_unlock_bh(&np->np_thread_lock); 405 406 isert_dbg("cma_id: %p, portal: %p\n", 407 cma_id, cma_id->context); 408 409 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL); 410 if (!isert_conn) 411 return -ENOMEM; 412 413 isert_init_conn(isert_conn); 414 isert_conn->cm_id = cma_id; 415 416 ret = isert_alloc_login_buf(isert_conn, cma_id->device); 417 if (ret) 418 goto out; 419 420 device = isert_device_get(cma_id); 421 if (IS_ERR(device)) { 422 ret = PTR_ERR(device); 423 goto out_rsp_dma_map; 424 } 425 isert_conn->device = device; 426 427 isert_set_nego_params(isert_conn, &event->param.conn); 428 429 isert_conn->qp = isert_create_qp(isert_conn, cma_id); 430 if (IS_ERR(isert_conn->qp)) { 431 ret = PTR_ERR(isert_conn->qp); 432 goto out_conn_dev; 433 } 434 435 ret = isert_login_post_recv(isert_conn); 436 if (ret) 437 goto out_destroy_qp; 438 439 ret = isert_rdma_accept(isert_conn); 440 if (ret) 441 goto out_destroy_qp; 442 443 mutex_lock(&isert_np->mutex); 444 list_add_tail(&isert_conn->node, &isert_np->accepted); 445 mutex_unlock(&isert_np->mutex); 446 447 return 0; 448 449 out_destroy_qp: 450 isert_destroy_qp(isert_conn); 451 out_conn_dev: 452 isert_device_put(device); 453 out_rsp_dma_map: 454 isert_free_login_buf(isert_conn); 455 out: 456 kfree(isert_conn); 457 rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED); 458 return ret; 459 } 460 461 static void 462 isert_connect_release(struct isert_conn *isert_conn) 463 { 464 struct isert_device *device = isert_conn->device; 465 466 isert_dbg("conn %p\n", isert_conn); 467 468 BUG_ON(!device); 469 470 isert_free_rx_descriptors(isert_conn); 471 if (isert_conn->cm_id && 472 !isert_conn->dev_removed) 473 rdma_destroy_id(isert_conn->cm_id); 474 475 if (isert_conn->qp) 476 isert_destroy_qp(isert_conn); 477 478 if (isert_conn->login_desc) 479 isert_free_login_buf(isert_conn); 480 481 isert_device_put(device); 482 483 if (isert_conn->dev_removed) 484 wake_up_interruptible(&isert_conn->rem_wait); 485 else 486 kfree(isert_conn); 487 } 488 489 static void 490 isert_connected_handler(struct rdma_cm_id *cma_id) 491 { 492 struct isert_conn *isert_conn = cma_id->qp->qp_context; 493 struct isert_np *isert_np = cma_id->context; 494 495 isert_info("conn %p\n", isert_conn); 496 497 mutex_lock(&isert_conn->mutex); 498 isert_conn->state = ISER_CONN_UP; 499 kref_get(&isert_conn->kref); 500 mutex_unlock(&isert_conn->mutex); 501 502 mutex_lock(&isert_np->mutex); 503 list_move_tail(&isert_conn->node, &isert_np->pending); 504 mutex_unlock(&isert_np->mutex); 505 506 isert_info("np %p: Allow accept_np to continue\n", isert_np); 507 up(&isert_np->sem); 508 } 509 510 static void 511 isert_release_kref(struct kref *kref) 512 { 513 struct isert_conn *isert_conn = container_of(kref, 514 struct isert_conn, kref); 515 516 isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm, 517 current->pid); 518 519 isert_connect_release(isert_conn); 520 } 521 522 static void 523 isert_put_conn(struct isert_conn *isert_conn) 524 { 525 kref_put(&isert_conn->kref, isert_release_kref); 526 } 527 528 static void 529 isert_handle_unbound_conn(struct isert_conn *isert_conn) 530 { 531 struct isert_np *isert_np = isert_conn->cm_id->context; 532 533 mutex_lock(&isert_np->mutex); 534 if (!list_empty(&isert_conn->node)) { 535 /* 536 * This means iscsi doesn't know this connection 537 * so schedule a cleanup ourselves 538 */ 539 list_del_init(&isert_conn->node); 540 isert_put_conn(isert_conn); 541 queue_work(isert_release_wq, &isert_conn->release_work); 542 } 543 mutex_unlock(&isert_np->mutex); 544 } 545 546 /** 547 * isert_conn_terminate() - Initiate connection termination 548 * @isert_conn: isert connection struct 549 * 550 * Notes: 551 * In case the connection state is BOUND, move state 552 * to TEMINATING and start teardown sequence (rdma_disconnect). 553 * In case the connection state is UP, complete flush as well. 554 * 555 * This routine must be called with mutex held. Thus it is 556 * safe to call multiple times. 557 */ 558 static void 559 isert_conn_terminate(struct isert_conn *isert_conn) 560 { 561 int err; 562 563 if (isert_conn->state >= ISER_CONN_TERMINATING) 564 return; 565 566 isert_info("Terminating conn %p state %d\n", 567 isert_conn, isert_conn->state); 568 isert_conn->state = ISER_CONN_TERMINATING; 569 err = rdma_disconnect(isert_conn->cm_id); 570 if (err) 571 isert_warn("Failed rdma_disconnect isert_conn %p\n", 572 isert_conn); 573 } 574 575 static int 576 isert_np_cma_handler(struct isert_np *isert_np, 577 enum rdma_cm_event_type event) 578 { 579 isert_dbg("%s (%d): isert np %p\n", 580 rdma_event_msg(event), event, isert_np); 581 582 switch (event) { 583 case RDMA_CM_EVENT_DEVICE_REMOVAL: 584 isert_np->cm_id = NULL; 585 break; 586 case RDMA_CM_EVENT_ADDR_CHANGE: 587 isert_np->cm_id = isert_setup_id(isert_np); 588 if (IS_ERR(isert_np->cm_id)) { 589 isert_err("isert np %p setup id failed: %ld\n", 590 isert_np, PTR_ERR(isert_np->cm_id)); 591 isert_np->cm_id = NULL; 592 } 593 break; 594 default: 595 isert_err("isert np %p Unexpected event %d\n", 596 isert_np, event); 597 } 598 599 return -1; 600 } 601 602 static int 603 isert_disconnected_handler(struct rdma_cm_id *cma_id, 604 enum rdma_cm_event_type event) 605 { 606 struct isert_conn *isert_conn = cma_id->qp->qp_context; 607 608 mutex_lock(&isert_conn->mutex); 609 switch (isert_conn->state) { 610 case ISER_CONN_TERMINATING: 611 break; 612 case ISER_CONN_UP: 613 isert_conn_terminate(isert_conn); 614 ib_drain_qp(isert_conn->qp); 615 isert_handle_unbound_conn(isert_conn); 616 break; 617 case ISER_CONN_BOUND: 618 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */ 619 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 620 break; 621 default: 622 isert_warn("conn %p terminating in state %d\n", 623 isert_conn, isert_conn->state); 624 } 625 mutex_unlock(&isert_conn->mutex); 626 627 return 0; 628 } 629 630 static int 631 isert_connect_error(struct rdma_cm_id *cma_id) 632 { 633 struct isert_conn *isert_conn = cma_id->qp->qp_context; 634 635 ib_drain_qp(isert_conn->qp); 636 list_del_init(&isert_conn->node); 637 isert_conn->cm_id = NULL; 638 isert_put_conn(isert_conn); 639 640 return -1; 641 } 642 643 static int 644 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 645 { 646 struct isert_np *isert_np = cma_id->context; 647 struct isert_conn *isert_conn; 648 int ret = 0; 649 650 isert_info("%s (%d): status %d id %p np %p\n", 651 rdma_event_msg(event->event), event->event, 652 event->status, cma_id, cma_id->context); 653 654 if (isert_np->cm_id == cma_id) 655 return isert_np_cma_handler(cma_id->context, event->event); 656 657 switch (event->event) { 658 case RDMA_CM_EVENT_CONNECT_REQUEST: 659 ret = isert_connect_request(cma_id, event); 660 if (ret) 661 isert_err("failed handle connect request %d\n", ret); 662 break; 663 case RDMA_CM_EVENT_ESTABLISHED: 664 isert_connected_handler(cma_id); 665 break; 666 case RDMA_CM_EVENT_ADDR_CHANGE: 667 case RDMA_CM_EVENT_DISCONNECTED: 668 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ 669 ret = isert_disconnected_handler(cma_id, event->event); 670 break; 671 case RDMA_CM_EVENT_DEVICE_REMOVAL: 672 isert_conn = cma_id->qp->qp_context; 673 isert_conn->dev_removed = true; 674 isert_disconnected_handler(cma_id, event->event); 675 wait_event_interruptible(isert_conn->rem_wait, 676 isert_conn->state == ISER_CONN_DOWN); 677 kfree(isert_conn); 678 /* 679 * return non-zero from the callback to destroy 680 * the rdma cm id 681 */ 682 return 1; 683 case RDMA_CM_EVENT_REJECTED: 684 isert_info("Connection rejected: %s\n", 685 rdma_reject_msg(cma_id, event->status)); 686 fallthrough; 687 case RDMA_CM_EVENT_UNREACHABLE: 688 case RDMA_CM_EVENT_CONNECT_ERROR: 689 ret = isert_connect_error(cma_id); 690 break; 691 default: 692 isert_err("Unhandled RDMA CMA event: %d\n", event->event); 693 break; 694 } 695 696 return ret; 697 } 698 699 static int 700 isert_post_recvm(struct isert_conn *isert_conn, u32 count) 701 { 702 struct ib_recv_wr *rx_wr; 703 int i, ret; 704 struct iser_rx_desc *rx_desc; 705 706 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { 707 rx_desc = &isert_conn->rx_descs[i]; 708 709 rx_wr->wr_cqe = &rx_desc->rx_cqe; 710 rx_wr->sg_list = &rx_desc->rx_sg; 711 rx_wr->num_sge = 1; 712 rx_wr->next = rx_wr + 1; 713 rx_desc->in_use = false; 714 } 715 rx_wr--; 716 rx_wr->next = NULL; /* mark end of work requests list */ 717 718 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, NULL); 719 if (ret) 720 isert_err("ib_post_recv() failed with ret: %d\n", ret); 721 722 return ret; 723 } 724 725 static int 726 isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc) 727 { 728 struct ib_recv_wr rx_wr; 729 int ret; 730 731 if (!rx_desc->in_use) { 732 /* 733 * if the descriptor is not in-use we already reposted it 734 * for recv, so just silently return 735 */ 736 return 0; 737 } 738 739 rx_desc->in_use = false; 740 rx_wr.wr_cqe = &rx_desc->rx_cqe; 741 rx_wr.sg_list = &rx_desc->rx_sg; 742 rx_wr.num_sge = 1; 743 rx_wr.next = NULL; 744 745 ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL); 746 if (ret) 747 isert_err("ib_post_recv() failed with ret: %d\n", ret); 748 749 return ret; 750 } 751 752 static int 753 isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) 754 { 755 struct ib_device *ib_dev = isert_conn->cm_id->device; 756 struct ib_send_wr send_wr; 757 int ret; 758 759 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr, 760 ISER_HEADERS_LEN, DMA_TO_DEVICE); 761 762 tx_desc->tx_cqe.done = isert_login_send_done; 763 764 send_wr.next = NULL; 765 send_wr.wr_cqe = &tx_desc->tx_cqe; 766 send_wr.sg_list = tx_desc->tx_sg; 767 send_wr.num_sge = tx_desc->num_sge; 768 send_wr.opcode = IB_WR_SEND; 769 send_wr.send_flags = IB_SEND_SIGNALED; 770 771 ret = ib_post_send(isert_conn->qp, &send_wr, NULL); 772 if (ret) 773 isert_err("ib_post_send() failed, ret: %d\n", ret); 774 775 return ret; 776 } 777 778 static void 779 __isert_create_send_desc(struct isert_device *device, 780 struct iser_tx_desc *tx_desc) 781 { 782 783 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl)); 784 tx_desc->iser_header.flags = ISCSI_CTRL; 785 786 tx_desc->num_sge = 1; 787 788 if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) { 789 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; 790 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc); 791 } 792 } 793 794 static void 795 isert_create_send_desc(struct isert_conn *isert_conn, 796 struct isert_cmd *isert_cmd, 797 struct iser_tx_desc *tx_desc) 798 { 799 struct isert_device *device = isert_conn->device; 800 struct ib_device *ib_dev = device->ib_device; 801 802 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, 803 ISER_HEADERS_LEN, DMA_TO_DEVICE); 804 805 __isert_create_send_desc(device, tx_desc); 806 } 807 808 static int 809 isert_init_tx_hdrs(struct isert_conn *isert_conn, 810 struct iser_tx_desc *tx_desc) 811 { 812 struct isert_device *device = isert_conn->device; 813 struct ib_device *ib_dev = device->ib_device; 814 u64 dma_addr; 815 816 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc, 817 ISER_HEADERS_LEN, DMA_TO_DEVICE); 818 if (ib_dma_mapping_error(ib_dev, dma_addr)) { 819 isert_err("ib_dma_mapping_error() failed\n"); 820 return -ENOMEM; 821 } 822 823 tx_desc->dma_addr = dma_addr; 824 tx_desc->tx_sg[0].addr = tx_desc->dma_addr; 825 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; 826 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; 827 828 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n", 829 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length, 830 tx_desc->tx_sg[0].lkey); 831 832 return 0; 833 } 834 835 static void 836 isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 837 struct ib_send_wr *send_wr) 838 { 839 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; 840 841 tx_desc->tx_cqe.done = isert_send_done; 842 send_wr->wr_cqe = &tx_desc->tx_cqe; 843 844 if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) { 845 send_wr->opcode = IB_WR_SEND_WITH_INV; 846 send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey; 847 } else { 848 send_wr->opcode = IB_WR_SEND; 849 } 850 851 send_wr->sg_list = &tx_desc->tx_sg[0]; 852 send_wr->num_sge = isert_cmd->tx_desc.num_sge; 853 send_wr->send_flags = IB_SEND_SIGNALED; 854 } 855 856 static int 857 isert_login_post_recv(struct isert_conn *isert_conn) 858 { 859 struct ib_recv_wr rx_wr; 860 struct ib_sge sge; 861 int ret; 862 863 memset(&sge, 0, sizeof(struct ib_sge)); 864 sge.addr = isert_conn->login_desc->dma_addr + 865 isert_get_hdr_offset(isert_conn->login_desc); 866 sge.length = ISER_RX_PAYLOAD_SIZE; 867 sge.lkey = isert_conn->device->pd->local_dma_lkey; 868 869 isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", 870 sge.addr, sge.length, sge.lkey); 871 872 isert_conn->login_desc->rx_cqe.done = isert_login_recv_done; 873 874 memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); 875 rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe; 876 rx_wr.sg_list = &sge; 877 rx_wr.num_sge = 1; 878 879 ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL); 880 if (ret) 881 isert_err("ib_post_recv() failed: %d\n", ret); 882 883 return ret; 884 } 885 886 static int 887 isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, 888 u32 length) 889 { 890 struct isert_conn *isert_conn = conn->context; 891 struct isert_device *device = isert_conn->device; 892 struct ib_device *ib_dev = device->ib_device; 893 struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc; 894 int ret; 895 896 __isert_create_send_desc(device, tx_desc); 897 898 memcpy(&tx_desc->iscsi_header, &login->rsp[0], 899 sizeof(struct iscsi_hdr)); 900 901 isert_init_tx_hdrs(isert_conn, tx_desc); 902 903 if (length > 0) { 904 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1]; 905 906 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma, 907 length, DMA_TO_DEVICE); 908 909 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length); 910 911 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma, 912 length, DMA_TO_DEVICE); 913 914 tx_dsg->addr = isert_conn->login_rsp_dma; 915 tx_dsg->length = length; 916 tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey; 917 tx_desc->num_sge = 2; 918 } 919 if (!login->login_failed) { 920 if (login->login_complete) { 921 ret = isert_alloc_rx_descriptors(isert_conn); 922 if (ret) 923 return ret; 924 925 ret = isert_post_recvm(isert_conn, 926 ISERT_QP_MAX_RECV_DTOS); 927 if (ret) 928 return ret; 929 930 /* Now we are in FULL_FEATURE phase */ 931 mutex_lock(&isert_conn->mutex); 932 isert_conn->state = ISER_CONN_FULL_FEATURE; 933 mutex_unlock(&isert_conn->mutex); 934 goto post_send; 935 } 936 937 ret = isert_login_post_recv(isert_conn); 938 if (ret) 939 return ret; 940 } 941 post_send: 942 ret = isert_login_post_send(isert_conn, tx_desc); 943 if (ret) 944 return ret; 945 946 return 0; 947 } 948 949 static void 950 isert_rx_login_req(struct isert_conn *isert_conn) 951 { 952 struct iser_rx_desc *rx_desc = isert_conn->login_desc; 953 int rx_buflen = isert_conn->login_req_len; 954 struct iscsi_conn *conn = isert_conn->conn; 955 struct iscsi_login *login = conn->conn_login; 956 int size; 957 958 isert_info("conn %p\n", isert_conn); 959 960 WARN_ON_ONCE(!login); 961 962 if (login->first_request) { 963 struct iscsi_login_req *login_req = 964 (struct iscsi_login_req *)isert_get_iscsi_hdr(rx_desc); 965 /* 966 * Setup the initial iscsi_login values from the leading 967 * login request PDU. 968 */ 969 login->leading_connection = (!login_req->tsih) ? 1 : 0; 970 login->current_stage = 971 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) 972 >> 2; 973 login->version_min = login_req->min_version; 974 login->version_max = login_req->max_version; 975 memcpy(login->isid, login_req->isid, 6); 976 login->cmd_sn = be32_to_cpu(login_req->cmdsn); 977 login->init_task_tag = login_req->itt; 978 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); 979 login->cid = be16_to_cpu(login_req->cid); 980 login->tsih = be16_to_cpu(login_req->tsih); 981 } 982 983 memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN); 984 985 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); 986 isert_dbg("Using login payload size: %d, rx_buflen: %d " 987 "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen, 988 MAX_KEY_VALUE_PAIRS); 989 memcpy(login->req_buf, isert_get_data(rx_desc), size); 990 991 if (login->first_request) { 992 complete(&isert_conn->login_comp); 993 return; 994 } 995 schedule_delayed_work(&conn->login_work, 0); 996 } 997 998 static struct iscsi_cmd 999 *isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc) 1000 { 1001 struct isert_conn *isert_conn = conn->context; 1002 struct isert_cmd *isert_cmd; 1003 struct iscsi_cmd *cmd; 1004 1005 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 1006 if (!cmd) { 1007 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n"); 1008 return NULL; 1009 } 1010 isert_cmd = iscsit_priv_cmd(cmd); 1011 isert_cmd->conn = isert_conn; 1012 isert_cmd->iscsi_cmd = cmd; 1013 isert_cmd->rx_desc = rx_desc; 1014 1015 return cmd; 1016 } 1017 1018 static int 1019 isert_handle_scsi_cmd(struct isert_conn *isert_conn, 1020 struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd, 1021 struct iser_rx_desc *rx_desc, unsigned char *buf) 1022 { 1023 struct iscsi_conn *conn = isert_conn->conn; 1024 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; 1025 int imm_data, imm_data_len, unsol_data, sg_nents, rc; 1026 bool dump_payload = false; 1027 unsigned int data_len; 1028 1029 rc = iscsit_setup_scsi_cmd(conn, cmd, buf); 1030 if (rc < 0) 1031 return rc; 1032 1033 imm_data = cmd->immediate_data; 1034 imm_data_len = cmd->first_burst_len; 1035 unsol_data = cmd->unsolicited_data; 1036 data_len = cmd->se_cmd.data_length; 1037 1038 if (imm_data && imm_data_len == data_len) 1039 cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1040 rc = iscsit_process_scsi_cmd(conn, cmd, hdr); 1041 if (rc < 0) { 1042 return 0; 1043 } else if (rc > 0) { 1044 dump_payload = true; 1045 goto sequence_cmd; 1046 } 1047 1048 if (!imm_data) 1049 return 0; 1050 1051 if (imm_data_len != data_len) { 1052 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); 1053 sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents, 1054 isert_get_data(rx_desc), imm_data_len); 1055 isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n", 1056 sg_nents, imm_data_len); 1057 } else { 1058 sg_init_table(&isert_cmd->sg, 1); 1059 cmd->se_cmd.t_data_sg = &isert_cmd->sg; 1060 cmd->se_cmd.t_data_nents = 1; 1061 sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc), 1062 imm_data_len); 1063 isert_dbg("Transfer Immediate imm_data_len: %d\n", 1064 imm_data_len); 1065 } 1066 1067 cmd->write_data_done += imm_data_len; 1068 1069 if (cmd->write_data_done == cmd->se_cmd.data_length) { 1070 spin_lock_bh(&cmd->istate_lock); 1071 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1072 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1073 spin_unlock_bh(&cmd->istate_lock); 1074 } 1075 1076 sequence_cmd: 1077 rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); 1078 1079 if (!rc && dump_payload == false && unsol_data) 1080 iscsit_set_unsolicited_dataout(cmd); 1081 else if (dump_payload && imm_data) 1082 target_put_sess_cmd(&cmd->se_cmd); 1083 1084 return 0; 1085 } 1086 1087 static int 1088 isert_handle_iscsi_dataout(struct isert_conn *isert_conn, 1089 struct iser_rx_desc *rx_desc, unsigned char *buf) 1090 { 1091 struct scatterlist *sg_start; 1092 struct iscsi_conn *conn = isert_conn->conn; 1093 struct iscsi_cmd *cmd = NULL; 1094 struct iscsi_data *hdr = (struct iscsi_data *)buf; 1095 u32 unsol_data_len = ntoh24(hdr->dlength); 1096 int rc, sg_nents, sg_off, page_off; 1097 1098 rc = iscsit_check_dataout_hdr(conn, buf, &cmd); 1099 if (rc < 0) 1100 return rc; 1101 else if (!cmd) 1102 return 0; 1103 /* 1104 * FIXME: Unexpected unsolicited_data out 1105 */ 1106 if (!cmd->unsolicited_data) { 1107 isert_err("Received unexpected solicited data payload\n"); 1108 dump_stack(); 1109 return -1; 1110 } 1111 1112 isert_dbg("Unsolicited DataOut unsol_data_len: %u, " 1113 "write_data_done: %u, data_length: %u\n", 1114 unsol_data_len, cmd->write_data_done, 1115 cmd->se_cmd.data_length); 1116 1117 sg_off = cmd->write_data_done / PAGE_SIZE; 1118 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 1119 sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE)); 1120 page_off = cmd->write_data_done % PAGE_SIZE; 1121 /* 1122 * FIXME: Non page-aligned unsolicited_data out 1123 */ 1124 if (page_off) { 1125 isert_err("unexpected non-page aligned data payload\n"); 1126 dump_stack(); 1127 return -1; 1128 } 1129 isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u " 1130 "sg_nents: %u from %p %u\n", sg_start, sg_off, 1131 sg_nents, isert_get_data(rx_desc), unsol_data_len); 1132 1133 sg_copy_from_buffer(sg_start, sg_nents, isert_get_data(rx_desc), 1134 unsol_data_len); 1135 1136 rc = iscsit_check_dataout_payload(cmd, hdr, false); 1137 if (rc < 0) 1138 return rc; 1139 1140 /* 1141 * multiple data-outs on the same command can arrive - 1142 * so post the buffer before hand 1143 */ 1144 rc = isert_post_recv(isert_conn, rx_desc); 1145 if (rc) { 1146 isert_err("ib_post_recv failed with %d\n", rc); 1147 return rc; 1148 } 1149 return 0; 1150 } 1151 1152 static int 1153 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1154 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, 1155 unsigned char *buf) 1156 { 1157 struct iscsi_conn *conn = isert_conn->conn; 1158 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf; 1159 int rc; 1160 1161 rc = iscsit_setup_nop_out(conn, cmd, hdr); 1162 if (rc < 0) 1163 return rc; 1164 /* 1165 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload 1166 */ 1167 1168 return iscsit_process_nop_out(conn, cmd, hdr); 1169 } 1170 1171 static int 1172 isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1173 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, 1174 struct iscsi_text *hdr) 1175 { 1176 struct iscsi_conn *conn = isert_conn->conn; 1177 u32 payload_length = ntoh24(hdr->dlength); 1178 int rc; 1179 unsigned char *text_in = NULL; 1180 1181 rc = iscsit_setup_text_cmd(conn, cmd, hdr); 1182 if (rc < 0) 1183 return rc; 1184 1185 if (payload_length) { 1186 text_in = kzalloc(payload_length, GFP_KERNEL); 1187 if (!text_in) 1188 return -ENOMEM; 1189 } 1190 cmd->text_in_ptr = text_in; 1191 1192 memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length); 1193 1194 return iscsit_process_text_cmd(conn, cmd, hdr); 1195 } 1196 1197 static int 1198 isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, 1199 uint32_t read_stag, uint64_t read_va, 1200 uint32_t write_stag, uint64_t write_va) 1201 { 1202 struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc); 1203 struct iscsi_conn *conn = isert_conn->conn; 1204 struct iscsi_cmd *cmd; 1205 struct isert_cmd *isert_cmd; 1206 int ret = -EINVAL; 1207 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK); 1208 1209 if (conn->sess->sess_ops->SessionType && 1210 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { 1211 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery," 1212 " ignoring\n", opcode); 1213 return 0; 1214 } 1215 1216 switch (opcode) { 1217 case ISCSI_OP_SCSI_CMD: 1218 cmd = isert_allocate_cmd(conn, rx_desc); 1219 if (!cmd) 1220 break; 1221 1222 isert_cmd = iscsit_priv_cmd(cmd); 1223 isert_cmd->read_stag = read_stag; 1224 isert_cmd->read_va = read_va; 1225 isert_cmd->write_stag = write_stag; 1226 isert_cmd->write_va = write_va; 1227 isert_cmd->inv_rkey = read_stag ? read_stag : write_stag; 1228 1229 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd, 1230 rx_desc, (unsigned char *)hdr); 1231 break; 1232 case ISCSI_OP_NOOP_OUT: 1233 cmd = isert_allocate_cmd(conn, rx_desc); 1234 if (!cmd) 1235 break; 1236 1237 isert_cmd = iscsit_priv_cmd(cmd); 1238 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd, 1239 rx_desc, (unsigned char *)hdr); 1240 break; 1241 case ISCSI_OP_SCSI_DATA_OUT: 1242 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc, 1243 (unsigned char *)hdr); 1244 break; 1245 case ISCSI_OP_SCSI_TMFUNC: 1246 cmd = isert_allocate_cmd(conn, rx_desc); 1247 if (!cmd) 1248 break; 1249 1250 ret = iscsit_handle_task_mgt_cmd(conn, cmd, 1251 (unsigned char *)hdr); 1252 break; 1253 case ISCSI_OP_LOGOUT: 1254 cmd = isert_allocate_cmd(conn, rx_desc); 1255 if (!cmd) 1256 break; 1257 1258 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); 1259 break; 1260 case ISCSI_OP_TEXT: 1261 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) 1262 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); 1263 else 1264 cmd = isert_allocate_cmd(conn, rx_desc); 1265 1266 if (!cmd) 1267 break; 1268 1269 isert_cmd = iscsit_priv_cmd(cmd); 1270 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd, 1271 rx_desc, (struct iscsi_text *)hdr); 1272 break; 1273 default: 1274 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode); 1275 dump_stack(); 1276 break; 1277 } 1278 1279 return ret; 1280 } 1281 1282 static void 1283 isert_print_wc(struct ib_wc *wc, const char *type) 1284 { 1285 if (wc->status != IB_WC_WR_FLUSH_ERR) 1286 isert_err("%s failure: %s (%d) vend_err %x\n", type, 1287 ib_wc_status_msg(wc->status), wc->status, 1288 wc->vendor_err); 1289 else 1290 isert_dbg("%s failure: %s (%d)\n", type, 1291 ib_wc_status_msg(wc->status), wc->status); 1292 } 1293 1294 static void 1295 isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) 1296 { 1297 struct isert_conn *isert_conn = wc->qp->qp_context; 1298 struct ib_device *ib_dev = isert_conn->cm_id->device; 1299 struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe); 1300 struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc); 1301 struct iser_ctrl *iser_ctrl = isert_get_iser_hdr(rx_desc); 1302 uint64_t read_va = 0, write_va = 0; 1303 uint32_t read_stag = 0, write_stag = 0; 1304 1305 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1306 isert_print_wc(wc, "recv"); 1307 if (wc->status != IB_WC_WR_FLUSH_ERR) 1308 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1309 return; 1310 } 1311 1312 rx_desc->in_use = true; 1313 1314 ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr, 1315 ISER_RX_SIZE, DMA_FROM_DEVICE); 1316 1317 isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", 1318 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags, 1319 (int)(wc->byte_len - ISER_HEADERS_LEN)); 1320 1321 switch (iser_ctrl->flags & 0xF0) { 1322 case ISCSI_CTRL: 1323 if (iser_ctrl->flags & ISER_RSV) { 1324 read_stag = be32_to_cpu(iser_ctrl->read_stag); 1325 read_va = be64_to_cpu(iser_ctrl->read_va); 1326 isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n", 1327 read_stag, (unsigned long long)read_va); 1328 } 1329 if (iser_ctrl->flags & ISER_WSV) { 1330 write_stag = be32_to_cpu(iser_ctrl->write_stag); 1331 write_va = be64_to_cpu(iser_ctrl->write_va); 1332 isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n", 1333 write_stag, (unsigned long long)write_va); 1334 } 1335 1336 isert_dbg("ISER ISCSI_CTRL PDU\n"); 1337 break; 1338 case ISER_HELLO: 1339 isert_err("iSER Hello message\n"); 1340 break; 1341 default: 1342 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags); 1343 break; 1344 } 1345 1346 isert_rx_opcode(isert_conn, rx_desc, 1347 read_stag, read_va, write_stag, write_va); 1348 1349 ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr, 1350 ISER_RX_SIZE, DMA_FROM_DEVICE); 1351 } 1352 1353 static void 1354 isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) 1355 { 1356 struct isert_conn *isert_conn = wc->qp->qp_context; 1357 struct ib_device *ib_dev = isert_conn->device->ib_device; 1358 1359 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1360 isert_print_wc(wc, "login recv"); 1361 return; 1362 } 1363 1364 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr, 1365 ISER_RX_SIZE, DMA_FROM_DEVICE); 1366 1367 isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN; 1368 1369 if (isert_conn->conn) { 1370 struct iscsi_login *login = isert_conn->conn->conn_login; 1371 1372 if (login && !login->first_request) 1373 isert_rx_login_req(isert_conn); 1374 } 1375 1376 mutex_lock(&isert_conn->mutex); 1377 complete(&isert_conn->login_req_comp); 1378 mutex_unlock(&isert_conn->mutex); 1379 1380 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr, 1381 ISER_RX_SIZE, DMA_FROM_DEVICE); 1382 } 1383 1384 static void 1385 isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn) 1386 { 1387 struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd; 1388 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd); 1389 1390 if (!cmd->rw.nr_ops) 1391 return; 1392 1393 if (isert_prot_cmd(conn, se_cmd)) { 1394 rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp, 1395 conn->cm_id->port_num, se_cmd->t_data_sg, 1396 se_cmd->t_data_nents, se_cmd->t_prot_sg, 1397 se_cmd->t_prot_nents, dir); 1398 } else { 1399 rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num, 1400 se_cmd->t_data_sg, se_cmd->t_data_nents, dir); 1401 } 1402 1403 cmd->rw.nr_ops = 0; 1404 } 1405 1406 static void 1407 isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) 1408 { 1409 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1410 struct isert_conn *isert_conn = isert_cmd->conn; 1411 struct iscsi_conn *conn = isert_conn->conn; 1412 struct iscsi_text_rsp *hdr; 1413 1414 isert_dbg("Cmd %p\n", isert_cmd); 1415 1416 switch (cmd->iscsi_opcode) { 1417 case ISCSI_OP_SCSI_CMD: 1418 spin_lock_bh(&conn->cmd_lock); 1419 if (!list_empty(&cmd->i_conn_node)) 1420 list_del_init(&cmd->i_conn_node); 1421 spin_unlock_bh(&conn->cmd_lock); 1422 1423 if (cmd->data_direction == DMA_TO_DEVICE) { 1424 iscsit_stop_dataout_timer(cmd); 1425 /* 1426 * Check for special case during comp_err where 1427 * WRITE_PENDING has been handed off from core, 1428 * but requires an extra target_put_sess_cmd() 1429 * before transport_generic_free_cmd() below. 1430 */ 1431 if (comp_err && 1432 cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) { 1433 struct se_cmd *se_cmd = &cmd->se_cmd; 1434 1435 target_put_sess_cmd(se_cmd); 1436 } 1437 } 1438 1439 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1440 transport_generic_free_cmd(&cmd->se_cmd, 0); 1441 break; 1442 case ISCSI_OP_SCSI_TMFUNC: 1443 spin_lock_bh(&conn->cmd_lock); 1444 if (!list_empty(&cmd->i_conn_node)) 1445 list_del_init(&cmd->i_conn_node); 1446 spin_unlock_bh(&conn->cmd_lock); 1447 1448 transport_generic_free_cmd(&cmd->se_cmd, 0); 1449 break; 1450 case ISCSI_OP_REJECT: 1451 case ISCSI_OP_NOOP_OUT: 1452 case ISCSI_OP_TEXT: 1453 hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; 1454 /* If the continue bit is on, keep the command alive */ 1455 if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE) 1456 break; 1457 1458 spin_lock_bh(&conn->cmd_lock); 1459 if (!list_empty(&cmd->i_conn_node)) 1460 list_del_init(&cmd->i_conn_node); 1461 spin_unlock_bh(&conn->cmd_lock); 1462 1463 /* 1464 * Handle special case for REJECT when iscsi_add_reject*() has 1465 * overwritten the original iscsi_opcode assignment, and the 1466 * associated cmd->se_cmd needs to be released. 1467 */ 1468 if (cmd->se_cmd.se_tfo != NULL) { 1469 isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n", 1470 cmd->iscsi_opcode); 1471 transport_generic_free_cmd(&cmd->se_cmd, 0); 1472 break; 1473 } 1474 fallthrough; 1475 default: 1476 iscsit_release_cmd(cmd); 1477 break; 1478 } 1479 } 1480 1481 static void 1482 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) 1483 { 1484 if (tx_desc->dma_addr != 0) { 1485 isert_dbg("unmap single for tx_desc->dma_addr\n"); 1486 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr, 1487 ISER_HEADERS_LEN, DMA_TO_DEVICE); 1488 tx_desc->dma_addr = 0; 1489 } 1490 } 1491 1492 static void 1493 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, 1494 struct ib_device *ib_dev, bool comp_err) 1495 { 1496 if (isert_cmd->pdu_buf_dma != 0) { 1497 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n"); 1498 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma, 1499 isert_cmd->pdu_buf_len, DMA_TO_DEVICE); 1500 isert_cmd->pdu_buf_dma = 0; 1501 } 1502 1503 isert_unmap_tx_desc(tx_desc, ib_dev); 1504 isert_put_cmd(isert_cmd, comp_err); 1505 } 1506 1507 static int 1508 isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr) 1509 { 1510 struct ib_mr_status mr_status; 1511 int ret; 1512 1513 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); 1514 if (ret) { 1515 isert_err("ib_check_mr_status failed, ret %d\n", ret); 1516 goto fail_mr_status; 1517 } 1518 1519 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { 1520 u64 sec_offset_err; 1521 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8; 1522 1523 switch (mr_status.sig_err.err_type) { 1524 case IB_SIG_BAD_GUARD: 1525 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 1526 break; 1527 case IB_SIG_BAD_REFTAG: 1528 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 1529 break; 1530 case IB_SIG_BAD_APPTAG: 1531 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; 1532 break; 1533 } 1534 sec_offset_err = mr_status.sig_err.sig_err_offset; 1535 do_div(sec_offset_err, block_size); 1536 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba; 1537 1538 isert_err("PI error found type %d at sector 0x%llx " 1539 "expected 0x%x vs actual 0x%x\n", 1540 mr_status.sig_err.err_type, 1541 (unsigned long long)se_cmd->bad_sector, 1542 mr_status.sig_err.expected, 1543 mr_status.sig_err.actual); 1544 ret = 1; 1545 } 1546 1547 fail_mr_status: 1548 return ret; 1549 } 1550 1551 static void 1552 isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc) 1553 { 1554 struct isert_conn *isert_conn = wc->qp->qp_context; 1555 struct isert_device *device = isert_conn->device; 1556 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); 1557 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc); 1558 struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd; 1559 int ret = 0; 1560 1561 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1562 isert_print_wc(wc, "rdma write"); 1563 if (wc->status != IB_WC_WR_FLUSH_ERR) 1564 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1565 isert_completion_put(desc, isert_cmd, device->ib_device, true); 1566 return; 1567 } 1568 1569 isert_dbg("Cmd %p\n", isert_cmd); 1570 1571 ret = isert_check_pi_status(cmd, isert_cmd->rw.reg->mr); 1572 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1573 1574 if (ret) { 1575 /* 1576 * transport_generic_request_failure() expects to have 1577 * plus two references to handle queue-full, so re-add 1578 * one here as target-core will have already dropped 1579 * it after the first isert_put_datain() callback. 1580 */ 1581 kref_get(&cmd->cmd_kref); 1582 transport_generic_request_failure(cmd, cmd->pi_err); 1583 } else { 1584 /* 1585 * XXX: isert_put_response() failure is not retried. 1586 */ 1587 ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd); 1588 if (ret) 1589 pr_warn_ratelimited("isert_put_response() ret: %d\n", ret); 1590 } 1591 } 1592 1593 static void 1594 isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc) 1595 { 1596 struct isert_conn *isert_conn = wc->qp->qp_context; 1597 struct isert_device *device = isert_conn->device; 1598 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); 1599 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc); 1600 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1601 struct se_cmd *se_cmd = &cmd->se_cmd; 1602 int ret = 0; 1603 1604 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1605 isert_print_wc(wc, "rdma read"); 1606 if (wc->status != IB_WC_WR_FLUSH_ERR) 1607 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1608 isert_completion_put(desc, isert_cmd, device->ib_device, true); 1609 return; 1610 } 1611 1612 isert_dbg("Cmd %p\n", isert_cmd); 1613 1614 iscsit_stop_dataout_timer(cmd); 1615 1616 if (isert_prot_cmd(isert_conn, se_cmd)) 1617 ret = isert_check_pi_status(se_cmd, isert_cmd->rw.reg->mr); 1618 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1619 cmd->write_data_done = 0; 1620 1621 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); 1622 spin_lock_bh(&cmd->istate_lock); 1623 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1624 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1625 spin_unlock_bh(&cmd->istate_lock); 1626 1627 /* 1628 * transport_generic_request_failure() will drop the extra 1629 * se_cmd->cmd_kref reference after T10-PI error, and handle 1630 * any non-zero ->queue_status() callback error retries. 1631 */ 1632 if (ret) 1633 transport_generic_request_failure(se_cmd, se_cmd->pi_err); 1634 else 1635 target_execute_cmd(se_cmd); 1636 } 1637 1638 static void 1639 isert_do_control_comp(struct work_struct *work) 1640 { 1641 struct isert_cmd *isert_cmd = container_of(work, 1642 struct isert_cmd, comp_work); 1643 struct isert_conn *isert_conn = isert_cmd->conn; 1644 struct ib_device *ib_dev = isert_conn->cm_id->device; 1645 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1646 1647 isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state); 1648 1649 switch (cmd->i_state) { 1650 case ISTATE_SEND_TASKMGTRSP: 1651 iscsit_tmr_post_handler(cmd, cmd->conn); 1652 fallthrough; 1653 case ISTATE_SEND_REJECT: 1654 case ISTATE_SEND_TEXTRSP: 1655 cmd->i_state = ISTATE_SENT_STATUS; 1656 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, 1657 ib_dev, false); 1658 break; 1659 case ISTATE_SEND_LOGOUTRSP: 1660 iscsit_logout_post_handler(cmd, cmd->conn); 1661 break; 1662 default: 1663 isert_err("Unknown i_state %d\n", cmd->i_state); 1664 dump_stack(); 1665 break; 1666 } 1667 } 1668 1669 static void 1670 isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc) 1671 { 1672 struct isert_conn *isert_conn = wc->qp->qp_context; 1673 struct ib_device *ib_dev = isert_conn->cm_id->device; 1674 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); 1675 1676 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1677 isert_print_wc(wc, "login send"); 1678 if (wc->status != IB_WC_WR_FLUSH_ERR) 1679 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1680 } 1681 1682 isert_unmap_tx_desc(tx_desc, ib_dev); 1683 } 1684 1685 static void 1686 isert_send_done(struct ib_cq *cq, struct ib_wc *wc) 1687 { 1688 struct isert_conn *isert_conn = wc->qp->qp_context; 1689 struct ib_device *ib_dev = isert_conn->cm_id->device; 1690 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); 1691 struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc); 1692 1693 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1694 isert_print_wc(wc, "send"); 1695 if (wc->status != IB_WC_WR_FLUSH_ERR) 1696 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 1697 isert_completion_put(tx_desc, isert_cmd, ib_dev, true); 1698 return; 1699 } 1700 1701 isert_dbg("Cmd %p\n", isert_cmd); 1702 1703 switch (isert_cmd->iscsi_cmd->i_state) { 1704 case ISTATE_SEND_TASKMGTRSP: 1705 case ISTATE_SEND_LOGOUTRSP: 1706 case ISTATE_SEND_REJECT: 1707 case ISTATE_SEND_TEXTRSP: 1708 isert_unmap_tx_desc(tx_desc, ib_dev); 1709 1710 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp); 1711 queue_work(isert_comp_wq, &isert_cmd->comp_work); 1712 return; 1713 default: 1714 isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS; 1715 isert_completion_put(tx_desc, isert_cmd, ib_dev, false); 1716 break; 1717 } 1718 } 1719 1720 static int 1721 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd) 1722 { 1723 int ret; 1724 1725 ret = isert_post_recv(isert_conn, isert_cmd->rx_desc); 1726 if (ret) { 1727 isert_err("ib_post_recv failed with %d\n", ret); 1728 return ret; 1729 } 1730 1731 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, NULL); 1732 if (ret) { 1733 isert_err("ib_post_send failed with %d\n", ret); 1734 return ret; 1735 } 1736 return ret; 1737 } 1738 1739 static int 1740 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 1741 { 1742 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1743 struct isert_conn *isert_conn = conn->context; 1744 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1745 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *) 1746 &isert_cmd->tx_desc.iscsi_header; 1747 1748 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1749 iscsit_build_rsp_pdu(cmd, conn, true, hdr); 1750 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1751 /* 1752 * Attach SENSE DATA payload to iSCSI Response PDU 1753 */ 1754 if (cmd->se_cmd.sense_buffer && 1755 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || 1756 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { 1757 struct isert_device *device = isert_conn->device; 1758 struct ib_device *ib_dev = device->ib_device; 1759 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 1760 u32 padding, pdu_len; 1761 1762 put_unaligned_be16(cmd->se_cmd.scsi_sense_length, 1763 cmd->sense_buffer); 1764 cmd->se_cmd.scsi_sense_length += sizeof(__be16); 1765 1766 padding = -(cmd->se_cmd.scsi_sense_length) & 3; 1767 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length); 1768 pdu_len = cmd->se_cmd.scsi_sense_length + padding; 1769 1770 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 1771 (void *)cmd->sense_buffer, pdu_len, 1772 DMA_TO_DEVICE); 1773 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) 1774 return -ENOMEM; 1775 1776 isert_cmd->pdu_buf_len = pdu_len; 1777 tx_dsg->addr = isert_cmd->pdu_buf_dma; 1778 tx_dsg->length = pdu_len; 1779 tx_dsg->lkey = device->pd->local_dma_lkey; 1780 isert_cmd->tx_desc.num_sge = 2; 1781 } 1782 1783 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1784 1785 isert_dbg("Posting SCSI Response\n"); 1786 1787 return isert_post_response(isert_conn, isert_cmd); 1788 } 1789 1790 static void 1791 isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 1792 { 1793 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1794 struct isert_conn *isert_conn = conn->context; 1795 1796 spin_lock_bh(&conn->cmd_lock); 1797 if (!list_empty(&cmd->i_conn_node)) 1798 list_del_init(&cmd->i_conn_node); 1799 spin_unlock_bh(&conn->cmd_lock); 1800 1801 if (cmd->data_direction == DMA_TO_DEVICE) 1802 iscsit_stop_dataout_timer(cmd); 1803 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1804 } 1805 1806 static enum target_prot_op 1807 isert_get_sup_prot_ops(struct iscsi_conn *conn) 1808 { 1809 struct isert_conn *isert_conn = conn->context; 1810 struct isert_device *device = isert_conn->device; 1811 1812 if (conn->tpg->tpg_attrib.t10_pi) { 1813 if (device->pi_capable) { 1814 isert_info("conn %p PI offload enabled\n", isert_conn); 1815 isert_conn->pi_support = true; 1816 return TARGET_PROT_ALL; 1817 } 1818 } 1819 1820 isert_info("conn %p PI offload disabled\n", isert_conn); 1821 isert_conn->pi_support = false; 1822 1823 return TARGET_PROT_NORMAL; 1824 } 1825 1826 static int 1827 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 1828 bool nopout_response) 1829 { 1830 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1831 struct isert_conn *isert_conn = conn->context; 1832 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1833 1834 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1835 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *) 1836 &isert_cmd->tx_desc.iscsi_header, 1837 nopout_response); 1838 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1839 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1840 1841 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn); 1842 1843 return isert_post_response(isert_conn, isert_cmd); 1844 } 1845 1846 static int 1847 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1848 { 1849 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1850 struct isert_conn *isert_conn = conn->context; 1851 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1852 1853 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1854 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *) 1855 &isert_cmd->tx_desc.iscsi_header); 1856 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1857 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1858 1859 isert_dbg("conn %p Posting Logout Response\n", isert_conn); 1860 1861 return isert_post_response(isert_conn, isert_cmd); 1862 } 1863 1864 static int 1865 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1866 { 1867 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1868 struct isert_conn *isert_conn = conn->context; 1869 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1870 1871 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1872 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *) 1873 &isert_cmd->tx_desc.iscsi_header); 1874 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1875 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1876 1877 isert_dbg("conn %p Posting Task Management Response\n", isert_conn); 1878 1879 return isert_post_response(isert_conn, isert_cmd); 1880 } 1881 1882 static int 1883 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1884 { 1885 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1886 struct isert_conn *isert_conn = conn->context; 1887 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1888 struct isert_device *device = isert_conn->device; 1889 struct ib_device *ib_dev = device->ib_device; 1890 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 1891 struct iscsi_reject *hdr = 1892 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header; 1893 1894 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1895 iscsit_build_reject(cmd, conn, hdr); 1896 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1897 1898 hton24(hdr->dlength, ISCSI_HDR_LEN); 1899 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 1900 (void *)cmd->buf_ptr, ISCSI_HDR_LEN, 1901 DMA_TO_DEVICE); 1902 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) 1903 return -ENOMEM; 1904 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN; 1905 tx_dsg->addr = isert_cmd->pdu_buf_dma; 1906 tx_dsg->length = ISCSI_HDR_LEN; 1907 tx_dsg->lkey = device->pd->local_dma_lkey; 1908 isert_cmd->tx_desc.num_sge = 2; 1909 1910 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1911 1912 isert_dbg("conn %p Posting Reject\n", isert_conn); 1913 1914 return isert_post_response(isert_conn, isert_cmd); 1915 } 1916 1917 static int 1918 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1919 { 1920 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 1921 struct isert_conn *isert_conn = conn->context; 1922 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1923 struct iscsi_text_rsp *hdr = 1924 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; 1925 u32 txt_rsp_len; 1926 int rc; 1927 1928 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 1929 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND); 1930 if (rc < 0) 1931 return rc; 1932 1933 txt_rsp_len = rc; 1934 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 1935 1936 if (txt_rsp_len) { 1937 struct isert_device *device = isert_conn->device; 1938 struct ib_device *ib_dev = device->ib_device; 1939 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 1940 void *txt_rsp_buf = cmd->buf_ptr; 1941 1942 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 1943 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE); 1944 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) 1945 return -ENOMEM; 1946 1947 isert_cmd->pdu_buf_len = txt_rsp_len; 1948 tx_dsg->addr = isert_cmd->pdu_buf_dma; 1949 tx_dsg->length = txt_rsp_len; 1950 tx_dsg->lkey = device->pd->local_dma_lkey; 1951 isert_cmd->tx_desc.num_sge = 2; 1952 } 1953 isert_init_send_wr(isert_conn, isert_cmd, send_wr); 1954 1955 isert_dbg("conn %p Text Response\n", isert_conn); 1956 1957 return isert_post_response(isert_conn, isert_cmd); 1958 } 1959 1960 static inline void 1961 isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_domain *domain) 1962 { 1963 domain->sig_type = IB_SIG_TYPE_T10_DIF; 1964 domain->sig.dif.bg_type = IB_T10DIF_CRC; 1965 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size; 1966 domain->sig.dif.ref_tag = se_cmd->reftag_seed; 1967 /* 1968 * At the moment we hard code those, but if in the future 1969 * the target core would like to use it, we will take it 1970 * from se_cmd. 1971 */ 1972 domain->sig.dif.apptag_check_mask = 0xffff; 1973 domain->sig.dif.app_escape = true; 1974 domain->sig.dif.ref_escape = true; 1975 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT || 1976 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT) 1977 domain->sig.dif.ref_remap = true; 1978 }; 1979 1980 static int 1981 isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs) 1982 { 1983 memset(sig_attrs, 0, sizeof(*sig_attrs)); 1984 1985 switch (se_cmd->prot_op) { 1986 case TARGET_PROT_DIN_INSERT: 1987 case TARGET_PROT_DOUT_STRIP: 1988 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE; 1989 isert_set_dif_domain(se_cmd, &sig_attrs->wire); 1990 break; 1991 case TARGET_PROT_DOUT_INSERT: 1992 case TARGET_PROT_DIN_STRIP: 1993 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE; 1994 isert_set_dif_domain(se_cmd, &sig_attrs->mem); 1995 break; 1996 case TARGET_PROT_DIN_PASS: 1997 case TARGET_PROT_DOUT_PASS: 1998 isert_set_dif_domain(se_cmd, &sig_attrs->wire); 1999 isert_set_dif_domain(se_cmd, &sig_attrs->mem); 2000 break; 2001 default: 2002 isert_err("Unsupported PI operation %d\n", se_cmd->prot_op); 2003 return -EINVAL; 2004 } 2005 2006 if (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD) 2007 sig_attrs->check_mask |= IB_SIG_CHECK_GUARD; 2008 if (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG) 2009 sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG; 2010 if (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG) 2011 sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG; 2012 2013 return 0; 2014 } 2015 2016 static int 2017 isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn, 2018 struct ib_cqe *cqe, struct ib_send_wr *chain_wr) 2019 { 2020 struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd; 2021 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd); 2022 u8 port_num = conn->cm_id->port_num; 2023 u64 addr; 2024 u32 rkey, offset; 2025 int ret; 2026 2027 if (cmd->ctx_init_done) 2028 goto rdma_ctx_post; 2029 2030 if (dir == DMA_FROM_DEVICE) { 2031 addr = cmd->write_va; 2032 rkey = cmd->write_stag; 2033 offset = cmd->iscsi_cmd->write_data_done; 2034 } else { 2035 addr = cmd->read_va; 2036 rkey = cmd->read_stag; 2037 offset = 0; 2038 } 2039 2040 if (isert_prot_cmd(conn, se_cmd)) { 2041 struct ib_sig_attrs sig_attrs; 2042 2043 ret = isert_set_sig_attrs(se_cmd, &sig_attrs); 2044 if (ret) 2045 return ret; 2046 2047 WARN_ON_ONCE(offset); 2048 ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num, 2049 se_cmd->t_data_sg, se_cmd->t_data_nents, 2050 se_cmd->t_prot_sg, se_cmd->t_prot_nents, 2051 &sig_attrs, addr, rkey, dir); 2052 } else { 2053 ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num, 2054 se_cmd->t_data_sg, se_cmd->t_data_nents, 2055 offset, addr, rkey, dir); 2056 } 2057 2058 if (ret < 0) { 2059 isert_err("Cmd: %p failed to prepare RDMA res\n", cmd); 2060 return ret; 2061 } 2062 2063 cmd->ctx_init_done = true; 2064 2065 rdma_ctx_post: 2066 ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr); 2067 if (ret < 0) 2068 isert_err("Cmd: %p failed to post RDMA res\n", cmd); 2069 return ret; 2070 } 2071 2072 static int 2073 isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 2074 { 2075 struct se_cmd *se_cmd = &cmd->se_cmd; 2076 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2077 struct isert_conn *isert_conn = conn->context; 2078 struct ib_cqe *cqe = NULL; 2079 struct ib_send_wr *chain_wr = NULL; 2080 int rc; 2081 2082 isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n", 2083 isert_cmd, se_cmd->data_length); 2084 2085 if (isert_prot_cmd(isert_conn, se_cmd)) { 2086 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done; 2087 cqe = &isert_cmd->tx_desc.tx_cqe; 2088 } else { 2089 /* 2090 * Build isert_conn->tx_desc for iSCSI response PDU and attach 2091 */ 2092 isert_create_send_desc(isert_conn, isert_cmd, 2093 &isert_cmd->tx_desc); 2094 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *) 2095 &isert_cmd->tx_desc.iscsi_header); 2096 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2097 isert_init_send_wr(isert_conn, isert_cmd, 2098 &isert_cmd->tx_desc.send_wr); 2099 2100 rc = isert_post_recv(isert_conn, isert_cmd->rx_desc); 2101 if (rc) { 2102 isert_err("ib_post_recv failed with %d\n", rc); 2103 return rc; 2104 } 2105 2106 chain_wr = &isert_cmd->tx_desc.send_wr; 2107 } 2108 2109 rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr); 2110 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n", 2111 isert_cmd, rc); 2112 return rc; 2113 } 2114 2115 static int 2116 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) 2117 { 2118 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2119 int ret; 2120 2121 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", 2122 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done); 2123 2124 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done; 2125 ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context, 2126 &isert_cmd->tx_desc.tx_cqe, NULL); 2127 2128 isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n", 2129 isert_cmd, ret); 2130 return ret; 2131 } 2132 2133 static int 2134 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 2135 { 2136 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2137 int ret = 0; 2138 2139 switch (state) { 2140 case ISTATE_REMOVE: 2141 spin_lock_bh(&conn->cmd_lock); 2142 list_del_init(&cmd->i_conn_node); 2143 spin_unlock_bh(&conn->cmd_lock); 2144 isert_put_cmd(isert_cmd, true); 2145 break; 2146 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 2147 ret = isert_put_nopin(cmd, conn, false); 2148 break; 2149 default: 2150 isert_err("Unknown immediate state: 0x%02x\n", state); 2151 ret = -EINVAL; 2152 break; 2153 } 2154 2155 return ret; 2156 } 2157 2158 static int 2159 isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 2160 { 2161 struct isert_conn *isert_conn = conn->context; 2162 int ret; 2163 2164 switch (state) { 2165 case ISTATE_SEND_LOGOUTRSP: 2166 ret = isert_put_logout_rsp(cmd, conn); 2167 if (!ret) 2168 isert_conn->logout_posted = true; 2169 break; 2170 case ISTATE_SEND_NOPIN: 2171 ret = isert_put_nopin(cmd, conn, true); 2172 break; 2173 case ISTATE_SEND_TASKMGTRSP: 2174 ret = isert_put_tm_rsp(cmd, conn); 2175 break; 2176 case ISTATE_SEND_REJECT: 2177 ret = isert_put_reject(cmd, conn); 2178 break; 2179 case ISTATE_SEND_TEXTRSP: 2180 ret = isert_put_text_rsp(cmd, conn); 2181 break; 2182 case ISTATE_SEND_STATUS: 2183 /* 2184 * Special case for sending non GOOD SCSI status from TX thread 2185 * context during pre se_cmd excecution failure. 2186 */ 2187 ret = isert_put_response(conn, cmd); 2188 break; 2189 default: 2190 isert_err("Unknown response state: 0x%02x\n", state); 2191 ret = -EINVAL; 2192 break; 2193 } 2194 2195 return ret; 2196 } 2197 2198 struct rdma_cm_id * 2199 isert_setup_id(struct isert_np *isert_np) 2200 { 2201 struct iscsi_np *np = isert_np->np; 2202 struct rdma_cm_id *id; 2203 struct sockaddr *sa; 2204 int ret; 2205 2206 sa = (struct sockaddr *)&np->np_sockaddr; 2207 isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa); 2208 2209 id = rdma_create_id(&init_net, isert_cma_handler, isert_np, 2210 RDMA_PS_TCP, IB_QPT_RC); 2211 if (IS_ERR(id)) { 2212 isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id)); 2213 ret = PTR_ERR(id); 2214 goto out; 2215 } 2216 isert_dbg("id %p context %p\n", id, id->context); 2217 2218 ret = rdma_bind_addr(id, sa); 2219 if (ret) { 2220 isert_err("rdma_bind_addr() failed: %d\n", ret); 2221 goto out_id; 2222 } 2223 2224 ret = rdma_listen(id, 0); 2225 if (ret) { 2226 isert_err("rdma_listen() failed: %d\n", ret); 2227 goto out_id; 2228 } 2229 2230 return id; 2231 out_id: 2232 rdma_destroy_id(id); 2233 out: 2234 return ERR_PTR(ret); 2235 } 2236 2237 static int 2238 isert_setup_np(struct iscsi_np *np, 2239 struct sockaddr_storage *ksockaddr) 2240 { 2241 struct isert_np *isert_np; 2242 struct rdma_cm_id *isert_lid; 2243 int ret; 2244 2245 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL); 2246 if (!isert_np) 2247 return -ENOMEM; 2248 2249 sema_init(&isert_np->sem, 0); 2250 mutex_init(&isert_np->mutex); 2251 INIT_LIST_HEAD(&isert_np->accepted); 2252 INIT_LIST_HEAD(&isert_np->pending); 2253 isert_np->np = np; 2254 2255 /* 2256 * Setup the np->np_sockaddr from the passed sockaddr setup 2257 * in iscsi_target_configfs.c code.. 2258 */ 2259 memcpy(&np->np_sockaddr, ksockaddr, 2260 sizeof(struct sockaddr_storage)); 2261 2262 isert_lid = isert_setup_id(isert_np); 2263 if (IS_ERR(isert_lid)) { 2264 ret = PTR_ERR(isert_lid); 2265 goto out; 2266 } 2267 2268 isert_np->cm_id = isert_lid; 2269 np->np_context = isert_np; 2270 2271 return 0; 2272 2273 out: 2274 kfree(isert_np); 2275 2276 return ret; 2277 } 2278 2279 static int 2280 isert_rdma_accept(struct isert_conn *isert_conn) 2281 { 2282 struct rdma_cm_id *cm_id = isert_conn->cm_id; 2283 struct rdma_conn_param cp; 2284 int ret; 2285 struct iser_cm_hdr rsp_hdr; 2286 2287 memset(&cp, 0, sizeof(struct rdma_conn_param)); 2288 cp.initiator_depth = isert_conn->initiator_depth; 2289 cp.retry_count = 7; 2290 cp.rnr_retry_count = 7; 2291 2292 memset(&rsp_hdr, 0, sizeof(rsp_hdr)); 2293 rsp_hdr.flags = ISERT_ZBVA_NOT_USED; 2294 if (!isert_conn->snd_w_inv) 2295 rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED; 2296 cp.private_data = (void *)&rsp_hdr; 2297 cp.private_data_len = sizeof(rsp_hdr); 2298 2299 ret = rdma_accept(cm_id, &cp); 2300 if (ret) { 2301 isert_err("rdma_accept() failed with: %d\n", ret); 2302 return ret; 2303 } 2304 2305 return 0; 2306 } 2307 2308 static int 2309 isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) 2310 { 2311 struct isert_conn *isert_conn = conn->context; 2312 int ret; 2313 2314 isert_info("before login_req comp conn: %p\n", isert_conn); 2315 ret = wait_for_completion_interruptible(&isert_conn->login_req_comp); 2316 if (ret) { 2317 isert_err("isert_conn %p interrupted before got login req\n", 2318 isert_conn); 2319 return ret; 2320 } 2321 reinit_completion(&isert_conn->login_req_comp); 2322 2323 /* 2324 * For login requests after the first PDU, isert_rx_login_req() will 2325 * kick schedule_delayed_work(&conn->login_work) as the packet is 2326 * received, which turns this callback from iscsi_target_do_login_rx() 2327 * into a NOP. 2328 */ 2329 if (!login->first_request) 2330 return 0; 2331 2332 isert_rx_login_req(isert_conn); 2333 2334 isert_info("before login_comp conn: %p\n", conn); 2335 ret = wait_for_completion_interruptible(&isert_conn->login_comp); 2336 if (ret) 2337 return ret; 2338 2339 isert_info("processing login->req: %p\n", login->req); 2340 2341 return 0; 2342 } 2343 2344 static void 2345 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, 2346 struct isert_conn *isert_conn) 2347 { 2348 struct rdma_cm_id *cm_id = isert_conn->cm_id; 2349 struct rdma_route *cm_route = &cm_id->route; 2350 2351 conn->login_family = np->np_sockaddr.ss_family; 2352 2353 conn->login_sockaddr = cm_route->addr.dst_addr; 2354 conn->local_sockaddr = cm_route->addr.src_addr; 2355 } 2356 2357 static int 2358 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) 2359 { 2360 struct isert_np *isert_np = np->np_context; 2361 struct isert_conn *isert_conn; 2362 int ret; 2363 2364 accept_wait: 2365 ret = down_interruptible(&isert_np->sem); 2366 if (ret) 2367 return -ENODEV; 2368 2369 spin_lock_bh(&np->np_thread_lock); 2370 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { 2371 spin_unlock_bh(&np->np_thread_lock); 2372 isert_dbg("np_thread_state %d\n", 2373 np->np_thread_state); 2374 /** 2375 * No point in stalling here when np_thread 2376 * is in state RESET/SHUTDOWN/EXIT - bail 2377 **/ 2378 return -ENODEV; 2379 } 2380 spin_unlock_bh(&np->np_thread_lock); 2381 2382 mutex_lock(&isert_np->mutex); 2383 if (list_empty(&isert_np->pending)) { 2384 mutex_unlock(&isert_np->mutex); 2385 goto accept_wait; 2386 } 2387 isert_conn = list_first_entry(&isert_np->pending, 2388 struct isert_conn, node); 2389 list_del_init(&isert_conn->node); 2390 mutex_unlock(&isert_np->mutex); 2391 2392 conn->context = isert_conn; 2393 isert_conn->conn = conn; 2394 isert_conn->state = ISER_CONN_BOUND; 2395 2396 isert_set_conn_info(np, conn, isert_conn); 2397 2398 isert_dbg("Processing isert_conn: %p\n", isert_conn); 2399 2400 return 0; 2401 } 2402 2403 static void 2404 isert_free_np(struct iscsi_np *np) 2405 { 2406 struct isert_np *isert_np = np->np_context; 2407 struct isert_conn *isert_conn, *n; 2408 2409 if (isert_np->cm_id) 2410 rdma_destroy_id(isert_np->cm_id); 2411 2412 /* 2413 * FIXME: At this point we don't have a good way to insure 2414 * that at this point we don't have hanging connections that 2415 * completed RDMA establishment but didn't start iscsi login 2416 * process. So work-around this by cleaning up what ever piled 2417 * up in accepted and pending lists. 2418 */ 2419 mutex_lock(&isert_np->mutex); 2420 if (!list_empty(&isert_np->pending)) { 2421 isert_info("Still have isert pending connections\n"); 2422 list_for_each_entry_safe(isert_conn, n, 2423 &isert_np->pending, 2424 node) { 2425 isert_info("cleaning isert_conn %p state (%d)\n", 2426 isert_conn, isert_conn->state); 2427 isert_connect_release(isert_conn); 2428 } 2429 } 2430 2431 if (!list_empty(&isert_np->accepted)) { 2432 isert_info("Still have isert accepted connections\n"); 2433 list_for_each_entry_safe(isert_conn, n, 2434 &isert_np->accepted, 2435 node) { 2436 isert_info("cleaning isert_conn %p state (%d)\n", 2437 isert_conn, isert_conn->state); 2438 isert_connect_release(isert_conn); 2439 } 2440 } 2441 mutex_unlock(&isert_np->mutex); 2442 2443 np->np_context = NULL; 2444 kfree(isert_np); 2445 } 2446 2447 static void isert_release_work(struct work_struct *work) 2448 { 2449 struct isert_conn *isert_conn = container_of(work, 2450 struct isert_conn, 2451 release_work); 2452 2453 isert_info("Starting release conn %p\n", isert_conn); 2454 2455 mutex_lock(&isert_conn->mutex); 2456 isert_conn->state = ISER_CONN_DOWN; 2457 mutex_unlock(&isert_conn->mutex); 2458 2459 isert_info("Destroying conn %p\n", isert_conn); 2460 isert_put_conn(isert_conn); 2461 } 2462 2463 static void 2464 isert_wait4logout(struct isert_conn *isert_conn) 2465 { 2466 struct iscsi_conn *conn = isert_conn->conn; 2467 2468 isert_info("conn %p\n", isert_conn); 2469 2470 if (isert_conn->logout_posted) { 2471 isert_info("conn %p wait for conn_logout_comp\n", isert_conn); 2472 wait_for_completion_timeout(&conn->conn_logout_comp, 2473 SECONDS_FOR_LOGOUT_COMP * HZ); 2474 } 2475 } 2476 2477 static void 2478 isert_wait4cmds(struct iscsi_conn *conn) 2479 { 2480 isert_info("iscsi_conn %p\n", conn); 2481 2482 if (conn->sess) { 2483 target_sess_cmd_list_set_waiting(conn->sess->se_sess); 2484 target_wait_for_sess_cmds(conn->sess->se_sess); 2485 } 2486 } 2487 2488 /** 2489 * isert_put_unsol_pending_cmds() - Drop commands waiting for 2490 * unsolicitate dataout 2491 * @conn: iscsi connection 2492 * 2493 * We might still have commands that are waiting for unsolicited 2494 * dataouts messages. We must put the extra reference on those 2495 * before blocking on the target_wait_for_session_cmds 2496 */ 2497 static void 2498 isert_put_unsol_pending_cmds(struct iscsi_conn *conn) 2499 { 2500 struct iscsi_cmd *cmd, *tmp; 2501 static LIST_HEAD(drop_cmd_list); 2502 2503 spin_lock_bh(&conn->cmd_lock); 2504 list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) { 2505 if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) && 2506 (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) && 2507 (cmd->write_data_done < cmd->se_cmd.data_length)) 2508 list_move_tail(&cmd->i_conn_node, &drop_cmd_list); 2509 } 2510 spin_unlock_bh(&conn->cmd_lock); 2511 2512 list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) { 2513 list_del_init(&cmd->i_conn_node); 2514 if (cmd->i_state != ISTATE_REMOVE) { 2515 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2516 2517 isert_info("conn %p dropping cmd %p\n", conn, cmd); 2518 isert_put_cmd(isert_cmd, true); 2519 } 2520 } 2521 } 2522 2523 static void isert_wait_conn(struct iscsi_conn *conn) 2524 { 2525 struct isert_conn *isert_conn = conn->context; 2526 2527 isert_info("Starting conn %p\n", isert_conn); 2528 2529 mutex_lock(&isert_conn->mutex); 2530 isert_conn_terminate(isert_conn); 2531 mutex_unlock(&isert_conn->mutex); 2532 2533 ib_drain_qp(isert_conn->qp); 2534 isert_put_unsol_pending_cmds(conn); 2535 isert_wait4cmds(conn); 2536 isert_wait4logout(isert_conn); 2537 2538 queue_work(isert_release_wq, &isert_conn->release_work); 2539 } 2540 2541 static void isert_free_conn(struct iscsi_conn *conn) 2542 { 2543 struct isert_conn *isert_conn = conn->context; 2544 2545 ib_drain_qp(isert_conn->qp); 2546 isert_put_conn(isert_conn); 2547 } 2548 2549 static void isert_get_rx_pdu(struct iscsi_conn *conn) 2550 { 2551 struct completion comp; 2552 2553 init_completion(&comp); 2554 2555 wait_for_completion_interruptible(&comp); 2556 } 2557 2558 static struct iscsit_transport iser_target_transport = { 2559 .name = "IB/iSER", 2560 .transport_type = ISCSI_INFINIBAND, 2561 .rdma_shutdown = true, 2562 .priv_size = sizeof(struct isert_cmd), 2563 .owner = THIS_MODULE, 2564 .iscsit_setup_np = isert_setup_np, 2565 .iscsit_accept_np = isert_accept_np, 2566 .iscsit_free_np = isert_free_np, 2567 .iscsit_wait_conn = isert_wait_conn, 2568 .iscsit_free_conn = isert_free_conn, 2569 .iscsit_get_login_rx = isert_get_login_rx, 2570 .iscsit_put_login_tx = isert_put_login_tx, 2571 .iscsit_immediate_queue = isert_immediate_queue, 2572 .iscsit_response_queue = isert_response_queue, 2573 .iscsit_get_dataout = isert_get_dataout, 2574 .iscsit_queue_data_in = isert_put_datain, 2575 .iscsit_queue_status = isert_put_response, 2576 .iscsit_aborted_task = isert_aborted_task, 2577 .iscsit_get_rx_pdu = isert_get_rx_pdu, 2578 .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops, 2579 }; 2580 2581 static int __init isert_init(void) 2582 { 2583 int ret; 2584 2585 isert_comp_wq = alloc_workqueue("isert_comp_wq", 2586 WQ_UNBOUND | WQ_HIGHPRI, 0); 2587 if (!isert_comp_wq) { 2588 isert_err("Unable to allocate isert_comp_wq\n"); 2589 return -ENOMEM; 2590 } 2591 2592 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND, 2593 WQ_UNBOUND_MAX_ACTIVE); 2594 if (!isert_release_wq) { 2595 isert_err("Unable to allocate isert_release_wq\n"); 2596 ret = -ENOMEM; 2597 goto destroy_comp_wq; 2598 } 2599 2600 iscsit_register_transport(&iser_target_transport); 2601 isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n"); 2602 2603 return 0; 2604 2605 destroy_comp_wq: 2606 destroy_workqueue(isert_comp_wq); 2607 2608 return ret; 2609 } 2610 2611 static void __exit isert_exit(void) 2612 { 2613 flush_scheduled_work(); 2614 destroy_workqueue(isert_release_wq); 2615 destroy_workqueue(isert_comp_wq); 2616 iscsit_unregister_transport(&iser_target_transport); 2617 isert_info("iSER_TARGET[0] - Released iser_target_transport\n"); 2618 } 2619 2620 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); 2621 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 2622 MODULE_LICENSE("GPL"); 2623 2624 module_init(isert_init); 2625 module_exit(isert_exit); 2626