// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cm.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "ib_isert.h"

static int isert_debug_level;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;

static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
isert_login_post_recv(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);

static void isert_release_work(struct work_struct *work);
static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);

static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
	return (conn->pi_support &&
		cmd->prot_op != TARGET_PROT_NORMAL);
}


static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = context;

	isert_err("%s (%d): conn %p\n",
		  ib_event_msg(e->event), e->event, isert_conn);

	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
		break;
	default:
		break;
	}
}

static struct ib_qp *
isert_create_qp(struct isert_conn *isert_conn,
		struct rdma_cm_id *cma_id)
{
	u32 cq_size = ISERT_QP_MAX_REQ_DTOS + ISERT_QP_MAX_RECV_DTOS + 2;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_qp_init_attr attr;
	int ret, factor;

	isert_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_WORKQUEUE);
	if (IS_ERR(isert_conn->cq)) {
		isert_err("Unable to allocate cq\n");
		ret = PTR_ERR(isert_conn->cq);
		return ERR_PTR(ret);
	}
	isert_conn->cq_size = cq_size;

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = isert_conn->cq;
	attr.recv_cq = isert_conn->cq;
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
	factor = rdma_rw_mr_factor(device->ib_device, cma_id->port_num,
				   ISCSI_ISER_MAX_SG_TABLESIZE);
	attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX * factor;
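	/*
	 * Sizing sketch (illustrative, not normative): the shared CQ is
	 * sized for one completion per posted send and recv slot plus two
	 * spare entries for the login exchange,
	 *
	 *	cq_size = ISERT_QP_MAX_REQ_DTOS + ISERT_QP_MAX_RECV_DTOS + 2,
	 *
	 * while rdma_rw_mr_factor() above estimates how many MRs a single
	 * rdma_rw context needs to map ISCSI_ISER_MAX_SG_TABLESIZE pages,
	 * so max_rdma_ctxs scales the per-session command depth
	 * (ISCSI_DEF_XMIT_CMDS_MAX) by that factor.
	 */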
	attr.cap.max_send_sge = device->ib_device->attrs.max_send_sge;
	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	if (device->pi_capable)
		attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;

	ret = rdma_create_qp(cma_id, device->pd, &attr);
	if (ret) {
		isert_err("rdma_create_qp failed for cma_id %d\n", ret);
		ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);

		return ERR_PTR(ret);
	}

	return cma_id->qp;
}

static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->rx_descs = kcalloc(ISERT_QP_MAX_RECV_DTOS,
				       sizeof(struct iser_rx_desc),
				       GFP_KERNEL);
	if (!isert_conn->rx_descs)
		return -ENOMEM;

	rx_desc = isert_conn->rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					     ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->pd->local_dma_lkey;
		rx_desc->rx_cqe.done = isert_recv_done;
	}

	return 0;

dma_map_fail:
	rx_desc = isert_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
	isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
	return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->rx_descs)
		return;

	rx_desc = isert_conn->rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
}
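/*
 * Note (summary, not from the original sources): each connection owns a
 * fixed ring of ISERT_QP_MAX_RECV_DTOS receive descriptors.  Every
 * descriptor is DMA-mapped once at allocation time and stays mapped for
 * the lifetime of the connection; completions only sync the buffer for
 * CPU access and repost it, so the map/unmap cost is paid exactly once.
 */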
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	int ret;

	isert_dbg("devattr->max_send_sge: %d devattr->max_recv_sge %d\n",
		  ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge);
	isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);

	device->pd = ib_alloc_pd(ib_dev, 0);
	if (IS_ERR(device->pd)) {
		ret = PTR_ERR(device->pd);
		isert_err("failed to allocate pd, device %p, ret=%d\n",
			  device, ret);
		return ret;
	}

	/* Check signature cap */
	device->pi_capable = ib_dev->attrs.device_cap_flags &
			     IB_DEVICE_INTEGRITY_HANDOVER ? true : false;

	return 0;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
	isert_info("device %p\n", device);

	ib_dealloc_pd(device->pd);
}

static void
isert_device_put(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	isert_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}

static struct isert_device *
isert_device_get(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			isert_info("Found iser device %p refcount %d\n",
				   device, device->refcount);
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	isert_info("Created a new iser device %p refcount %d\n",
		   device, device->refcount);
	mutex_unlock(&device_list_mutex);

	return device;
}
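/*
 * Note (summary, not from the original sources): isert_device_get()/put()
 * implement a simple global cache keyed by node_guid, so all connections
 * arriving on the same HCA share one PD and one pi_capable flag.  The
 * plain integer refcount is safe only because every access happens under
 * device_list_mutex.
 */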
static void
isert_init_conn(struct isert_conn *isert_conn)
{
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->node);
	init_completion(&isert_conn->login_comp);
	init_completion(&isert_conn->login_req_comp);
	init_waitqueue_head(&isert_conn->rem_wait);
	kref_init(&isert_conn->kref);
	mutex_init(&isert_conn->mutex);
	INIT_WORK(&isert_conn->release_work, isert_release_work);
}

static void
isert_free_login_buf(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;

	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
	kfree(isert_conn->login_rsp_buf);

	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISER_RX_PAYLOAD_SIZE,
			    DMA_FROM_DEVICE);
	kfree(isert_conn->login_req_buf);
}

static int
isert_alloc_login_buf(struct isert_conn *isert_conn,
		      struct ib_device *ib_dev)
{
	int ret;

	isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
					    GFP_KERNEL);
	if (!isert_conn->login_req_buf)
		return -ENOMEM;

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				isert_conn->login_req_buf,
				ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		isert_err("login_req_dma mapping error: %d\n", ret);
		isert_conn->login_req_dma = 0;
		goto out_free_login_req_buf;
	}

	isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
	if (!isert_conn->login_rsp_buf) {
		ret = -ENOMEM;
		goto out_unmap_login_req_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
				isert_conn->login_rsp_buf,
				ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		isert_err("login_rsp_dma mapping error: %d\n", ret);
		isert_conn->login_rsp_dma = 0;
		goto out_free_login_rsp_buf;
	}

	return 0;

out_free_login_rsp_buf:
	kfree(isert_conn->login_rsp_buf);
out_unmap_login_req_buf:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
out_free_login_req_buf:
	kfree(isert_conn->login_req_buf);
	return ret;
}

static void
isert_set_nego_params(struct isert_conn *isert_conn,
		      struct rdma_conn_param *param)
{
	struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs;

	/* Set max inflight RDMA READ requests */
	isert_conn->initiator_depth = min_t(u8, param->initiator_depth,
					    attr->max_qp_init_rd_atom);
	isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);

	if (param->private_data) {
		u8 flags = *(u8 *)param->private_data;

		/*
		 * Use remote invalidation if both the initiator
		 * and the HCA support it.
		 */
		isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) &&
					(attr->device_cap_flags &
					 IB_DEVICE_MEM_MGT_EXTENSIONS);
		if (isert_conn->snd_w_inv)
			isert_info("Using remote invalidation\n");
	}
}

static void
isert_destroy_qp(struct isert_conn *isert_conn)
{
	ib_destroy_qp(isert_conn->qp);
	ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);
}

static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	int ret = 0;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
	}
	spin_unlock_bh(&np->np_thread_lock);

	isert_dbg("cma_id: %p, portal: %p\n",
		  cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn)
		return -ENOMEM;

	isert_init_conn(isert_conn);
	isert_conn->cm_id = cma_id;

	ret = isert_alloc_login_buf(isert_conn, cma_id->device);
	if (ret)
		goto out;

	device = isert_device_get(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}
	isert_conn->device = device;

	isert_set_nego_params(isert_conn, &event->param.conn);

	isert_conn->qp = isert_create_qp(isert_conn, cma_id);
	if (IS_ERR(isert_conn->qp)) {
		ret = PTR_ERR(isert_conn->qp);
		goto out_conn_dev;
	}

	ret = isert_login_post_recv(isert_conn);
	if (ret)
		goto out_destroy_qp;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		goto out_destroy_qp;

	mutex_lock(&isert_np->mutex);
	list_add_tail(&isert_conn->node, &isert_np->accepted);
	mutex_unlock(&isert_np->mutex);

	return 0;

out_destroy_qp:
	isert_destroy_qp(isert_conn);
out_conn_dev:
	isert_device_put(device);
out_rsp_dma_map:
	isert_free_login_buf(isert_conn);
out:
	kfree(isert_conn);
	rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
	return ret;
}
static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;

	isert_dbg("conn %p\n", isert_conn);

	BUG_ON(!device);

	isert_free_rx_descriptors(isert_conn);
	if (isert_conn->cm_id &&
	    !isert_conn->dev_removed)
		rdma_destroy_id(isert_conn->cm_id);

	if (isert_conn->qp)
		isert_destroy_qp(isert_conn);

	if (isert_conn->login_req_buf)
		isert_free_login_buf(isert_conn);

	isert_device_put(device);

	if (isert_conn->dev_removed)
		wake_up_interruptible(&isert_conn->rem_wait);
	else
		kfree(isert_conn);
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;
	struct isert_np *isert_np = cma_id->context;

	isert_info("conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_UP;
	kref_get(&isert_conn->kref);
	mutex_unlock(&isert_conn->mutex);

	mutex_lock(&isert_np->mutex);
	list_move_tail(&isert_conn->node, &isert_np->pending);
	mutex_unlock(&isert_np->mutex);

	isert_info("np %p: Allow accept_np to continue\n", isert_np);
	up(&isert_np->sem);
}

static void
isert_release_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, kref);

	isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
		   current->pid);

	isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->kref, isert_release_kref);
}

static void
isert_handle_unbound_conn(struct isert_conn *isert_conn)
{
	struct isert_np *isert_np = isert_conn->cm_id->context;

	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_conn->node)) {
		/*
		 * This means iscsi doesn't know this connection
		 * so schedule a cleanup ourselves
		 */
		list_del_init(&isert_conn->node);
		isert_put_conn(isert_conn);
		queue_work(isert_release_wq, &isert_conn->release_work);
	}
	mutex_unlock(&isert_np->mutex);
}
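/*
 * Note (summary, not from the original sources): an established
 * connection holds two kref references - the initial one from
 * kref_init() in isert_init_conn() and a second one taken in
 * isert_connected_handler().  Teardown therefore needs two matching
 * isert_put_conn() calls before isert_release_kref() finally runs
 * isert_connect_release().
 */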
/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is BOUND, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
	int err;

	if (isert_conn->state >= ISER_CONN_TERMINATING)
		return;

	isert_info("Terminating conn %p state %d\n",
		   isert_conn, isert_conn->state);
	isert_conn->state = ISER_CONN_TERMINATING;
	err = rdma_disconnect(isert_conn->cm_id);
	if (err)
		isert_warn("Failed rdma_disconnect isert_conn %p\n",
			   isert_conn);
}

static int
isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
{
	isert_dbg("%s (%d): isert np %p\n",
		  rdma_event_msg(event), event, isert_np);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_np->cm_id = NULL;
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		isert_np->cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->cm_id)) {
			isert_err("isert np %p setup id failed: %ld\n",
				  isert_np, PTR_ERR(isert_np->cm_id));
			isert_np->cm_id = NULL;
		}
		break;
	default:
		isert_err("isert np %p Unexpected event %d\n",
			  isert_np, event);
	}

	return -1;
}

static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
			   enum rdma_cm_event_type event)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	mutex_lock(&isert_conn->mutex);
	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
		break;
	case ISER_CONN_UP:
		isert_conn_terminate(isert_conn);
		ib_drain_qp(isert_conn->qp);
		isert_handle_unbound_conn(isert_conn);
		break;
	case ISER_CONN_BOUND:
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		break;
	default:
		isert_warn("conn %p terminating in state %d\n",
			   isert_conn, isert_conn->state);
	}
	mutex_unlock(&isert_conn->mutex);

	return 0;
}

static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	ib_drain_qp(isert_conn->qp);
	list_del_init(&isert_conn->node);
	isert_conn->cm_id = NULL;
	isert_put_conn(isert_conn);

	return -1;
}
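/*
 * Dispatch sketch (illustrative, not normative): events arriving on the
 * listener id (isert_np->cm_id) go to isert_np_cma_handler(); everything
 * else belongs to a per-connection id, where CONNECT_REQUEST creates the
 * connection, ESTABLISHED moves it to ISER_CONN_UP, and the
 * DISCONNECTED/TIMEWAIT_EXIT/ADDR_CHANGE family funnels into
 * isert_disconnected_handler() below.
 */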
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct isert_conn *isert_conn;
	int ret = 0;

	isert_info("%s (%d): status %d id %p np %p\n",
		   rdma_event_msg(event->event), event->event,
		   event->status, cma_id, cma_id->context);

	if (isert_np->cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event->event);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			isert_err("failed handle connect request %d\n", ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id, event->event);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_conn = cma_id->qp->qp_context;
		isert_conn->dev_removed = true;
		isert_disconnected_handler(cma_id, event->event);
		wait_event_interruptible(isert_conn->rem_wait,
					 isert_conn->state == ISER_CONN_DOWN);
		kfree(isert_conn);
		/*
		 * return non-zero from the callback to destroy
		 * the rdma cm id
		 */
		return 1;
	case RDMA_CM_EVENT_REJECTED:
		isert_info("Connection rejected: %s\n",
			   rdma_reject_msg(cma_id, event->status));
		/* fall through */
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ret = isert_connect_error(cma_id);
		break;
	default:
		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}

static int
isert_post_recvm(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr;
	int i, ret;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->rx_descs[i];

		rx_wr->wr_cqe = &rx_desc->rx_cqe;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		rx_desc->in_use = false;
	}
	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, NULL);
	if (ret)
		isert_err("ib_post_recv() failed with ret: %d\n", ret);

	return ret;
}

static int
isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
{
	struct ib_recv_wr rx_wr;
	int ret;

	if (!rx_desc->in_use) {
		/*
		 * if the descriptor is not in-use we already reposted it
		 * for recv, so just silently return
		 */
		return 0;
	}

	rx_desc->in_use = false;
	rx_wr.wr_cqe = &rx_desc->rx_cqe;
	rx_wr.sg_list = &rx_desc->rx_sg;
	rx_wr.num_sge = 1;
	rx_wr.next = NULL;

	ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
	if (ret)
		isert_err("ib_post_recv() failed with ret: %d\n", ret);

	return ret;
}

static int
isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct ib_send_wr send_wr;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	tx_desc->tx_cqe.done = isert_login_send_done;

	send_wr.next = NULL;
	send_wr.wr_cqe = &tx_desc->tx_cqe;
	send_wr.sg_list = tx_desc->tx_sg;
	send_wr.num_sge = tx_desc->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(isert_conn->qp, &send_wr, NULL);
	if (ret)
		isert_err("ib_post_send() failed, ret: %d\n", ret);

	return ret;
}

static void
__isert_create_send_desc(struct isert_device *device,
			 struct iser_tx_desc *tx_desc)
{

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
	tx_desc->iser_header.flags = ISCSI_CTRL;

	tx_desc->num_sge = 1;

	if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
		tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
		isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	__isert_create_send_desc(device, tx_desc);
}
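/*
 * Note (summary, not from the original sources): a tx descriptor carries
 * the iSER header and the iSCSI header back to back, so only
 * ISER_HEADERS_LEN bytes are DMA-mapped here and described by tx_sg[0];
 * any PDU payload is attached separately as tx_sg[1] by the callers.
 */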
static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		isert_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;

	isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
		  tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
		  tx_desc->tx_sg[0].lkey);

	return 0;
}

static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct ib_send_wr *send_wr)
{
	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

	tx_desc->tx_cqe.done = isert_send_done;
	send_wr->wr_cqe = &tx_desc->tx_cqe;

	if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
		send_wr->opcode = IB_WR_SEND_WITH_INV;
		send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey;
	} else {
		send_wr->opcode = IB_WR_SEND;
	}

	send_wr->sg_list = &tx_desc->tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
	send_wr->send_flags = IB_SEND_SIGNALED;
}

static int
isert_login_post_recv(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_PAYLOAD_SIZE;
	sge.lkey = isert_conn->device->pd->local_dma_lkey;

	isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
		  sge.addr, sge.length, sge.lkey);

	isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done;

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
	if (ret)
		isert_err("ib_post_recv() failed: %d\n", ret);

	return ret;
}
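/*
 * Login flow sketch (illustrative, not normative): each login PDU round
 * trip uses the dedicated login_req/login_rsp buffers.  Only once
 * login_complete is signalled does isert_put_login_tx() below allocate
 * the full recv-descriptor ring, post all ISERT_QP_MAX_RECV_DTOS recv
 * WRs, and flip the connection into ISER_CONN_FULL_FEATURE before
 * sending the final login response.
 */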
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
	int ret;

	__isert_create_send_desc(device, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recvm(isert_conn,
					       ISERT_QP_MAX_RECV_DTOS);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->mutex);
			goto post_send;
		}

		ret = isert_login_post_recv(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_login_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}

static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
	struct iser_rx_desc *rx_desc = isert_conn->login_req_buf;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	isert_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	isert_dbg("Using login payload size: %d, rx_buflen: %d "
		  "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
		  MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		complete(&isert_conn->login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}

static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_cmd *isert_cmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
		isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
		return NULL;
	}
	isert_cmd = iscsit_priv_cmd(cmd);
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd = cmd;
	isert_cmd->rx_desc = rx_desc;

	return cmd;
}
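/*
 * Immediate-data sketch (illustrative, not normative): if a command
 * arrives with all of its write data as immediate data
 * (imm_data_len == data_len), isert_handle_scsi_cmd() below points
 * se_cmd at the rx descriptor's payload in place
 * (SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC); otherwise the immediate portion
 * is copied into the command's scatterlist and the remainder is expected
 * as unsolicited data-out PDUs.
 */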
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;
	unsigned int data_len;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;
	data_len = cmd->se_cmd.data_length;

	if (imm_data && imm_data_len == data_len)
		cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	if (imm_data_len != data_len) {
		sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
		sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
				    &rx_desc->data[0], imm_data_len);
		isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
			  sg_nents, imm_data_len);
	} else {
		sg_init_table(&isert_cmd->sg, 1);
		cmd->se_cmd.t_data_sg = &isert_cmd->sg;
		cmd->se_cmd.t_data_nents = 1;
		sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
		isert_dbg("Transfer Immediate imm_data_len: %d\n",
			  imm_data_len);
	}

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsolicited_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(&cmd->se_cmd);

	return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		isert_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
		  "write_data_done: %u, data_length: %u\n",
		  unsol_data_len, cmd->write_data_done,
		  cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		isert_err("unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
		  "sg_nents: %u from %p %u\n", sg_start, sg_off,
		  sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	/*
	 * multiple data-outs on the same command can arrive -
	 * so post the buffer beforehand
	 */
	rc = isert_post_recv(isert_conn, rx_desc);
	if (rc) {
		isert_err("ib_post_recv failed with %d\n", rc);
		return rc;
	}
	return 0;
}
static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		     unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */

	return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
{
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in = NULL;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	if (payload_length) {
		text_in = kzalloc(payload_length, GFP_KERNEL);
		if (!text_in)
			return -ENOMEM;
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}
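/*
 * Dispatch note (summary, not from the original sources): the iSER header
 * carries the initiator's advertised read/write stags and virtual
 * addresses; isert_rx_opcode() below stashes them in the per-command
 * isert_cmd so the RDMA R/W machinery can use them later.  When remote
 * invalidation was negotiated, inv_rkey picks whichever stag is in use so
 * the final SEND can carry IB_WR_SEND_WITH_INV.
 */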
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	if (conn->sess->sess_ops->SessionType &&
	   (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
		isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
			  " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;
		isert_cmd->inv_rkey = read_stag ? read_stag : write_stag;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		break;
	case ISCSI_OP_TEXT:
		if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
		else
			cmd = isert_allocate_cmd(conn, rx_desc);

		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}

static void
isert_print_wc(struct ib_wc *wc, const char *type)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		isert_err("%s failure: %s (%d) vend_err %x\n", type,
			  ib_wc_status_msg(wc->status), wc->status,
			  wc->vendor_err);
	else
		isert_dbg("%s failure: %s (%d)\n", type,
			  ib_wc_status_msg(wc->status), wc->status);
}
static void
isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iser_ctrl *iser_ctrl = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "recv");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		return;
	}

	rx_desc->in_use = true;

	ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
				   ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);

	isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		  rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
		  (int)(wc->byte_len - ISER_HEADERS_LEN));

	switch (iser_ctrl->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_ctrl->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_ctrl->read_stag);
			read_va = be64_to_cpu(iser_ctrl->read_va);
			isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
				  read_stag, (unsigned long long)read_va);
		}
		if (iser_ctrl->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_ctrl->write_stag);
			write_va = be64_to_cpu(iser_ctrl->write_va);
			isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
				  write_stag, (unsigned long long)write_va);
		}

		isert_dbg("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		isert_err("iSER Hello message\n");
		break;
	default:
		isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags);
		break;
	}

	isert_rx_opcode(isert_conn, rx_desc,
			read_stag, read_va, write_stag, write_va);

	ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
				      ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}

static void
isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->device->ib_device;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "login recv");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
				   ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);

	isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;

	if (isert_conn->conn) {
		struct iscsi_login *login = isert_conn->conn->conn_login;

		if (login && !login->first_request)
			isert_rx_login_req(isert_conn);
	}

	mutex_lock(&isert_conn->mutex);
	complete(&isert_conn->login_req_comp);
	mutex_unlock(&isert_conn->mutex);

	ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
				      ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}

static void
isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
{
	struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);

	if (!cmd->rw.nr_ops)
		return;

	if (isert_prot_cmd(conn, se_cmd)) {
		rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp,
				conn->cm_id->port_num, se_cmd->t_data_sg,
				se_cmd->t_data_nents, se_cmd->t_prot_sg,
				se_cmd->t_prot_nents, dir);
	} else {
		rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
	}

	cmd->rw.nr_ops = 0;
}
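/*
 * Release-path note (summary, not from the original sources):
 * isert_put_cmd() below picks the teardown route by opcode - SCSI and
 * TMR commands go back through transport_generic_free_cmd(), while
 * NOP-OUT/REJECT/TEXT either stay alive (text continue bit) or fall back
 * to iscsit_release_cmd() when no se_cmd was ever attached.
 */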
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_text_rsp *hdr;

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd);
			}
		}

		isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
		/* If the continue bit is on, keep the command alive */
		if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
			break;

		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
				  cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/* fall through */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}

static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		isert_dbg("unmap single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}

static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd, comp_err);
}
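/*
 * T10-PI note (summary, not from the original sources): on a signature
 * error ib_check_mr_status() reports a byte offset into the protected
 * region; dividing by (block_size + 8) - a data block plus its 8-byte
 * DIF tuple - converts it to a block index, which added to t_task_lba
 * yields the bad_sector reported back to the initiator.
 */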
static int
isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		isert_err("ib_check_mr_status failed, ret %d\n", ret);
		goto fail_mr_status;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		u64 sec_offset_err;
		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;

		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
			break;
		case IB_SIG_BAD_REFTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
			break;
		case IB_SIG_BAD_APPTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
			break;
		}
		sec_offset_err = mr_status.sig_err.sig_err_offset;
		do_div(sec_offset_err, block_size);
		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;

		isert_err("PI error found type %d at sector 0x%llx "
			  "expected 0x%x vs actual 0x%x\n",
			  mr_status.sig_err.err_type,
			  (unsigned long long)se_cmd->bad_sector,
			  mr_status.sig_err.expected,
			  mr_status.sig_err.actual);
		ret = 1;
	}

fail_mr_status:
	return ret;
}

static void
isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct isert_device *device = isert_conn->device;
	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
	struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "rdma write");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(desc, isert_cmd, device->ib_device, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	ret = isert_check_pi_status(cmd, isert_cmd->rw.reg->mr);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);

	if (ret) {
		/*
		 * transport_generic_request_failure() expects two extra
		 * references to handle queue-full, so re-add one here as
		 * target-core will have already dropped it after the first
		 * isert_put_datain() callback.
		 */
		kref_get(&cmd->cmd_kref);
		transport_generic_request_failure(cmd, cmd->pi_err);
	} else {
		/*
		 * XXX: isert_put_response() failure is not retried.
		 */
		ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
		if (ret)
			pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
	}
}
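/*
 * Read-completion note (summary, not from the original sources): a
 * finished RDMA_READ means all write data has landed in the command's
 * scatterlist, so isert_rdma_read_done() below marks the final data-out
 * as received and hands the command to target_execute_cmd(); on a PI
 * failure it routes through transport_generic_request_failure() instead.
 */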
static void
isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct isert_device *device = isert_conn->device;
	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	int ret = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "rdma read");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(desc, isert_cmd, device->ib_device, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	iscsit_stop_dataout_timer(cmd);

	if (isert_prot_cmd(isert_conn, se_cmd))
		ret = isert_check_pi_status(se_cmd, isert_cmd->rw.reg->mr);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
	cmd->write_data_done = 0;

	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	/*
	 * transport_generic_request_failure() will drop the extra
	 * se_cmd->cmd_kref reference after T10-PI error, and handle
	 * any non-zero ->queue_status() callback error retries.
	 */
	if (ret)
		transport_generic_request_failure(se_cmd, se_cmd->pi_err);
	else
		target_execute_cmd(se_cmd);
}

static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		iscsit_tmr_post_handler(cmd, cmd->conn);
		/* fall through */
	case ISTATE_SEND_REJECT:
	case ISTATE_SEND_TEXTRSP:
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
				     ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	default:
		isert_err("Unknown i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}

static void
isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "login send");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
}

static void
isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "send");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (isert_cmd->iscsi_cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
	case ISTATE_SEND_LOGOUTRSP:
	case ISTATE_SEND_REJECT:
	case ISTATE_SEND_TEXTRSP:
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	default:
		isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
		break;
	}
}

static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	int ret;

	ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
	if (ret) {
		isert_err("ib_post_recv failed with %d\n", ret);
		return ret;
	}

	ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, NULL);
	if (ret) {
		isert_err("ib_post_send failed with %d\n", ret);
		return ret;
	}
	return ret;
}
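/*
 * Response note (summary, not from the original sources): every
 * isert_put_* path reposts the command's rx descriptor before sending
 * the response (see isert_post_response() above), so a recv buffer is
 * back on the queue before the initiator can react to the reply.  For
 * check conditions, isert_put_response() below prepends the 2-byte
 * SenseLength, pads the sense bytes to a 4-byte boundary, and maps the
 * result as the second SGE of the SEND.
 */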
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
			return -ENOMEM;

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = pdu_len;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("Posting SCSI Response\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static void
isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;

	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	if (cmd->data_direction == DMA_TO_DEVICE)
		iscsit_stop_dataout_timer(cmd);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
}

static enum target_prot_op
isert_get_sup_prot_ops(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	if (conn->tpg->tpg_attrib.t10_pi) {
		if (device->pi_capable) {
			isert_info("conn %p PI offload enabled\n", isert_conn);
			isert_conn->pi_support = true;
			return TARGET_PROT_ALL;
		}
	}

	isert_info("conn %p PI offload disabled\n", isert_conn);
	isert_conn->pi_support = false;

	return TARGET_PROT_NORMAL;
}
static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Logout Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Task Management Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
		return -ENOMEM;
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr = isert_cmd->pdu_buf_dma;
	tx_dsg->length = ISCSI_HDR_LEN;
	tx_dsg->lkey = device->pd->local_dma_lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Reject\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
			return -ENOMEM;

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = txt_rsp_len;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Text Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
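/*
 * DIF-domain note (summary, not from the original sources): a signature
 * MR describes two domains - "wire" (what travels on the network) and
 * "mem" (what sits in local buffers).  INSERT/STRIP operations set one
 * side to IB_SIG_TYPE_NONE so the HCA adds or removes the 8-byte DIF
 * tuple in flight, while PASS checks protection on both sides.
 */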
static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
			return -ENOMEM;

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = txt_rsp_len;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Text Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static inline void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
	/*
	 * At the moment we hard code those, but if in the future
	 * the target core wants to use them, we will take them
	 * from se_cmd.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
		domain->sig.dif.ref_remap = true;
}

static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
	memset(sig_attrs, 0, sizeof(*sig_attrs));

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, &sig_attrs->wire);
		break;
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, &sig_attrs->mem);
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		isert_set_dif_domain(se_cmd, &sig_attrs->wire);
		isert_set_dif_domain(se_cmd, &sig_attrs->mem);
		break;
	default:
		isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	if (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)
		sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
	if (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)
		sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
	if (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG)
		sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;

	return 0;
}
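/*
 * isert_rdma_rw_ctx_post() - Initialize the rdma_rw context once per
 * command and post it. ->ctx_init_done lets retransmissions skip
 * straight to rdma_rw_ctx_post(). Data-Out (SCSI WRITE) reads from
 * the initiator's write VA/STag, offset by any unsolicited data
 * already received; Data-In (SCSI READ) writes to the read VA/STag.
 * T10-PI commands take the rdma_rw_ctx_signature_init() path instead.
 */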
static int
isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
		       struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
	u8 port_num = conn->cm_id->port_num;
	u64 addr;
	u32 rkey, offset;
	int ret;

	if (cmd->ctx_init_done)
		goto rdma_ctx_post;

	if (dir == DMA_FROM_DEVICE) {
		addr = cmd->write_va;
		rkey = cmd->write_stag;
		offset = cmd->iscsi_cmd->write_data_done;
	} else {
		addr = cmd->read_va;
		rkey = cmd->read_stag;
		offset = 0;
	}

	if (isert_prot_cmd(conn, se_cmd)) {
		struct ib_sig_attrs sig_attrs;

		ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
		if (ret)
			return ret;

		WARN_ON_ONCE(offset);
		ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents,
				se_cmd->t_prot_sg, se_cmd->t_prot_nents,
				&sig_attrs, addr, rkey, dir);
	} else {
		ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents,
				offset, addr, rkey, dir);
	}

	if (ret < 0) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
		return ret;
	}

	cmd->ctx_init_done = true;

rdma_ctx_post:
	ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
	if (ret < 0)
		isert_err("Cmd: %p failed to post RDMA res\n", cmd);
	return ret;
}

static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_cqe *cqe = NULL;
	struct ib_send_wr *chain_wr = NULL;
	int rc;

	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
		  isert_cmd, se_cmd->data_length);

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
		cqe = &isert_cmd->tx_desc.tx_cqe;
	} else {
		/*
		 * Build isert_cmd->tx_desc for the iSCSI response PDU and
		 * chain it behind the RDMA_WRITE work requests.
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr);

		rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
		if (rc) {
			isert_err("ib_post_recv failed with %d\n", rc);
			return rc;
		}

		chain_wr = &isert_cmd->tx_desc.send_wr;
	}

	rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
	isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
		  isert_cmd, rc);
	return rc;
}

static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	int ret;

	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		  isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);

	isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
	ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
				     &isert_cmd->tx_desc.tx_cqe, NULL);

	isert_dbg("Cmd: %p posted RDMA_READ memory for iSER Data WRITE rc: %d\n",
		  isert_cmd, ret);
	return ret;
}

static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	int ret = 0;

	switch (state) {
	case ISTATE_REMOVE:
		spin_lock_bh(&conn->cmd_lock);
		list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);
		isert_put_cmd(isert_cmd, true);
		break;
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		isert_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
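/*
 * isert_response_queue() - Dispatch iscsit response states to the
 * matching iSER TX handler. Only a successfully posted logout
 * response sets ->logout_posted, which isert_wait4logout() checks
 * at connection shutdown.
 */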
static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret)
			isert_conn->logout_posted = true;
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non-GOOD SCSI status from TX
		 * thread context during pre-se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		isert_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;

	sa = (struct sockaddr *)&np->np_sockaddr;
	isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);

	id = rdma_create_id(&init_net, isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
	isert_dbg("id %p context %p\n", id, id->context);

	ret = rdma_bind_addr(id, sa);
	if (ret) {
		isert_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}

	ret = rdma_listen(id, 0);
	if (ret) {
		isert_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}
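/*
 * isert_setup_np() - Allocate the per-portal isert_np context and
 * start a listening CM ID on the portal address. The semaphore
 * starts at zero; the connect-request handler is expected to up()
 * it once per incoming connection so that isert_accept_np() below
 * can sleep on it.
 */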
static int
isert_setup_np(struct iscsi_np *np,
	       struct sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np)
		return -ENOMEM;

	sema_init(&isert_np->sem, 0);
	mutex_init(&isert_np->mutex);
	INIT_LIST_HEAD(&isert_np->accepted);
	INIT_LIST_HEAD(&isert_np->pending);
	isert_np->np = np;

	/*
	 * Set up np->np_sockaddr from the sockaddr passed in from the
	 * iscsi_target_configfs.c code.
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}

static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_conn_param cp;
	struct iser_cm_hdr rsp_hdr;
	int ret;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	memset(&rsp_hdr, 0, sizeof(rsp_hdr));
	rsp_hdr.flags = ISERT_ZBVA_NOT_USED;
	if (!isert_conn->snd_w_inv)
		rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED;
	cp.private_data = (void *)&rsp_hdr;
	cp.private_data_len = sizeof(rsp_hdr);

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		isert_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	return 0;
}
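/*
 * isert_get_login_rx() - Wait for the next login request PDU on
 * behalf of the iscsit login thread. Only the first request is
 * processed here via isert_rx_login_req(); later PDUs are handled
 * directly from the receive path, as the comment below explains.
 */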
2327 */ 2328 if (!login->first_request) 2329 return 0; 2330 2331 isert_rx_login_req(isert_conn); 2332 2333 isert_info("before login_comp conn: %p\n", conn); 2334 ret = wait_for_completion_interruptible(&isert_conn->login_comp); 2335 if (ret) 2336 return ret; 2337 2338 isert_info("processing login->req: %p\n", login->req); 2339 2340 return 0; 2341 } 2342 2343 static void 2344 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, 2345 struct isert_conn *isert_conn) 2346 { 2347 struct rdma_cm_id *cm_id = isert_conn->cm_id; 2348 struct rdma_route *cm_route = &cm_id->route; 2349 2350 conn->login_family = np->np_sockaddr.ss_family; 2351 2352 conn->login_sockaddr = cm_route->addr.dst_addr; 2353 conn->local_sockaddr = cm_route->addr.src_addr; 2354 } 2355 2356 static int 2357 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) 2358 { 2359 struct isert_np *isert_np = np->np_context; 2360 struct isert_conn *isert_conn; 2361 int ret; 2362 2363 accept_wait: 2364 ret = down_interruptible(&isert_np->sem); 2365 if (ret) 2366 return -ENODEV; 2367 2368 spin_lock_bh(&np->np_thread_lock); 2369 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { 2370 spin_unlock_bh(&np->np_thread_lock); 2371 isert_dbg("np_thread_state %d\n", 2372 np->np_thread_state); 2373 /** 2374 * No point in stalling here when np_thread 2375 * is in state RESET/SHUTDOWN/EXIT - bail 2376 **/ 2377 return -ENODEV; 2378 } 2379 spin_unlock_bh(&np->np_thread_lock); 2380 2381 mutex_lock(&isert_np->mutex); 2382 if (list_empty(&isert_np->pending)) { 2383 mutex_unlock(&isert_np->mutex); 2384 goto accept_wait; 2385 } 2386 isert_conn = list_first_entry(&isert_np->pending, 2387 struct isert_conn, node); 2388 list_del_init(&isert_conn->node); 2389 mutex_unlock(&isert_np->mutex); 2390 2391 conn->context = isert_conn; 2392 isert_conn->conn = conn; 2393 isert_conn->state = ISER_CONN_BOUND; 2394 2395 isert_set_conn_info(np, conn, isert_conn); 2396 2397 isert_dbg("Processing isert_conn: %p\n", isert_conn); 2398 2399 return 0; 2400 } 2401 2402 static void 2403 isert_free_np(struct iscsi_np *np) 2404 { 2405 struct isert_np *isert_np = np->np_context; 2406 struct isert_conn *isert_conn, *n; 2407 2408 if (isert_np->cm_id) 2409 rdma_destroy_id(isert_np->cm_id); 2410 2411 /* 2412 * FIXME: At this point we don't have a good way to insure 2413 * that at this point we don't have hanging connections that 2414 * completed RDMA establishment but didn't start iscsi login 2415 * process. So work-around this by cleaning up what ever piled 2416 * up in accepted and pending lists. 
2417 */ 2418 mutex_lock(&isert_np->mutex); 2419 if (!list_empty(&isert_np->pending)) { 2420 isert_info("Still have isert pending connections\n"); 2421 list_for_each_entry_safe(isert_conn, n, 2422 &isert_np->pending, 2423 node) { 2424 isert_info("cleaning isert_conn %p state (%d)\n", 2425 isert_conn, isert_conn->state); 2426 isert_connect_release(isert_conn); 2427 } 2428 } 2429 2430 if (!list_empty(&isert_np->accepted)) { 2431 isert_info("Still have isert accepted connections\n"); 2432 list_for_each_entry_safe(isert_conn, n, 2433 &isert_np->accepted, 2434 node) { 2435 isert_info("cleaning isert_conn %p state (%d)\n", 2436 isert_conn, isert_conn->state); 2437 isert_connect_release(isert_conn); 2438 } 2439 } 2440 mutex_unlock(&isert_np->mutex); 2441 2442 np->np_context = NULL; 2443 kfree(isert_np); 2444 } 2445 2446 static void isert_release_work(struct work_struct *work) 2447 { 2448 struct isert_conn *isert_conn = container_of(work, 2449 struct isert_conn, 2450 release_work); 2451 2452 isert_info("Starting release conn %p\n", isert_conn); 2453 2454 mutex_lock(&isert_conn->mutex); 2455 isert_conn->state = ISER_CONN_DOWN; 2456 mutex_unlock(&isert_conn->mutex); 2457 2458 isert_info("Destroying conn %p\n", isert_conn); 2459 isert_put_conn(isert_conn); 2460 } 2461 2462 static void 2463 isert_wait4logout(struct isert_conn *isert_conn) 2464 { 2465 struct iscsi_conn *conn = isert_conn->conn; 2466 2467 isert_info("conn %p\n", isert_conn); 2468 2469 if (isert_conn->logout_posted) { 2470 isert_info("conn %p wait for conn_logout_comp\n", isert_conn); 2471 wait_for_completion_timeout(&conn->conn_logout_comp, 2472 SECONDS_FOR_LOGOUT_COMP * HZ); 2473 } 2474 } 2475 2476 static void 2477 isert_wait4cmds(struct iscsi_conn *conn) 2478 { 2479 isert_info("iscsi_conn %p\n", conn); 2480 2481 if (conn->sess) { 2482 target_sess_cmd_list_set_waiting(conn->sess->se_sess); 2483 target_wait_for_sess_cmds(conn->sess->se_sess); 2484 } 2485 } 2486 2487 /** 2488 * isert_put_unsol_pending_cmds() - Drop commands waiting for 2489 * unsolicitate dataout 2490 * @conn: iscsi connection 2491 * 2492 * We might still have commands that are waiting for unsolicited 2493 * dataouts messages. 
static void
isert_wait4cmds(struct iscsi_conn *conn)
{
	isert_info("iscsi_conn %p\n", conn);

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}
}

/**
 * isert_put_unsol_pending_cmds() - Drop commands waiting for
 *     unsolicited data-out
 * @conn: iscsi connection
 *
 * We might still have commands that are waiting for unsolicited
 * data-out messages. We must put the extra reference on those
 * before blocking on target_wait_for_sess_cmds().
 */
static void
isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
{
	struct iscsi_cmd *cmd, *tmp;
	static LIST_HEAD(drop_cmd_list);

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
		if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
		    (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
		    (cmd->write_data_done < cmd->se_cmd.data_length))
			list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
	}
	spin_unlock_bh(&conn->cmd_lock);

	list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
		list_del_init(&cmd->i_conn_node);
		if (cmd->i_state != ISTATE_REMOVE) {
			struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);

			isert_info("conn %p dropping cmd %p\n", conn, cmd);
			isert_put_cmd(isert_cmd, true);
		}
	}
}

static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_info("Starting conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	ib_drain_qp(isert_conn->qp);
	isert_put_unsol_pending_cmds(conn);
	isert_wait4cmds(conn);
	isert_wait4logout(isert_conn);

	queue_work(isert_release_wq, &isert_conn->release_work);
}

static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	ib_drain_qp(isert_conn->qp);
	isert_put_conn(isert_conn);
}

static void isert_get_rx_pdu(struct iscsi_conn *conn)
{
	struct completion comp;

	/*
	 * iSER delivers PDUs from the receive completion path, so there
	 * is nothing for the rx thread to fetch here; sleep on a local
	 * completion that is never signalled until the thread is
	 * interrupted at connection shutdown.
	 */
	init_completion(&comp);

	wait_for_completion_interruptible(&comp);
}

static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.rdma_shutdown		= true,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_wait_conn	= isert_wait_conn,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
	.iscsit_aborted_task	= isert_aborted_task,
	.iscsit_get_rx_pdu	= isert_get_rx_pdu,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
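/*
 * isert_init() - Create the completion and release workqueues and
 * register the iSER transport with the iSCSI target core.
 */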
static int __init isert_init(void)
{
	int ret;

	isert_comp_wq = alloc_workqueue("isert_comp_wq",
					WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!isert_comp_wq) {
		isert_err("Unable to allocate isert_comp_wq\n");
		return -ENOMEM;
	}

	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
					   WQ_UNBOUND_MAX_ACTIVE);
	if (!isert_release_wq) {
		isert_err("Unable to allocate isert_release_wq\n");
		ret = -ENOMEM;
		goto destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");

	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);

	return ret;
}

static void __exit isert_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	iscsit_unregister_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
}

MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);