/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/llist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>

#include "isert_proto.h"
#include "ib_isert.h"

#define	ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = (struct isert_conn *)context;

	pr_err("isert_qp_event_callback event: %d\n", e->event);
	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
		break;
	default:
		break;
	}
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		pr_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
	pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}

static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
		    u8 protection)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_qp_init_attr attr;
	int ret, index, min_index = 0;

	mutex_lock(&device_list_mutex);
	for (index = 0; index < device->cqs_used; index++)
		if (device->cq_active_qps[index] <
		    device->cq_active_qps[min_index])
			min_index = index;
	device->cq_active_qps[min_index]++;
	pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
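	/*
	 * min_index now names the completion vector with the fewest active
	 * QPs; its TX/RX CQs are assigned to this QP below so connections
	 * spread evenly across the device's CQs.
	 */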
	mutex_unlock(&device_list_mutex);

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = device->dev_tx_cq[min_index];
	attr.recv_cq = device->dev_rx_cq[min_index];
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READ..
	 */
	attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	if (protection)
		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
		 cma_id->device);
	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
		 isert_conn->conn_pd->device);

	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
	if (ret) {
		pr_err("rdma_create_qp failed for cma_id %d\n", ret);
		return ret;
	}
	isert_conn->conn_qp = cma_id->qp;
	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	pr_debug("isert_cq_event_callback event: %d\n", e->event);
}

static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->conn_rx_descs)
		goto fail;

	rx_desc = isert_conn->conn_rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = isert_conn->conn_mr->lkey;
	}

	isert_conn->conn_rx_desc_head = 0;
	return 0;

dma_map_fail:
	rx_desc = isert_conn->conn_rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
fail:
	return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->conn_rx_descs)
		return;

	rx_desc = isert_conn->conn_rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
}

static void isert_cq_tx_work(struct work_struct *);
static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_work(struct work_struct *);
static void isert_cq_rx_callback(struct ib_cq *, void *);

static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	struct isert_cq_desc *cq_desc;
	struct ib_device_attr *dev_attr;
	int ret = 0, i, j;

	dev_attr = &device->dev_attr;
	ret = isert_query_device(ib_dev, dev_attr);
	if (ret)
		return ret;

	/* assign function handlers */
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
	    dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
		device->use_fastreg = 1;
		device->reg_rdma_mem = isert_reg_rdma;
		device->unreg_rdma_mem = isert_unreg_rdma;
	} else {
		device->use_fastreg = 0;
		device->reg_rdma_mem = isert_map_rdma;
		device->unreg_rdma_mem = isert_unmap_cmd;
	}

	/* Check signature cap */
	device->pi_capable = dev_attr->device_cap_flags &
			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;

	device->cqs_used = min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors);
	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
	pr_debug("Using %d CQs, device %s supports %d vectors support "
		 "Fast registration %d pi_capable %d\n",
		 device->cqs_used, device->ib_device->name,
		 device->ib_device->num_comp_vectors, device->use_fastreg,
		 device->pi_capable);
	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
				device->cqs_used, GFP_KERNEL);
	if (!device->cq_desc) {
		pr_err("Unable to allocate device->cq_desc\n");
		return -ENOMEM;
	}
	cq_desc = device->cq_desc;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device = device;
		cq_desc[i].cq_index = i;

		INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_rx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_RX_CQ_LEN, i);
		if (IS_ERR(device->dev_rx_cq[i])) {
			ret = PTR_ERR(device->dev_rx_cq[i]);
			device->dev_rx_cq[i] = NULL;
			goto out_cq;
		}

		INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_tx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_TX_CQ_LEN, i);
		if (IS_ERR(device->dev_tx_cq[i])) {
			ret = PTR_ERR(device->dev_tx_cq[i]);
			device->dev_tx_cq[i] = NULL;
			goto out_cq;
		}

		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;

		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	return 0;

out_cq:
	for (j = 0; j < i; j++) {
		cq_desc = &device->cq_desc[j];

		if (device->dev_rx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_rx_work);
			ib_destroy_cq(device->dev_rx_cq[j]);
		}
		if (device->dev_tx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_tx_work);
			ib_destroy_cq(device->dev_tx_cq[j]);
		}
	}
	kfree(device->cq_desc);

	return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
	struct isert_cq_desc *cq_desc;
	int i;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc = &device->cq_desc[i];

		cancel_work_sync(&cq_desc->cq_rx_work);
		cancel_work_sync(&cq_desc->cq_tx_work);
		ib_destroy_cq(device->dev_rx_cq[i]);
		ib_destroy_cq(device->dev_tx_cq[i]);
		device->dev_rx_cq[i] = NULL;
		device->dev_tx_cq[i] = NULL;
	}

	kfree(device->cq_desc);
}

static void
isert_device_try_release(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	if
(!device->refcount) { 348 isert_free_device_ib_res(device); 349 list_del(&device->dev_node); 350 kfree(device); 351 } 352 mutex_unlock(&device_list_mutex); 353 } 354 355 static struct isert_device * 356 isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id) 357 { 358 struct isert_device *device; 359 int ret; 360 361 mutex_lock(&device_list_mutex); 362 list_for_each_entry(device, &device_list, dev_node) { 363 if (device->ib_device->node_guid == cma_id->device->node_guid) { 364 device->refcount++; 365 mutex_unlock(&device_list_mutex); 366 return device; 367 } 368 } 369 370 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL); 371 if (!device) { 372 mutex_unlock(&device_list_mutex); 373 return ERR_PTR(-ENOMEM); 374 } 375 376 INIT_LIST_HEAD(&device->dev_node); 377 378 device->ib_device = cma_id->device; 379 ret = isert_create_device_ib_res(device); 380 if (ret) { 381 kfree(device); 382 mutex_unlock(&device_list_mutex); 383 return ERR_PTR(ret); 384 } 385 386 device->refcount++; 387 list_add_tail(&device->dev_node, &device_list); 388 mutex_unlock(&device_list_mutex); 389 390 return device; 391 } 392 393 static void 394 isert_conn_free_fastreg_pool(struct isert_conn *isert_conn) 395 { 396 struct fast_reg_descriptor *fr_desc, *tmp; 397 int i = 0; 398 399 if (list_empty(&isert_conn->conn_fr_pool)) 400 return; 401 402 pr_debug("Freeing conn %p fastreg pool", isert_conn); 403 404 list_for_each_entry_safe(fr_desc, tmp, 405 &isert_conn->conn_fr_pool, list) { 406 list_del(&fr_desc->list); 407 ib_free_fast_reg_page_list(fr_desc->data_frpl); 408 ib_dereg_mr(fr_desc->data_mr); 409 if (fr_desc->pi_ctx) { 410 ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl); 411 ib_dereg_mr(fr_desc->pi_ctx->prot_mr); 412 ib_destroy_mr(fr_desc->pi_ctx->sig_mr); 413 kfree(fr_desc->pi_ctx); 414 } 415 kfree(fr_desc); 416 ++i; 417 } 418 419 if (i < isert_conn->conn_fr_pool_size) 420 pr_warn("Pool still has %d regions registered\n", 421 isert_conn->conn_fr_pool_size - i); 422 } 423 424 static int 425 isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd, 426 struct fast_reg_descriptor *fr_desc, u8 protection) 427 { 428 int ret; 429 430 fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device, 431 ISCSI_ISER_SG_TABLESIZE); 432 if (IS_ERR(fr_desc->data_frpl)) { 433 pr_err("Failed to allocate data frpl err=%ld\n", 434 PTR_ERR(fr_desc->data_frpl)); 435 return PTR_ERR(fr_desc->data_frpl); 436 } 437 438 fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE); 439 if (IS_ERR(fr_desc->data_mr)) { 440 pr_err("Failed to allocate data frmr err=%ld\n", 441 PTR_ERR(fr_desc->data_mr)); 442 ret = PTR_ERR(fr_desc->data_mr); 443 goto err_data_frpl; 444 } 445 pr_debug("Create fr_desc %p page_list %p\n", 446 fr_desc, fr_desc->data_frpl->page_list); 447 fr_desc->ind |= ISERT_DATA_KEY_VALID; 448 449 if (protection) { 450 struct ib_mr_init_attr mr_init_attr = {0}; 451 struct pi_context *pi_ctx; 452 453 fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL); 454 if (!fr_desc->pi_ctx) { 455 pr_err("Failed to allocate pi context\n"); 456 ret = -ENOMEM; 457 goto err_data_mr; 458 } 459 pi_ctx = fr_desc->pi_ctx; 460 461 pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device, 462 ISCSI_ISER_SG_TABLESIZE); 463 if (IS_ERR(pi_ctx->prot_frpl)) { 464 pr_err("Failed to allocate prot frpl err=%ld\n", 465 PTR_ERR(pi_ctx->prot_frpl)); 466 ret = PTR_ERR(pi_ctx->prot_frpl); 467 goto err_pi_ctx; 468 } 469 470 pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE); 471 if (IS_ERR(pi_ctx->prot_mr)) { 472 
pr_err("Failed to allocate prot frmr err=%ld\n", 473 PTR_ERR(pi_ctx->prot_mr)); 474 ret = PTR_ERR(pi_ctx->prot_mr); 475 goto err_prot_frpl; 476 } 477 fr_desc->ind |= ISERT_PROT_KEY_VALID; 478 479 mr_init_attr.max_reg_descriptors = 2; 480 mr_init_attr.flags |= IB_MR_SIGNATURE_EN; 481 pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr); 482 if (IS_ERR(pi_ctx->sig_mr)) { 483 pr_err("Failed to allocate signature enabled mr err=%ld\n", 484 PTR_ERR(pi_ctx->sig_mr)); 485 ret = PTR_ERR(pi_ctx->sig_mr); 486 goto err_prot_mr; 487 } 488 fr_desc->ind |= ISERT_SIG_KEY_VALID; 489 } 490 fr_desc->ind &= ~ISERT_PROTECTED; 491 492 return 0; 493 err_prot_mr: 494 ib_dereg_mr(fr_desc->pi_ctx->prot_mr); 495 err_prot_frpl: 496 ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl); 497 err_pi_ctx: 498 kfree(fr_desc->pi_ctx); 499 err_data_mr: 500 ib_dereg_mr(fr_desc->data_mr); 501 err_data_frpl: 502 ib_free_fast_reg_page_list(fr_desc->data_frpl); 503 504 return ret; 505 } 506 507 static int 508 isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support) 509 { 510 struct fast_reg_descriptor *fr_desc; 511 struct isert_device *device = isert_conn->conn_device; 512 struct se_session *se_sess = isert_conn->conn->sess->se_sess; 513 struct se_node_acl *se_nacl = se_sess->se_node_acl; 514 int i, ret, tag_num; 515 /* 516 * Setup the number of FRMRs based upon the number of tags 517 * available to session in iscsi_target_locate_portal(). 518 */ 519 tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth); 520 tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS; 521 522 isert_conn->conn_fr_pool_size = 0; 523 for (i = 0; i < tag_num; i++) { 524 fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL); 525 if (!fr_desc) { 526 pr_err("Failed to allocate fast_reg descriptor\n"); 527 ret = -ENOMEM; 528 goto err; 529 } 530 531 ret = isert_create_fr_desc(device->ib_device, 532 isert_conn->conn_pd, fr_desc, 533 pi_support); 534 if (ret) { 535 pr_err("Failed to create fastreg descriptor err=%d\n", 536 ret); 537 kfree(fr_desc); 538 goto err; 539 } 540 541 list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool); 542 isert_conn->conn_fr_pool_size++; 543 } 544 545 pr_debug("Creating conn %p fastreg pool size=%d", 546 isert_conn, isert_conn->conn_fr_pool_size); 547 548 return 0; 549 550 err: 551 isert_conn_free_fastreg_pool(isert_conn); 552 return ret; 553 } 554 555 static int 556 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 557 { 558 struct iscsi_np *np = cma_id->context; 559 struct isert_np *isert_np = np->np_context; 560 struct isert_conn *isert_conn; 561 struct isert_device *device; 562 struct ib_device *ib_dev = cma_id->device; 563 int ret = 0; 564 u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi; 565 566 pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n", 567 cma_id, cma_id->context); 568 569 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL); 570 if (!isert_conn) { 571 pr_err("Unable to allocate isert_conn\n"); 572 return -ENOMEM; 573 } 574 isert_conn->state = ISER_CONN_INIT; 575 INIT_LIST_HEAD(&isert_conn->conn_accept_node); 576 init_completion(&isert_conn->conn_login_comp); 577 init_completion(&isert_conn->conn_wait); 578 init_completion(&isert_conn->conn_wait_comp_err); 579 kref_init(&isert_conn->conn_kref); 580 kref_get(&isert_conn->conn_kref); 581 mutex_init(&isert_conn->conn_mutex); 582 spin_lock_init(&isert_conn->conn_lock); 583 INIT_LIST_HEAD(&isert_conn->conn_fr_pool); 584 585 cma_id->context = isert_conn; 586 isert_conn->conn_cm_id = cma_id; 587 
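	/*
	 * Cache the responder resources and initiator depth advertised by
	 * the initiator in the CM connect request parameters.
	 */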
isert_conn->responder_resources = event->param.conn.responder_resources; 588 isert_conn->initiator_depth = event->param.conn.initiator_depth; 589 pr_debug("Using responder_resources: %u initiator_depth: %u\n", 590 isert_conn->responder_resources, isert_conn->initiator_depth); 591 592 isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN + 593 ISER_RX_LOGIN_SIZE, GFP_KERNEL); 594 if (!isert_conn->login_buf) { 595 pr_err("Unable to allocate isert_conn->login_buf\n"); 596 ret = -ENOMEM; 597 goto out; 598 } 599 600 isert_conn->login_req_buf = isert_conn->login_buf; 601 isert_conn->login_rsp_buf = isert_conn->login_buf + 602 ISCSI_DEF_MAX_RECV_SEG_LEN; 603 pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n", 604 isert_conn->login_buf, isert_conn->login_req_buf, 605 isert_conn->login_rsp_buf); 606 607 isert_conn->login_req_dma = ib_dma_map_single(ib_dev, 608 (void *)isert_conn->login_req_buf, 609 ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE); 610 611 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); 612 if (ret) { 613 pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n", 614 ret); 615 isert_conn->login_req_dma = 0; 616 goto out_login_buf; 617 } 618 619 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, 620 (void *)isert_conn->login_rsp_buf, 621 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE); 622 623 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); 624 if (ret) { 625 pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n", 626 ret); 627 isert_conn->login_rsp_dma = 0; 628 goto out_req_dma_map; 629 } 630 631 device = isert_device_find_by_ib_dev(cma_id); 632 if (IS_ERR(device)) { 633 ret = PTR_ERR(device); 634 goto out_rsp_dma_map; 635 } 636 637 isert_conn->conn_device = device; 638 isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device); 639 if (IS_ERR(isert_conn->conn_pd)) { 640 ret = PTR_ERR(isert_conn->conn_pd); 641 pr_err("ib_alloc_pd failed for conn %p: ret=%d\n", 642 isert_conn, ret); 643 goto out_pd; 644 } 645 646 isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd, 647 IB_ACCESS_LOCAL_WRITE); 648 if (IS_ERR(isert_conn->conn_mr)) { 649 ret = PTR_ERR(isert_conn->conn_mr); 650 pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n", 651 isert_conn, ret); 652 goto out_mr; 653 } 654 655 if (pi_support && !device->pi_capable) { 656 pr_err("Protection information requested but not supported\n"); 657 ret = -EINVAL; 658 goto out_mr; 659 } 660 661 ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support); 662 if (ret) 663 goto out_conn_dev; 664 665 mutex_lock(&isert_np->np_accept_mutex); 666 list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node); 667 mutex_unlock(&isert_np->np_accept_mutex); 668 669 pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np); 670 wake_up(&isert_np->np_accept_wq); 671 return 0; 672 673 out_conn_dev: 674 ib_dereg_mr(isert_conn->conn_mr); 675 out_mr: 676 ib_dealloc_pd(isert_conn->conn_pd); 677 out_pd: 678 isert_device_try_release(device); 679 out_rsp_dma_map: 680 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, 681 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE); 682 out_req_dma_map: 683 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 684 ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE); 685 out_login_buf: 686 kfree(isert_conn->login_buf); 687 out: 688 kfree(isert_conn); 689 return ret; 690 } 691 692 static void 693 isert_connect_release(struct isert_conn *isert_conn) 694 { 695 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 696 struct isert_device *device = 
isert_conn->conn_device; 697 int cq_index; 698 699 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); 700 701 if (device && device->use_fastreg) 702 isert_conn_free_fastreg_pool(isert_conn); 703 704 if (isert_conn->conn_qp) { 705 cq_index = ((struct isert_cq_desc *) 706 isert_conn->conn_qp->recv_cq->cq_context)->cq_index; 707 pr_debug("isert_connect_release: cq_index: %d\n", cq_index); 708 isert_conn->conn_device->cq_active_qps[cq_index]--; 709 710 rdma_destroy_qp(isert_conn->conn_cm_id); 711 } 712 713 isert_free_rx_descriptors(isert_conn); 714 rdma_destroy_id(isert_conn->conn_cm_id); 715 716 ib_dereg_mr(isert_conn->conn_mr); 717 ib_dealloc_pd(isert_conn->conn_pd); 718 719 if (isert_conn->login_buf) { 720 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, 721 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE); 722 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 723 ISCSI_DEF_MAX_RECV_SEG_LEN, 724 DMA_FROM_DEVICE); 725 kfree(isert_conn->login_buf); 726 } 727 kfree(isert_conn); 728 729 if (device) 730 isert_device_try_release(device); 731 732 pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n"); 733 } 734 735 static void 736 isert_connected_handler(struct rdma_cm_id *cma_id) 737 { 738 return; 739 } 740 741 static void 742 isert_release_conn_kref(struct kref *kref) 743 { 744 struct isert_conn *isert_conn = container_of(kref, 745 struct isert_conn, conn_kref); 746 747 pr_debug("Calling isert_connect_release for final kref %s/%d\n", 748 current->comm, current->pid); 749 750 isert_connect_release(isert_conn); 751 } 752 753 static void 754 isert_put_conn(struct isert_conn *isert_conn) 755 { 756 kref_put(&isert_conn->conn_kref, isert_release_conn_kref); 757 } 758 759 static void 760 isert_disconnect_work(struct work_struct *work) 761 { 762 struct isert_conn *isert_conn = container_of(work, 763 struct isert_conn, conn_logout_work); 764 765 pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); 766 mutex_lock(&isert_conn->conn_mutex); 767 if (isert_conn->state == ISER_CONN_UP) 768 isert_conn->state = ISER_CONN_TERMINATING; 769 770 if (isert_conn->post_recv_buf_count == 0 && 771 atomic_read(&isert_conn->post_send_buf_count) == 0) { 772 mutex_unlock(&isert_conn->conn_mutex); 773 goto wake_up; 774 } 775 if (!isert_conn->conn_cm_id) { 776 mutex_unlock(&isert_conn->conn_mutex); 777 isert_put_conn(isert_conn); 778 return; 779 } 780 if (!isert_conn->logout_posted) { 781 pr_debug("Calling rdma_disconnect for !logout_posted from" 782 " isert_disconnect_work\n"); 783 rdma_disconnect(isert_conn->conn_cm_id); 784 mutex_unlock(&isert_conn->conn_mutex); 785 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); 786 goto wake_up; 787 } 788 mutex_unlock(&isert_conn->conn_mutex); 789 790 wake_up: 791 complete(&isert_conn->conn_wait); 792 isert_put_conn(isert_conn); 793 } 794 795 static void 796 isert_disconnected_handler(struct rdma_cm_id *cma_id) 797 { 798 struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context; 799 800 INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work); 801 schedule_work(&isert_conn->conn_logout_work); 802 } 803 804 static int 805 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 806 { 807 int ret = 0; 808 809 pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n", 810 event->event, event->status, cma_id->context, cma_id); 811 812 switch (event->event) { 813 case RDMA_CM_EVENT_CONNECT_REQUEST: 814 pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n"); 815 ret = 
isert_connect_request(cma_id, event); 816 break; 817 case RDMA_CM_EVENT_ESTABLISHED: 818 pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n"); 819 isert_connected_handler(cma_id); 820 break; 821 case RDMA_CM_EVENT_DISCONNECTED: 822 pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n"); 823 isert_disconnected_handler(cma_id); 824 break; 825 case RDMA_CM_EVENT_DEVICE_REMOVAL: 826 case RDMA_CM_EVENT_ADDR_CHANGE: 827 break; 828 case RDMA_CM_EVENT_CONNECT_ERROR: 829 default: 830 pr_err("Unknown RDMA CMA event: %d\n", event->event); 831 break; 832 } 833 834 if (ret != 0) { 835 pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n", 836 event->event, ret); 837 dump_stack(); 838 } 839 840 return ret; 841 } 842 843 static int 844 isert_post_recv(struct isert_conn *isert_conn, u32 count) 845 { 846 struct ib_recv_wr *rx_wr, *rx_wr_failed; 847 int i, ret; 848 unsigned int rx_head = isert_conn->conn_rx_desc_head; 849 struct iser_rx_desc *rx_desc; 850 851 for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) { 852 rx_desc = &isert_conn->conn_rx_descs[rx_head]; 853 rx_wr->wr_id = (unsigned long)rx_desc; 854 rx_wr->sg_list = &rx_desc->rx_sg; 855 rx_wr->num_sge = 1; 856 rx_wr->next = rx_wr + 1; 857 rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1); 858 } 859 860 rx_wr--; 861 rx_wr->next = NULL; /* mark end of work requests list */ 862 863 isert_conn->post_recv_buf_count += count; 864 ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr, 865 &rx_wr_failed); 866 if (ret) { 867 pr_err("ib_post_recv() failed with ret: %d\n", ret); 868 isert_conn->post_recv_buf_count -= count; 869 } else { 870 pr_debug("isert_post_recv(): Posted %d RX buffers\n", count); 871 isert_conn->conn_rx_desc_head = rx_head; 872 } 873 return ret; 874 } 875 876 static int 877 isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) 878 { 879 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 880 struct ib_send_wr send_wr, *send_wr_failed; 881 int ret; 882 883 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr, 884 ISER_HEADERS_LEN, DMA_TO_DEVICE); 885 886 send_wr.next = NULL; 887 send_wr.wr_id = (unsigned long)tx_desc; 888 send_wr.sg_list = tx_desc->tx_sg; 889 send_wr.num_sge = tx_desc->num_sge; 890 send_wr.opcode = IB_WR_SEND; 891 send_wr.send_flags = IB_SEND_SIGNALED; 892 893 atomic_inc(&isert_conn->post_send_buf_count); 894 895 ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed); 896 if (ret) { 897 pr_err("ib_post_send() failed, ret: %d\n", ret); 898 atomic_dec(&isert_conn->post_send_buf_count); 899 } 900 901 return ret; 902 } 903 904 static void 905 isert_create_send_desc(struct isert_conn *isert_conn, 906 struct isert_cmd *isert_cmd, 907 struct iser_tx_desc *tx_desc) 908 { 909 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 910 911 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, 912 ISER_HEADERS_LEN, DMA_TO_DEVICE); 913 914 memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr)); 915 tx_desc->iser_header.flags = ISER_VER; 916 917 tx_desc->num_sge = 1; 918 tx_desc->isert_cmd = isert_cmd; 919 920 if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) { 921 tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; 922 pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc); 923 } 924 } 925 926 static int 927 isert_init_tx_hdrs(struct isert_conn *isert_conn, 928 struct iser_tx_desc *tx_desc) 929 { 930 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 931 u64 dma_addr; 932 933 dma_addr = ib_dma_map_single(ib_dev, (void 
*)tx_desc, 934 ISER_HEADERS_LEN, DMA_TO_DEVICE); 935 if (ib_dma_mapping_error(ib_dev, dma_addr)) { 936 pr_err("ib_dma_mapping_error() failed\n"); 937 return -ENOMEM; 938 } 939 940 tx_desc->dma_addr = dma_addr; 941 tx_desc->tx_sg[0].addr = tx_desc->dma_addr; 942 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; 943 tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; 944 945 pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u" 946 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr, 947 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey); 948 949 return 0; 950 } 951 952 static void 953 isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 954 struct ib_send_wr *send_wr, bool coalesce) 955 { 956 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; 957 958 isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND; 959 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; 960 send_wr->opcode = IB_WR_SEND; 961 send_wr->sg_list = &tx_desc->tx_sg[0]; 962 send_wr->num_sge = isert_cmd->tx_desc.num_sge; 963 /* 964 * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED 965 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls. 966 */ 967 mutex_lock(&isert_conn->conn_mutex); 968 if (coalesce && isert_conn->state == ISER_CONN_UP && 969 ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) { 970 tx_desc->llnode_active = true; 971 llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist); 972 mutex_unlock(&isert_conn->conn_mutex); 973 return; 974 } 975 isert_conn->conn_comp_batch = 0; 976 tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist); 977 mutex_unlock(&isert_conn->conn_mutex); 978 979 send_wr->send_flags = IB_SEND_SIGNALED; 980 } 981 982 static int 983 isert_rdma_post_recvl(struct isert_conn *isert_conn) 984 { 985 struct ib_recv_wr rx_wr, *rx_wr_fail; 986 struct ib_sge sge; 987 int ret; 988 989 memset(&sge, 0, sizeof(struct ib_sge)); 990 sge.addr = isert_conn->login_req_dma; 991 sge.length = ISER_RX_LOGIN_SIZE; 992 sge.lkey = isert_conn->conn_mr->lkey; 993 994 pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n", 995 sge.addr, sge.length, sge.lkey); 996 997 memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); 998 rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf; 999 rx_wr.sg_list = &sge; 1000 rx_wr.num_sge = 1; 1001 1002 isert_conn->post_recv_buf_count++; 1003 ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail); 1004 if (ret) { 1005 pr_err("ib_post_recv() failed: %d\n", ret); 1006 isert_conn->post_recv_buf_count--; 1007 } 1008 1009 pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n"); 1010 return ret; 1011 } 1012 1013 static int 1014 isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, 1015 u32 length) 1016 { 1017 struct isert_conn *isert_conn = conn->context; 1018 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1019 struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc; 1020 int ret; 1021 1022 isert_create_send_desc(isert_conn, NULL, tx_desc); 1023 1024 memcpy(&tx_desc->iscsi_header, &login->rsp[0], 1025 sizeof(struct iscsi_hdr)); 1026 1027 isert_init_tx_hdrs(isert_conn, tx_desc); 1028 1029 if (length > 0) { 1030 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1]; 1031 1032 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma, 1033 length, DMA_TO_DEVICE); 1034 1035 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length); 1036 1037 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma, 1038 length, DMA_TO_DEVICE); 1039 1040 tx_dsg->addr = 
isert_conn->login_rsp_dma; 1041 tx_dsg->length = length; 1042 tx_dsg->lkey = isert_conn->conn_mr->lkey; 1043 tx_desc->num_sge = 2; 1044 } 1045 if (!login->login_failed) { 1046 if (login->login_complete) { 1047 if (isert_conn->conn_device->use_fastreg) { 1048 u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi; 1049 1050 ret = isert_conn_create_fastreg_pool(isert_conn, 1051 pi_support); 1052 if (ret) { 1053 pr_err("Conn: %p failed to create" 1054 " fastreg pool\n", isert_conn); 1055 return ret; 1056 } 1057 } 1058 1059 ret = isert_alloc_rx_descriptors(isert_conn); 1060 if (ret) 1061 return ret; 1062 1063 ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX); 1064 if (ret) 1065 return ret; 1066 1067 isert_conn->state = ISER_CONN_UP; 1068 goto post_send; 1069 } 1070 1071 ret = isert_rdma_post_recvl(isert_conn); 1072 if (ret) 1073 return ret; 1074 } 1075 post_send: 1076 ret = isert_post_send(isert_conn, tx_desc); 1077 if (ret) 1078 return ret; 1079 1080 return 0; 1081 } 1082 1083 static void 1084 isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen, 1085 struct isert_conn *isert_conn) 1086 { 1087 struct iscsi_conn *conn = isert_conn->conn; 1088 struct iscsi_login *login = conn->conn_login; 1089 int size; 1090 1091 if (!login) { 1092 pr_err("conn->conn_login is NULL\n"); 1093 dump_stack(); 1094 return; 1095 } 1096 1097 if (login->first_request) { 1098 struct iscsi_login_req *login_req = 1099 (struct iscsi_login_req *)&rx_desc->iscsi_header; 1100 /* 1101 * Setup the initial iscsi_login values from the leading 1102 * login request PDU. 1103 */ 1104 login->leading_connection = (!login_req->tsih) ? 1 : 0; 1105 login->current_stage = 1106 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) 1107 >> 2; 1108 login->version_min = login_req->min_version; 1109 login->version_max = login_req->max_version; 1110 memcpy(login->isid, login_req->isid, 6); 1111 login->cmd_sn = be32_to_cpu(login_req->cmdsn); 1112 login->init_task_tag = login_req->itt; 1113 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); 1114 login->cid = be16_to_cpu(login_req->cid); 1115 login->tsih = be16_to_cpu(login_req->tsih); 1116 } 1117 1118 memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); 1119 1120 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); 1121 pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n", 1122 size, rx_buflen, MAX_KEY_VALUE_PAIRS); 1123 memcpy(login->req_buf, &rx_desc->data[0], size); 1124 1125 if (login->first_request) { 1126 complete(&isert_conn->conn_login_comp); 1127 return; 1128 } 1129 schedule_delayed_work(&conn->login_work, 0); 1130 } 1131 1132 static struct iscsi_cmd 1133 *isert_allocate_cmd(struct iscsi_conn *conn) 1134 { 1135 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1136 struct isert_cmd *isert_cmd; 1137 struct iscsi_cmd *cmd; 1138 1139 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 1140 if (!cmd) { 1141 pr_err("Unable to allocate iscsi_cmd + isert_cmd\n"); 1142 return NULL; 1143 } 1144 isert_cmd = iscsit_priv_cmd(cmd); 1145 isert_cmd->conn = isert_conn; 1146 isert_cmd->iscsi_cmd = cmd; 1147 1148 return cmd; 1149 } 1150 1151 static int 1152 isert_handle_scsi_cmd(struct isert_conn *isert_conn, 1153 struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd, 1154 struct iser_rx_desc *rx_desc, unsigned char *buf) 1155 { 1156 struct iscsi_conn *conn = isert_conn->conn; 1157 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; 1158 struct scatterlist *sg; 1159 int imm_data, imm_data_len, unsol_data, 
sg_nents, rc; 1160 bool dump_payload = false; 1161 1162 rc = iscsit_setup_scsi_cmd(conn, cmd, buf); 1163 if (rc < 0) 1164 return rc; 1165 1166 imm_data = cmd->immediate_data; 1167 imm_data_len = cmd->first_burst_len; 1168 unsol_data = cmd->unsolicited_data; 1169 1170 rc = iscsit_process_scsi_cmd(conn, cmd, hdr); 1171 if (rc < 0) { 1172 return 0; 1173 } else if (rc > 0) { 1174 dump_payload = true; 1175 goto sequence_cmd; 1176 } 1177 1178 if (!imm_data) 1179 return 0; 1180 1181 sg = &cmd->se_cmd.t_data_sg[0]; 1182 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); 1183 1184 pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n", 1185 sg, sg_nents, &rx_desc->data[0], imm_data_len); 1186 1187 sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len); 1188 1189 cmd->write_data_done += imm_data_len; 1190 1191 if (cmd->write_data_done == cmd->se_cmd.data_length) { 1192 spin_lock_bh(&cmd->istate_lock); 1193 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1194 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1195 spin_unlock_bh(&cmd->istate_lock); 1196 } 1197 1198 sequence_cmd: 1199 rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); 1200 1201 if (!rc && dump_payload == false && unsol_data) 1202 iscsit_set_unsoliticed_dataout(cmd); 1203 1204 return 0; 1205 } 1206 1207 static int 1208 isert_handle_iscsi_dataout(struct isert_conn *isert_conn, 1209 struct iser_rx_desc *rx_desc, unsigned char *buf) 1210 { 1211 struct scatterlist *sg_start; 1212 struct iscsi_conn *conn = isert_conn->conn; 1213 struct iscsi_cmd *cmd = NULL; 1214 struct iscsi_data *hdr = (struct iscsi_data *)buf; 1215 u32 unsol_data_len = ntoh24(hdr->dlength); 1216 int rc, sg_nents, sg_off, page_off; 1217 1218 rc = iscsit_check_dataout_hdr(conn, buf, &cmd); 1219 if (rc < 0) 1220 return rc; 1221 else if (!cmd) 1222 return 0; 1223 /* 1224 * FIXME: Unexpected unsolicited_data out 1225 */ 1226 if (!cmd->unsolicited_data) { 1227 pr_err("Received unexpected solicited data payload\n"); 1228 dump_stack(); 1229 return -1; 1230 } 1231 1232 pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n", 1233 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length); 1234 1235 sg_off = cmd->write_data_done / PAGE_SIZE; 1236 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 1237 sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE)); 1238 page_off = cmd->write_data_done % PAGE_SIZE; 1239 /* 1240 * FIXME: Non page-aligned unsolicited_data out 1241 */ 1242 if (page_off) { 1243 pr_err("Received unexpected non-page aligned data payload\n"); 1244 dump_stack(); 1245 return -1; 1246 } 1247 pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n", 1248 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len); 1249 1250 sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], 1251 unsol_data_len); 1252 1253 rc = iscsit_check_dataout_payload(cmd, hdr, false); 1254 if (rc < 0) 1255 return rc; 1256 1257 return 0; 1258 } 1259 1260 static int 1261 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1262 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, 1263 unsigned char *buf) 1264 { 1265 struct iscsi_conn *conn = isert_conn->conn; 1266 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf; 1267 int rc; 1268 1269 rc = iscsit_setup_nop_out(conn, cmd, hdr); 1270 if (rc < 0) 1271 return rc; 1272 /* 1273 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload 1274 */ 1275 1276 return iscsit_process_nop_out(conn, cmd, hdr); 1277 } 1278 
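/*
 * Text request payloads arrive inline in the RX descriptor; the payload is
 * copied into a freshly allocated text_in buffer before the PDU is handed
 * to iscsit_process_text_cmd().
 */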
static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
{
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	text_in = kzalloc(payload_length, GFP_KERNEL);
	if (!text_in) {
		pr_err("Unable to allocate text_in of payload_length: %u\n",
		       payload_length);
		return -ENOMEM;
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}

static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_session *sess = conn->sess;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	if (sess->sess_ops->SessionType &&
	    (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
		pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
		       " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		if (ret > 0)
			wait_for_completion_timeout(&conn->conn_logout_comp,
						    SECONDS_FOR_LOGOUT_COMP *
						    HZ);
		break;
	case ISCSI_OP_TEXT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}

static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;
	int rc;

	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_hdr->flags & ISER_RSV) {
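			/* Initiator advertised a valid Read STag + VA in this PDU. */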
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
				 read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
				 write_stag, (unsigned long long)write_va);
		}

		pr_debug("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		pr_err("iSER Hello message\n");
		break;
	default:
		pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

	rc = isert_rx_opcode(isert_conn, rx_desc,
			     read_stag, read_va, write_stag, write_va);
}

static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
		    unsigned long xfer_len)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf)
		isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
				   isert_conn);
	else
		isert_rx_do_work(desc, isert_conn);

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
		 isert_conn->post_recv_buf_count);

	if ((char *)desc == isert_conn->login_req_buf)
		return;

	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				     ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			pr_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
}

static int
isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
		   enum iser_ib_op_code op, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
1492 DMA_TO_DEVICE : DMA_FROM_DEVICE; 1493 1494 data->len = length - offset; 1495 data->offset = offset; 1496 data->sg_off = data->offset / PAGE_SIZE; 1497 1498 data->sg = &sg[data->sg_off]; 1499 data->nents = min_t(unsigned int, nents - data->sg_off, 1500 ISCSI_ISER_SG_TABLESIZE); 1501 data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE * 1502 PAGE_SIZE); 1503 1504 data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents, 1505 data->dma_dir); 1506 if (unlikely(!data->dma_nents)) { 1507 pr_err("Cmd: unable to dma map SGs %p\n", sg); 1508 return -EINVAL; 1509 } 1510 1511 pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n", 1512 isert_cmd, data->dma_nents, data->sg, data->nents, data->len); 1513 1514 return 0; 1515 } 1516 1517 static void 1518 isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data) 1519 { 1520 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1521 1522 ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir); 1523 memset(data, 0, sizeof(*data)); 1524 } 1525 1526 1527 1528 static void 1529 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) 1530 { 1531 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1532 1533 pr_debug("isert_unmap_cmd: %p\n", isert_cmd); 1534 1535 if (wr->data.sg) { 1536 pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd); 1537 isert_unmap_data_buf(isert_conn, &wr->data); 1538 } 1539 1540 if (wr->send_wr) { 1541 pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd); 1542 kfree(wr->send_wr); 1543 wr->send_wr = NULL; 1544 } 1545 1546 if (wr->ib_sge) { 1547 pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd); 1548 kfree(wr->ib_sge); 1549 wr->ib_sge = NULL; 1550 } 1551 } 1552 1553 static void 1554 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) 1555 { 1556 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1557 LIST_HEAD(unmap_list); 1558 1559 pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd); 1560 1561 if (wr->fr_desc) { 1562 pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n", 1563 isert_cmd, wr->fr_desc); 1564 if (wr->fr_desc->ind & ISERT_PROTECTED) { 1565 isert_unmap_data_buf(isert_conn, &wr->prot); 1566 wr->fr_desc->ind &= ~ISERT_PROTECTED; 1567 } 1568 spin_lock_bh(&isert_conn->conn_lock); 1569 list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool); 1570 spin_unlock_bh(&isert_conn->conn_lock); 1571 wr->fr_desc = NULL; 1572 } 1573 1574 if (wr->data.sg) { 1575 pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd); 1576 isert_unmap_data_buf(isert_conn, &wr->data); 1577 } 1578 1579 wr->ib_sge = NULL; 1580 wr->send_wr = NULL; 1581 } 1582 1583 static void 1584 isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) 1585 { 1586 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1587 struct isert_conn *isert_conn = isert_cmd->conn; 1588 struct iscsi_conn *conn = isert_conn->conn; 1589 struct isert_device *device = isert_conn->conn_device; 1590 1591 pr_debug("Entering isert_put_cmd: %p\n", isert_cmd); 1592 1593 switch (cmd->iscsi_opcode) { 1594 case ISCSI_OP_SCSI_CMD: 1595 spin_lock_bh(&conn->cmd_lock); 1596 if (!list_empty(&cmd->i_conn_node)) 1597 list_del_init(&cmd->i_conn_node); 1598 spin_unlock_bh(&conn->cmd_lock); 1599 1600 if (cmd->data_direction == DMA_TO_DEVICE) { 1601 iscsit_stop_dataout_timer(cmd); 1602 /* 1603 * Check for special case during comp_err where 1604 * WRITE_PENDING has been handed off from core, 1605 * but requires an extra target_put_sess_cmd() 1606 * before transport_generic_free_cmd() below. 
1607 */ 1608 if (comp_err && 1609 cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) { 1610 struct se_cmd *se_cmd = &cmd->se_cmd; 1611 1612 target_put_sess_cmd(se_cmd->se_sess, se_cmd); 1613 } 1614 } 1615 1616 device->unreg_rdma_mem(isert_cmd, isert_conn); 1617 transport_generic_free_cmd(&cmd->se_cmd, 0); 1618 break; 1619 case ISCSI_OP_SCSI_TMFUNC: 1620 spin_lock_bh(&conn->cmd_lock); 1621 if (!list_empty(&cmd->i_conn_node)) 1622 list_del_init(&cmd->i_conn_node); 1623 spin_unlock_bh(&conn->cmd_lock); 1624 1625 transport_generic_free_cmd(&cmd->se_cmd, 0); 1626 break; 1627 case ISCSI_OP_REJECT: 1628 case ISCSI_OP_NOOP_OUT: 1629 case ISCSI_OP_TEXT: 1630 spin_lock_bh(&conn->cmd_lock); 1631 if (!list_empty(&cmd->i_conn_node)) 1632 list_del_init(&cmd->i_conn_node); 1633 spin_unlock_bh(&conn->cmd_lock); 1634 1635 /* 1636 * Handle special case for REJECT when iscsi_add_reject*() has 1637 * overwritten the original iscsi_opcode assignment, and the 1638 * associated cmd->se_cmd needs to be released. 1639 */ 1640 if (cmd->se_cmd.se_tfo != NULL) { 1641 pr_debug("Calling transport_generic_free_cmd from" 1642 " isert_put_cmd for 0x%02x\n", 1643 cmd->iscsi_opcode); 1644 transport_generic_free_cmd(&cmd->se_cmd, 0); 1645 break; 1646 } 1647 /* 1648 * Fall-through 1649 */ 1650 default: 1651 iscsit_release_cmd(cmd); 1652 break; 1653 } 1654 } 1655 1656 static void 1657 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) 1658 { 1659 if (tx_desc->dma_addr != 0) { 1660 pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n"); 1661 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr, 1662 ISER_HEADERS_LEN, DMA_TO_DEVICE); 1663 tx_desc->dma_addr = 0; 1664 } 1665 } 1666 1667 static void 1668 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, 1669 struct ib_device *ib_dev, bool comp_err) 1670 { 1671 if (isert_cmd->pdu_buf_dma != 0) { 1672 pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n"); 1673 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma, 1674 isert_cmd->pdu_buf_len, DMA_TO_DEVICE); 1675 isert_cmd->pdu_buf_dma = 0; 1676 } 1677 1678 isert_unmap_tx_desc(tx_desc, ib_dev); 1679 isert_put_cmd(isert_cmd, comp_err); 1680 } 1681 1682 static int 1683 isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr) 1684 { 1685 struct ib_mr_status mr_status; 1686 int ret; 1687 1688 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); 1689 if (ret) { 1690 pr_err("ib_check_mr_status failed, ret %d\n", ret); 1691 goto fail_mr_status; 1692 } 1693 1694 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { 1695 u64 sec_offset_err; 1696 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8; 1697 1698 switch (mr_status.sig_err.err_type) { 1699 case IB_SIG_BAD_GUARD: 1700 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 1701 break; 1702 case IB_SIG_BAD_REFTAG: 1703 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 1704 break; 1705 case IB_SIG_BAD_APPTAG: 1706 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; 1707 break; 1708 } 1709 sec_offset_err = mr_status.sig_err.sig_err_offset; 1710 do_div(sec_offset_err, block_size); 1711 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba; 1712 1713 pr_err("isert: PI error found type %d at sector 0x%llx " 1714 "expected 0x%x vs actual 0x%x\n", 1715 mr_status.sig_err.err_type, 1716 (unsigned long long)se_cmd->bad_sector, 1717 mr_status.sig_err.expected, 1718 mr_status.sig_err.actual); 1719 ret = 1; 1720 } 1721 1722 fail_mr_status: 1723 return ret; 1724 } 1725 1726 static void 
1727 isert_completion_rdma_write(struct iser_tx_desc *tx_desc, 1728 struct isert_cmd *isert_cmd) 1729 { 1730 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1731 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1732 struct se_cmd *se_cmd = &cmd->se_cmd; 1733 struct isert_conn *isert_conn = isert_cmd->conn; 1734 struct isert_device *device = isert_conn->conn_device; 1735 int ret = 0; 1736 1737 if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) { 1738 ret = isert_check_pi_status(se_cmd, 1739 wr->fr_desc->pi_ctx->sig_mr); 1740 wr->fr_desc->ind &= ~ISERT_PROTECTED; 1741 } 1742 1743 device->unreg_rdma_mem(isert_cmd, isert_conn); 1744 wr->send_wr_num = 0; 1745 if (ret) 1746 transport_send_check_condition_and_sense(se_cmd, 1747 se_cmd->pi_err, 0); 1748 else 1749 isert_put_response(isert_conn->conn, cmd); 1750 } 1751 1752 static void 1753 isert_completion_rdma_read(struct iser_tx_desc *tx_desc, 1754 struct isert_cmd *isert_cmd) 1755 { 1756 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1757 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1758 struct se_cmd *se_cmd = &cmd->se_cmd; 1759 struct isert_conn *isert_conn = isert_cmd->conn; 1760 struct isert_device *device = isert_conn->conn_device; 1761 int ret = 0; 1762 1763 if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) { 1764 ret = isert_check_pi_status(se_cmd, 1765 wr->fr_desc->pi_ctx->sig_mr); 1766 wr->fr_desc->ind &= ~ISERT_PROTECTED; 1767 } 1768 1769 iscsit_stop_dataout_timer(cmd); 1770 device->unreg_rdma_mem(isert_cmd, isert_conn); 1771 cmd->write_data_done = wr->data.len; 1772 wr->send_wr_num = 0; 1773 1774 pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); 1775 spin_lock_bh(&cmd->istate_lock); 1776 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1777 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1778 spin_unlock_bh(&cmd->istate_lock); 1779 1780 if (ret) 1781 transport_send_check_condition_and_sense(se_cmd, 1782 se_cmd->pi_err, 0); 1783 else 1784 target_execute_cmd(se_cmd); 1785 } 1786 1787 static void 1788 isert_do_control_comp(struct work_struct *work) 1789 { 1790 struct isert_cmd *isert_cmd = container_of(work, 1791 struct isert_cmd, comp_work); 1792 struct isert_conn *isert_conn = isert_cmd->conn; 1793 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1794 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1795 1796 switch (cmd->i_state) { 1797 case ISTATE_SEND_TASKMGTRSP: 1798 pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n"); 1799 1800 atomic_dec(&isert_conn->post_send_buf_count); 1801 iscsit_tmr_post_handler(cmd, cmd->conn); 1802 1803 cmd->i_state = ISTATE_SENT_STATUS; 1804 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); 1805 break; 1806 case ISTATE_SEND_REJECT: 1807 pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n"); 1808 atomic_dec(&isert_conn->post_send_buf_count); 1809 1810 cmd->i_state = ISTATE_SENT_STATUS; 1811 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); 1812 break; 1813 case ISTATE_SEND_LOGOUTRSP: 1814 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); 1815 /* 1816 * Call atomic_dec(&isert_conn->post_send_buf_count) 1817 * from isert_wait_conn() 1818 */ 1819 isert_conn->logout_posted = true; 1820 iscsit_logout_post_handler(cmd, cmd->conn); 1821 break; 1822 case ISTATE_SEND_TEXTRSP: 1823 atomic_dec(&isert_conn->post_send_buf_count); 1824 cmd->i_state = ISTATE_SENT_STATUS; 1825 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); 1826 break; 1827 default: 1828 pr_err("Unknown do_control_comp i_state %d\n", 
cmd->i_state); 1829 dump_stack(); 1830 break; 1831 } 1832 } 1833 1834 static void 1835 isert_response_completion(struct iser_tx_desc *tx_desc, 1836 struct isert_cmd *isert_cmd, 1837 struct isert_conn *isert_conn, 1838 struct ib_device *ib_dev) 1839 { 1840 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1841 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1842 1843 if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || 1844 cmd->i_state == ISTATE_SEND_LOGOUTRSP || 1845 cmd->i_state == ISTATE_SEND_REJECT || 1846 cmd->i_state == ISTATE_SEND_TEXTRSP) { 1847 isert_unmap_tx_desc(tx_desc, ib_dev); 1848 1849 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp); 1850 queue_work(isert_comp_wq, &isert_cmd->comp_work); 1851 return; 1852 } 1853 1854 /** 1855 * If send_wr_num is 0 this means that we got 1856 * RDMA completion and we cleared it and we should 1857 * simply decrement the response post. else the 1858 * response is incorporated in send_wr_num, just 1859 * sub it. 1860 **/ 1861 if (wr->send_wr_num) 1862 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); 1863 else 1864 atomic_dec(&isert_conn->post_send_buf_count); 1865 1866 cmd->i_state = ISTATE_SENT_STATUS; 1867 isert_completion_put(tx_desc, isert_cmd, ib_dev, false); 1868 } 1869 1870 static void 1871 __isert_send_completion(struct iser_tx_desc *tx_desc, 1872 struct isert_conn *isert_conn) 1873 { 1874 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1875 struct isert_cmd *isert_cmd = tx_desc->isert_cmd; 1876 struct isert_rdma_wr *wr; 1877 1878 if (!isert_cmd) { 1879 atomic_dec(&isert_conn->post_send_buf_count); 1880 isert_unmap_tx_desc(tx_desc, ib_dev); 1881 return; 1882 } 1883 wr = &isert_cmd->rdma_wr; 1884 1885 switch (wr->iser_ib_op) { 1886 case ISER_IB_RECV: 1887 pr_err("isert_send_completion: Got ISER_IB_RECV\n"); 1888 dump_stack(); 1889 break; 1890 case ISER_IB_SEND: 1891 pr_debug("isert_send_completion: Got ISER_IB_SEND\n"); 1892 isert_response_completion(tx_desc, isert_cmd, 1893 isert_conn, ib_dev); 1894 break; 1895 case ISER_IB_RDMA_WRITE: 1896 pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n"); 1897 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); 1898 isert_completion_rdma_write(tx_desc, isert_cmd); 1899 break; 1900 case ISER_IB_RDMA_READ: 1901 pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n"); 1902 1903 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); 1904 isert_completion_rdma_read(tx_desc, isert_cmd); 1905 break; 1906 default: 1907 pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op); 1908 dump_stack(); 1909 break; 1910 } 1911 } 1912 1913 static void 1914 isert_send_completion(struct iser_tx_desc *tx_desc, 1915 struct isert_conn *isert_conn) 1916 { 1917 struct llist_node *llnode = tx_desc->comp_llnode_batch; 1918 struct iser_tx_desc *t; 1919 /* 1920 * Drain coalesced completion llist starting from comp_llnode_batch 1921 * setup in isert_init_send_wr(), and then complete trailing tx_desc. 
1922 */ 1923 while (llnode) { 1924 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); 1925 llnode = llist_next(llnode); 1926 __isert_send_completion(t, isert_conn); 1927 } 1928 __isert_send_completion(tx_desc, isert_conn); 1929 } 1930 1931 static void 1932 isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev) 1933 { 1934 struct llist_node *llnode; 1935 struct isert_rdma_wr *wr; 1936 struct iser_tx_desc *t; 1937 1938 mutex_lock(&isert_conn->conn_mutex); 1939 llnode = llist_del_all(&isert_conn->conn_comp_llist); 1940 isert_conn->conn_comp_batch = 0; 1941 mutex_unlock(&isert_conn->conn_mutex); 1942 1943 while (llnode) { 1944 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); 1945 llnode = llist_next(llnode); 1946 wr = &t->isert_cmd->rdma_wr; 1947 1948 /** 1949 * If send_wr_num is 0 this means that we got 1950 * RDMA completion and we cleared it and we should 1951 * simply decrement the response post. else the 1952 * response is incorporated in send_wr_num, just 1953 * sub it. 1954 **/ 1955 if (wr->send_wr_num) 1956 atomic_sub(wr->send_wr_num, 1957 &isert_conn->post_send_buf_count); 1958 else 1959 atomic_dec(&isert_conn->post_send_buf_count); 1960 1961 isert_completion_put(t, t->isert_cmd, ib_dev, true); 1962 } 1963 } 1964 1965 static void 1966 isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) 1967 { 1968 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1969 struct isert_cmd *isert_cmd = tx_desc->isert_cmd; 1970 struct llist_node *llnode = tx_desc->comp_llnode_batch; 1971 struct isert_rdma_wr *wr; 1972 struct iser_tx_desc *t; 1973 1974 while (llnode) { 1975 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); 1976 llnode = llist_next(llnode); 1977 wr = &t->isert_cmd->rdma_wr; 1978 1979 /** 1980 * If send_wr_num is 0 this means that we got 1981 * RDMA completion and we cleared it and we should 1982 * simply decrement the response post. else the 1983 * response is incorporated in send_wr_num, just 1984 * sub it. 
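		 * In short, the drop here is post_send_buf_count -=
		 * max(send_wr_num, 1), mirroring the atomic_add()/atomic_inc()
		 * done when the work requests were posted.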
1985 **/ 1986 if (wr->send_wr_num) 1987 atomic_sub(wr->send_wr_num, 1988 &isert_conn->post_send_buf_count); 1989 else 1990 atomic_dec(&isert_conn->post_send_buf_count); 1991 1992 isert_completion_put(t, t->isert_cmd, ib_dev, true); 1993 } 1994 tx_desc->comp_llnode_batch = NULL; 1995 1996 if (!isert_cmd) 1997 isert_unmap_tx_desc(tx_desc, ib_dev); 1998 else 1999 isert_completion_put(tx_desc, isert_cmd, ib_dev, true); 2000 } 2001 2002 static void 2003 isert_cq_rx_comp_err(struct isert_conn *isert_conn) 2004 { 2005 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 2006 struct iscsi_conn *conn = isert_conn->conn; 2007 2008 if (isert_conn->post_recv_buf_count) 2009 return; 2010 2011 isert_cq_drain_comp_llist(isert_conn, ib_dev); 2012 2013 if (conn->sess) { 2014 target_sess_cmd_list_set_waiting(conn->sess->se_sess); 2015 target_wait_for_sess_cmds(conn->sess->se_sess); 2016 } 2017 2018 while (atomic_read(&isert_conn->post_send_buf_count)) 2019 msleep(3000); 2020 2021 mutex_lock(&isert_conn->conn_mutex); 2022 isert_conn->state = ISER_CONN_DOWN; 2023 mutex_unlock(&isert_conn->conn_mutex); 2024 2025 complete(&isert_conn->conn_wait_comp_err); 2026 } 2027 2028 static void 2029 isert_cq_tx_work(struct work_struct *work) 2030 { 2031 struct isert_cq_desc *cq_desc = container_of(work, 2032 struct isert_cq_desc, cq_tx_work); 2033 struct isert_device *device = cq_desc->device; 2034 int cq_index = cq_desc->cq_index; 2035 struct ib_cq *tx_cq = device->dev_tx_cq[cq_index]; 2036 struct isert_conn *isert_conn; 2037 struct iser_tx_desc *tx_desc; 2038 struct ib_wc wc; 2039 2040 while (ib_poll_cq(tx_cq, 1, &wc) == 1) { 2041 tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id; 2042 isert_conn = wc.qp->qp_context; 2043 2044 if (wc.status == IB_WC_SUCCESS) { 2045 isert_send_completion(tx_desc, isert_conn); 2046 } else { 2047 pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); 2048 pr_debug("TX wc.status: 0x%08x\n", wc.status); 2049 pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err); 2050 2051 if (wc.wr_id != ISER_FASTREG_LI_WRID) { 2052 if (tx_desc->llnode_active) 2053 continue; 2054 2055 atomic_dec(&isert_conn->post_send_buf_count); 2056 isert_cq_tx_comp_err(tx_desc, isert_conn); 2057 } 2058 } 2059 } 2060 2061 ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP); 2062 } 2063 2064 static void 2065 isert_cq_tx_callback(struct ib_cq *cq, void *context) 2066 { 2067 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; 2068 2069 queue_work(isert_comp_wq, &cq_desc->cq_tx_work); 2070 } 2071 2072 static void 2073 isert_cq_rx_work(struct work_struct *work) 2074 { 2075 struct isert_cq_desc *cq_desc = container_of(work, 2076 struct isert_cq_desc, cq_rx_work); 2077 struct isert_device *device = cq_desc->device; 2078 int cq_index = cq_desc->cq_index; 2079 struct ib_cq *rx_cq = device->dev_rx_cq[cq_index]; 2080 struct isert_conn *isert_conn; 2081 struct iser_rx_desc *rx_desc; 2082 struct ib_wc wc; 2083 unsigned long xfer_len; 2084 2085 while (ib_poll_cq(rx_cq, 1, &wc) == 1) { 2086 rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id; 2087 isert_conn = wc.qp->qp_context; 2088 2089 if (wc.status == IB_WC_SUCCESS) { 2090 xfer_len = (unsigned long)wc.byte_len; 2091 isert_rx_completion(rx_desc, isert_conn, xfer_len); 2092 } else { 2093 pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); 2094 if (wc.status != IB_WC_WR_FLUSH_ERR) { 2095 pr_debug("RX wc.status: 0x%08x\n", wc.status); 2096 pr_debug("RX wc.vendor_err: 0x%08x\n", 2097 wc.vendor_err); 2098 } 2099 isert_conn->post_recv_buf_count--; 2100 
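			/*
			 * isert_cq_rx_comp_err() bails out until
			 * post_recv_buf_count has drained to zero, so calling
			 * it for every flushed RX completion is safe; only the
			 * last one triggers the actual teardown.
			 */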
isert_cq_rx_comp_err(isert_conn); 2101 } 2102 } 2103 2104 ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP); 2105 } 2106 2107 static void 2108 isert_cq_rx_callback(struct ib_cq *cq, void *context) 2109 { 2110 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; 2111 2112 queue_work(isert_rx_wq, &cq_desc->cq_rx_work); 2113 } 2114 2115 static int 2116 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd) 2117 { 2118 struct ib_send_wr *wr_failed; 2119 int ret; 2120 2121 atomic_inc(&isert_conn->post_send_buf_count); 2122 2123 ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr, 2124 &wr_failed); 2125 if (ret) { 2126 pr_err("ib_post_send failed with %d\n", ret); 2127 atomic_dec(&isert_conn->post_send_buf_count); 2128 return ret; 2129 } 2130 return ret; 2131 } 2132 2133 static int 2134 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 2135 { 2136 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2137 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2138 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2139 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *) 2140 &isert_cmd->tx_desc.iscsi_header; 2141 2142 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2143 iscsit_build_rsp_pdu(cmd, conn, true, hdr); 2144 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2145 /* 2146 * Attach SENSE DATA payload to iSCSI Response PDU 2147 */ 2148 if (cmd->se_cmd.sense_buffer && 2149 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || 2150 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { 2151 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 2152 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 2153 u32 padding, pdu_len; 2154 2155 put_unaligned_be16(cmd->se_cmd.scsi_sense_length, 2156 cmd->sense_buffer); 2157 cmd->se_cmd.scsi_sense_length += sizeof(__be16); 2158 2159 padding = -(cmd->se_cmd.scsi_sense_length) & 3; 2160 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length); 2161 pdu_len = cmd->se_cmd.scsi_sense_length + padding; 2162 2163 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 2164 (void *)cmd->sense_buffer, pdu_len, 2165 DMA_TO_DEVICE); 2166 2167 isert_cmd->pdu_buf_len = pdu_len; 2168 tx_dsg->addr = isert_cmd->pdu_buf_dma; 2169 tx_dsg->length = pdu_len; 2170 tx_dsg->lkey = isert_conn->conn_mr->lkey; 2171 isert_cmd->tx_desc.num_sge = 2; 2172 } 2173 2174 isert_init_send_wr(isert_conn, isert_cmd, send_wr, true); 2175 2176 pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 2177 2178 return isert_post_response(isert_conn, isert_cmd); 2179 } 2180 2181 static void 2182 isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 2183 { 2184 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2185 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2186 struct isert_device *device = isert_conn->conn_device; 2187 2188 spin_lock_bh(&conn->cmd_lock); 2189 if (!list_empty(&cmd->i_conn_node)) 2190 list_del_init(&cmd->i_conn_node); 2191 spin_unlock_bh(&conn->cmd_lock); 2192 2193 if (cmd->data_direction == DMA_TO_DEVICE) 2194 iscsit_stop_dataout_timer(cmd); 2195 2196 device->unreg_rdma_mem(isert_cmd, isert_conn); 2197 } 2198 2199 static enum target_prot_op 2200 isert_get_sup_prot_ops(struct iscsi_conn *conn) 2201 { 2202 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2203 struct isert_device *device = isert_conn->conn_device; 2204 2205 if (device->pi_capable) 2206 return TARGET_PROT_ALL; 2207 2208 return 
TARGET_PROT_NORMAL; 2209 } 2210 2211 static int 2212 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 2213 bool nopout_response) 2214 { 2215 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2216 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2217 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2218 2219 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2220 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *) 2221 &isert_cmd->tx_desc.iscsi_header, 2222 nopout_response); 2223 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2224 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); 2225 2226 pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 2227 2228 return isert_post_response(isert_conn, isert_cmd); 2229 } 2230 2231 static int 2232 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2233 { 2234 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2235 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2236 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2237 2238 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2239 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *) 2240 &isert_cmd->tx_desc.iscsi_header); 2241 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2242 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); 2243 2244 pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 2245 2246 return isert_post_response(isert_conn, isert_cmd); 2247 } 2248 2249 static int 2250 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2251 { 2252 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2253 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2254 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2255 2256 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2257 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *) 2258 &isert_cmd->tx_desc.iscsi_header); 2259 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2260 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); 2261 2262 pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 2263 2264 return isert_post_response(isert_conn, isert_cmd); 2265 } 2266 2267 static int 2268 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2269 { 2270 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2271 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2272 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2273 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 2274 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 2275 struct iscsi_reject *hdr = 2276 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header; 2277 2278 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2279 iscsit_build_reject(cmd, conn, hdr); 2280 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2281 2282 hton24(hdr->dlength, ISCSI_HDR_LEN); 2283 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 2284 (void *)cmd->buf_ptr, ISCSI_HDR_LEN, 2285 DMA_TO_DEVICE); 2286 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN; 2287 tx_dsg->addr = isert_cmd->pdu_buf_dma; 2288 tx_dsg->length = ISCSI_HDR_LEN; 2289 tx_dsg->lkey = isert_conn->conn_mr->lkey; 2290 isert_cmd->tx_desc.num_sge = 2; 2291 2292 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); 2293 2294 pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 2295 2296 return 
isert_post_response(isert_conn, isert_cmd); 2297 } 2298 2299 static int 2300 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2301 { 2302 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2303 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2304 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 2305 struct iscsi_text_rsp *hdr = 2306 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; 2307 u32 txt_rsp_len; 2308 int rc; 2309 2310 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2311 rc = iscsit_build_text_rsp(cmd, conn, hdr); 2312 if (rc < 0) 2313 return rc; 2314 2315 txt_rsp_len = rc; 2316 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2317 2318 if (txt_rsp_len) { 2319 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 2320 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; 2321 void *txt_rsp_buf = cmd->buf_ptr; 2322 2323 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 2324 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE); 2325 2326 isert_cmd->pdu_buf_len = txt_rsp_len; 2327 tx_dsg->addr = isert_cmd->pdu_buf_dma; 2328 tx_dsg->length = txt_rsp_len; 2329 tx_dsg->lkey = isert_conn->conn_mr->lkey; 2330 isert_cmd->tx_desc.num_sge = 2; 2331 } 2332 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); 2333 2334 pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 2335 2336 return isert_post_response(isert_conn, isert_cmd); 2337 } 2338 2339 static int 2340 isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 2341 struct ib_sge *ib_sge, struct ib_send_wr *send_wr, 2342 u32 data_left, u32 offset) 2343 { 2344 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 2345 struct scatterlist *sg_start, *tmp_sg; 2346 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 2347 u32 sg_off, page_off; 2348 int i = 0, sg_nents; 2349 2350 sg_off = offset / PAGE_SIZE; 2351 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 2352 sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge); 2353 page_off = offset % PAGE_SIZE; 2354 2355 send_wr->sg_list = ib_sge; 2356 send_wr->num_sge = sg_nents; 2357 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; 2358 /* 2359 * Perform mapping of TCM scatterlist memory ib_sge dma_addr. 
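	 * For example, assuming 4K pages, offset = 13312 gives sg_off = 3 and
	 * page_off = 1024, so the first ib_sge below starts 1024 bytes into
	 * the fourth scatterlist entry and the remaining entries map from
	 * their start.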
2360 */ 2361 for_each_sg(sg_start, tmp_sg, sg_nents, i) { 2362 pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n", 2363 (unsigned long long)tmp_sg->dma_address, 2364 tmp_sg->length, page_off); 2365 2366 ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off; 2367 ib_sge->length = min_t(u32, data_left, 2368 ib_sg_dma_len(ib_dev, tmp_sg) - page_off); 2369 ib_sge->lkey = isert_conn->conn_mr->lkey; 2370 2371 pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n", 2372 ib_sge->addr, ib_sge->length, ib_sge->lkey); 2373 page_off = 0; 2374 data_left -= ib_sge->length; 2375 ib_sge++; 2376 pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge); 2377 } 2378 2379 pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n", 2380 send_wr->sg_list, send_wr->num_sge); 2381 2382 return sg_nents; 2383 } 2384 2385 static int 2386 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 2387 struct isert_rdma_wr *wr) 2388 { 2389 struct se_cmd *se_cmd = &cmd->se_cmd; 2390 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2391 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2392 struct isert_data_buf *data = &wr->data; 2393 struct ib_send_wr *send_wr; 2394 struct ib_sge *ib_sge; 2395 u32 offset, data_len, data_left, rdma_write_max, va_offset = 0; 2396 int ret = 0, i, ib_sge_cnt; 2397 2398 isert_cmd->tx_desc.isert_cmd = isert_cmd; 2399 2400 offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0; 2401 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg, 2402 se_cmd->t_data_nents, se_cmd->data_length, 2403 offset, wr->iser_ib_op, &wr->data); 2404 if (ret) 2405 return ret; 2406 2407 data_left = data->len; 2408 offset = data->offset; 2409 2410 ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL); 2411 if (!ib_sge) { 2412 pr_warn("Unable to allocate ib_sge\n"); 2413 ret = -ENOMEM; 2414 goto unmap_cmd; 2415 } 2416 wr->ib_sge = ib_sge; 2417 2418 wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge); 2419 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num, 2420 GFP_KERNEL); 2421 if (!wr->send_wr) { 2422 pr_debug("Unable to allocate wr->send_wr\n"); 2423 ret = -ENOMEM; 2424 goto unmap_cmd; 2425 } 2426 2427 wr->isert_cmd = isert_cmd; 2428 rdma_write_max = isert_conn->max_sge * PAGE_SIZE; 2429 2430 for (i = 0; i < wr->send_wr_num; i++) { 2431 send_wr = &isert_cmd->rdma_wr.send_wr[i]; 2432 data_len = min(data_left, rdma_write_max); 2433 2434 send_wr->send_flags = 0; 2435 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { 2436 send_wr->opcode = IB_WR_RDMA_WRITE; 2437 send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset; 2438 send_wr->wr.rdma.rkey = isert_cmd->read_stag; 2439 if (i + 1 == wr->send_wr_num) 2440 send_wr->next = &isert_cmd->tx_desc.send_wr; 2441 else 2442 send_wr->next = &wr->send_wr[i + 1]; 2443 } else { 2444 send_wr->opcode = IB_WR_RDMA_READ; 2445 send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset; 2446 send_wr->wr.rdma.rkey = isert_cmd->write_stag; 2447 if (i + 1 == wr->send_wr_num) 2448 send_wr->send_flags = IB_SEND_SIGNALED; 2449 else 2450 send_wr->next = &wr->send_wr[i + 1]; 2451 } 2452 2453 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge, 2454 send_wr, data_len, offset); 2455 ib_sge += ib_sge_cnt; 2456 2457 offset += data_len; 2458 va_offset += data_len; 2459 data_left -= data_len; 2460 } 2461 2462 return 0; 2463 unmap_cmd: 2464 isert_unmap_data_buf(isert_conn, data); 2465 2466 return ret; 2467 } 2468 2469 static int 2470 
isert_map_fr_pagelist(struct ib_device *ib_dev, 2471 struct scatterlist *sg_start, int sg_nents, u64 *fr_pl) 2472 { 2473 u64 start_addr, end_addr, page, chunk_start = 0; 2474 struct scatterlist *tmp_sg; 2475 int i = 0, new_chunk, last_ent, n_pages; 2476 2477 n_pages = 0; 2478 new_chunk = 1; 2479 last_ent = sg_nents - 1; 2480 for_each_sg(sg_start, tmp_sg, sg_nents, i) { 2481 start_addr = ib_sg_dma_address(ib_dev, tmp_sg); 2482 if (new_chunk) 2483 chunk_start = start_addr; 2484 end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg); 2485 2486 pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n", 2487 i, (unsigned long long)tmp_sg->dma_address, 2488 tmp_sg->length); 2489 2490 if ((end_addr & ~PAGE_MASK) && i < last_ent) { 2491 new_chunk = 0; 2492 continue; 2493 } 2494 new_chunk = 1; 2495 2496 page = chunk_start & PAGE_MASK; 2497 do { 2498 fr_pl[n_pages++] = page; 2499 pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n", 2500 n_pages - 1, page); 2501 page += PAGE_SIZE; 2502 } while (page < end_addr); 2503 } 2504 2505 return n_pages; 2506 } 2507 2508 static int 2509 isert_fast_reg_mr(struct isert_conn *isert_conn, 2510 struct fast_reg_descriptor *fr_desc, 2511 struct isert_data_buf *mem, 2512 enum isert_indicator ind, 2513 struct ib_sge *sge) 2514 { 2515 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 2516 struct ib_mr *mr; 2517 struct ib_fast_reg_page_list *frpl; 2518 struct ib_send_wr fr_wr, inv_wr; 2519 struct ib_send_wr *bad_wr, *wr = NULL; 2520 int ret, pagelist_len; 2521 u32 page_off; 2522 u8 key; 2523 2524 if (mem->dma_nents == 1) { 2525 sge->lkey = isert_conn->conn_mr->lkey; 2526 sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]); 2527 sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]); 2528 pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n", 2529 __func__, __LINE__, sge->addr, sge->length, 2530 sge->lkey); 2531 return 0; 2532 } 2533 2534 if (ind == ISERT_DATA_KEY_VALID) { 2535 /* Registering data buffer */ 2536 mr = fr_desc->data_mr; 2537 frpl = fr_desc->data_frpl; 2538 } else { 2539 /* Registering protection buffer */ 2540 mr = fr_desc->pi_ctx->prot_mr; 2541 frpl = fr_desc->pi_ctx->prot_frpl; 2542 } 2543 2544 page_off = mem->offset % PAGE_SIZE; 2545 2546 pr_debug("Use fr_desc %p sg_nents %d offset %u\n", 2547 fr_desc, mem->nents, mem->offset); 2548 2549 pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents, 2550 &frpl->page_list[0]); 2551 2552 if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) { 2553 memset(&inv_wr, 0, sizeof(inv_wr)); 2554 inv_wr.wr_id = ISER_FASTREG_LI_WRID; 2555 inv_wr.opcode = IB_WR_LOCAL_INV; 2556 inv_wr.ex.invalidate_rkey = mr->rkey; 2557 wr = &inv_wr; 2558 /* Bump the key */ 2559 key = (u8)(mr->rkey & 0x000000FF); 2560 ib_update_fast_reg_key(mr, ++key); 2561 } 2562 2563 /* Prepare FASTREG WR */ 2564 memset(&fr_wr, 0, sizeof(fr_wr)); 2565 fr_wr.wr_id = ISER_FASTREG_LI_WRID; 2566 fr_wr.opcode = IB_WR_FAST_REG_MR; 2567 fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off; 2568 fr_wr.wr.fast_reg.page_list = frpl; 2569 fr_wr.wr.fast_reg.page_list_len = pagelist_len; 2570 fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT; 2571 fr_wr.wr.fast_reg.length = mem->len; 2572 fr_wr.wr.fast_reg.rkey = mr->rkey; 2573 fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE; 2574 2575 if (!wr) 2576 wr = &fr_wr; 2577 else 2578 wr->next = &fr_wr; 2579 2580 ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr); 2581 if (ret) { 2582 pr_err("fast registration failed, ret:%d\n", ret); 2583 return ret; 2584 } 2585 fr_desc->ind &= ~ind; 2586 2587 sge->lkey = mr->lkey; 2588 
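	/*
	 * The fast registration above exposes the scatterlist as one
	 * virtually contiguous region, so a single sge starting at the first
	 * page-list entry plus the intra-page offset covers the whole buffer.
	 */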
sge->addr = frpl->page_list[0] + page_off; 2589 sge->length = mem->len; 2590 2591 pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n", 2592 __func__, __LINE__, sge->addr, sge->length, 2593 sge->lkey); 2594 2595 return ret; 2596 } 2597 2598 static inline enum ib_t10_dif_type 2599 se2ib_prot_type(enum target_prot_type prot_type) 2600 { 2601 switch (prot_type) { 2602 case TARGET_DIF_TYPE0_PROT: 2603 return IB_T10DIF_NONE; 2604 case TARGET_DIF_TYPE1_PROT: 2605 return IB_T10DIF_TYPE1; 2606 case TARGET_DIF_TYPE2_PROT: 2607 return IB_T10DIF_TYPE2; 2608 case TARGET_DIF_TYPE3_PROT: 2609 return IB_T10DIF_TYPE3; 2610 default: 2611 return IB_T10DIF_NONE; 2612 } 2613 } 2614 2615 static int 2616 isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs) 2617 { 2618 enum ib_t10_dif_type ib_prot_type = se2ib_prot_type(se_cmd->prot_type); 2619 2620 sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF; 2621 sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF; 2622 sig_attrs->mem.sig.dif.pi_interval = 2623 se_cmd->se_dev->dev_attrib.block_size; 2624 sig_attrs->wire.sig.dif.pi_interval = 2625 se_cmd->se_dev->dev_attrib.block_size; 2626 2627 switch (se_cmd->prot_op) { 2628 case TARGET_PROT_DIN_INSERT: 2629 case TARGET_PROT_DOUT_STRIP: 2630 sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE; 2631 sig_attrs->wire.sig.dif.type = ib_prot_type; 2632 sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC; 2633 sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed; 2634 break; 2635 case TARGET_PROT_DOUT_INSERT: 2636 case TARGET_PROT_DIN_STRIP: 2637 sig_attrs->mem.sig.dif.type = ib_prot_type; 2638 sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC; 2639 sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed; 2640 sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE; 2641 break; 2642 case TARGET_PROT_DIN_PASS: 2643 case TARGET_PROT_DOUT_PASS: 2644 sig_attrs->mem.sig.dif.type = ib_prot_type; 2645 sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC; 2646 sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed; 2647 sig_attrs->wire.sig.dif.type = ib_prot_type; 2648 sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC; 2649 sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed; 2650 break; 2651 default: 2652 pr_err("Unsupported PI operation %d\n", se_cmd->prot_op); 2653 return -EINVAL; 2654 } 2655 2656 return 0; 2657 } 2658 2659 static inline u8 2660 isert_set_prot_checks(u8 prot_checks) 2661 { 2662 return (prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) | 2663 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) | 2664 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 
0x0f : 0);
}

static int
isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
                 struct fast_reg_descriptor *fr_desc,
                 struct ib_sge *data_sge, struct ib_sge *prot_sge,
                 struct ib_sge *sig_sge)
{
        struct ib_send_wr sig_wr, inv_wr;
        struct ib_send_wr *bad_wr, *wr = NULL;
        struct pi_context *pi_ctx = fr_desc->pi_ctx;
        struct ib_sig_attrs sig_attrs;
        int ret;
        u32 key;

        memset(&sig_attrs, 0, sizeof(sig_attrs));
        ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
        if (ret)
                goto err;

        sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);

        if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
                memset(&inv_wr, 0, sizeof(inv_wr));
                inv_wr.opcode = IB_WR_LOCAL_INV;
                inv_wr.wr_id = ISER_FASTREG_LI_WRID;
                inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
                wr = &inv_wr;
                /* Bump the key */
                key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
                ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
        }

        memset(&sig_wr, 0, sizeof(sig_wr));
        sig_wr.opcode = IB_WR_REG_SIG_MR;
        sig_wr.wr_id = ISER_FASTREG_LI_WRID;
        sig_wr.sg_list = data_sge;
        sig_wr.num_sge = 1;
        sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
        sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
        sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
        if (se_cmd->t_prot_sg)
                sig_wr.wr.sig_handover.prot = prot_sge;

        if (!wr)
                wr = &sig_wr;
        else
                wr->next = &sig_wr;

        ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
        if (ret) {
                pr_err("fast registration failed, ret:%d\n", ret);
                goto err;
        }
        fr_desc->ind &= ~ISERT_SIG_KEY_VALID;

        sig_sge->lkey = pi_ctx->sig_mr->lkey;
        sig_sge->addr = 0;
        sig_sge->length = se_cmd->data_length;
        if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
            se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
                /*
                 * We have protection guards on the wire,
                 * so we need to set a larger transfer length.
                 */
                sig_sge->length += se_cmd->prot_length;

        pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
                 sig_sge->addr, sig_sge->length,
                 sig_sge->lkey);
err:
        return ret;
}

static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
               struct isert_rdma_wr *wr)
{
        struct se_cmd *se_cmd = &cmd->se_cmd;
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
        struct isert_conn *isert_conn = conn->context;
        struct ib_sge data_sge;
        struct ib_send_wr *send_wr;
        struct fast_reg_descriptor *fr_desc = NULL;
        u32 offset;
        int ret = 0;
        unsigned long flags;

        isert_cmd->tx_desc.isert_cmd = isert_cmd;

        offset = wr->iser_ib_op == ISER_IB_RDMA_READ ?
cmd->write_data_done : 0; 2756 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg, 2757 se_cmd->t_data_nents, se_cmd->data_length, 2758 offset, wr->iser_ib_op, &wr->data); 2759 if (ret) 2760 return ret; 2761 2762 if (wr->data.dma_nents != 1 || 2763 se_cmd->prot_op != TARGET_PROT_NORMAL) { 2764 spin_lock_irqsave(&isert_conn->conn_lock, flags); 2765 fr_desc = list_first_entry(&isert_conn->conn_fr_pool, 2766 struct fast_reg_descriptor, list); 2767 list_del(&fr_desc->list); 2768 spin_unlock_irqrestore(&isert_conn->conn_lock, flags); 2769 wr->fr_desc = fr_desc; 2770 } 2771 2772 ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data, 2773 ISERT_DATA_KEY_VALID, &data_sge); 2774 if (ret) 2775 goto unmap_cmd; 2776 2777 if (se_cmd->prot_op != TARGET_PROT_NORMAL) { 2778 struct ib_sge prot_sge, sig_sge; 2779 2780 if (se_cmd->t_prot_sg) { 2781 ret = isert_map_data_buf(isert_conn, isert_cmd, 2782 se_cmd->t_prot_sg, 2783 se_cmd->t_prot_nents, 2784 se_cmd->prot_length, 2785 0, wr->iser_ib_op, &wr->prot); 2786 if (ret) 2787 goto unmap_cmd; 2788 2789 ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot, 2790 ISERT_PROT_KEY_VALID, &prot_sge); 2791 if (ret) 2792 goto unmap_prot_cmd; 2793 } 2794 2795 ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc, 2796 &data_sge, &prot_sge, &sig_sge); 2797 if (ret) 2798 goto unmap_prot_cmd; 2799 2800 fr_desc->ind |= ISERT_PROTECTED; 2801 memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge)); 2802 } else 2803 memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge)); 2804 2805 wr->ib_sge = &wr->s_ib_sge; 2806 wr->send_wr_num = 1; 2807 memset(&wr->s_send_wr, 0, sizeof(*send_wr)); 2808 wr->send_wr = &wr->s_send_wr; 2809 wr->isert_cmd = isert_cmd; 2810 2811 send_wr = &isert_cmd->rdma_wr.s_send_wr; 2812 send_wr->sg_list = &wr->s_ib_sge; 2813 send_wr->num_sge = 1; 2814 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; 2815 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { 2816 send_wr->opcode = IB_WR_RDMA_WRITE; 2817 send_wr->wr.rdma.remote_addr = isert_cmd->read_va; 2818 send_wr->wr.rdma.rkey = isert_cmd->read_stag; 2819 send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ? 
2820 0 : IB_SEND_SIGNALED; 2821 } else { 2822 send_wr->opcode = IB_WR_RDMA_READ; 2823 send_wr->wr.rdma.remote_addr = isert_cmd->write_va; 2824 send_wr->wr.rdma.rkey = isert_cmd->write_stag; 2825 send_wr->send_flags = IB_SEND_SIGNALED; 2826 } 2827 2828 return 0; 2829 unmap_prot_cmd: 2830 if (se_cmd->t_prot_sg) 2831 isert_unmap_data_buf(isert_conn, &wr->prot); 2832 unmap_cmd: 2833 if (fr_desc) { 2834 spin_lock_irqsave(&isert_conn->conn_lock, flags); 2835 list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool); 2836 spin_unlock_irqrestore(&isert_conn->conn_lock, flags); 2837 } 2838 isert_unmap_data_buf(isert_conn, &wr->data); 2839 2840 return ret; 2841 } 2842 2843 static int 2844 isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 2845 { 2846 struct se_cmd *se_cmd = &cmd->se_cmd; 2847 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2848 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 2849 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2850 struct isert_device *device = isert_conn->conn_device; 2851 struct ib_send_wr *wr_failed; 2852 int rc; 2853 2854 pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n", 2855 isert_cmd, se_cmd->data_length); 2856 wr->iser_ib_op = ISER_IB_RDMA_WRITE; 2857 rc = device->reg_rdma_mem(conn, cmd, wr); 2858 if (rc) { 2859 pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); 2860 return rc; 2861 } 2862 2863 if (se_cmd->prot_op == TARGET_PROT_NORMAL) { 2864 /* 2865 * Build isert_conn->tx_desc for iSCSI response PDU and attach 2866 */ 2867 isert_create_send_desc(isert_conn, isert_cmd, 2868 &isert_cmd->tx_desc); 2869 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *) 2870 &isert_cmd->tx_desc.iscsi_header); 2871 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2872 isert_init_send_wr(isert_conn, isert_cmd, 2873 &isert_cmd->tx_desc.send_wr, true); 2874 isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; 2875 wr->send_wr_num += 1; 2876 } 2877 2878 atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count); 2879 2880 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); 2881 if (rc) { 2882 pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); 2883 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); 2884 } 2885 2886 if (se_cmd->prot_op == TARGET_PROT_NORMAL) 2887 pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data " 2888 "READ\n", isert_cmd); 2889 else 2890 pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", 2891 isert_cmd); 2892 2893 return 1; 2894 } 2895 2896 static int 2897 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) 2898 { 2899 struct se_cmd *se_cmd = &cmd->se_cmd; 2900 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2901 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 2902 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2903 struct isert_device *device = isert_conn->conn_device; 2904 struct ib_send_wr *wr_failed; 2905 int rc; 2906 2907 pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", 2908 isert_cmd, se_cmd->data_length, cmd->write_data_done); 2909 wr->iser_ib_op = ISER_IB_RDMA_READ; 2910 rc = device->reg_rdma_mem(conn, cmd, wr); 2911 if (rc) { 2912 pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); 2913 return rc; 2914 } 2915 2916 atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count); 2917 2918 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); 2919 if (rc) { 2920 pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); 2921 atomic_sub(wr->send_wr_num, 
&isert_conn->post_send_buf_count);
        }
        pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
                 isert_cmd);

        return 0;
}

static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
        int ret;

        switch (state) {
        case ISTATE_SEND_NOPIN_WANT_RESPONSE:
                ret = isert_put_nopin(cmd, conn, false);
                break;
        default:
                pr_err("Unknown immediate state: 0x%02x\n", state);
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
        int ret;

        switch (state) {
        case ISTATE_SEND_LOGOUTRSP:
                ret = isert_put_logout_rsp(cmd, conn);
                if (!ret) {
                        pr_debug("Returning iSER Logout -EAGAIN\n");
                        ret = -EAGAIN;
                }
                break;
        case ISTATE_SEND_NOPIN:
                ret = isert_put_nopin(cmd, conn, true);
                break;
        case ISTATE_SEND_TASKMGTRSP:
                ret = isert_put_tm_rsp(cmd, conn);
                break;
        case ISTATE_SEND_REJECT:
                ret = isert_put_reject(cmd, conn);
                break;
        case ISTATE_SEND_TEXTRSP:
                ret = isert_put_text_rsp(cmd, conn);
                break;
        case ISTATE_SEND_STATUS:
                /*
                 * Special case for sending non-GOOD SCSI status from TX thread
                 * context during pre se_cmd execution failure.
                 */
                ret = isert_put_response(conn, cmd);
                break;
        default:
                pr_err("Unknown response state: 0x%02x\n", state);
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int
isert_setup_np(struct iscsi_np *np,
               struct __kernel_sockaddr_storage *ksockaddr)
{
        struct isert_np *isert_np;
        struct rdma_cm_id *isert_lid;
        struct sockaddr *sa;
        int ret;

        isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
        if (!isert_np) {
                pr_err("Unable to allocate struct isert_np\n");
                return -ENOMEM;
        }
        init_waitqueue_head(&isert_np->np_accept_wq);
        mutex_init(&isert_np->np_accept_mutex);
        INIT_LIST_HEAD(&isert_np->np_accept_list);
        init_completion(&isert_np->np_login_comp);

        sa = (struct sockaddr *)ksockaddr;
        pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
        /*
         * Set up np->np_sockaddr from the sockaddr passed in from the
         * iscsi_target_configfs.c code.
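         * The listener is then brought up with the usual RDMA CM sequence
         * below: rdma_create_id() -> rdma_bind_addr() -> rdma_listen().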
3012 */ 3013 memcpy(&np->np_sockaddr, ksockaddr, 3014 sizeof(struct __kernel_sockaddr_storage)); 3015 3016 isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP, 3017 IB_QPT_RC); 3018 if (IS_ERR(isert_lid)) { 3019 pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n", 3020 PTR_ERR(isert_lid)); 3021 ret = PTR_ERR(isert_lid); 3022 goto out; 3023 } 3024 3025 ret = rdma_bind_addr(isert_lid, sa); 3026 if (ret) { 3027 pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret); 3028 goto out_lid; 3029 } 3030 3031 ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG); 3032 if (ret) { 3033 pr_err("rdma_listen() for isert_lid failed: %d\n", ret); 3034 goto out_lid; 3035 } 3036 3037 isert_np->np_cm_id = isert_lid; 3038 np->np_context = isert_np; 3039 pr_debug("Setup isert_lid->context: %p\n", isert_lid->context); 3040 3041 return 0; 3042 3043 out_lid: 3044 rdma_destroy_id(isert_lid); 3045 out: 3046 kfree(isert_np); 3047 return ret; 3048 } 3049 3050 static int 3051 isert_check_accept_queue(struct isert_np *isert_np) 3052 { 3053 int empty; 3054 3055 mutex_lock(&isert_np->np_accept_mutex); 3056 empty = list_empty(&isert_np->np_accept_list); 3057 mutex_unlock(&isert_np->np_accept_mutex); 3058 3059 return empty; 3060 } 3061 3062 static int 3063 isert_rdma_accept(struct isert_conn *isert_conn) 3064 { 3065 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id; 3066 struct rdma_conn_param cp; 3067 int ret; 3068 3069 memset(&cp, 0, sizeof(struct rdma_conn_param)); 3070 cp.responder_resources = isert_conn->responder_resources; 3071 cp.initiator_depth = isert_conn->initiator_depth; 3072 cp.retry_count = 7; 3073 cp.rnr_retry_count = 7; 3074 3075 pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n"); 3076 3077 ret = rdma_accept(cm_id, &cp); 3078 if (ret) { 3079 pr_err("rdma_accept() failed with: %d\n", ret); 3080 return ret; 3081 } 3082 3083 pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n"); 3084 3085 return 0; 3086 } 3087 3088 static int 3089 isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) 3090 { 3091 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 3092 int ret; 3093 3094 pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn); 3095 /* 3096 * For login requests after the first PDU, isert_rx_login_req() will 3097 * kick schedule_delayed_work(&conn->login_work) as the packet is 3098 * received, which turns this callback from iscsi_target_do_login_rx() 3099 * into a NOP. 
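	 * Only the first login request actually blocks here, waiting on
	 * conn_login_comp, which is presumably completed from the RX path
	 * once the initial login PDU has arrived.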
3100 */ 3101 if (!login->first_request) 3102 return 0; 3103 3104 ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp); 3105 if (ret) 3106 return ret; 3107 3108 pr_debug("isert_get_login_rx processing login->req: %p\n", login->req); 3109 return 0; 3110 } 3111 3112 static void 3113 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, 3114 struct isert_conn *isert_conn) 3115 { 3116 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id; 3117 struct rdma_route *cm_route = &cm_id->route; 3118 struct sockaddr_in *sock_in; 3119 struct sockaddr_in6 *sock_in6; 3120 3121 conn->login_family = np->np_sockaddr.ss_family; 3122 3123 if (np->np_sockaddr.ss_family == AF_INET6) { 3124 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr; 3125 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c", 3126 &sock_in6->sin6_addr.in6_u); 3127 conn->login_port = ntohs(sock_in6->sin6_port); 3128 3129 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr; 3130 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c", 3131 &sock_in6->sin6_addr.in6_u); 3132 conn->local_port = ntohs(sock_in6->sin6_port); 3133 } else { 3134 sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr; 3135 sprintf(conn->login_ip, "%pI4", 3136 &sock_in->sin_addr.s_addr); 3137 conn->login_port = ntohs(sock_in->sin_port); 3138 3139 sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr; 3140 sprintf(conn->local_ip, "%pI4", 3141 &sock_in->sin_addr.s_addr); 3142 conn->local_port = ntohs(sock_in->sin_port); 3143 } 3144 } 3145 3146 static int 3147 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) 3148 { 3149 struct isert_np *isert_np = (struct isert_np *)np->np_context; 3150 struct isert_conn *isert_conn; 3151 int max_accept = 0, ret; 3152 3153 accept_wait: 3154 ret = wait_event_interruptible(isert_np->np_accept_wq, 3155 !isert_check_accept_queue(isert_np) || 3156 np->np_thread_state == ISCSI_NP_THREAD_RESET); 3157 if (max_accept > 5) 3158 return -ENODEV; 3159 3160 spin_lock_bh(&np->np_thread_lock); 3161 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { 3162 spin_unlock_bh(&np->np_thread_lock); 3163 pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n"); 3164 return -ENODEV; 3165 } 3166 spin_unlock_bh(&np->np_thread_lock); 3167 3168 mutex_lock(&isert_np->np_accept_mutex); 3169 if (list_empty(&isert_np->np_accept_list)) { 3170 mutex_unlock(&isert_np->np_accept_mutex); 3171 max_accept++; 3172 goto accept_wait; 3173 } 3174 isert_conn = list_first_entry(&isert_np->np_accept_list, 3175 struct isert_conn, conn_accept_node); 3176 list_del_init(&isert_conn->conn_accept_node); 3177 mutex_unlock(&isert_np->np_accept_mutex); 3178 3179 conn->context = isert_conn; 3180 isert_conn->conn = conn; 3181 max_accept = 0; 3182 3183 ret = isert_rdma_post_recvl(isert_conn); 3184 if (ret) 3185 return ret; 3186 3187 ret = isert_rdma_accept(isert_conn); 3188 if (ret) 3189 return ret; 3190 3191 isert_set_conn_info(np, conn, isert_conn); 3192 3193 pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn); 3194 return 0; 3195 } 3196 3197 static void 3198 isert_free_np(struct iscsi_np *np) 3199 { 3200 struct isert_np *isert_np = (struct isert_np *)np->np_context; 3201 3202 rdma_destroy_id(isert_np->np_cm_id); 3203 3204 np->np_context = NULL; 3205 kfree(isert_np); 3206 } 3207 3208 static void isert_wait_conn(struct iscsi_conn *conn) 3209 { 3210 struct isert_conn *isert_conn = conn->context; 3211 3212 pr_debug("isert_wait_conn: Starting \n"); 3213 /* 3214 * Decrement post_send_buf_count for special case when called 
3215 * from isert_do_control_comp() -> iscsit_logout_post_handler() 3216 */ 3217 mutex_lock(&isert_conn->conn_mutex); 3218 if (isert_conn->logout_posted) 3219 atomic_dec(&isert_conn->post_send_buf_count); 3220 3221 if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) { 3222 pr_debug("Calling rdma_disconnect from isert_wait_conn\n"); 3223 rdma_disconnect(isert_conn->conn_cm_id); 3224 } 3225 /* 3226 * Only wait for conn_wait_comp_err if the isert_conn made it 3227 * into full feature phase.. 3228 */ 3229 if (isert_conn->state == ISER_CONN_INIT) { 3230 mutex_unlock(&isert_conn->conn_mutex); 3231 return; 3232 } 3233 if (isert_conn->state == ISER_CONN_UP) 3234 isert_conn->state = ISER_CONN_TERMINATING; 3235 mutex_unlock(&isert_conn->conn_mutex); 3236 3237 wait_for_completion(&isert_conn->conn_wait_comp_err); 3238 3239 wait_for_completion(&isert_conn->conn_wait); 3240 } 3241 3242 static void isert_free_conn(struct iscsi_conn *conn) 3243 { 3244 struct isert_conn *isert_conn = conn->context; 3245 3246 isert_put_conn(isert_conn); 3247 } 3248 3249 static struct iscsit_transport iser_target_transport = { 3250 .name = "IB/iSER", 3251 .transport_type = ISCSI_INFINIBAND, 3252 .priv_size = sizeof(struct isert_cmd), 3253 .owner = THIS_MODULE, 3254 .iscsit_setup_np = isert_setup_np, 3255 .iscsit_accept_np = isert_accept_np, 3256 .iscsit_free_np = isert_free_np, 3257 .iscsit_wait_conn = isert_wait_conn, 3258 .iscsit_free_conn = isert_free_conn, 3259 .iscsit_get_login_rx = isert_get_login_rx, 3260 .iscsit_put_login_tx = isert_put_login_tx, 3261 .iscsit_immediate_queue = isert_immediate_queue, 3262 .iscsit_response_queue = isert_response_queue, 3263 .iscsit_get_dataout = isert_get_dataout, 3264 .iscsit_queue_data_in = isert_put_datain, 3265 .iscsit_queue_status = isert_put_response, 3266 .iscsit_aborted_task = isert_aborted_task, 3267 .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops, 3268 }; 3269 3270 static int __init isert_init(void) 3271 { 3272 int ret; 3273 3274 isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0); 3275 if (!isert_rx_wq) { 3276 pr_err("Unable to allocate isert_rx_wq\n"); 3277 return -ENOMEM; 3278 } 3279 3280 isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0); 3281 if (!isert_comp_wq) { 3282 pr_err("Unable to allocate isert_comp_wq\n"); 3283 ret = -ENOMEM; 3284 goto destroy_rx_wq; 3285 } 3286 3287 iscsit_register_transport(&iser_target_transport); 3288 pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n"); 3289 return 0; 3290 3291 destroy_rx_wq: 3292 destroy_workqueue(isert_rx_wq); 3293 return ret; 3294 } 3295 3296 static void __exit isert_exit(void) 3297 { 3298 destroy_workqueue(isert_comp_wq); 3299 destroy_workqueue(isert_rx_wq); 3300 iscsit_unregister_transport(&iser_target_transport); 3301 pr_debug("iSER_TARGET[0] - Released iser_target_transport\n"); 3302 } 3303 3304 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); 3305 MODULE_VERSION("0.1"); 3306 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 3307 MODULE_LICENSE("GPL"); 3308 3309 module_init(isert_init); 3310 module_exit(isert_exit); 3311