/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "iscsi_iser.h"

/* Constant PDU lengths calculations */
#define ISER_TOTAL_HEADERS_LEN  (sizeof (struct iser_hdr) + \
                                 sizeof (struct iscsi_hdr))

/* iser_dto_add_regd_buff - increments the reference count for
 * the registered buffer & adds it to the DTO object */
static void iser_dto_add_regd_buff(struct iser_dto *dto,
                                   struct iser_regd_buf *regd_buf,
                                   unsigned long use_offset,
                                   unsigned long use_size)
{
        int add_idx;

        atomic_inc(&regd_buf->ref_count);

        add_idx = dto->regd_vector_len;
        dto->regd[add_idx] = regd_buf;
        dto->used_sz[add_idx] = use_size;
        dto->offset[add_idx] = use_offset;

        dto->regd_vector_len++;
}
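
/*
 * Note on the READ data path: for a SCSI READ the initiator advertises
 * a registered buffer to the target in the iSER header (ISER_RSV marks
 * the read STag/VA fields as valid), and the target RDMA-Writes the
 * data into that buffer before returning the SCSI response.
 * iser_prepare_read_cmd() below performs the DMA mapping and
 * registration and fills in those header fields.
 */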

/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Total data size is stored in
 * iser_task->data[ISER_DIR_IN].data_len
 */
static int iser_prepare_read_cmd(struct iscsi_task *task,
                                 unsigned int edtl)
{
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_regd_buf *regd_buf;
        int err;
        struct iser_hdr *hdr = &iser_task->desc.iser_header;
        struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];

        err = iser_dma_map_task_data(iser_task,
                                     buf_in,
                                     ISER_DIR_IN,
                                     DMA_FROM_DEVICE);
        if (err)
                return err;

        if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
                iser_err("Total data length: %ld, less than EDTL: "
                         "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
                         iser_task->data[ISER_DIR_IN].data_len, edtl,
                         task->itt, iser_task->iser_conn);
                return -EINVAL;
        }

        err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
        if (err) {
                iser_err("Failed to set up Data-IN RDMA\n");
                return err;
        }
        regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];

        hdr->flags    |= ISER_RSV;
        hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
        hdr->read_va   = cpu_to_be64(regd_buf->reg.va);

        iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
                 task->itt, regd_buf->reg.rkey,
                 (unsigned long long)regd_buf->reg.va);

        return 0;
}

/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Total data size is stored in
 * task->data[ISER_DIR_OUT].data_len
 */
static int
iser_prepare_write_cmd(struct iscsi_task *task,
                       unsigned int imm_sz,
                       unsigned int unsol_sz,
                       unsigned int edtl)
{
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_regd_buf *regd_buf;
        int err;
        struct iser_dto *send_dto = &iser_task->desc.dto;
        struct iser_hdr *hdr = &iser_task->desc.iser_header;
        struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];

        err = iser_dma_map_task_data(iser_task,
                                     buf_out,
                                     ISER_DIR_OUT,
                                     DMA_TO_DEVICE);
        if (err)
                return err;

        if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
                iser_err("Total data length: %ld, less than EDTL: %d, "
                         "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
                         iser_task->data[ISER_DIR_OUT].data_len,
                         edtl, task->itt, task->conn);
                return -EINVAL;
        }

        err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
        if (err != 0) {
                iser_err("Failed to register write cmd RDMA mem\n");
                return err;
        }

        regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];

        if (unsol_sz < edtl) {
                hdr->flags     |= ISER_WSV;
                hdr->write_stag = cpu_to_be32(regd_buf->reg.rkey);
                hdr->write_va   = cpu_to_be64(regd_buf->reg.va + unsol_sz);

                iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
                         "VA:%#llX + unsol:%d\n",
                         task->itt, regd_buf->reg.rkey,
                         (unsigned long long)regd_buf->reg.va, unsol_sz);
        }

        if (imm_sz > 0) {
                iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
                         task->itt, imm_sz);
                iser_dto_add_regd_buff(send_dto,
                                       regd_buf,
                                       0,
                                       imm_sz);
        }

        return 0;
}
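
/*
 * Receive buffer accounting: a receive is posted for every outgoing PDU
 * that expects a response. NOP-In with a reserved ITT and Async Event
 * PDUs arrive unsolicited and consume posted receives without a matching
 * send, so they are counted in unexpected_pdu_count and replaced on the
 * next post (see the loop below and iser_rcv_completion()).
 */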

/**
 * iser_post_receive_control - allocates, initializes and posts receive DTO.
 */
static int iser_post_receive_control(struct iscsi_conn *conn)
{
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
        struct iser_desc *rx_desc;
        struct iser_regd_buf *regd_hdr;
        struct iser_regd_buf *regd_data;
        struct iser_dto *recv_dto = NULL;
        struct iser_device *device = iser_conn->ib_conn->device;
        int rx_data_size, err;
        int posts, outstanding_unexp_pdus;

        /* for the login sequence we must support rx of up to 8K; login is
         * done after conn create/bind (connect) and conn stop/bind
         * (reconnect), what's common to both schemes is that the connection
         * is not started
         */
        if (conn->c_stage != ISCSI_CONN_STARTED)
                rx_data_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
        else /* FIXME till user space sets conn->max_recv_dlength correctly */
                rx_data_size = 128;

        outstanding_unexp_pdus =
                atomic_xchg(&iser_conn->ib_conn->unexpected_pdu_count, 0);

        /*
         * in addition to the response buffer, replace those consumed by
         * unexpected pdus.
         */
        for (posts = 0; posts < 1 + outstanding_unexp_pdus; posts++) {
                rx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO);
                if (rx_desc == NULL) {
                        iser_err("Failed to alloc desc for post recv %d\n",
                                 posts);
                        err = -ENOMEM;
                        goto post_rx_cache_alloc_failure;
                }
                rx_desc->type = ISCSI_RX;
                rx_desc->data = kmalloc(rx_data_size, GFP_NOIO);
                if (rx_desc->data == NULL) {
                        iser_err("Failed to alloc data buf for post recv %d\n",
                                 posts);
                        err = -ENOMEM;
                        goto post_rx_kmalloc_failure;
                }

                recv_dto = &rx_desc->dto;
                recv_dto->ib_conn = iser_conn->ib_conn;
                recv_dto->regd_vector_len = 0;

                regd_hdr = &rx_desc->hdr_regd_buf;
                memset(regd_hdr, 0, sizeof(struct iser_regd_buf));
                regd_hdr->device = device;
                regd_hdr->virt_addr = rx_desc; /* == &rx_desc->iser_header */
                regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN;

                iser_reg_single(device, regd_hdr, DMA_FROM_DEVICE);

                iser_dto_add_regd_buff(recv_dto, regd_hdr, 0, 0);

                regd_data = &rx_desc->data_regd_buf;
                memset(regd_data, 0, sizeof(struct iser_regd_buf));
                regd_data->device = device;
                regd_data->virt_addr = rx_desc->data;
                regd_data->data_size = rx_data_size;

                iser_reg_single(device, regd_data, DMA_FROM_DEVICE);

                iser_dto_add_regd_buff(recv_dto, regd_data, 0, 0);

                err = iser_post_recv(rx_desc);
                if (err) {
                        iser_err("Failed iser_post_recv for post %d\n", posts);
                        goto post_rx_post_recv_failure;
                }
        }
        /* all posts successful */
        return 0;

post_rx_post_recv_failure:
        iser_dto_buffs_release(recv_dto);
        kfree(rx_desc->data);
post_rx_kmalloc_failure:
        kmem_cache_free(ig.desc_cache, rx_desc);
post_rx_cache_alloc_failure:
        if (posts > 0) {
                /*
                 * response buffer posted, but did not replace all unexpected
                 * pdu recv bufs. Ignore the error, a retry occurs on the
                 * next send.
                 */
                outstanding_unexp_pdus -= (posts - 1);
                err = 0;
        }
        atomic_add(outstanding_unexp_pdus,
                   &iser_conn->ib_conn->unexpected_pdu_count);

        return err;
}
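
/*
 * A send descriptor begins with the iSER header immediately followed by
 * the iSCSI BHS, so registering ISER_TOTAL_HEADERS_LEN bytes at the
 * descriptor's address covers both headers with a single registered
 * buffer; that buffer always occupies entry 0 of the send DTO.
 */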

/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iscsi_iser_conn *iser_conn,
                                  struct iser_desc *tx_desc)
{
        struct iser_regd_buf *regd_hdr = &tx_desc->hdr_regd_buf;
        struct iser_dto *send_dto = &tx_desc->dto;

        memset(regd_hdr, 0, sizeof(struct iser_regd_buf));
        regd_hdr->device = iser_conn->ib_conn->device;
        regd_hdr->virt_addr = tx_desc; /* == &tx_desc->iser_header */
        regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN;

        send_dto->ib_conn = iser_conn->ib_conn;
        send_dto->notify_enable = 1;
        send_dto->regd_vector_len = 0;

        memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
        tx_desc->iser_header.flags = ISER_VER;

        iser_dto_add_regd_buff(send_dto, regd_hdr, 0, 0);
}

/**
 * iser_conn_set_full_featured_mode - (iSER API)
 */
int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
{
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
        int i;
        /*
         * FIXME this value should be declared to the target during login with
         * the MaxOutstandingUnexpectedPDUs key when supported
         */
        int initial_post_recv_bufs_num = ISER_MAX_RX_MISC_PDUS;

        iser_dbg("Initially post: %d\n", initial_post_recv_bufs_num);

        /* Check that there are no posted recv or send buffers left - */
        /* they must be consumed during the login phase */
        BUG_ON(atomic_read(&iser_conn->ib_conn->post_recv_buf_count) != 0);
        BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);

        /* Initial post receive buffers */
        for (i = 0; i < initial_post_recv_bufs_num; i++) {
                if (iser_post_receive_control(conn) != 0) {
                        iser_err("Failed to post recv bufs at:%d conn:0x%p\n",
                                 i, conn);
                        return -ENOMEM;
                }
        }
        iser_dbg("Posted %d post recv bufs, conn:0x%p\n", i, conn);
        return 0;
}

static int
iser_check_xmit(struct iscsi_conn *conn, void *task)
{
        struct iscsi_iser_conn *iser_conn = conn->dd_data;

        if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
            ISER_QP_MAX_REQ_DTOS) {
                iser_dbg("%ld can't xmit task %p\n", jiffies, task);
                return -ENOBUFS;
        }
        return 0;
}
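
/*
 * Send-path flow control: the QP is sized for ISER_QP_MAX_REQ_DTOS
 * outstanding send work requests. iser_check_xmit() above returns
 * -ENOBUFS once that limit is reached; transmission is resumed from
 * iser_snd_completion() when a send completes and frees a slot.
 */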

/**
 * iser_send_command - send command PDU
 */
int iser_send_command(struct iscsi_conn *conn,
                      struct iscsi_task *task)
{
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_dto *send_dto = NULL;
        unsigned long edtl;
        int err = 0;
        struct iser_data_buf *data_buf;
        struct iscsi_cmd *hdr = (struct iscsi_cmd *)task->hdr;
        struct scsi_cmnd *sc = task->sc;

        if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
                iser_err("Failed to send, conn: 0x%p is not up\n",
                         iser_conn->ib_conn);
                return -EPERM;
        }
        if (iser_check_xmit(conn, task))
                return -ENOBUFS;

        edtl = ntohl(hdr->data_length);

        /* build the tx desc regd header and add it to the tx desc dto */
        iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
        send_dto = &iser_task->desc.dto;
        send_dto->task = iser_task;
        iser_create_send_desc(iser_conn, &iser_task->desc);

        if (hdr->flags & ISCSI_FLAG_CMD_READ)
                data_buf = &iser_task->data[ISER_DIR_IN];
        else
                data_buf = &iser_task->data[ISER_DIR_OUT];

        if (scsi_sg_count(sc)) { /* using a scatter list */
                data_buf->buf = scsi_sglist(sc);
                data_buf->size = scsi_sg_count(sc);
        }

        data_buf->data_len = scsi_bufflen(sc);

        if (hdr->flags & ISCSI_FLAG_CMD_READ) {
                err = iser_prepare_read_cmd(task, edtl);
                if (err)
                        goto send_command_error;
        }
        if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
                err = iser_prepare_write_cmd(task,
                                             task->imm_count,
                                             task->imm_count +
                                             task->unsol_r2t.data_length,
                                             edtl);
                if (err)
                        goto send_command_error;
        }

        iser_reg_single(iser_conn->ib_conn->device,
                        send_dto->regd[0], DMA_TO_DEVICE);

        if (iser_post_receive_control(conn) != 0) {
                iser_err("post_recv failed!\n");
                err = -ENOMEM;
                goto send_command_error;
        }

        iser_task->status = ISER_TASK_STATUS_STARTED;

        err = iser_post_send(&iser_task->desc);
        if (!err)
                return 0;

send_command_error:
        iser_dto_buffs_release(send_dto);
        iser_err("conn %p failed task->itt %d err %d\n", conn, task->itt, err);
        return err;
}

/**
 * iser_send_data_out - send data out PDU
 */
int iser_send_data_out(struct iscsi_conn *conn,
                       struct iscsi_task *task,
                       struct iscsi_data *hdr)
{
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_desc *tx_desc = NULL;
        struct iser_dto *send_dto = NULL;
        unsigned long buf_offset;
        unsigned long data_seg_len;
        uint32_t itt;
        int err = 0;

        if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
                iser_err("Failed to send, conn: 0x%p is not up\n",
                         iser_conn->ib_conn);
                return -EPERM;
        }

        if (iser_check_xmit(conn, task))
                return -ENOBUFS;

        itt = (__force uint32_t)hdr->itt;
        data_seg_len = ntoh24(hdr->dlength);
        buf_offset = ntohl(hdr->offset);

        iser_dbg("%s itt %d dseg_len %d offset %d\n",
                 __func__, (int)itt, (int)data_seg_len, (int)buf_offset);

        tx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO);
        if (tx_desc == NULL) {
                iser_err("Failed to alloc desc for post dataout\n");
                return -ENOMEM;
        }

        tx_desc->type = ISCSI_TX_DATAOUT;
        memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

        /* build the tx desc regd header and add it to the tx desc dto */
        send_dto = &tx_desc->dto;
        send_dto->task = iser_task;
        iser_create_send_desc(iser_conn, tx_desc);

        iser_reg_single(iser_conn->ib_conn->device,
                        send_dto->regd[0], DMA_TO_DEVICE);

        /* all data was registered for RDMA, we can use the lkey */
        iser_dto_add_regd_buff(send_dto,
                               &iser_task->rdma_regd[ISER_DIR_OUT],
                               buf_offset,
                               data_seg_len);

        if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
                iser_err("Offset:%ld & DSL:%ld in Data-Out "
                         "inconsistent with total len:%ld, itt:%d\n",
                         buf_offset, data_seg_len,
                         iser_task->data[ISER_DIR_OUT].data_len, itt);
                err = -EINVAL;
                goto send_data_out_error;
        }
        iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
                 itt, buf_offset, data_seg_len);

        err = iser_post_send(tx_desc);
        if (!err)
                return 0;

send_data_out_error:
        iser_dto_buffs_release(send_dto);
        kmem_cache_free(ig.desc_cache, tx_desc);
        iser_err("conn %p failed err %d\n", conn, err);
        return err;
}
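
/*
 * Control-type PDUs (e.g. login, text, nop-out, task management) carry
 * their payload, if any, in task->data and use a single DMA mapping
 * rather than the RDMA registration machinery used for SCSI data. A
 * receive buffer is posted for the response, except for a nop-out with
 * a reserved ITT, which the target does not answer.
 */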
int iser_send_control(struct iscsi_conn *conn,
                      struct iscsi_task *task)
{
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_desc *mdesc = &iser_task->desc;
        struct iser_dto *send_dto = NULL;
        unsigned long data_seg_len;
        int err = 0;
        struct iser_regd_buf *regd_buf;
        struct iser_device *device;
        unsigned char opcode;

        if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
                iser_err("Failed to send, conn: 0x%p is not up\n",
                         iser_conn->ib_conn);
                return -EPERM;
        }

        if (iser_check_xmit(conn, task))
                return -ENOBUFS;

        /* build the tx desc regd header and add it to the tx desc dto */
        mdesc->type = ISCSI_TX_CONTROL;
        send_dto = &mdesc->dto;
        send_dto->task = NULL;
        iser_create_send_desc(iser_conn, mdesc);

        device = iser_conn->ib_conn->device;

        iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);

        data_seg_len = ntoh24(task->hdr->dlength);

        if (data_seg_len > 0) {
                regd_buf = &mdesc->data_regd_buf;
                memset(regd_buf, 0, sizeof(struct iser_regd_buf));
                regd_buf->device = device;
                regd_buf->virt_addr = task->data;
                regd_buf->data_size = task->data_count;
                iser_reg_single(device, regd_buf,
                                DMA_TO_DEVICE);
                iser_dto_add_regd_buff(send_dto, regd_buf,
                                       0,
                                       data_seg_len);
        }

        opcode = task->hdr->opcode & ISCSI_OPCODE_MASK;

        /* post recv buffer for response if one is expected */
        if (!(opcode == ISCSI_OP_NOOP_OUT && task->hdr->itt == RESERVED_ITT)) {
                if (iser_post_receive_control(conn) != 0) {
                        iser_err("post_rcv_buff failed!\n");
                        err = -ENOMEM;
                        goto send_control_error;
                }
        }

        err = iser_post_send(mdesc);
        if (!err)
                return 0;

send_control_error:
        iser_dto_buffs_release(send_dto);
        iser_err("conn %p failed err %d\n", conn, err);
        return err;
}

/**
 * iser_rcv_completion - recv DTO completion
 */
void iser_rcv_completion(struct iser_desc *rx_desc,
                         unsigned long dto_xfer_len)
{
        struct iser_dto *dto = &rx_desc->dto;
        struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
        struct iscsi_task *task;
        struct iscsi_iser_task *iser_task;
        struct iscsi_hdr *hdr;
        char *rx_data = NULL;
        int rx_data_len = 0;
        unsigned char opcode;

        hdr = &rx_desc->iscsi_header;

        iser_dbg("op 0x%x itt 0x%x\n", hdr->opcode, hdr->itt);

        if (dto_xfer_len > ISER_TOTAL_HEADERS_LEN) { /* we have data */
                rx_data_len = dto_xfer_len - ISER_TOTAL_HEADERS_LEN;
                rx_data     = dto->regd[1]->virt_addr;
                rx_data    += dto->offset[1];
        }

        opcode = hdr->opcode & ISCSI_OPCODE_MASK;

        if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
                spin_lock(&conn->iscsi_conn->session->lock);
                task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
                if (task)
                        __iscsi_get_task(task);
                spin_unlock(&conn->iscsi_conn->session->lock);

                if (!task)
                        iser_err("itt can't be matched to task!!! "
                                 "conn %p opcode %d itt %d\n",
                                 conn->iscsi_conn, opcode, hdr->itt);
                else {
                        iser_task = task->dd_data;
                        iser_dbg("itt %d task %p\n", hdr->itt, task);
                        iser_task->status = ISER_TASK_STATUS_COMPLETED;
                        iser_task_rdma_finalize(iser_task);
                        iscsi_put_task(task);
                }
        }
        iser_dto_buffs_release(dto);

        iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);

        kfree(rx_desc->data);
        kmem_cache_free(ig.desc_cache, rx_desc);

        /* decrementing conn->post_recv_buf_count only --after-- freeing the
         * task eliminates the need to worry about tasks which are completed
         * in parallel to the execution of iser_conn_term. So the code that
         * waits for the posted rx bufs refcount to become zero handles
         * everything.
         */
        atomic_dec(&conn->ib_conn->post_recv_buf_count);

        /*
         * if an unexpected PDU was received then the recv wr consumed must
         * be replaced, this is done in the next send of a control-type PDU
         */
        if (opcode == ISCSI_OP_NOOP_IN && hdr->itt == RESERVED_ITT) {
                /* nop-in with itt = 0xffffffff */
                atomic_inc(&conn->ib_conn->unexpected_pdu_count);
        } else if (opcode == ISCSI_OP_ASYNC_EVENT) {
                /* asynchronous message */
                atomic_inc(&conn->ib_conn->unexpected_pdu_count);
        }
        /* a reject PDU consumes the recv buf posted for the response */
}

void iser_snd_completion(struct iser_desc *tx_desc)
{
        struct iser_dto *dto = &tx_desc->dto;
        struct iser_conn *ib_conn = dto->ib_conn;
        struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
        struct iscsi_conn *conn = iser_conn->iscsi_conn;
        struct iscsi_task *task;
        int resume_tx = 0;

        iser_dbg("Initiator, Data sent dto=0x%p\n", dto);

        iser_dto_buffs_release(dto);

        if (tx_desc->type == ISCSI_TX_DATAOUT)
                kmem_cache_free(ig.desc_cache, tx_desc);

        if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
            ISER_QP_MAX_REQ_DTOS)
                resume_tx = 1;

        atomic_dec(&ib_conn->post_send_buf_count);

        if (resume_tx) {
                iser_dbg("%ld resuming tx\n", jiffies);
                iscsi_conn_queue_work(conn);
        }

        if (tx_desc->type == ISCSI_TX_CONTROL) {
                /* this arithmetic is legal by libiscsi dd_data allocation */
                task = (void *) ((long)(void *)tx_desc -
                                 sizeof(struct iscsi_task));
                if (task->hdr->itt == RESERVED_ITT)
                        iscsi_put_task(task);
        }
}

void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
        iser_task->status = ISER_TASK_STATUS_INIT;

        iser_task->dir[ISER_DIR_IN] = 0;
        iser_task->dir[ISER_DIR_OUT] = 0;

        iser_task->data[ISER_DIR_IN].data_len = 0;
        iser_task->data[ISER_DIR_OUT].data_len = 0;

        memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
               sizeof(struct iser_regd_buf));
        memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
               sizeof(struct iser_regd_buf));
}
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
        int deferred;
        int is_rdma_aligned = 1;
        struct iser_regd_buf *regd;

        /* if we were reading, copy back to the unaligned sglist,
         * then dma_unmap and free the copy
         */
        if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
                is_rdma_aligned = 0;
                iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
        }
        if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
                is_rdma_aligned = 0;
                iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
        }

        if (iser_task->dir[ISER_DIR_IN]) {
                regd = &iser_task->rdma_regd[ISER_DIR_IN];
                deferred = iser_regd_buff_release(regd);
                if (deferred) {
                        iser_err("%d references remain for BUF-IN rdma reg\n",
                                 atomic_read(&regd->ref_count));
                }
        }

        if (iser_task->dir[ISER_DIR_OUT]) {
                regd = &iser_task->rdma_regd[ISER_DIR_OUT];
                deferred = iser_regd_buff_release(regd);
                if (deferred) {
                        iser_err("%d references remain for BUF-OUT rdma reg\n",
                                 atomic_read(&regd->ref_count));
                }
        }

        /* if the data was unaligned, it was already unmapped and then copied */
        if (is_rdma_aligned)
                iser_dma_unmap_task_data(iser_task);
}

void iser_dto_buffs_release(struct iser_dto *dto)
{
        int i;

        for (i = 0; i < dto->regd_vector_len; i++)
                iser_regd_buff_release(dto->regd[i]);
}