/*******************************************************************************
 * This file contains the iSCSI Target specific utility functions.
 *
 * © Copyright 2007-2011 RisingTide Systems LLC.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ******************************************************************************/

#include <linux/list.h>
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/iscsi/iscsi_transport.h>

#include "iscsi_target_core.h"
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_tq.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"

#define PRINT_BUFF(buff, len)						\
{									\
	int zzz;							\
									\
	pr_debug("%d:\n", __LINE__);					\
	for (zzz = 0; zzz < len; zzz++) {				\
		if (zzz % 16 == 0) {					\
			if (zzz)					\
				pr_debug("\n");				\
			pr_debug("%4i: ", zzz);				\
		}							\
		pr_debug("%02x ", (unsigned char) (buff)[zzz]);		\
	}								\
	if ((len + 1) % 16)						\
		pr_debug("\n");						\
}

extern struct list_head g_tiqn_list;
extern spinlock_t tiqn_lock;

/*
 *	Called with cmd->r2t_lock held.
 */
int iscsit_add_r2t_to_list(
	struct iscsi_cmd *cmd,
	u32 offset,
	u32 xfer_len,
	int recovery,
	u32 r2t_sn)
{
	struct iscsi_r2t *r2t;

	r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
	if (!r2t) {
		pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
		return -1;
	}
	INIT_LIST_HEAD(&r2t->r2t_list);

	r2t->recovery_r2t = recovery;
	r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
	r2t->offset = offset;
	r2t->xfer_len = xfer_len;
	list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
	spin_unlock_bh(&cmd->r2t_lock);

	iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);

	spin_lock_bh(&cmd->r2t_lock);
	return 0;
}
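
/*
 * Find the R2T on cmd->cmd_r2t_list whose range fully covers
 * [offset, offset + length); returns NULL and logs an error if no
 * such R2T exists.
 */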
struct iscsi_r2t *iscsit_get_r2t_for_eos(
	struct iscsi_cmd *cmd,
	u32 offset,
	u32 length)
{
	struct iscsi_r2t *r2t;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
		if ((r2t->offset <= offset) &&
		    (r2t->offset + r2t->xfer_len) >= (offset + length)) {
			spin_unlock_bh(&cmd->r2t_lock);
			return r2t;
		}
	}
	spin_unlock_bh(&cmd->r2t_lock);

	pr_err("Unable to locate R2T for Offset: %u, Length:"
		" %u\n", offset, length);
	return NULL;
}

struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
{
	struct iscsi_r2t *r2t;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
		if (!r2t->sent_r2t) {
			spin_unlock_bh(&cmd->r2t_lock);
			return r2t;
		}
	}
	spin_unlock_bh(&cmd->r2t_lock);

	pr_err("Unable to locate next R2T to send for ITT:"
		" 0x%08x.\n", cmd->init_task_tag);
	return NULL;
}

/*
 *	Called with cmd->r2t_lock held.
 */
void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
{
	list_del(&r2t->r2t_list);
	kmem_cache_free(lio_r2t_cache, r2t);
}

void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
{
	struct iscsi_r2t *r2t, *r2t_tmp;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
		iscsit_free_r2t(r2t, cmd);
	spin_unlock_bh(&cmd->r2t_lock);
}
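
/*
 * Default struct iscsi_cmd allocation backend, presumably wired up as
 * the traditional iSCSI/TCP transport's iscsit_alloc_cmd() op (see
 * iscsit_allocate_cmd() below).  Sets the default release handler.
 */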
struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
{
	struct iscsi_cmd *cmd;

	cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask);
	if (!cmd)
		return NULL;

	cmd->release_cmd = &iscsit_release_cmd;
	return cmd;
}

/*
 * May be called from software interrupt (timer) context for allocating
 * iSCSI NopINs.
 */
struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
{
	struct iscsi_cmd *cmd;

	cmd = conn->conn_transport->iscsit_alloc_cmd(conn, gfp_mask);
	if (!cmd) {
		pr_err("Unable to allocate memory for struct iscsi_cmd.\n");
		return NULL;
	}
	cmd->conn = conn;
	INIT_LIST_HEAD(&cmd->i_conn_node);
	INIT_LIST_HEAD(&cmd->datain_list);
	INIT_LIST_HEAD(&cmd->cmd_r2t_list);
	spin_lock_init(&cmd->datain_lock);
	spin_lock_init(&cmd->dataout_timeout_lock);
	spin_lock_init(&cmd->istate_lock);
	spin_lock_init(&cmd->error_lock);
	spin_lock_init(&cmd->r2t_lock);

	return cmd;
}
EXPORT_SYMBOL(iscsit_allocate_cmd);

struct iscsi_seq *iscsit_get_seq_holder_for_datain(
	struct iscsi_cmd *cmd,
	u32 seq_send_order)
{
	u32 i;

	for (i = 0; i < cmd->seq_count; i++)
		if (cmd->seq_list[i].seq_send_order == seq_send_order)
			return &cmd->seq_list[i];

	return NULL;
}

struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
{
	u32 i;

	if (!cmd->seq_list) {
		pr_err("struct iscsi_cmd->seq_list is NULL!\n");
		return NULL;
	}

	for (i = 0; i < cmd->seq_count; i++) {
		if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
			continue;
		if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
			cmd->seq_send_order++;
			return &cmd->seq_list[i];
		}
	}

	return NULL;
}

struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
	struct iscsi_cmd *cmd,
	u32 r2t_sn)
{
	struct iscsi_r2t *r2t;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
		if (r2t->r2t_sn == r2t_sn) {
			spin_unlock_bh(&cmd->r2t_lock);
			return r2t;
		}
	}
	spin_unlock_bh(&cmd->r2t_lock);

	return NULL;
}

static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
{
	int ret;

	/*
	 * This is the proper method of checking received CmdSN against
	 * ExpCmdSN and MaxCmdSN values, as well as accounting for out
	 * of order CmdSNs due to multiple connection sessions and/or
	 * CRC failures.
	 */
	if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
		pr_err("Received CmdSN: 0x%08x is greater than"
			" MaxCmdSN: 0x%08x, protocol error.\n", cmdsn,
			sess->max_cmd_sn);
		ret = CMDSN_ERROR_CANNOT_RECOVER;

	} else if (cmdsn == sess->exp_cmd_sn) {
		sess->exp_cmd_sn++;
		pr_debug("Received CmdSN matches ExpCmdSN,"
			" incremented ExpCmdSN to: 0x%08x\n",
			sess->exp_cmd_sn);
		ret = CMDSN_NORMAL_OPERATION;

	} else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
		pr_debug("Received CmdSN: 0x%08x is greater"
			" than ExpCmdSN: 0x%08x, not acknowledging.\n",
			cmdsn, sess->exp_cmd_sn);
		ret = CMDSN_HIGHER_THAN_EXP;

	} else {
		pr_err("Received CmdSN: 0x%08x is less than"
			" ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
			sess->exp_cmd_sn);
		ret = CMDSN_LOWER_THAN_EXP;
	}

	return ret;
}

/*
 * Commands may be received out of order if MC/S is in use.
 * Ensure they are executed in CmdSN order.
 */
int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			unsigned char *buf, __be32 cmdsn)
{
	int ret, cmdsn_ret;
	bool reject = false;
	u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES;

	mutex_lock(&conn->sess->cmdsn_mutex);

	cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, be32_to_cpu(cmdsn));
	switch (cmdsn_ret) {
	case CMDSN_NORMAL_OPERATION:
		ret = iscsit_execute_cmd(cmd, 0);
		if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
			iscsit_execute_ooo_cmdsns(conn->sess);
		else if (ret < 0) {
			reject = true;
			ret = CMDSN_ERROR_CANNOT_RECOVER;
		}
		break;
	case CMDSN_HIGHER_THAN_EXP:
		ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
		if (ret < 0) {
			reject = true;
			ret = CMDSN_ERROR_CANNOT_RECOVER;
			break;
		}
		ret = CMDSN_HIGHER_THAN_EXP;
		break;
	case CMDSN_LOWER_THAN_EXP:
		cmd->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
		ret = cmdsn_ret;
		break;
	default:
		reason = ISCSI_REASON_PROTOCOL_ERROR;
		reject = true;
		ret = cmdsn_ret;
		break;
	}
	mutex_unlock(&conn->sess->cmdsn_mutex);

	if (reject)
		iscsit_reject_cmd(cmd, reason, buf);

	return ret;
}
EXPORT_SYMBOL(iscsit_sequence_cmd);
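
/*
 * Sanity check an unsolicited DataOut PDU against the negotiated
 * session parameters: fail if InitialR2T=Yes, if the accumulated first
 * burst exceeds FirstBurstLength, or if a final burst ends at neither
 * FirstBurstLength nor the expected transfer length.
 */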
int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
{
	struct iscsi_conn *conn = cmd->conn;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	if (conn->sess->sess_ops->InitialR2T) {
		pr_err("Received unexpected unsolicited data"
			" while InitialR2T=Yes, protocol error.\n");
		transport_send_check_condition_and_sense(se_cmd,
				TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
		return -1;
	}

	if ((cmd->first_burst_len + payload_length) >
	     conn->sess->sess_ops->FirstBurstLength) {
		pr_err("Total %u bytes exceeds FirstBurstLength: %u"
			" for this Unsolicited DataOut Burst.\n",
			(cmd->first_burst_len + payload_length),
			conn->sess->sess_ops->FirstBurstLength);
		transport_send_check_condition_and_sense(se_cmd,
				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
		return -1;
	}

	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
		return 0;

	if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
	    ((cmd->first_burst_len + payload_length) !=
	      conn->sess->sess_ops->FirstBurstLength)) {
		pr_err("Unsolicited non-immediate data received %u"
			" does not equal FirstBurstLength: %u, and does"
			" not equal ExpXferLen %u.\n",
			(cmd->first_burst_len + payload_length),
			conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length);
		transport_send_check_condition_and_sense(se_cmd,
				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
		return -1;
	}
	return 0;
}

struct iscsi_cmd *iscsit_find_cmd_from_itt(
	struct iscsi_conn *conn,
	itt_t init_task_tag)
{
	struct iscsi_cmd *cmd;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
		if (cmd->init_task_tag == init_task_tag) {
			spin_unlock_bh(&conn->cmd_lock);
			return cmd;
		}
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_err("Unable to locate ITT: 0x%08x on CID: %hu\n",
		init_task_tag, conn->cid);
	return NULL;
}

struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
	struct iscsi_conn *conn,
	itt_t init_task_tag,
	u32 length)
{
	struct iscsi_cmd *cmd;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
		if (cmd->init_task_tag == init_task_tag) {
			spin_unlock_bh(&conn->cmd_lock);
			return cmd;
		}
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
		" dumping payload\n", init_task_tag, conn->cid);
	if (length)
		iscsit_dump_data_payload(conn, length, 1);

	return NULL;
}

struct iscsi_cmd *iscsit_find_cmd_from_ttt(
	struct iscsi_conn *conn,
	u32 targ_xfer_tag)
{
	struct iscsi_cmd *cmd = NULL;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
		if (cmd->targ_xfer_tag == targ_xfer_tag) {
			spin_unlock_bh(&conn->cmd_lock);
			return cmd;
		}
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
		targ_xfer_tag, conn->cid);
	return NULL;
}
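
/*
 * Look up a command by ITT across this session's connection recovery
 * lists.  Returns -2 if the command sits on an inactive connection
 * recovery entry, 0 if it sits on an active entry and is ready to be
 * reassigned, and -1 if the ITT cannot be found; *cr_ptr and *cmd_ptr
 * are only set when a command is found.
 */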
int iscsit_find_cmd_for_recovery(
	struct iscsi_session *sess,
	struct iscsi_cmd **cmd_ptr,
	struct iscsi_conn_recovery **cr_ptr,
	itt_t init_task_tag)
{
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_conn_recovery *cr;
	/*
	 * Scan through the inactive connection recovery list's command list.
	 * If init_task_tag matches, the command is still outstanding on an
	 * inactive connection.
	 */
	spin_lock(&sess->cr_i_lock);
	list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
		spin_lock(&cr->conn_recovery_cmd_lock);
		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
			if (cmd->init_task_tag == init_task_tag) {
				spin_unlock(&cr->conn_recovery_cmd_lock);
				spin_unlock(&sess->cr_i_lock);

				*cr_ptr = cr;
				*cmd_ptr = cmd;
				return -2;
			}
		}
		spin_unlock(&cr->conn_recovery_cmd_lock);
	}
	spin_unlock(&sess->cr_i_lock);
	/*
	 * Scan through the active connection recovery list's command list.
	 * If init_task_tag matches, the command is ready to be reassigned.
	 */
	spin_lock(&sess->cr_a_lock);
	list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
		spin_lock(&cr->conn_recovery_cmd_lock);
		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
			if (cmd->init_task_tag == init_task_tag) {
				spin_unlock(&cr->conn_recovery_cmd_lock);
				spin_unlock(&sess->cr_a_lock);

				*cr_ptr = cr;
				*cmd_ptr = cmd;
				return 0;
			}
		}
		spin_unlock(&cr->conn_recovery_cmd_lock);
	}
	spin_unlock(&sess->cr_a_lock);

	return -1;
}
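
/*
 * Queue a new state for @cmd on the connection's immediate queue and
 * wake up any thread sleeping on conn->queues_wq.  Uses GFP_ATOMIC
 * since this can be reached from timer context; on allocation failure
 * the request is silently dropped.
 */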
void iscsit_add_cmd_to_immediate_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	u8 state)
{
	struct iscsi_queue_req *qr;

	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
	if (!qr) {
		pr_err("Unable to allocate memory for"
			" struct iscsi_queue_req\n");
		return;
	}
	INIT_LIST_HEAD(&qr->qr_list);
	qr->cmd = cmd;
	qr->state = state;

	spin_lock_bh(&conn->immed_queue_lock);
	list_add_tail(&qr->qr_list, &conn->immed_queue_list);
	atomic_inc(&cmd->immed_queue_count);
	atomic_set(&conn->check_immediate_queue, 1);
	spin_unlock_bh(&conn->immed_queue_lock);

	wake_up(&conn->queues_wq);
}

struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr;

	spin_lock_bh(&conn->immed_queue_lock);
	if (list_empty(&conn->immed_queue_list)) {
		spin_unlock_bh(&conn->immed_queue_lock);
		return NULL;
	}
	qr = list_first_entry(&conn->immed_queue_list,
			      struct iscsi_queue_req, qr_list);

	list_del(&qr->qr_list);
	if (qr->cmd)
		atomic_dec(&qr->cmd->immed_queue_count);
	spin_unlock_bh(&conn->immed_queue_lock);

	return qr;
}

static void iscsit_remove_cmd_from_immediate_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->immed_queue_lock);
	if (!atomic_read(&cmd->immed_queue_count)) {
		spin_unlock_bh(&conn->immed_queue_lock);
		return;
	}

	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
		if (qr->cmd != cmd)
			continue;

		atomic_dec(&qr->cmd->immed_queue_count);
		list_del(&qr->qr_list);
		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->immed_queue_lock);

	if (atomic_read(&cmd->immed_queue_count)) {
		pr_err("ITT: 0x%08x immed_queue_count: %d\n",
			cmd->init_task_tag,
			atomic_read(&cmd->immed_queue_count));
	}
}

void iscsit_add_cmd_to_response_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	u8 state)
{
	struct iscsi_queue_req *qr;

	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
	if (!qr) {
		pr_err("Unable to allocate memory for"
			" struct iscsi_queue_req\n");
		return;
	}
	INIT_LIST_HEAD(&qr->qr_list);
	qr->cmd = cmd;
	qr->state = state;

	spin_lock_bh(&conn->response_queue_lock);
	list_add_tail(&qr->qr_list, &conn->response_queue_list);
	atomic_inc(&cmd->response_queue_count);
	spin_unlock_bh(&conn->response_queue_lock);

	wake_up(&conn->queues_wq);
}

struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr;

	spin_lock_bh(&conn->response_queue_lock);
	if (list_empty(&conn->response_queue_list)) {
		spin_unlock_bh(&conn->response_queue_lock);
		return NULL;
	}

	qr = list_first_entry(&conn->response_queue_list,
			      struct iscsi_queue_req, qr_list);

	list_del(&qr->qr_list);
	if (qr->cmd)
		atomic_dec(&qr->cmd->response_queue_count);
	spin_unlock_bh(&conn->response_queue_lock);

	return qr;
}

static void iscsit_remove_cmd_from_response_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->response_queue_lock);
	if (!atomic_read(&cmd->response_queue_count)) {
		spin_unlock_bh(&conn->response_queue_lock);
		return;
	}

	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
				 qr_list) {
		if (qr->cmd != cmd)
			continue;

		atomic_dec(&qr->cmd->response_queue_count);
		list_del(&qr->qr_list);
		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->response_queue_lock);

	if (atomic_read(&cmd->response_queue_count)) {
		pr_err("ITT: 0x%08x response_queue_count: %d\n",
			cmd->init_task_tag,
			atomic_read(&cmd->response_queue_count));
	}
}

bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
{
	bool empty;

	spin_lock_bh(&conn->immed_queue_lock);
	empty = list_empty(&conn->immed_queue_list);
	spin_unlock_bh(&conn->immed_queue_lock);

	if (!empty)
		return empty;

	spin_lock_bh(&conn->response_queue_lock);
	empty = list_empty(&conn->response_queue_list);
	spin_unlock_bh(&conn->response_queue_lock);

	return empty;
}
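
/*
 * Drop and free every queue request still pending on the connection's
 * immediate and response queues, presumably as part of connection
 * teardown.
 */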
void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->immed_queue_lock);
	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
		list_del(&qr->qr_list);
		if (qr->cmd)
			atomic_dec(&qr->cmd->immed_queue_count);

		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->immed_queue_lock);

	spin_lock_bh(&conn->response_queue_lock);
	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
				 qr_list) {
		list_del(&qr->qr_list);
		if (qr->cmd)
			atomic_dec(&qr->cmd->response_queue_count);

		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->response_queue_lock);
}

void iscsit_release_cmd(struct iscsi_cmd *cmd)
{
	kfree(cmd->buf_ptr);
	kfree(cmd->pdu_list);
	kfree(cmd->seq_list);
	kfree(cmd->tmr_req);
	kfree(cmd->iov_data);
	kfree(cmd->text_in_ptr);

	kmem_cache_free(lio_cmd_cache, cmd);
}

static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
			      bool check_queues)
{
	struct iscsi_conn *conn = cmd->conn;

	if (scsi_cmd) {
		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			iscsit_free_r2ts_from_list(cmd);
		}
		if (cmd->data_direction == DMA_FROM_DEVICE)
			iscsit_free_all_datain_reqs(cmd);
	}

	if (conn && check_queues) {
		iscsit_remove_cmd_from_immediate_queue(cmd, conn);
		iscsit_remove_cmd_from_response_queue(cmd, conn);
	}
}

void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
{
	struct se_cmd *se_cmd = NULL;
	int rc;
	/*
	 * Determine if a struct se_cmd is associated with
	 * this struct iscsi_cmd.
	 */
	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		se_cmd = &cmd->se_cmd;
		__iscsit_free_cmd(cmd, true, shutdown);
		/*
		 * Fallthrough
		 */
	case ISCSI_OP_SCSI_TMFUNC:
		rc = transport_generic_free_cmd(&cmd->se_cmd, 1);
		if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
			__iscsit_free_cmd(cmd, true, shutdown);
			target_put_sess_cmd(se_cmd->se_sess, se_cmd);
		}
		break;
	case ISCSI_OP_REJECT:
		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			se_cmd = &cmd->se_cmd;
			__iscsit_free_cmd(cmd, true, shutdown);

			rc = transport_generic_free_cmd(&cmd->se_cmd, 1);
			if (!rc && shutdown && se_cmd->se_sess) {
				__iscsit_free_cmd(cmd, true, shutdown);
				target_put_sess_cmd(se_cmd->se_sess, se_cmd);
			}
			break;
		}
		/* Fall-through */
	default:
		__iscsit_free_cmd(cmd, false, shutdown);
		cmd->release_cmd(cmd);
		break;
	}
}
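
/*
 * If the session is still in use, mark it as waiting and either return
 * 2 immediately when called from interrupt context or block until the
 * last user drops the count and return 1.  Returns 0 if the session
 * usage count was already zero.
 */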
int iscsit_check_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	if (sess->session_usage_count != 0) {
		sess->session_waiting_on_uc = 1;
		spin_unlock_bh(&sess->session_usage_lock);
		if (in_interrupt())
			return 2;

		wait_for_completion(&sess->session_waiting_on_uc_comp);
		return 1;
	}
	spin_unlock_bh(&sess->session_usage_lock);

	return 0;
}

void iscsit_dec_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	sess->session_usage_count--;

	if (!sess->session_usage_count && sess->session_waiting_on_uc)
		complete(&sess->session_waiting_on_uc_comp);

	spin_unlock_bh(&sess->session_usage_lock);
}

void iscsit_inc_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	sess->session_usage_count++;
	spin_unlock_bh(&sess->session_usage_lock);
}

/*
 * Setup conn->if_marker and conn->of_marker values based upon
 * the initial marker-less interval. (see iSCSI v19 A.2)
 */
int iscsit_set_sync_and_steering_values(struct iscsi_conn *conn)
{
	int login_ifmarker_count = 0, login_ofmarker_count = 0, next_marker = 0;
	/*
	 * IFMarkInt and OFMarkInt are negotiated as 32-bit words.
	 */
	u32 IFMarkInt = (conn->conn_ops->IFMarkInt * 4);
	u32 OFMarkInt = (conn->conn_ops->OFMarkInt * 4);

	if (conn->conn_ops->OFMarker) {
		/*
		 * Account for the first Login Command received not
		 * via iscsi_recv_msg().
		 */
		conn->of_marker += ISCSI_HDR_LEN;
		if (conn->of_marker <= OFMarkInt) {
			conn->of_marker = (OFMarkInt - conn->of_marker);
		} else {
			login_ofmarker_count = (conn->of_marker / OFMarkInt);
			next_marker = (OFMarkInt * (login_ofmarker_count + 1)) +
					(login_ofmarker_count * MARKER_SIZE);
			conn->of_marker = (next_marker - conn->of_marker);
		}
		conn->of_marker_offset = 0;
		pr_debug("Setting OFMarker value to %u based on Initial"
			" Markerless Interval.\n", conn->of_marker);
	}

	if (conn->conn_ops->IFMarker) {
		if (conn->if_marker <= IFMarkInt) {
			conn->if_marker = (IFMarkInt - conn->if_marker);
		} else {
			login_ifmarker_count = (conn->if_marker / IFMarkInt);
			next_marker = (IFMarkInt * (login_ifmarker_count + 1)) +
					(login_ifmarker_count * MARKER_SIZE);
			conn->if_marker = (next_marker - conn->if_marker);
		}
		pr_debug("Setting IFMarker value to %u based on Initial"
			" Markerless Interval.\n", conn->if_marker);
	}

	return 0;
}

struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
{
	struct iscsi_conn *conn;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
		if ((conn->cid == cid) &&
		    (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
			iscsit_inc_conn_usage_count(conn);
			spin_unlock_bh(&sess->conn_lock);
			return conn;
		}
	}
	spin_unlock_bh(&sess->conn_lock);

	return NULL;
}
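
/*
 * Variant of iscsit_get_conn_from_cid() that matches on CID regardless
 * of connection state and sets connection_wait_rcfr, presumably for
 * the connection recovery/reinstatement path.
 */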
struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
{
	struct iscsi_conn *conn;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
		if (conn->cid == cid) {
			iscsit_inc_conn_usage_count(conn);
			spin_lock(&conn->state_lock);
			atomic_set(&conn->connection_wait_rcfr, 1);
			spin_unlock(&conn->state_lock);
			spin_unlock_bh(&sess->conn_lock);
			return conn;
		}
	}
	spin_unlock_bh(&sess->conn_lock);

	return NULL;
}

void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	if (conn->conn_usage_count != 0) {
		conn->conn_waiting_on_uc = 1;
		spin_unlock_bh(&conn->conn_usage_lock);

		wait_for_completion(&conn->conn_waiting_on_uc_comp);
		return;
	}
	spin_unlock_bh(&conn->conn_usage_lock);
}

void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	conn->conn_usage_count--;

	if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
		complete(&conn->conn_waiting_on_uc_comp);

	spin_unlock_bh(&conn->conn_usage_lock);
}

void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	conn->conn_usage_count++;
	spin_unlock_bh(&conn->conn_usage_lock);
}
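
/*
 * Allocate and queue a target-initiated NopIn.  If a response is
 * wanted, a Target Transfer Tag is assigned (skipping the reserved
 * value 0xFFFFFFFF) and the NopIn response timer is started; otherwise
 * the TTT is set to the reserved 0xFFFFFFFF.
 */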
static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
{
	u8 state;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
	if (!cmd)
		return -1;

	cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
	state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
				  ISTATE_SEND_NOPIN_NO_RESPONSE;
	cmd->init_task_tag = RESERVED_ITT;
	spin_lock_bh(&conn->sess->ttt_lock);
	cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
					       0xFFFFFFFF;
	if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
		cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
	spin_unlock_bh(&conn->sess->ttt_lock);

	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);

	if (want_response)
		iscsit_start_nopin_response_timer(conn);
	iscsit_add_cmd_to_immediate_queue(cmd, conn, state);

	return 0;
}

static void iscsit_handle_nopin_response_timeout(unsigned long data)
{
	struct iscsi_conn *conn = (struct iscsi_conn *) data;

	iscsit_inc_conn_usage_count(conn);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		iscsit_dec_conn_usage_count(conn);
		return;
	}

	pr_debug("Did not receive response to NOPIN on CID: %hu on"
		" SID: %u, failing connection.\n", conn->cid,
		conn->sess->sid);
	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);

	{
	struct iscsi_portal_group *tpg = conn->sess->tpg;
	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;

	if (tiqn) {
		spin_lock_bh(&tiqn->sess_err_stats.lock);
		strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
				conn->sess->sess_ops->InitiatorName);
		tiqn->sess_err_stats.last_sess_failure_type =
				ISCSI_SESS_ERR_CXN_TIMEOUT;
		tiqn->sess_err_stats.cxn_timeout_errors++;
		conn->sess->conn_timeout_errors++;
		spin_unlock_bh(&tiqn->sess_err_stats.lock);
	}
	}

	iscsit_cause_connection_reinstatement(conn, 0);
	iscsit_dec_conn_usage_count(conn);
}
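
/*
 * Extend the NopIn response timeout by another nopin_response_timeout
 * interval, but only if the response timer is currently running.
 */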
void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	mod_timer(&conn->nopin_response_timer,
		(get_jiffies_64() + na->nopin_response_timeout * HZ));
	spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	init_timer(&conn->nopin_response_timer);
	conn->nopin_response_timer.expires =
		(get_jiffies_64() + na->nopin_response_timeout * HZ);
	conn->nopin_response_timer.data = (unsigned long)conn;
	conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout;
	conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
	add_timer(&conn->nopin_response_timer);

	pr_debug("Started NOPIN Response Timer on CID: %d to %u"
		" seconds\n", conn->cid, na->nopin_response_timeout);
	spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}
	conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&conn->nopin_timer_lock);

	del_timer_sync(&conn->nopin_response_timer);

	spin_lock_bh(&conn->nopin_timer_lock);
	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);
}

static void iscsit_handle_nopin_timeout(unsigned long data)
{
	struct iscsi_conn *conn = (struct iscsi_conn *) data;

	iscsit_inc_conn_usage_count(conn);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		iscsit_dec_conn_usage_count(conn);
		return;
	}
	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);

	iscsit_add_nopin(conn, 1);
	iscsit_dec_conn_usage_count(conn);
}

/*
 *	Called with conn->nopin_timer_lock held.
 */
void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
	/*
	 * NOPIN timeout is disabled.
	 */
	if (!na->nopin_timeout)
		return;

	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
		return;

	init_timer(&conn->nopin_timer);
	conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
	conn->nopin_timer.data = (unsigned long)conn;
	conn->nopin_timer.function = iscsit_handle_nopin_timeout;
	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
	add_timer(&conn->nopin_timer);

	pr_debug("Started NOPIN Timer on CID: %d at %u second"
		" interval\n", conn->cid, na->nopin_timeout);
}

void iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
	/*
	 * NOPIN timeout is disabled.
	 */
	if (!na->nopin_timeout)
		return;

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	init_timer(&conn->nopin_timer);
	conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
	conn->nopin_timer.data = (unsigned long)conn;
	conn->nopin_timer.function = iscsit_handle_nopin_timeout;
	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
	add_timer(&conn->nopin_timer);

	pr_debug("Started NOPIN Timer on CID: %d at %u second"
		" interval\n", conn->cid, na->nopin_timeout);
	spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}
	conn->nopin_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&conn->nopin_timer_lock);

	del_timer_sync(&conn->nopin_timer);

	spin_lock_bh(&conn->nopin_timer_lock);
	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);
}
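
/*
 * Transmit either cmd->iov_data (use_misc == 0) or cmd->iov_misc
 * (use_misc != 0) in full, retrying the send when tx_data() returns
 * -EAGAIN.  Returns 0 on success or -1 on failure.
 */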
int iscsit_send_tx_data(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	int use_misc)
{
	int tx_sent, tx_size;
	u32 iov_count;
	struct kvec *iov;

send_data:
	tx_size = cmd->tx_size;

	if (!use_misc) {
		iov = &cmd->iov_data[0];
		iov_count = cmd->iov_data_count;
	} else {
		iov = &cmd->iov_misc[0];
		iov_count = cmd->iov_misc_count;
	}

	tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
	if (tx_size != tx_sent) {
		if (tx_sent == -EAGAIN) {
			pr_err("tx_data() returned -EAGAIN\n");
			goto send_data;
		} else
			return -1;
	}
	cmd->tx_size = 0;

	return 0;
}

int iscsit_fe_sendpage_sg(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct scatterlist *sg = cmd->first_data_sg;
	struct kvec iov;
	u32 tx_hdr_size, data_len;
	u32 offset = cmd->first_data_sg_off;
	int tx_sent, iov_off;

send_hdr:
	tx_hdr_size = ISCSI_HDR_LEN;
	if (conn->conn_ops->HeaderDigest)
		tx_hdr_size += ISCSI_CRC_LEN;

	iov.iov_base = cmd->pdu;
	iov.iov_len = tx_hdr_size;

	tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
	if (tx_hdr_size != tx_sent) {
		if (tx_sent == -EAGAIN) {
			pr_err("tx_data() returned -EAGAIN\n");
			goto send_hdr;
		}
		return -1;
	}

	data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
	/*
	 * Set iov_off used by padding and data digest tx_data() calls below
	 * in order to determine proper offset into cmd->iov_data[]
	 */
	if (conn->conn_ops->DataDigest) {
		data_len -= ISCSI_CRC_LEN;
		if (cmd->padding)
			iov_off = (cmd->iov_data_count - 2);
		else
			iov_off = (cmd->iov_data_count - 1);
	} else {
		iov_off = (cmd->iov_data_count - 1);
	}
	/*
	 * Perform sendpage() for each page in the scatterlist
	 */
	while (data_len) {
		u32 space = (sg->length - offset);
		u32 sub_len = min_t(u32, data_len, space);
send_pg:
		tx_sent = conn->sock->ops->sendpage(conn->sock,
				sg_page(sg), sg->offset + offset, sub_len, 0);
		if (tx_sent != sub_len) {
			if (tx_sent == -EAGAIN) {
				pr_err("tcp_sendpage() returned -EAGAIN\n");
				goto send_pg;
			}

			pr_err("tcp_sendpage() failure: %d\n", tx_sent);
			return -1;
		}

		data_len -= sub_len;
		offset = 0;
		sg = sg_next(sg);
	}

send_padding:
	if (cmd->padding) {
		struct kvec *iov_p = &cmd->iov_data[iov_off++];

		tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
		if (cmd->padding != tx_sent) {
			if (tx_sent == -EAGAIN) {
				pr_err("tx_data() returned -EAGAIN\n");
				goto send_padding;
			}
			return -1;
		}
	}

send_datacrc:
	if (conn->conn_ops->DataDigest) {
		struct kvec *iov_d = &cmd->iov_data[iov_off];

		tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
		if (ISCSI_CRC_LEN != tx_sent) {
			if (tx_sent == -EAGAIN) {
				pr_err("tx_data() returned -EAGAIN\n");
				goto send_datacrc;
			}
			return -1;
		}
	}

	return 0;
}

/*
 * This function is mainly used for sending an ISCSI_TARG_LOGIN_RSP PDU
 * back to the Initiator when an exception condition occurs with the
 * errors set in status_class and status_detail.
 *
 * Parameters:	iSCSI Connection, Status Class, Status Detail.
 * Returns:	0 on success, -1 on error.
 */
int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
{
	struct iscsi_login_rsp *hdr;
	struct iscsi_login *login = conn->conn_login;

	login->login_failed = 1;
	iscsit_collect_login_stats(conn, status_class, status_detail);

	hdr = (struct iscsi_login_rsp *)&login->rsp[0];
	hdr->opcode = ISCSI_OP_LOGIN_RSP;
	hdr->status_class = status_class;
	hdr->status_detail = status_detail;
	hdr->itt = conn->login_itt;

	return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
}

void iscsit_print_session_params(struct iscsi_session *sess)
{
	struct iscsi_conn *conn;

	pr_debug("-----------------------------[Session Params for"
		" SID: %u]-----------------------------\n", sess->sid);
	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
		iscsi_dump_conn_ops(conn->conn_ops);
	spin_unlock_bh(&sess->conn_lock);

	iscsi_dump_sess_ops(sess->sess_ops);
}

static int iscsit_do_rx_data(
	struct iscsi_conn *conn,
	struct iscsi_data_count *count)
{
	int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
	struct kvec *iov_p;
	struct msghdr msg;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	memset(&msg, 0, sizeof(struct msghdr));

	iov_p = count->iov;
	iov_len = count->iov_count;

	while (total_rx < data) {
		rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
					(data - total_rx), MSG_WAITALL);
		if (rx_loop <= 0) {
			pr_debug("rx_loop: %d total_rx: %d\n",
				rx_loop, total_rx);
			return rx_loop;
		}
		total_rx += rx_loop;
		pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
			rx_loop, total_rx, data);
	}

	return total_rx;
}

static int iscsit_do_tx_data(
	struct iscsi_conn *conn,
	struct iscsi_data_count *count)
{
	int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
	struct kvec *iov_p;
	struct msghdr msg;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	if (data <= 0) {
		pr_err("Data length is: %d\n", data);
		return -1;
	}

	memset(&msg, 0, sizeof(struct msghdr));

	iov_p = count->iov;
	iov_len = count->iov_count;

	while (total_tx < data) {
		tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
					(data - total_tx));
		if (tx_loop <= 0) {
			pr_debug("tx_loop: %d total_tx %d\n",
				tx_loop, total_tx);
			return tx_loop;
		}
		total_tx += tx_loop;
		pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
			tx_loop, total_tx, data);
	}

	return total_tx;
}
int rx_data(
	struct iscsi_conn *conn,
	struct kvec *iov,
	int iov_count,
	int data)
{
	struct iscsi_data_count c;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	memset(&c, 0, sizeof(struct iscsi_data_count));
	c.iov = iov;
	c.iov_count = iov_count;
	c.data_length = data;
	c.type = ISCSI_RX_DATA;

	return iscsit_do_rx_data(conn, &c);
}

int tx_data(
	struct iscsi_conn *conn,
	struct kvec *iov,
	int iov_count,
	int data)
{
	struct iscsi_data_count c;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	memset(&c, 0, sizeof(struct iscsi_data_count));
	c.iov = iov;
	c.iov_count = iov_count;
	c.data_length = data;
	c.type = ISCSI_TX_DATA;

	return iscsit_do_tx_data(conn, &c);
}
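
/*
 * Update the per-TIQN login statistics for this login attempt.  A
 * repeated failure from the same initiator IP address within a short
 * window is not counted again; for failed logins the initiator name,
 * IP address and failure time are recorded.
 */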
void iscsit_collect_login_stats(
	struct iscsi_conn *conn,
	u8 status_class,
	u8 status_detail)
{
	struct iscsi_param *intrname = NULL;
	struct iscsi_tiqn *tiqn;
	struct iscsi_login_stats *ls;

	tiqn = iscsit_snmp_get_tiqn(conn);
	if (!tiqn)
		return;

	ls = &tiqn->login_stats;

	spin_lock(&ls->lock);
	if (!strcmp(conn->login_ip, ls->last_intr_fail_ip_addr) &&
	    ((get_jiffies_64() - ls->last_fail_time) < 10)) {
		/* We already have the failure info for this login */
		spin_unlock(&ls->lock);
		return;
	}

	if (status_class == ISCSI_STATUS_CLS_SUCCESS)
		ls->accepts++;
	else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
		ls->redirects++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		   (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
		ls->authenticate_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		   (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
		ls->authorize_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		   (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
		ls->negotiate_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
	} else {
		ls->other_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
	}

	/* Save initiator name, ip address and time, if it is a failed login */
	if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
		if (conn->param_list)
			intrname = iscsi_find_param_from_key(INITIATORNAME,
							     conn->param_list);
		strcpy(ls->last_intr_fail_name,
		       (intrname ? intrname->value : "Unknown"));

		ls->last_intr_fail_ip_family = conn->login_family;

		snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE,
			 "%s", conn->login_ip);
		ls->last_fail_time = get_jiffies_64();
	}

	spin_unlock(&ls->lock);
}

struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
{
	struct iscsi_portal_group *tpg;

	if (!conn || !conn->sess)
		return NULL;

	tpg = conn->sess->tpg;
	if (!tpg)
		return NULL;

	if (!tpg->tpg_tiqn)
		return NULL;

	return tpg->tpg_tiqn;
}