/*******************************************************************************
 * This file contains the iSCSI Target specific utility functions.
 *
 * (c) Copyright 2007-2013 Datera, Inc.
 *
 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ******************************************************************************/

#include <linux/list.h>
#include <linux/percpu_ida.h>
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>

#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"

#define PRINT_BUFF(buff, len)						\
{									\
	int zzz;							\
									\
	pr_debug("%d:\n", __LINE__);					\
	for (zzz = 0; zzz < len; zzz++) {				\
		if (zzz % 16 == 0) {					\
			if (zzz)					\
				pr_debug("\n");				\
			pr_debug("%4i: ", zzz);				\
		}							\
		pr_debug("%02x ", (unsigned char) (buff)[zzz]);		\
	}								\
	if ((len + 1) % 16)						\
		pr_debug("\n");						\
}

extern struct list_head g_tiqn_list;
extern spinlock_t tiqn_lock;

/*
 *	Called with cmd->r2t_lock held.
 */
int iscsit_add_r2t_to_list(
	struct iscsi_cmd *cmd,
	u32 offset,
	u32 xfer_len,
	int recovery,
	u32 r2t_sn)
{
	struct iscsi_r2t *r2t;

	r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
	if (!r2t) {
		pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
		return -1;
	}
	INIT_LIST_HEAD(&r2t->r2t_list);

	r2t->recovery_r2t = recovery;
	r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
	r2t->offset = offset;
	r2t->xfer_len = xfer_len;
	list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
	spin_unlock_bh(&cmd->r2t_lock);

	iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);

	spin_lock_bh(&cmd->r2t_lock);
	return 0;
}
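
/*
 * Locate an existing R2T whose [offset, offset + xfer_len) window fully
 * covers the requested offset/length range, or return NULL (after
 * logging an error) when no such R2T exists.
 */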
struct iscsi_r2t *iscsit_get_r2t_for_eos(
	struct iscsi_cmd *cmd,
	u32 offset,
	u32 length)
{
	struct iscsi_r2t *r2t;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
		if ((r2t->offset <= offset) &&
		    (r2t->offset + r2t->xfer_len) >= (offset + length)) {
			spin_unlock_bh(&cmd->r2t_lock);
			return r2t;
		}
	}
	spin_unlock_bh(&cmd->r2t_lock);

	pr_err("Unable to locate R2T for Offset: %u, Length:"
			" %u\n", offset, length);
	return NULL;
}

struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
{
	struct iscsi_r2t *r2t;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
		if (!r2t->sent_r2t) {
			spin_unlock_bh(&cmd->r2t_lock);
			return r2t;
		}
	}
	spin_unlock_bh(&cmd->r2t_lock);

	pr_err("Unable to locate next R2T to send for ITT:"
			" 0x%08x.\n", cmd->init_task_tag);
	return NULL;
}

/*
 *	Called with cmd->r2t_lock held.
 */
void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
{
	list_del(&r2t->r2t_list);
	kmem_cache_free(lio_r2t_cache, r2t);
}

void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
{
	struct iscsi_r2t *r2t, *r2t_tmp;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
		iscsit_free_r2t(r2t, cmd);
	spin_unlock_bh(&cmd->r2t_lock);
}

/*
 * May be called from software interrupt (timer) context for allocating
 * iSCSI NopINs.
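 *
 * The returned struct iscsi_cmd is carved out of the session-wide
 * sess_cmd_map, indexed by a tag taken from the percpu_ida tag pool,
 * so no new memory is allocated on this path.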
 */
struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
{
	struct iscsi_cmd *cmd;
	struct se_session *se_sess = conn->sess->se_sess;
	int size, tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
	if (tag < 0)
		return NULL;

	size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
	cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
	memset(cmd, 0, size);

	cmd->se_cmd.map_tag = tag;
	cmd->conn = conn;
	INIT_LIST_HEAD(&cmd->i_conn_node);
	INIT_LIST_HEAD(&cmd->datain_list);
	INIT_LIST_HEAD(&cmd->cmd_r2t_list);
	spin_lock_init(&cmd->datain_lock);
	spin_lock_init(&cmd->dataout_timeout_lock);
	spin_lock_init(&cmd->istate_lock);
	spin_lock_init(&cmd->error_lock);
	spin_lock_init(&cmd->r2t_lock);

	return cmd;
}
EXPORT_SYMBOL(iscsit_allocate_cmd);

struct iscsi_seq *iscsit_get_seq_holder_for_datain(
	struct iscsi_cmd *cmd,
	u32 seq_send_order)
{
	u32 i;

	for (i = 0; i < cmd->seq_count; i++)
		if (cmd->seq_list[i].seq_send_order == seq_send_order)
			return &cmd->seq_list[i];

	return NULL;
}

struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
{
	u32 i;

	if (!cmd->seq_list) {
		pr_err("struct iscsi_cmd->seq_list is NULL!\n");
		return NULL;
	}

	for (i = 0; i < cmd->seq_count; i++) {
		if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
			continue;
		if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
			cmd->seq_send_order++;
			return &cmd->seq_list[i];
		}
	}

	return NULL;
}

struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
	struct iscsi_cmd *cmd,
	u32 r2t_sn)
{
	struct iscsi_r2t *r2t;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
		if (r2t->r2t_sn == r2t_sn) {
			spin_unlock_bh(&cmd->r2t_lock);
			return r2t;
		}
	}
	spin_unlock_bh(&cmd->r2t_lock);

	return NULL;
}

static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
{
	u32 max_cmdsn;
	int ret;

	/*
	 * This is the proper method of checking received CmdSN against
	 * ExpCmdSN and MaxCmdSN values, as well as accounting for out
	 * of order CmdSNs due to multiple connection sessions and/or
	 * CRC failures.
	 */
	max_cmdsn = atomic_read(&sess->max_cmd_sn);
	if (iscsi_sna_gt(cmdsn, max_cmdsn)) {
		pr_err("Received CmdSN: 0x%08x is greater than"
		       " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, max_cmdsn);
		ret = CMDSN_MAXCMDSN_OVERRUN;

	} else if (cmdsn == sess->exp_cmd_sn) {
		sess->exp_cmd_sn++;
		pr_debug("Received CmdSN matches ExpCmdSN,"
			 " incremented ExpCmdSN to: 0x%08x\n",
			 sess->exp_cmd_sn);
		ret = CMDSN_NORMAL_OPERATION;

	} else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
		pr_debug("Received CmdSN: 0x%08x is greater"
			 " than ExpCmdSN: 0x%08x, not acknowledging.\n",
			 cmdsn, sess->exp_cmd_sn);
		ret = CMDSN_HIGHER_THAN_EXP;

	} else {
		pr_err("Received CmdSN: 0x%08x is less than"
		       " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
		       sess->exp_cmd_sn);
		ret = CMDSN_LOWER_THAN_EXP;
	}

	return ret;
}

/*
 * Commands may be received out of order if MC/S is in use.
 * Ensure they are executed in CmdSN order.
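 *
 * CMDSN_LOWER_THAN_EXP is returned when the PDU is silently dropped,
 * and CMDSN_ERROR_CANNOT_RECOVER when the command has been rejected
 * via iscsit_reject_cmd().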
 */
int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			unsigned char *buf, __be32 cmdsn)
{
	int ret, cmdsn_ret;
	bool reject = false;
	u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES;

	mutex_lock(&conn->sess->cmdsn_mutex);

	cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, be32_to_cpu(cmdsn));
	switch (cmdsn_ret) {
	case CMDSN_NORMAL_OPERATION:
		ret = iscsit_execute_cmd(cmd, 0);
		if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
			iscsit_execute_ooo_cmdsns(conn->sess);
		else if (ret < 0) {
			reject = true;
			ret = CMDSN_ERROR_CANNOT_RECOVER;
		}
		break;
	case CMDSN_HIGHER_THAN_EXP:
		ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
		if (ret < 0) {
			reject = true;
			ret = CMDSN_ERROR_CANNOT_RECOVER;
			break;
		}
		ret = CMDSN_HIGHER_THAN_EXP;
		break;
	case CMDSN_LOWER_THAN_EXP:
	case CMDSN_MAXCMDSN_OVERRUN:
	default:
		cmd->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
		/*
		 * Existing callers for iscsit_sequence_cmd() will silently
		 * ignore commands with CMDSN_LOWER_THAN_EXP, so force this
		 * return for CMDSN_MAXCMDSN_OVERRUN as well.
		 */
		ret = CMDSN_LOWER_THAN_EXP;
		break;
	}
	mutex_unlock(&conn->sess->cmdsn_mutex);

	if (reject)
		iscsit_reject_cmd(cmd, reason, buf);

	return ret;
}
EXPORT_SYMBOL(iscsit_sequence_cmd);

int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
{
	struct iscsi_conn *conn = cmd->conn;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	if (conn->sess->sess_ops->InitialR2T) {
		pr_err("Received unexpected unsolicited data"
			" while InitialR2T=Yes, protocol error.\n");
		transport_send_check_condition_and_sense(se_cmd,
				TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
		return -1;
	}

	if ((cmd->first_burst_len + payload_length) >
	     conn->sess->sess_ops->FirstBurstLength) {
		pr_err("Total %u bytes exceeds FirstBurstLength: %u"
			" for this Unsolicited DataOut Burst.\n",
			(cmd->first_burst_len + payload_length),
			conn->sess->sess_ops->FirstBurstLength);
		transport_send_check_condition_and_sense(se_cmd,
				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
		return -1;
	}

	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
		return 0;

	if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
	    ((cmd->first_burst_len + payload_length) !=
	      conn->sess->sess_ops->FirstBurstLength)) {
		pr_err("Unsolicited non-immediate data received %u"
			" does not equal FirstBurstLength: %u, and does"
			" not equal ExpXferLen %u.\n",
			(cmd->first_burst_len + payload_length),
			conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length);
		transport_send_check_condition_and_sense(se_cmd,
				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
		return -1;
	}
	return 0;
}

struct iscsi_cmd *iscsit_find_cmd_from_itt(
	struct iscsi_conn *conn,
	itt_t init_task_tag)
{
	struct iscsi_cmd *cmd;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
		if (cmd->init_task_tag == init_task_tag) {
			spin_unlock_bh(&conn->cmd_lock);
			return cmd;
		}
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_err("Unable to locate ITT: 0x%08x on CID: %hu",
			init_task_tag, conn->cid);
	return NULL;
}
EXPORT_SYMBOL(iscsit_find_cmd_from_itt);
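
/*
 * Like iscsit_find_cmd_from_itt(), but skips commands that have already
 * received their final DataOut PDU (ICF_GOT_LAST_DATAOUT) and dumps the
 * remaining payload when no matching ITT is found.
 */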
struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
	struct iscsi_conn *conn,
	itt_t init_task_tag,
	u32 length)
{
	struct iscsi_cmd *cmd;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
		if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT)
			continue;
		if (cmd->init_task_tag == init_task_tag) {
			spin_unlock_bh(&conn->cmd_lock);
			return cmd;
		}
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
			" dumping payload\n", init_task_tag, conn->cid);
	if (length)
		iscsit_dump_data_payload(conn, length, 1);

	return NULL;
}

struct iscsi_cmd *iscsit_find_cmd_from_ttt(
	struct iscsi_conn *conn,
	u32 targ_xfer_tag)
{
	struct iscsi_cmd *cmd = NULL;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
		if (cmd->targ_xfer_tag == targ_xfer_tag) {
			spin_unlock_bh(&conn->cmd_lock);
			return cmd;
		}
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
			targ_xfer_tag, conn->cid);
	return NULL;
}

int iscsit_find_cmd_for_recovery(
	struct iscsi_session *sess,
	struct iscsi_cmd **cmd_ptr,
	struct iscsi_conn_recovery **cr_ptr,
	itt_t init_task_tag)
{
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_conn_recovery *cr;
	/*
	 * Scan through the inactive connection recovery list's command list.
	 * If init_task_tag matches, the command still sits on an inactive
	 * connection recovery entry.
	 */
	spin_lock(&sess->cr_i_lock);
	list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
		spin_lock(&cr->conn_recovery_cmd_lock);
		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
			if (cmd->init_task_tag == init_task_tag) {
				spin_unlock(&cr->conn_recovery_cmd_lock);
				spin_unlock(&sess->cr_i_lock);

				*cr_ptr = cr;
				*cmd_ptr = cmd;
				return -2;
			}
		}
		spin_unlock(&cr->conn_recovery_cmd_lock);
	}
	spin_unlock(&sess->cr_i_lock);
	/*
	 * Scan through the active connection recovery list's command list.
	 * If init_task_tag matches the command is ready to be reassigned.
	 */
	spin_lock(&sess->cr_a_lock);
	list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
		spin_lock(&cr->conn_recovery_cmd_lock);
		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
			if (cmd->init_task_tag == init_task_tag) {
				spin_unlock(&cr->conn_recovery_cmd_lock);
				spin_unlock(&sess->cr_a_lock);

				*cr_ptr = cr;
				*cmd_ptr = cmd;
				return 0;
			}
		}
		spin_unlock(&cr->conn_recovery_cmd_lock);
	}
	spin_unlock(&sess->cr_a_lock);

	return -1;
}

void iscsit_add_cmd_to_immediate_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	u8 state)
{
	struct iscsi_queue_req *qr;

	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
	if (!qr) {
		pr_err("Unable to allocate memory for"
			" struct iscsi_queue_req\n");
		return;
	}
	INIT_LIST_HEAD(&qr->qr_list);
	qr->cmd = cmd;
	qr->state = state;

	spin_lock_bh(&conn->immed_queue_lock);
	list_add_tail(&qr->qr_list, &conn->immed_queue_list);
	atomic_inc(&cmd->immed_queue_count);
	atomic_set(&conn->check_immediate_queue, 1);
	spin_unlock_bh(&conn->immed_queue_lock);

	wake_up(&conn->queues_wq);
}
EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue);

struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr;

	spin_lock_bh(&conn->immed_queue_lock);
	if (list_empty(&conn->immed_queue_list)) {
		spin_unlock_bh(&conn->immed_queue_lock);
		return NULL;
	}
	qr = list_first_entry(&conn->immed_queue_list,
			      struct iscsi_queue_req, qr_list);

	list_del(&qr->qr_list);
	if (qr->cmd)
		atomic_dec(&qr->cmd->immed_queue_count);
	spin_unlock_bh(&conn->immed_queue_lock);

	return qr;
}

static void iscsit_remove_cmd_from_immediate_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->immed_queue_lock);
	if (!atomic_read(&cmd->immed_queue_count)) {
		spin_unlock_bh(&conn->immed_queue_lock);
		return;
	}

	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
		if (qr->cmd != cmd)
			continue;

		atomic_dec(&qr->cmd->immed_queue_count);
		list_del(&qr->qr_list);
		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->immed_queue_lock);

	if (atomic_read(&cmd->immed_queue_count)) {
		pr_err("ITT: 0x%08x immed_queue_count: %d\n",
			cmd->init_task_tag,
			atomic_read(&cmd->immed_queue_count));
	}
}

void iscsit_add_cmd_to_response_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	u8 state)
{
	struct iscsi_queue_req *qr;

	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
	if (!qr) {
		pr_err("Unable to allocate memory for"
			" struct iscsi_queue_req\n");
		return;
	}
	INIT_LIST_HEAD(&qr->qr_list);
	qr->cmd = cmd;
	qr->state = state;

	spin_lock_bh(&conn->response_queue_lock);
	list_add_tail(&qr->qr_list, &conn->response_queue_list);
	atomic_inc(&cmd->response_queue_count);
	spin_unlock_bh(&conn->response_queue_lock);

	wake_up(&conn->queues_wq);
}
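
/*
 * Pop the oldest queue_req from the connection's response queue, or
 * return NULL when the queue is empty.
 */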
struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr;

	spin_lock_bh(&conn->response_queue_lock);
	if (list_empty(&conn->response_queue_list)) {
		spin_unlock_bh(&conn->response_queue_lock);
		return NULL;
	}

	qr = list_first_entry(&conn->response_queue_list,
			      struct iscsi_queue_req, qr_list);

	list_del(&qr->qr_list);
	if (qr->cmd)
		atomic_dec(&qr->cmd->response_queue_count);
	spin_unlock_bh(&conn->response_queue_lock);

	return qr;
}

static void iscsit_remove_cmd_from_response_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->response_queue_lock);
	if (!atomic_read(&cmd->response_queue_count)) {
		spin_unlock_bh(&conn->response_queue_lock);
		return;
	}

	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
				qr_list) {
		if (qr->cmd != cmd)
			continue;

		atomic_dec(&qr->cmd->response_queue_count);
		list_del(&qr->qr_list);
		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->response_queue_lock);

	if (atomic_read(&cmd->response_queue_count)) {
		pr_err("ITT: 0x%08x response_queue_count: %d\n",
			cmd->init_task_tag,
			atomic_read(&cmd->response_queue_count));
	}
}

bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
{
	bool empty;

	spin_lock_bh(&conn->immed_queue_lock);
	empty = list_empty(&conn->immed_queue_list);
	spin_unlock_bh(&conn->immed_queue_lock);

	if (!empty)
		return empty;

	spin_lock_bh(&conn->response_queue_lock);
	empty = list_empty(&conn->response_queue_list);
	spin_unlock_bh(&conn->response_queue_lock);

	return empty;
}

void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->immed_queue_lock);
	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
		list_del(&qr->qr_list);
		if (qr->cmd)
			atomic_dec(&qr->cmd->immed_queue_count);

		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->immed_queue_lock);

	spin_lock_bh(&conn->response_queue_lock);
	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
			qr_list) {
		list_del(&qr->qr_list);
		if (qr->cmd)
			atomic_dec(&qr->cmd->response_queue_count);

		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->response_queue_lock);
}

void iscsit_release_cmd(struct iscsi_cmd *cmd)
{
	struct iscsi_session *sess;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	if (cmd->conn)
		sess = cmd->conn->sess;
	else
		sess = cmd->sess;

	BUG_ON(!sess || !sess->se_sess);

	kfree(cmd->buf_ptr);
	kfree(cmd->pdu_list);
	kfree(cmd->seq_list);
	kfree(cmd->tmr_req);
	kfree(cmd->iov_data);
	kfree(cmd->text_in_ptr);

	percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag);
}
EXPORT_SYMBOL(iscsit_release_cmd);

void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
		       bool check_queues)
{
	struct iscsi_conn *conn = cmd->conn;

	if (scsi_cmd) {
		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			iscsit_free_r2ts_from_list(cmd);
		}
		if (cmd->data_direction == DMA_FROM_DEVICE)
			iscsit_free_all_datain_reqs(cmd);
	}

	if (conn && check_queues) {
		iscsit_remove_cmd_from_immediate_queue(cmd, conn);
		iscsit_remove_cmd_from_response_queue(cmd, conn);
	}

	if (conn && conn->conn_transport->iscsit_release_cmd)
		conn->conn_transport->iscsit_release_cmd(conn, cmd);
}
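
/*
 * Release an iscsi_cmd, dispatching on iscsi_opcode to decide whether
 * an associated struct se_cmd must be released through
 * transport_generic_free_cmd() first.
 */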
void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
{
	struct se_cmd *se_cmd = NULL;
	int rc;
	/*
	 * Determine if a struct se_cmd is associated with
	 * this struct iscsi_cmd.
	 */
	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		se_cmd = &cmd->se_cmd;
		__iscsit_free_cmd(cmd, true, shutdown);
		/*
		 * Fallthrough
		 */
	case ISCSI_OP_SCSI_TMFUNC:
		rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
		if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
			__iscsit_free_cmd(cmd, true, shutdown);
			target_put_sess_cmd(se_cmd);
		}
		break;
	case ISCSI_OP_REJECT:
		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			se_cmd = &cmd->se_cmd;
			__iscsit_free_cmd(cmd, true, shutdown);

			rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
			if (!rc && shutdown && se_cmd->se_sess) {
				__iscsit_free_cmd(cmd, true, shutdown);
				target_put_sess_cmd(se_cmd);
			}
			break;
		}
		/* Fall-through */
	default:
		__iscsit_free_cmd(cmd, false, shutdown);
		iscsit_release_cmd(cmd);
		break;
	}
}
EXPORT_SYMBOL(iscsit_free_cmd);

int iscsit_check_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	if (sess->session_usage_count != 0) {
		sess->session_waiting_on_uc = 1;
		spin_unlock_bh(&sess->session_usage_lock);
		if (in_interrupt())
			return 2;

		wait_for_completion(&sess->session_waiting_on_uc_comp);
		return 1;
	}
	spin_unlock_bh(&sess->session_usage_lock);

	return 0;
}

void iscsit_dec_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	sess->session_usage_count--;

	if (!sess->session_usage_count && sess->session_waiting_on_uc)
		complete(&sess->session_waiting_on_uc_comp);

	spin_unlock_bh(&sess->session_usage_lock);
}

void iscsit_inc_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	sess->session_usage_count++;
	spin_unlock_bh(&sess->session_usage_lock);
}

struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
{
	struct iscsi_conn *conn;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
		if ((conn->cid == cid) &&
		    (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
			iscsit_inc_conn_usage_count(conn);
			spin_unlock_bh(&sess->conn_lock);
			return conn;
		}
	}
	spin_unlock_bh(&sess->conn_lock);

	return NULL;
}

struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
{
	struct iscsi_conn *conn;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
		if (conn->cid == cid) {
			iscsit_inc_conn_usage_count(conn);
			spin_lock(&conn->state_lock);
			atomic_set(&conn->connection_wait_rcfr, 1);
			spin_unlock(&conn->state_lock);
			spin_unlock_bh(&sess->conn_lock);
			return conn;
		}
	}
	spin_unlock_bh(&sess->conn_lock);

	return NULL;
}
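
/*
 * Wait for all outstanding references on the connection to be dropped
 * (conn_usage_count reaching zero) before returning.
 */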
void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	if (conn->conn_usage_count != 0) {
		conn->conn_waiting_on_uc = 1;
		spin_unlock_bh(&conn->conn_usage_lock);

		wait_for_completion(&conn->conn_waiting_on_uc_comp);
		return;
	}
	spin_unlock_bh(&conn->conn_usage_lock);
}

void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	conn->conn_usage_count--;

	if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
		complete(&conn->conn_waiting_on_uc_comp);

	spin_unlock_bh(&conn->conn_usage_lock);
}

void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	conn->conn_usage_count++;
	spin_unlock_bh(&conn->conn_usage_lock);
}

static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
{
	u8 state;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
	if (!cmd)
		return -1;

	cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
	state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
				ISTATE_SEND_NOPIN_NO_RESPONSE;
	cmd->init_task_tag = RESERVED_ITT;
	cmd->targ_xfer_tag = (want_response) ?
			     session_get_next_ttt(conn->sess) : 0xFFFFFFFF;
	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);

	if (want_response)
		iscsit_start_nopin_response_timer(conn);
	iscsit_add_cmd_to_immediate_queue(cmd, conn, state);

	return 0;
}

static void iscsit_handle_nopin_response_timeout(unsigned long data)
{
	struct iscsi_conn *conn = (struct iscsi_conn *) data;

	iscsit_inc_conn_usage_count(conn);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		iscsit_dec_conn_usage_count(conn);
		return;
	}

	pr_debug("Did not receive response to NOPIN on CID: %hu on"
		" SID: %u, failing connection.\n", conn->cid,
			conn->sess->sid);
	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);

	{
	struct iscsi_portal_group *tpg = conn->sess->tpg;
	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;

	if (tiqn) {
		spin_lock_bh(&tiqn->sess_err_stats.lock);
		strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
				conn->sess->sess_ops->InitiatorName);
		tiqn->sess_err_stats.last_sess_failure_type =
				ISCSI_SESS_ERR_CXN_TIMEOUT;
		tiqn->sess_err_stats.cxn_timeout_errors++;
		atomic_long_inc(&conn->sess->conn_timeout_errors);
		spin_unlock_bh(&tiqn->sess_err_stats.lock);
	}
	}

	iscsit_cause_connection_reinstatement(conn, 0);
	iscsit_dec_conn_usage_count(conn);
}

void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	mod_timer(&conn->nopin_response_timer,
		(get_jiffies_64() + na->nopin_response_timeout * HZ));
	spin_unlock_bh(&conn->nopin_timer_lock);
}

/*
 *	Called with conn->nopin_timer_lock held.
 */
void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	init_timer(&conn->nopin_response_timer);
	conn->nopin_response_timer.expires =
		(get_jiffies_64() + na->nopin_response_timeout * HZ);
	conn->nopin_response_timer.data = (unsigned long)conn;
	conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout;
	conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
	add_timer(&conn->nopin_response_timer);

	pr_debug("Started NOPIN Response Timer on CID: %d to %u"
		" seconds\n", conn->cid, na->nopin_response_timeout);
	spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}
	conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&conn->nopin_timer_lock);

	del_timer_sync(&conn->nopin_response_timer);

	spin_lock_bh(&conn->nopin_timer_lock);
	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);
}

static void iscsit_handle_nopin_timeout(unsigned long data)
{
	struct iscsi_conn *conn = (struct iscsi_conn *) data;

	iscsit_inc_conn_usage_count(conn);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		iscsit_dec_conn_usage_count(conn);
		return;
	}
	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);

	iscsit_add_nopin(conn, 1);
	iscsit_dec_conn_usage_count(conn);
}

/*
 * Called with conn->nopin_timer_lock held.
 */
void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
	/*
	 * NOPIN timeout is disabled.
	 */
	if (!na->nopin_timeout)
		return;

	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
		return;

	init_timer(&conn->nopin_timer);
	conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
	conn->nopin_timer.data = (unsigned long)conn;
	conn->nopin_timer.function = iscsit_handle_nopin_timeout;
	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
	add_timer(&conn->nopin_timer);

	pr_debug("Started NOPIN Timer on CID: %d at %u second"
		" interval\n", conn->cid, na->nopin_timeout);
}

void iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
	/*
	 * NOPIN timeout is disabled.
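	 *
	 * Unlike __iscsit_start_nopin_timer(), this function takes
	 * conn->nopin_timer_lock itself before arming the timer.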
	 */
	if (!na->nopin_timeout)
		return;

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	init_timer(&conn->nopin_timer);
	conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
	conn->nopin_timer.data = (unsigned long)conn;
	conn->nopin_timer.function = iscsit_handle_nopin_timeout;
	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
	add_timer(&conn->nopin_timer);

	pr_debug("Started NOPIN Timer on CID: %d at %u second"
		" interval\n", conn->cid, na->nopin_timeout);
	spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}
	conn->nopin_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&conn->nopin_timer_lock);

	del_timer_sync(&conn->nopin_timer);

	spin_lock_bh(&conn->nopin_timer_lock);
	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);
}

int iscsit_send_tx_data(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	int use_misc)
{
	int tx_sent, tx_size;
	u32 iov_count;
	struct kvec *iov;

send_data:
	tx_size = cmd->tx_size;

	if (!use_misc) {
		iov = &cmd->iov_data[0];
		iov_count = cmd->iov_data_count;
	} else {
		iov = &cmd->iov_misc[0];
		iov_count = cmd->iov_misc_count;
	}

	tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
	if (tx_size != tx_sent) {
		if (tx_sent == -EAGAIN) {
			pr_err("tx_data() returned -EAGAIN\n");
			goto send_data;
		} else
			return -1;
	}
	cmd->tx_size = 0;

	return 0;
}

int iscsit_fe_sendpage_sg(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct scatterlist *sg = cmd->first_data_sg;
	struct kvec iov;
	u32 tx_hdr_size, data_len;
	u32 offset = cmd->first_data_sg_off;
	int tx_sent, iov_off;

send_hdr:
	tx_hdr_size = ISCSI_HDR_LEN;
	if (conn->conn_ops->HeaderDigest)
		tx_hdr_size += ISCSI_CRC_LEN;

	iov.iov_base = cmd->pdu;
	iov.iov_len = tx_hdr_size;

	tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
	if (tx_hdr_size != tx_sent) {
		if (tx_sent == -EAGAIN) {
			pr_err("tx_data() returned -EAGAIN\n");
			goto send_hdr;
		}
		return -1;
	}

	data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
	/*
	 * Set iov_off used by padding and data digest tx_data() calls below
	 * in order to determine proper offset into cmd->iov_data[]
	 */
	if (conn->conn_ops->DataDigest) {
		data_len -= ISCSI_CRC_LEN;
		if (cmd->padding)
			iov_off = (cmd->iov_data_count - 2);
		else
			iov_off = (cmd->iov_data_count - 1);
	} else {
		iov_off = (cmd->iov_data_count - 1);
	}
	/*
	 * Perform sendpage() for each page in the scatterlist
	 */
	while (data_len) {
		u32 space = (sg->length - offset);
		u32 sub_len = min_t(u32, data_len, space);
send_pg:
		tx_sent = conn->sock->ops->sendpage(conn->sock,
				sg_page(sg), sg->offset + offset, sub_len, 0);
		if (tx_sent != sub_len) {
			if (tx_sent == -EAGAIN) {
				pr_err("tcp_sendpage() returned"
						" -EAGAIN\n");
				goto send_pg;
			}

			pr_err("tcp_sendpage() failure: %d\n",
					tx_sent);
			return -1;
		}

		data_len -= sub_len;
		offset = 0;
		sg = sg_next(sg);
	}

send_padding:
	if (cmd->padding) {
		struct kvec *iov_p = &cmd->iov_data[iov_off++];

		tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
		if (cmd->padding != tx_sent) {
			if (tx_sent == -EAGAIN) {
				pr_err("tx_data() returned -EAGAIN\n");
				goto send_padding;
			}
			return -1;
		}
	}

send_datacrc:
	if (conn->conn_ops->DataDigest) {
		struct kvec *iov_d = &cmd->iov_data[iov_off];

		tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
		if (ISCSI_CRC_LEN != tx_sent) {
			if (tx_sent == -EAGAIN) {
				pr_err("tx_data() returned -EAGAIN\n");
				goto send_datacrc;
			}
			return -1;
		}
	}

	return 0;
}

/*
 * This function is mainly used for sending an ISCSI_TARG_LOGIN_RSP PDU
 * back to the Initiator when an exception condition occurs, with the
 * errors set in status_class and status_detail.
 *
 * Parameters:	iSCSI Connection, Status Class, Status Detail.
 * Returns:	0 on success, -1 on error.
 */
int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
{
	struct iscsi_login_rsp *hdr;
	struct iscsi_login *login = conn->conn_login;

	login->login_failed = 1;
	iscsit_collect_login_stats(conn, status_class, status_detail);

	memset(&login->rsp[0], 0, ISCSI_HDR_LEN);

	hdr = (struct iscsi_login_rsp *)&login->rsp[0];
	hdr->opcode		= ISCSI_OP_LOGIN_RSP;
	hdr->status_class	= status_class;
	hdr->status_detail	= status_detail;
	hdr->itt		= conn->login_itt;

	return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
}

void iscsit_print_session_params(struct iscsi_session *sess)
{
	struct iscsi_conn *conn;

	pr_debug("-----------------------------[Session Params for"
		" SID: %u]-----------------------------\n", sess->sid);
	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
		iscsi_dump_conn_ops(conn->conn_ops);
	spin_unlock_bh(&sess->conn_lock);

	iscsi_dump_sess_ops(sess->sess_ops);
}

static int iscsit_do_rx_data(
	struct iscsi_conn *conn,
	struct iscsi_data_count *count)
{
	int data = count->data_length, rx_loop = 0, total_rx = 0;
	struct msghdr msg;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	memset(&msg, 0, sizeof(struct msghdr));
	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC,
		      count->iov, count->iov_count, data);

	while (msg_data_left(&msg)) {
		rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
		if (rx_loop <= 0) {
			pr_debug("rx_loop: %d total_rx: %d\n",
				rx_loop, total_rx);
			return rx_loop;
		}
		total_rx += rx_loop;
		pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
				rx_loop, total_rx, data);
	}

	return total_rx;
}

static int iscsit_do_tx_data(
	struct iscsi_conn *conn,
	struct iscsi_data_count *count)
{
	int ret, iov_len;
	struct kvec *iov_p;
	struct msghdr msg;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	if (count->data_length <= 0) {
		pr_err("Data length is: %d\n", count->data_length);
		return -1;
	}

	memset(&msg, 0, sizeof(struct msghdr));

	iov_p = count->iov;
	iov_len = count->iov_count;

	ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
			     count->data_length);
	if (ret != count->data_length) {
		pr_err("Unexpected ret: %d send data %d\n",
		       ret, count->data_length);
		return -EPIPE;
	}
	pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);

	return ret;
}

int rx_data(
	struct iscsi_conn *conn,
	struct kvec *iov,
	int iov_count,
	int data)
{
	struct iscsi_data_count c;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	memset(&c, 0, sizeof(struct iscsi_data_count));
	c.iov = iov;
	c.iov_count = iov_count;
	c.data_length = data;
	c.type = ISCSI_RX_DATA;

	return iscsit_do_rx_data(conn, &c);
}

int tx_data(
	struct iscsi_conn *conn,
	struct kvec *iov,
	int iov_count,
	int data)
{
	struct iscsi_data_count c;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	memset(&c, 0, sizeof(struct iscsi_data_count));
	c.iov = iov;
	c.iov_count = iov_count;
	c.data_length = data;
	c.type = ISCSI_TX_DATA;

	return iscsit_do_tx_data(conn, &c);
}

static bool sockaddr_equal(struct sockaddr_storage *x, struct sockaddr_storage *y)
{
	switch (x->ss_family) {
	case AF_INET: {
		struct sockaddr_in *sinx = (struct sockaddr_in *)x;
		struct sockaddr_in *siny = (struct sockaddr_in *)y;
		if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
			return false;
		if (sinx->sin_port != siny->sin_port)
			return false;
		break;
	}
	case AF_INET6: {
		struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
		struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
		if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
			return false;
		if (sinx->sin6_port != siny->sin6_port)
			return false;
		break;
	}
	default:
		return false;
	}
	return true;
}

void iscsit_collect_login_stats(
	struct iscsi_conn *conn,
	u8 status_class,
	u8 status_detail)
{
	struct iscsi_param *intrname = NULL;
	struct iscsi_tiqn *tiqn;
	struct iscsi_login_stats *ls;

	tiqn = iscsit_snmp_get_tiqn(conn);
	if (!tiqn)
		return;

	ls = &tiqn->login_stats;

	spin_lock(&ls->lock);
	if (sockaddr_equal(&conn->login_sockaddr, &ls->last_intr_fail_sockaddr) &&
	    ((get_jiffies_64() - ls->last_fail_time) < 10)) {
		/* We already have the failure info for this login */
		spin_unlock(&ls->lock);
		return;
	}

	if (status_class == ISCSI_STATUS_CLS_SUCCESS)
		ls->accepts++;
	else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
		ls->redirects++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		   (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
		ls->authenticate_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		   (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
		ls->authorize_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		   (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
		ls->negotiate_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
	} else {
		ls->other_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
	}

	/* Save initiator name, ip address and time, if it is a failed login */
	if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
		if (conn->param_list)
			intrname = iscsi_find_param_from_key(INITIATORNAME,
							     conn->param_list);
		strlcpy(ls->last_intr_fail_name,
		       (intrname ? intrname->value : "Unknown"),
		       sizeof(ls->last_intr_fail_name));

		ls->last_intr_fail_ip_family = conn->login_family;

		ls->last_intr_fail_sockaddr = conn->login_sockaddr;
		ls->last_fail_time = get_jiffies_64();
	}

	spin_unlock(&ls->lock);
}

struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
{
	struct iscsi_portal_group *tpg;

	if (!conn || !conn->sess)
		return NULL;

	tpg = conn->sess->tpg;
	if (!tpg)
		return NULL;

	if (!tpg->tpg_tiqn)
		return NULL;

	return tpg->tpg_tiqn;
}