/*******************************************************************************
 * This file contains the iSCSI Target specific utility functions.
 *
 * (c) Copyright 2007-2013 Datera, Inc.
 *
 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ******************************************************************************/

#include <linux/list.h>
#include <linux/percpu_ida.h>
#include <net/ipv6.h>         /* ipv6_addr_equal() */
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>

#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"

/*
 * Debug helper: hex-dump @len bytes of @buff, 16 bytes per row.
 * Not referenced in this file; kept for ad-hoc debugging.
 */
#define PRINT_BUFF(buff, len)                                   \
{                                                               \
        int zzz;                                                \
                                                                \
        pr_debug("%d:\n", __LINE__);                            \
        for (zzz = 0; zzz < len; zzz++) {                       \
                if (zzz % 16 == 0) {                            \
                        if (zzz)                                \
                                pr_debug("\n");                 \
                        pr_debug("%4i: ", zzz);                 \
                }                                               \
                pr_debug("%02x ", (unsigned char) (buff)[zzz]); \
        }                                                       \
        if ((len + 1) % 16)                                     \
                pr_debug("\n");                                 \
}

/* Global target IQN list and its lock; defined in iscsi_target.c. */
extern struct list_head g_tiqn_list;
extern spinlock_t tiqn_lock;

/*
 * Called with cmd->r2t_lock held.
 */
int iscsit_add_r2t_to_list(
        struct iscsi_cmd *cmd,
        u32 offset,
        u32 xfer_len,
        int recovery,
        u32 r2t_sn)
{
        struct iscsi_r2t *r2t;

        /* GFP_ATOMIC: caller holds cmd->r2t_lock (spinlock, BH disabled). */
        r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
        if (!r2t) {
                pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
                return -1;
        }
        INIT_LIST_HEAD(&r2t->r2t_list);

        r2t->recovery_r2t = recovery;
        /* r2t_sn == 0 means "allocate the next R2TSN for this command". */
        r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
        r2t->offset = offset;
        r2t->xfer_len = xfer_len;
        list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
        /*
         * Drop r2t_lock around the immediate-queue insertion: that path
         * takes conn->immed_queue_lock and wakes the TX thread, and must
         * not nest inside r2t_lock.  Re-acquire before returning so the
         * caller's locking contract ("called with r2t_lock held") holds.
         */
        spin_unlock_bh(&cmd->r2t_lock);

        iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);

        spin_lock_bh(&cmd->r2t_lock);
        return 0;
}

/*
 * Locate the R2T whose [offset, offset + xfer_len) window fully covers
 * [offset, offset + length).  Returns the R2T or NULL if none matches.
 * NOTE(review): the returned R2T is used after r2t_lock is dropped;
 * presumably the command's state machine guarantees it stays valid.
 */
struct iscsi_r2t *iscsit_get_r2t_for_eos(
        struct iscsi_cmd *cmd,
        u32 offset,
        u32 length)
{
        struct iscsi_r2t *r2t;

        spin_lock_bh(&cmd->r2t_lock);
        list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
                if ((r2t->offset <= offset) &&
                    (r2t->offset + r2t->xfer_len) >= (offset + length)) {
                        spin_unlock_bh(&cmd->r2t_lock);
                        return r2t;
                }
        }
        spin_unlock_bh(&cmd->r2t_lock);

        pr_err("Unable to locate R2T for Offset: %u, Length:"
                " %u\n", offset, length);
        return NULL;
}

/*
 * Return the first R2T on the command's list that has not yet been
 * transmitted, or NULL (with an error message) if all have been sent.
 */
struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
{
        struct iscsi_r2t *r2t;

        spin_lock_bh(&cmd->r2t_lock);
        list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
                if (!r2t->sent_r2t) {
                        spin_unlock_bh(&cmd->r2t_lock);
                        return r2t;
                }
        }
        spin_unlock_bh(&cmd->r2t_lock);

        pr_err("Unable to locate next R2T to send for ITT:"
                " 0x%08x.\n", cmd->init_task_tag);
        return NULL;
}

/*
 * Called with cmd->r2t_lock held.
 */
void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
{
        /* @cmd is unused; kept for interface symmetry with the add path. */
        list_del(&r2t->r2t_list);
        kmem_cache_free(lio_r2t_cache, r2t);
}

/* Release every R2T still queued on @cmd, taking r2t_lock itself. */
void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
{
        struct iscsi_r2t *r2t, *r2t_tmp;

        spin_lock_bh(&cmd->r2t_lock);
        list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
                iscsit_free_r2t(r2t, cmd);
        spin_unlock_bh(&cmd->r2t_lock);
}

/*
 * May be called from software interrupt (timer) context for allocating
 * iSCSI NopINs.
 */
struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
{
        struct iscsi_cmd *cmd;
        struct se_session *se_sess = conn->sess->se_sess;
        int size, tag;

        /*
         * @state (e.g. TASK_RUNNING/TASK_INTERRUPTIBLE) controls whether
         * percpu_ida_alloc() may sleep waiting for a free tag.
         */
        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
        if (tag < 0)
                return NULL;

        /*
         * Commands live in the preallocated per-session sess_cmd_map,
         * indexed by tag; each slot also carries transport private data.
         */
        size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
        cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
        memset(cmd, 0, size);

        cmd->se_cmd.map_tag = tag;
        cmd->conn = conn;
        cmd->data_direction = DMA_NONE;
        INIT_LIST_HEAD(&cmd->i_conn_node);
        INIT_LIST_HEAD(&cmd->datain_list);
        INIT_LIST_HEAD(&cmd->cmd_r2t_list);
        spin_lock_init(&cmd->datain_lock);
        spin_lock_init(&cmd->dataout_timeout_lock);
        spin_lock_init(&cmd->istate_lock);
        spin_lock_init(&cmd->error_lock);
        spin_lock_init(&cmd->r2t_lock);
        timer_setup(&cmd->dataout_timer, iscsit_handle_dataout_timeout, 0);

        return cmd;
}
EXPORT_SYMBOL(iscsit_allocate_cmd);

/* Find the DataIN sequence with the given send order, or NULL. */
struct iscsi_seq *iscsit_get_seq_holder_for_datain(
        struct iscsi_cmd *cmd,
        u32 seq_send_order)
{
        u32 i;

        for (i = 0; i < cmd->seq_count; i++)
                if (cmd->seq_list[i].seq_send_order == seq_send_order)
                        return &cmd->seq_list[i];

        return NULL;
}

/*
 * Return the next SEQTYPE_NORMAL sequence to solicit via R2T, advancing
 * cmd->seq_send_order on a match.  NULL if the list is missing/exhausted.
 */
struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
{
        u32 i;

        if (!cmd->seq_list) {
                pr_err("struct iscsi_cmd->seq_list is NULL!\n");
                return NULL;
        }

        for (i = 0; i < cmd->seq_count; i++) {
                if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
                        continue;
                if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
                        cmd->seq_send_order++;
                        return &cmd->seq_list[i];
                }
        }

        return NULL;
}

/* Find the queued R2T with the given R2TSN, or NULL if not present. */
struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
        struct iscsi_cmd *cmd,
        u32 r2t_sn)
{
        struct iscsi_r2t *r2t;

        spin_lock_bh(&cmd->r2t_lock);
        list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
                if (r2t->r2t_sn == r2t_sn) {
                        spin_unlock_bh(&cmd->r2t_lock);
                        return r2t;
                }
        }
        spin_unlock_bh(&cmd->r2t_lock);

        return NULL;
}

/*
 * Classify @cmdsn against the session's ExpCmdSN/MaxCmdSN window using
 * serial number arithmetic (iscsi_sna_gt handles 32-bit wraparound).
 * Returns one of the CMDSN_* codes; increments ExpCmdSN on exact match.
 * Caller serializes via sess->cmdsn_mutex (see iscsit_sequence_cmd()).
 */
static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
{
        u32 max_cmdsn;
        int ret;

        /*
         * This is the proper method of checking received CmdSN against
         * ExpCmdSN and MaxCmdSN values, as well as accounting for out
         * of order CmdSNs due to multiple connection sessions and/or
         * CRC failures.
         */
        max_cmdsn = atomic_read(&sess->max_cmd_sn);
        if (iscsi_sna_gt(cmdsn, max_cmdsn)) {
                pr_err("Received CmdSN: 0x%08x is greater than"
                       " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, max_cmdsn);
                ret = CMDSN_MAXCMDSN_OVERRUN;

        } else if (cmdsn == sess->exp_cmd_sn) {
                sess->exp_cmd_sn++;
                pr_debug("Received CmdSN matches ExpCmdSN,"
                      " incremented ExpCmdSN to: 0x%08x\n",
                      sess->exp_cmd_sn);
                ret = CMDSN_NORMAL_OPERATION;

        } else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
                pr_debug("Received CmdSN: 0x%08x is greater"
                      " than ExpCmdSN: 0x%08x, not acknowledging.\n",
                      cmdsn, sess->exp_cmd_sn);
                ret = CMDSN_HIGHER_THAN_EXP;

        } else {
                pr_err("Received CmdSN: 0x%08x is less than"
                       " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
                       sess->exp_cmd_sn);
                ret = CMDSN_LOWER_THAN_EXP;
        }

        return ret;
}

/*
 * Commands may be received out of order if MC/S is in use.
 * Ensure they are executed in CmdSN order.
 */
int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                        unsigned char *buf, __be32 cmdsn)
{
        int ret, cmdsn_ret;
        bool reject = false;
        u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES;

        mutex_lock(&conn->sess->cmdsn_mutex);

        cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, be32_to_cpu(cmdsn));
        switch (cmdsn_ret) {
        case CMDSN_NORMAL_OPERATION:
                ret = iscsit_execute_cmd(cmd, 0);
                /* Execute any now-unblocked out-of-order commands too. */
                if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
                        iscsit_execute_ooo_cmdsns(conn->sess);
                else if (ret < 0) {
                        reject = true;
                        ret = CMDSN_ERROR_CANNOT_RECOVER;
                }
                break;
        case CMDSN_HIGHER_THAN_EXP:
                /* Park the command until the CmdSN gap is filled. */
                ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
                if (ret < 0) {
                        reject = true;
                        ret = CMDSN_ERROR_CANNOT_RECOVER;
                        break;
                }
                ret = CMDSN_HIGHER_THAN_EXP;
                break;
        case CMDSN_LOWER_THAN_EXP:
        case CMDSN_MAXCMDSN_OVERRUN:
        default:
                cmd->i_state = ISTATE_REMOVE;
                iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
                /*
                 * Existing callers for iscsit_sequence_cmd() will silently
                 * ignore commands with CMDSN_LOWER_THAN_EXP, so force this
                 * return for CMDSN_MAXCMDSN_OVERRUN as well..
                 */
                ret = CMDSN_LOWER_THAN_EXP;
                break;
        }
        mutex_unlock(&conn->sess->cmdsn_mutex);

        /* Reject outside cmdsn_mutex; the reject path queues a response. */
        if (reject)
                iscsit_reject_cmd(cmd, reason, buf);

        return ret;
}
EXPORT_SYMBOL(iscsit_sequence_cmd);

/*
 * Validate an unsolicited DataOut PDU against the negotiated InitialR2T
 * and FirstBurstLength session parameters.  Returns 0 if acceptable,
 * -1 after sending a CHECK_CONDITION for protocol violations.
 */
int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
{
        struct iscsi_conn *conn = cmd->conn;
        struct se_cmd *se_cmd = &cmd->se_cmd;
        struct iscsi_data *hdr = (struct iscsi_data *) buf;
        u32 payload_length = ntoh24(hdr->dlength);

        /* InitialR2T=Yes forbids any unsolicited data at all. */
        if (conn->sess->sess_ops->InitialR2T) {
                pr_err("Received unexpected unsolicited data"
                        " while InitialR2T=Yes, protocol error.\n");
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
                return -1;
        }

        if ((cmd->first_burst_len + payload_length) >
             conn->sess->sess_ops->FirstBurstLength) {
                pr_err("Total %u bytes exceeds FirstBurstLength: %u"
                        " for this Unsolicited DataOut Burst.\n",
                        (cmd->first_burst_len + payload_length),
                        conn->sess->sess_ops->FirstBurstLength);
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_INCORRECT_AMOUNT_OF_DATA, 0);
                return -1;
        }

        /* Non-final PDUs only need the running-total check above. */
        if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
                return 0;

        /*
         * The final PDU of an unsolicited burst must land exactly on the
         * expected transfer length or on FirstBurstLength.
         */
        if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
            ((cmd->first_burst_len + payload_length) !=
              conn->sess->sess_ops->FirstBurstLength)) {
                pr_err("Unsolicited non-immediate data received %u"
                        " does not equal FirstBurstLength: %u, and does"
                        " not equal ExpXferLen %u.\n",
                        (cmd->first_burst_len + payload_length),
                        conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length);
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_INCORRECT_AMOUNT_OF_DATA, 0);
                return -1;
        }
        return 0;
}

/* Look up a command on @conn by Initiator Task Tag; NULL if not found. */
struct iscsi_cmd *iscsit_find_cmd_from_itt(
        struct iscsi_conn *conn,
        itt_t init_task_tag)
{
        struct iscsi_cmd *cmd;

        spin_lock_bh(&conn->cmd_lock);
        list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
                if (cmd->init_task_tag == init_task_tag) {
                        spin_unlock_bh(&conn->cmd_lock);
                        return cmd;
                }
        }
        spin_unlock_bh(&conn->cmd_lock);

        pr_err("Unable to locate ITT: 0x%08x on CID: %hu",
                        init_task_tag, conn->cid);
        return NULL;
}
EXPORT_SYMBOL(iscsit_find_cmd_from_itt);

/*
 * Like iscsit_find_cmd_from_itt(), but skips commands that already saw
 * their final DataOut, and on miss dumps @length bytes of payload from
 * the socket so the RX stream stays aligned.
 */
struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
        struct iscsi_conn *conn,
        itt_t init_task_tag,
        u32 length)
{
        struct iscsi_cmd *cmd;

        spin_lock_bh(&conn->cmd_lock);
        list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
                if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT)
                        continue;
                if (cmd->init_task_tag == init_task_tag) {
                        spin_unlock_bh(&conn->cmd_lock);
                        return cmd;
                }
        }
        spin_unlock_bh(&conn->cmd_lock);

        pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
                        " dumping payload\n", init_task_tag, conn->cid);
        if (length)
                iscsit_dump_data_payload(conn, length, 1);

        return NULL;
}
EXPORT_SYMBOL(iscsit_find_cmd_from_itt_or_dump);

/* Look up a command on @conn by Target Transfer Tag; NULL if not found. */
struct iscsi_cmd *iscsit_find_cmd_from_ttt(
        struct iscsi_conn *conn,
        u32 targ_xfer_tag)
{
        struct iscsi_cmd *cmd = NULL;

        spin_lock_bh(&conn->cmd_lock);
        list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
                if (cmd->targ_xfer_tag == targ_xfer_tag) {
                        spin_unlock_bh(&conn->cmd_lock);
                        return cmd;
                }
        }
        spin_unlock_bh(&conn->cmd_lock);

        pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
                        targ_xfer_tag, conn->cid);
        return NULL;
}

/*
 * Locate a command by ITT across the session's connection-recovery lists.
 * Returns -2 if found on an inactive recovery entry (recovery incomplete),
 * 0 if found on an active entry (ready for reassignment), -1 if not found.
 * On success *cr_ptr/*cmd_ptr are set.
 */
int iscsit_find_cmd_for_recovery(
        struct iscsi_session *sess,
        struct iscsi_cmd **cmd_ptr,
        struct iscsi_conn_recovery **cr_ptr,
        itt_t init_task_tag)
{
        struct iscsi_cmd *cmd = NULL;
        struct iscsi_conn_recovery *cr;
        /*
         * Scan through the inactive connection recovery list's command list.
         * If init_task_tag matches, the command still belongs to a failed
         * connection whose recovery has not completed.
         */
        spin_lock(&sess->cr_i_lock);
        list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
                spin_lock(&cr->conn_recovery_cmd_lock);
                list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
                        if (cmd->init_task_tag == init_task_tag) {
                                spin_unlock(&cr->conn_recovery_cmd_lock);
                                spin_unlock(&sess->cr_i_lock);

                                *cr_ptr = cr;
                                *cmd_ptr = cmd;
                                return -2;
                        }
                }
                spin_unlock(&cr->conn_recovery_cmd_lock);
        }
        spin_unlock(&sess->cr_i_lock);
        /*
         * Scan through the active connection recovery list's command list.
         * If init_task_tag matches the command is ready to be reassigned.
         */
        spin_lock(&sess->cr_a_lock);
        list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
                spin_lock(&cr->conn_recovery_cmd_lock);
                list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
                        if (cmd->init_task_tag == init_task_tag) {
                                spin_unlock(&cr->conn_recovery_cmd_lock);
                                spin_unlock(&sess->cr_a_lock);

                                *cr_ptr = cr;
                                *cmd_ptr = cmd;
                                return 0;
                        }
                }
                spin_unlock(&cr->conn_recovery_cmd_lock);
        }
        spin_unlock(&sess->cr_a_lock);

        return -1;
}

/*
 * Queue @cmd on the connection's immediate queue with the given istate
 * and wake the TX thread.  Allocation failure is silently dropped
 * (GFP_ATOMIC; may be called from timer/softirq context).
 */
void iscsit_add_cmd_to_immediate_queue(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn,
        u8 state)
{
        struct iscsi_queue_req *qr;

        qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
        if (!qr) {
                pr_err("Unable to allocate memory for"
                        " struct iscsi_queue_req\n");
                return;
        }
        INIT_LIST_HEAD(&qr->qr_list);
        qr->cmd = cmd;
        qr->state = state;

        spin_lock_bh(&conn->immed_queue_lock);
        list_add_tail(&qr->qr_list, &conn->immed_queue_list);
        atomic_inc(&cmd->immed_queue_count);
        atomic_set(&conn->check_immediate_queue, 1);
        spin_unlock_bh(&conn->immed_queue_lock);

        wake_up(&conn->queues_wq);
}
EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue);

/*
 * Pop the head of the immediate queue; NULL if empty.  Caller owns the
 * returned iscsi_queue_req and must kmem_cache_free() it.
 */
struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
{
        struct iscsi_queue_req *qr;

        spin_lock_bh(&conn->immed_queue_lock);
        if (list_empty(&conn->immed_queue_list)) {
                spin_unlock_bh(&conn->immed_queue_lock);
                return NULL;
        }
        qr = list_first_entry(&conn->immed_queue_list,
                              struct iscsi_queue_req, qr_list);

        list_del(&qr->qr_list);
        if (qr->cmd)
                atomic_dec(&qr->cmd->immed_queue_count);
        spin_unlock_bh(&conn->immed_queue_lock);

        return qr;
}

/* Remove every immediate-queue entry referencing @cmd. */
static void iscsit_remove_cmd_from_immediate_queue(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn)
{
        struct iscsi_queue_req *qr, *qr_tmp;

        spin_lock_bh(&conn->immed_queue_lock);
        /* Fast path: nothing queued for this command. */
        if (!atomic_read(&cmd->immed_queue_count)) {
                spin_unlock_bh(&conn->immed_queue_lock);
                return;
        }

        list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
                if (qr->cmd != cmd)
                        continue;

                atomic_dec(&qr->cmd->immed_queue_count);
                list_del(&qr->qr_list);
                kmem_cache_free(lio_qr_cache, qr);
        }
        spin_unlock_bh(&conn->immed_queue_lock);

        /* Count should be zero now; a nonzero value indicates a leak. */
        if (atomic_read(&cmd->immed_queue_count)) {
                pr_err("ITT: 0x%08x immed_queue_count: %d\n",
                        cmd->init_task_tag,
                        atomic_read(&cmd->immed_queue_count));
        }
}

/*
 * Queue @cmd on the connection's response queue and wake the TX thread.
 * Returns 0 or -ENOMEM.
 */
int iscsit_add_cmd_to_response_queue(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn,
        u8 state)
{
        struct iscsi_queue_req *qr;

        qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
        if (!qr) {
                pr_err("Unable to allocate memory for"
                        " struct iscsi_queue_req\n");
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&qr->qr_list);
        qr->cmd = cmd;
        qr->state = state;

        spin_lock_bh(&conn->response_queue_lock);
        list_add_tail(&qr->qr_list, &conn->response_queue_list);
        atomic_inc(&cmd->response_queue_count);
        spin_unlock_bh(&conn->response_queue_lock);

        wake_up(&conn->queues_wq);
        return 0;
}

/* Pop the head of the response queue; NULL if empty (caller frees). */
struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
{
        struct iscsi_queue_req *qr;

        spin_lock_bh(&conn->response_queue_lock);
        if (list_empty(&conn->response_queue_list)) {
                spin_unlock_bh(&conn->response_queue_lock);
                return NULL;
        }

        qr = list_first_entry(&conn->response_queue_list,
                              struct iscsi_queue_req, qr_list);

        list_del(&qr->qr_list);
        if (qr->cmd)
                atomic_dec(&qr->cmd->response_queue_count);
        spin_unlock_bh(&conn->response_queue_lock);

        return qr;
}

/* Remove every response-queue entry referencing @cmd. */
static void iscsit_remove_cmd_from_response_queue(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn)
{
        struct iscsi_queue_req *qr, *qr_tmp;

        spin_lock_bh(&conn->response_queue_lock);
        if (!atomic_read(&cmd->response_queue_count)) {
                spin_unlock_bh(&conn->response_queue_lock);
                return;
        }

        list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
                                qr_list) {
                if (qr->cmd != cmd)
                        continue;

                atomic_dec(&qr->cmd->response_queue_count);
                list_del(&qr->qr_list);
                kmem_cache_free(lio_qr_cache, qr);
        }
        spin_unlock_bh(&conn->response_queue_lock);

        if (atomic_read(&cmd->response_queue_count)) {
                pr_err("ITT: 0x%08x response_queue_count: %d\n",
                        cmd->init_task_tag,
                        atomic_read(&cmd->response_queue_count));
        }
}

/* True only when both the immediate and response queues are empty. */
bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
{
        bool empty;

        spin_lock_bh(&conn->immed_queue_lock);
        empty = list_empty(&conn->immed_queue_list);
        spin_unlock_bh(&conn->immed_queue_lock);

        if (!empty)
                return empty;

        spin_lock_bh(&conn->response_queue_lock);
        empty = list_empty(&conn->response_queue_list);
        spin_unlock_bh(&conn->response_queue_lock);

        return empty;
}

/* Drain and free all queued requests on both connection queues. */
void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
{
        struct iscsi_queue_req *qr, *qr_tmp;

        spin_lock_bh(&conn->immed_queue_lock);
        list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
                list_del(&qr->qr_list);
                if (qr->cmd)
                        atomic_dec(&qr->cmd->immed_queue_count);

                kmem_cache_free(lio_qr_cache, qr);
        }
        spin_unlock_bh(&conn->immed_queue_lock);

        spin_lock_bh(&conn->response_queue_lock);
        list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
                        qr_list) {
                list_del(&qr->qr_list);
                if (qr->cmd)
                        atomic_dec(&qr->cmd->response_queue_count);

                kmem_cache_free(lio_qr_cache, qr);
        }
        spin_unlock_bh(&conn->response_queue_lock);
}

/*
 * Free @cmd's auxiliary allocations and return its tag to the session's
 * percpu_ida pool.  The iscsi_cmd itself lives in sess_cmd_map, so the
 * tag release is what actually recycles the slot.
 */
void iscsit_release_cmd(struct iscsi_cmd *cmd)
{
        struct iscsi_session *sess;
        struct se_cmd *se_cmd = &cmd->se_cmd;

        WARN_ON(!list_empty(&cmd->i_conn_node));

        /* Commands from a failed connection carry the session directly. */
        if (cmd->conn)
                sess = cmd->conn->sess;
        else
                sess = cmd->sess;

        BUG_ON(!sess || !sess->se_sess);

        kfree(cmd->buf_ptr);
        kfree(cmd->pdu_list);
        kfree(cmd->seq_list);
        kfree(cmd->tmr_req);
        kfree(cmd->iov_data);
        kfree(cmd->text_in_ptr);

        percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag);
}
EXPORT_SYMBOL(iscsit_release_cmd);

/*
 * Tear down per-direction state (DataOut timer + R2Ts, or DataIN reqs),
 * optionally purge @cmd from both connection queues, and give the
 * transport a release callback.
 */
void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
{
        struct iscsi_conn *conn = cmd->conn;

        WARN_ON(!list_empty(&cmd->i_conn_node));

        if (cmd->data_direction == DMA_TO_DEVICE) {
                iscsit_stop_dataout_timer(cmd);
                iscsit_free_r2ts_from_list(cmd);
        }
        if (cmd->data_direction == DMA_FROM_DEVICE)
                iscsit_free_all_datain_reqs(cmd);

        if (conn && check_queues) {
                iscsit_remove_cmd_from_immediate_queue(cmd, conn);
                iscsit_remove_cmd_from_response_queue(cmd, conn);
        }

        if (conn && conn->conn_transport->iscsit_release_cmd)
                conn->conn_transport->iscsit_release_cmd(conn, cmd);
}

/*
 * Release @cmd either through target-core (when a se_cmd was set up) or
 * directly.  On the shutdown path, a still-referenced se_cmd gets a
 * second __iscsit_free_cmd() pass plus an extra kref put.
 */
void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
{
        /* se_tfo is only set once the command entered target-core. */
        struct se_cmd *se_cmd = cmd->se_cmd.se_tfo ? &cmd->se_cmd : NULL;
        int rc;

        __iscsit_free_cmd(cmd, shutdown);
        if (se_cmd) {
                rc = transport_generic_free_cmd(se_cmd, shutdown);
                if (!rc && shutdown && se_cmd->se_sess) {
                        __iscsit_free_cmd(cmd, shutdown);
                        target_put_sess_cmd(se_cmd);
                }
        } else {
                iscsit_release_cmd(cmd);
        }
}
EXPORT_SYMBOL(iscsit_free_cmd);

/*
 * Wait for the session usage count to drain.  Returns 2 when called in
 * interrupt context (cannot sleep; waiter flagged for later completion),
 * 1 after actually sleeping, 0 when the count was already zero.
 */
int iscsit_check_session_usage_count(struct iscsi_session *sess)
{
        spin_lock_bh(&sess->session_usage_lock);
        if (sess->session_usage_count != 0) {
                sess->session_waiting_on_uc = 1;
                spin_unlock_bh(&sess->session_usage_lock);
                if (in_interrupt())
                        return 2;

                wait_for_completion(&sess->session_waiting_on_uc_comp);
                return 1;
        }
        spin_unlock_bh(&sess->session_usage_lock);

        return 0;
}

/* Drop a session usage reference; wake the drain waiter on zero. */
void iscsit_dec_session_usage_count(struct iscsi_session *sess)
{
        spin_lock_bh(&sess->session_usage_lock);
        sess->session_usage_count--;

        if (!sess->session_usage_count && sess->session_waiting_on_uc)
                complete(&sess->session_waiting_on_uc_comp);

        spin_unlock_bh(&sess->session_usage_lock);
}

/* Take a session usage reference. */
void iscsit_inc_session_usage_count(struct iscsi_session *sess)
{
        spin_lock_bh(&sess->session_usage_lock);
        sess->session_usage_count++;
        spin_unlock_bh(&sess->session_usage_lock);
}

/*
 * Find a logged-in connection by CID, taking a usage reference on it.
 * NULL if no such connection (or not in LOGGED_IN state).
 */
struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
{
        struct iscsi_conn *conn;

        spin_lock_bh(&sess->conn_lock);
        list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
                if ((conn->cid == cid) &&
                    (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
                        iscsit_inc_conn_usage_count(conn);
                        spin_unlock_bh(&sess->conn_lock);
                        return conn;
                }
        }
        spin_unlock_bh(&sess->conn_lock);

        return NULL;
}

/*
 * Find a connection by CID (any state) for connection recovery /
 * reinstatement; takes a usage reference and marks connection_wait_rcfr.
 */
struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
{
        struct iscsi_conn *conn;

        spin_lock_bh(&sess->conn_lock);
        list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
                if (conn->cid == cid) {
                        iscsit_inc_conn_usage_count(conn);
                        spin_lock(&conn->state_lock);
                        atomic_set(&conn->connection_wait_rcfr, 1);
                        spin_unlock(&conn->state_lock);
                        spin_unlock_bh(&sess->conn_lock);
                        return conn;
                }
        }
        spin_unlock_bh(&sess->conn_lock);

        return NULL;
}

/* Sleep until the connection usage count drains to zero. */
void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
{
        spin_lock_bh(&conn->conn_usage_lock);
        if (conn->conn_usage_count != 0) {
                conn->conn_waiting_on_uc = 1;
                spin_unlock_bh(&conn->conn_usage_lock);

                wait_for_completion(&conn->conn_waiting_on_uc_comp);
                return;
        }
        spin_unlock_bh(&conn->conn_usage_lock);
}

/* Drop a connection usage reference; wake the drain waiter on zero. */
void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
{
        spin_lock_bh(&conn->conn_usage_lock);
        conn->conn_usage_count--;

        if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
                complete(&conn->conn_waiting_on_uc_comp);

        spin_unlock_bh(&conn->conn_usage_lock);
}

/* Take a connection usage reference. */
void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
{
        spin_lock_bh(&conn->conn_usage_lock);
        conn->conn_usage_count++;
        spin_unlock_bh(&conn->conn_usage_lock);
}

/*
 * Build and queue a NopIN PDU on @conn.  @want_response selects a real
 * ping (fresh TTT, response timer armed) versus an unsolicited NopIN
 * (TTT 0xFFFFFFFF).  Returns 0 or -1 on allocation failure.
 */
static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
{
        u8 state;
        struct iscsi_cmd *cmd;

        cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
        if (!cmd)
                return -1;

        cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
        state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
                                ISTATE_SEND_NOPIN_NO_RESPONSE;
        cmd->init_task_tag = RESERVED_ITT;
        cmd->targ_xfer_tag = (want_response) ?
                             session_get_next_ttt(conn->sess) : 0xFFFFFFFF;
        spin_lock_bh(&conn->cmd_lock);
        list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
        spin_unlock_bh(&conn->cmd_lock);

        if (want_response)
                iscsit_start_nopin_response_timer(conn);
        iscsit_add_cmd_to_immediate_queue(cmd, conn, state);

        return 0;
}

/*
 * NopIN response timer expiry: the initiator never answered our ping.
 * Record a connection-timeout error against the TIQN stats and force
 * connection reinstatement.
 */
void iscsit_handle_nopin_response_timeout(struct timer_list *t)
{
        struct iscsi_conn *conn = from_timer(conn, t, nopin_response_timer);

        iscsit_inc_conn_usage_count(conn);

        spin_lock_bh(&conn->nopin_timer_lock);
        /* A concurrent stop request wins; bail out quietly. */
        if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                iscsit_dec_conn_usage_count(conn);
                return;
        }

        pr_debug("Did not receive response to NOPIN on CID: %hu on"
                " SID: %u, failing connection.\n", conn->cid,
                        conn->sess->sid);
        conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
        spin_unlock_bh(&conn->nopin_timer_lock);

        {
        struct iscsi_portal_group *tpg = conn->sess->tpg;
        struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;

        if (tiqn) {
                spin_lock_bh(&tiqn->sess_err_stats.lock);
                /*
                 * NOTE(review): unbounded strcpy() — presumably
                 * InitiatorName length was validated at login; confirm,
                 * or consider strscpy().
                 */
                strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
                                conn->sess->sess_ops->InitiatorName);
                tiqn->sess_err_stats.last_sess_failure_type =
                                ISCSI_SESS_ERR_CXN_TIMEOUT;
                tiqn->sess_err_stats.cxn_timeout_errors++;
                atomic_long_inc(&conn->sess->conn_timeout_errors);
                spin_unlock_bh(&tiqn->sess_err_stats.lock);
        }
        }

        iscsit_cause_connection_reinstatement(conn, 0);
        iscsit_dec_conn_usage_count(conn);
}

/* Push the NopIN response deadline forward if the timer is running. */
void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
{
        struct iscsi_session *sess = conn->sess;
        struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

        spin_lock_bh(&conn->nopin_timer_lock);
        if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                return;
        }

        /*
         * NOTE(review): mod_timer() takes unsigned long jiffies, but this
         * uses 64-bit get_jiffies_64() while the start path below uses
         * plain jiffies — truncates on 32-bit; confirm intent.
         */
        mod_timer(&conn->nopin_response_timer,
                (get_jiffies_64() + na->nopin_response_timeout * HZ));
        spin_unlock_bh(&conn->nopin_timer_lock);
}

/*
 * Called with conn->nopin_timer_lock held.
 */
void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
{
        struct iscsi_session *sess = conn->sess;
        struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

        /*
         * NOTE(review): despite the comment above, this takes
         * nopin_timer_lock itself — confirm callers do NOT hold it.
         */
        spin_lock_bh(&conn->nopin_timer_lock);
        if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                return;
        }

        conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
        conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
        mod_timer(&conn->nopin_response_timer,
                  jiffies + na->nopin_response_timeout * HZ);

        pr_debug("Started NOPIN Response Timer on CID: %d to %u"
                " seconds\n", conn->cid, na->nopin_response_timeout);
        spin_unlock_bh(&conn->nopin_timer_lock);
}

/*
 * Stop the NopIN response timer: flag STOP under the lock (so a racing
 * expiry bails out), then synchronously cancel and clear RUNNING.
 */
void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
{
        spin_lock_bh(&conn->nopin_timer_lock);
        if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                return;
        }
        conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
        spin_unlock_bh(&conn->nopin_timer_lock);

        del_timer_sync(&conn->nopin_response_timer);

        spin_lock_bh(&conn->nopin_timer_lock);
        conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
        spin_unlock_bh(&conn->nopin_timer_lock);
}

/* Keepalive interval expired: emit a NopIN that expects a response. */
void iscsit_handle_nopin_timeout(struct timer_list *t)
{
        struct iscsi_conn *conn = from_timer(conn, t, nopin_timer);

        iscsit_inc_conn_usage_count(conn);

        spin_lock_bh(&conn->nopin_timer_lock);
        if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                iscsit_dec_conn_usage_count(conn);
                return;
        }
        conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
        spin_unlock_bh(&conn->nopin_timer_lock);

        iscsit_add_nopin(conn, 1);
        iscsit_dec_conn_usage_count(conn);
}

/*
 * Called with conn->nopin_timer_lock held.
 */
void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
        struct iscsi_session *sess = conn->sess;
        struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
        /*
         * NOPIN timeout is disabled.
         */
        if (!na->nopin_timeout)
                return;

        if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
                return;

        conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
        conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
        mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ);

        pr_debug("Started NOPIN Timer on CID: %d at %u second"
                " interval\n", conn->cid, na->nopin_timeout);
}

/* Locked variant of __iscsit_start_nopin_timer(). */
void iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
        struct iscsi_session *sess = conn->sess;
        struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
        /*
         * NOPIN timeout is disabled..
         */
        if (!na->nopin_timeout)
                return;

        spin_lock_bh(&conn->nopin_timer_lock);
        if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                return;
        }

        conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
        conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
        mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ);

        pr_debug("Started NOPIN Timer on CID: %d at %u second"
                " interval\n", conn->cid, na->nopin_timeout);
        spin_unlock_bh(&conn->nopin_timer_lock);
}

/* Stop the keepalive NopIN timer (same STOP/cancel protocol as above). */
void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
{
        spin_lock_bh(&conn->nopin_timer_lock);
        if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                return;
        }
        conn->nopin_timer_flags |= ISCSI_TF_STOP;
        spin_unlock_bh(&conn->nopin_timer_lock);

        del_timer_sync(&conn->nopin_timer);

        spin_lock_bh(&conn->nopin_timer_lock);
        conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
        spin_unlock_bh(&conn->nopin_timer_lock);
}

/*
 * Transmit cmd->tx_size bytes from either the iov_data or iov_misc
 * vector (selected by @use_misc), retrying whole-send on -EAGAIN.
 * Returns 0 on complete transmit, -1 on error.
 */
int iscsit_send_tx_data(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn,
        int use_misc)
{
        int tx_sent, tx_size;
        u32 iov_count;
        struct kvec *iov;

send_data:
        tx_size = cmd->tx_size;

        if (!use_misc) {
                iov = &cmd->iov_data[0];
                iov_count = cmd->iov_data_count;
        } else {
                iov = &cmd->iov_misc[0];
                iov_count = cmd->iov_misc_count;
        }

        tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
        if (tx_size != tx_sent) {
                if (tx_sent == -EAGAIN) {
                        pr_err("tx_data() returned -EAGAIN\n");
                        goto send_data;
                } else
                        return -1;
        }
        cmd->tx_size = 0;

        return 0;
}

/*
 * Zero-copy transmit of a DataIN PDU: send the header via tx_data(),
 * the payload pages via sock->ops->sendpage(), then any pad bytes and
 * the data digest from the tail of cmd->iov_data.  Returns 0 or -1.
 */
int iscsit_fe_sendpage_sg(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn)
{
        struct scatterlist *sg = cmd->first_data_sg;
        struct kvec iov;
        u32 tx_hdr_size, data_len;
        u32 offset = cmd->first_data_sg_off;
        int tx_sent, iov_off;

send_hdr:
        tx_hdr_size = ISCSI_HDR_LEN;
        if (conn->conn_ops->HeaderDigest)
                tx_hdr_size += ISCSI_CRC_LEN;

        iov.iov_base = cmd->pdu;
        iov.iov_len = tx_hdr_size;

        tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
        if (tx_hdr_size != tx_sent) {
                if (tx_sent == -EAGAIN) {
                        pr_err("tx_data() returned -EAGAIN\n");
                        goto send_hdr;
                }
                return -1;
        }

        data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
        /*
         * Set iov_off used by padding and data digest tx_data() calls below
         * in order to determine proper offset into cmd->iov_data[]
         */
        if (conn->conn_ops->DataDigest) {
                data_len -= ISCSI_CRC_LEN;
                if (cmd->padding)
                        iov_off = (cmd->iov_data_count - 2);
                else
                        iov_off = (cmd->iov_data_count - 1);
        } else {
                iov_off = (cmd->iov_data_count - 1);
        }
        /*
         * Perform sendpage() for each page in the scatterlist
         */
        while (data_len) {
                u32 space = (sg->length - offset);
                u32 sub_len = min_t(u32, data_len, space);
send_pg:
                tx_sent = conn->sock->ops->sendpage(conn->sock,
                                sg_page(sg), sg->offset + offset, sub_len, 0);
                if (tx_sent != sub_len) {
                        if (tx_sent == -EAGAIN) {
                                pr_err("tcp_sendpage() returned"
                                        " -EAGAIN\n");
                                goto send_pg;
                        }

                        pr_err("tcp_sendpage() failure: %d\n",
                                        tx_sent);
                        return -1;
                }

                data_len -= sub_len;
                offset = 0;
                sg = sg_next(sg);
        }

send_padding:
        if (cmd->padding) {
                struct kvec *iov_p = &cmd->iov_data[iov_off++];

                tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
                if (cmd->padding != tx_sent) {
                        if (tx_sent == -EAGAIN) {
                                pr_err("tx_data() returned -EAGAIN\n");
                                goto send_padding;
                        }
                        return -1;
                }
        }

send_datacrc:
        if (conn->conn_ops->DataDigest) {
                struct kvec *iov_d = &cmd->iov_data[iov_off];

                tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
                if (ISCSI_CRC_LEN != tx_sent) {
                        if (tx_sent == -EAGAIN) {
                                pr_err("tx_data() returned -EAGAIN\n");
                                goto send_datacrc;
                        }
                        return -1;
                }
        }

        return 0;
}

/*
 * This function is used for mainly sending a ISCSI_TARG_LOGIN_RSP PDU
 * back to the Initiator when an exception condition occurs with the
 * errors set in status_class and status_detail.
 *
 * Parameters:  iSCSI Connection, Status Class, Status Detail.
 * Returns:     0 on success, -1 on error.
 */
int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
{
        struct iscsi_login_rsp *hdr;
        struct iscsi_login *login = conn->conn_login;

        login->login_failed = 1;
        iscsit_collect_login_stats(conn, status_class, status_detail);

        memset(&login->rsp[0], 0, ISCSI_HDR_LEN);

        hdr = (struct iscsi_login_rsp *)&login->rsp[0];
        hdr->opcode = ISCSI_OP_LOGIN_RSP;
        hdr->status_class = status_class;
        hdr->status_detail = status_detail;
        hdr->itt = conn->login_itt;

        return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
}

/* Debug dump of per-connection and per-session negotiated parameters. */
void iscsit_print_session_params(struct iscsi_session *sess)
{
        struct iscsi_conn *conn;

        pr_debug("-----------------------------[Session Params for"
                " SID: %u]-----------------------------\n", sess->sid);
        spin_lock_bh(&sess->conn_lock);
        list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
                iscsi_dump_conn_ops(conn->conn_ops);
        spin_unlock_bh(&sess->conn_lock);

        iscsi_dump_sess_ops(sess->sess_ops);
}

/*
 * Receive exactly count->data_length bytes into count->iov via
 * sock_recvmsg(MSG_WAITALL).  Returns total bytes received, or the
 * (<= 0) sock_recvmsg result on error/EOF.
 */
static int iscsit_do_rx_data(
        struct iscsi_conn *conn,
        struct iscsi_data_count *count)
{
        int data = count->data_length, rx_loop = 0, total_rx = 0;
        struct msghdr msg;

        if (!conn || !conn->sock || !conn->conn_ops)
                return -1;

        memset(&msg, 0, sizeof(struct msghdr));
        iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC,
                      count->iov, count->iov_count, data);

        while (msg_data_left(&msg)) {
                rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
                if (rx_loop <= 0) {
                        pr_debug("rx_loop: %d total_rx: %d\n",
                                rx_loop, total_rx);
                        return rx_loop;
                }
                total_rx += rx_loop;
                pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
                                rx_loop, total_rx, data);
        }

        return total_rx;
}

/* Thin wrapper: build an iscsi_data_count and receive @data bytes. */
int rx_data(
        struct iscsi_conn *conn,
        struct kvec *iov,
        int iov_count,
        int data)
{
        struct iscsi_data_count c;

        if (!conn || !conn->sock || !conn->conn_ops)
                return -1;

        memset(&c, 0, sizeof(struct iscsi_data_count));
        c.iov = iov;
        c.iov_count = iov_count;
        c.data_length = data;
        c.type = ISCSI_RX_DATA;

        return iscsit_do_rx_data(conn, &c);
}

/*
 * Send exactly @data bytes from @iov via sock_sendmsg().  Returns total
 * bytes sent, the (<= 0) sock_sendmsg result on error, or -1 for bad
 * arguments.
 */
int tx_data(
        struct iscsi_conn *conn,
        struct kvec *iov,
        int iov_count,
        int data)
{
        struct msghdr msg;
        int total_tx = 0;

        if (!conn || !conn->sock || !conn->conn_ops)
                return -1;

        if (data <= 0) {
                pr_err("Data length is: %d\n", data);
                return -1;
        }

        memset(&msg, 0, sizeof(struct msghdr));

        iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC,
                      iov, iov_count, data);

        while (msg_data_left(&msg)) {
                int tx_loop = sock_sendmsg(conn->sock, &msg);
                if (tx_loop <= 0) {
                        pr_debug("tx_loop: %d total_tx %d\n",
                                tx_loop, total_tx);
                        return tx_loop;
                }
                total_tx += tx_loop;
                pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
                                tx_loop, total_tx, data);
        }

        return total_tx;
}

void iscsit_collect_login_stats(
        struct iscsi_conn *conn,
        u8 status_class,
        u8 status_detail)
{
        struct iscsi_param *intrname = NULL;
        struct iscsi_tiqn *tiqn;
        struct
iscsi_login_stats *ls; 1335 1336 tiqn = iscsit_snmp_get_tiqn(conn); 1337 if (!tiqn) 1338 return; 1339 1340 ls = &tiqn->login_stats; 1341 1342 spin_lock(&ls->lock); 1343 if (status_class == ISCSI_STATUS_CLS_SUCCESS) 1344 ls->accepts++; 1345 else if (status_class == ISCSI_STATUS_CLS_REDIRECT) { 1346 ls->redirects++; 1347 ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT; 1348 } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) && 1349 (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) { 1350 ls->authenticate_fails++; 1351 ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE; 1352 } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) && 1353 (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) { 1354 ls->authorize_fails++; 1355 ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE; 1356 } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) && 1357 (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) { 1358 ls->negotiate_fails++; 1359 ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE; 1360 } else { 1361 ls->other_fails++; 1362 ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER; 1363 } 1364 1365 /* Save initiator name, ip address and time, if it is a failed login */ 1366 if (status_class != ISCSI_STATUS_CLS_SUCCESS) { 1367 if (conn->param_list) 1368 intrname = iscsi_find_param_from_key(INITIATORNAME, 1369 conn->param_list); 1370 strlcpy(ls->last_intr_fail_name, 1371 (intrname ? intrname->value : "Unknown"), 1372 sizeof(ls->last_intr_fail_name)); 1373 1374 ls->last_intr_fail_ip_family = conn->login_family; 1375 1376 ls->last_intr_fail_sockaddr = conn->login_sockaddr; 1377 ls->last_fail_time = get_jiffies_64(); 1378 } 1379 1380 spin_unlock(&ls->lock); 1381 } 1382 1383 struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn) 1384 { 1385 struct iscsi_portal_group *tpg; 1386 1387 if (!conn) 1388 return NULL; 1389 1390 tpg = conn->tpg; 1391 if (!tpg) 1392 return NULL; 1393 1394 if (!tpg->tpg_tiqn) 1395 return NULL; 1396 1397 return tpg->tpg_tiqn; 1398 } 1399