/*******************************************************************************
 * Filename: target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp),
			__alignof__(struct t10_alua_lu_gp), 0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!t10_alua_tg_pt_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_mem_cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_tg_pt_gp_mem_cache;

	return 0;

out_free_tg_pt_gp_mem_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
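
/*
 * Usage sketch (illustrative only; names hypothetical): fabric modules grab
 * a fresh row index while building their statistics state, e.g.
 *
 *	sess->stats_index = scsi_get_new_index(SCSI_SESSION_INDEX);
 *
 * assuming SCSI_SESSION_INDEX is among the scsi_index_t values declared in
 * target_core_base.h in this tree.
 */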

void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	sub_api_initialized = 1;
}

struct se_session *transport_init_session(void)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	kref_init(&se_sess->sess_kref);

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);

int transport_alloc_session_tags(struct se_session *se_sess,
		unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, GFP_KERNEL);
	if (!se_sess->sess_cmd_map) {
		pr_err("Unable to allocate se_sess->sess_cmd_map\n");
		return -ENOMEM;
	}

	rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool, tag_num: %u\n",
			tag_num);
		kfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);

struct se_session *transport_init_session_tags(unsigned int tag_num,
		unsigned int tag_size)
{
	struct se_session *se_sess;
	int rc;

	se_sess = transport_init_session();
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session_tags);
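
/*
 * Usage sketch for the tag pool above (names are hypothetical): a fabric
 * sizes sess_cmd_map for its own per-command descriptor and then draws
 * tags from the percpu_ida pool on the I/O path, e.g.
 *
 *	se_sess = transport_init_session_tags(128, sizeof(struct my_fabric_cmd));
 *	...
 *	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_KERNEL);
 *	cmd = &((struct my_fabric_cmd *)se_sess->sess_cmd_map)[tag];
 *	...
 *	percpu_ida_free(&se_sess->sess_tag_pool, tag);
 */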

/*
 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate an active
	 * struct se_session.
	 *
	 * Only set for struct se_session's that will actually be moving I/O,
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		kref_get(&se_nacl->acl_kref);

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);
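
/*
 * Typical registration sequence (sketch, assuming the fabric has already
 * established its I_T nexus and located se_tpg / se_nacl):
 *
 *	se_sess = transport_init_session();
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
 */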

static void target_release_session(struct kref *kref)
{
	struct se_session *se_sess = container_of(kref,
			struct se_session, sess_kref);
	struct se_portal_group *se_tpg = se_sess->se_tpg;

	se_tpg->se_tpg_tfo->close_session(se_sess);
}

void target_get_session(struct se_session *se_sess)
{
	kref_get(&se_sess->sess_kref);
}
EXPORT_SYMBOL(target_get_session);

void target_put_session(struct se_session *se_sess)
{
	struct se_portal_group *tpg = se_sess->se_tpg;

	if (tpg->se_tpg_tfo->put_session != NULL) {
		tpg->se_tpg_tfo->put_session(se_sess);
		return;
	}
	kref_put(&se_sess->sess_kref, target_release_session);
}
EXPORT_SYMBOL(target_put_session);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
			struct se_node_acl, acl_kref);

	complete(&nacl->acl_free_comp);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate an active
	 * struct se_session.
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (se_nacl->acl_stop == 0)
			list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	if (se_sess->sess_cmd_map) {
		percpu_ida_destroy(&se_sess->sess_tag_pool);
		kfree(se_sess->sess_cmd_map);
	}
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct target_core_fabric_ops *se_tfo;
	struct se_node_acl *se_nacl;
	unsigned long flags;
	bool comp_nacl = true;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}
	se_tfo = se_tpg->se_tpg_tfo;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;

	spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
	if (se_nacl && se_nacl->dynamic_node_acl) {
		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			list_del(&se_nacl->acl_list);
			se_tpg->num_node_acls--;
			spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
			core_tpg_wait_for_nacl_pr_ref(se_nacl);
			core_free_device_list_for_node(se_nacl, se_tpg);
			se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl);

			comp_nacl = false;
			spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
		}
	}
	spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
	/*
	 * If the last kref is dropping now for an explicit NodeACL, wake up
	 * the sleeping ->acl_free_comp waiter in the configfs
	 * se_node_acl->acl_group removal context.
	 */
	if (se_nacl && comp_nacl)
		target_put_nacl(se_nacl);

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);
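
/*
 * Teardown mirrors registration (sketch): a fabric first detaches the
 * session from its se_node_acl, then drops it from the TPG:
 *
 *	transport_deregister_session_configfs(se_sess);
 *	transport_deregister_session(se_sess);
 *
 * Note that transport_deregister_session() calls transport_free_session()
 * itself, so the caller must not free the session again.
 */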

/*
 * Called with cmd->t_state_lock held.
 */
static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	if (cmd->transport_state & CMD_T_BUSY)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
		bool write_pending)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (write_pending)
		cmd->t_state = TRANSPORT_WRITE_PENDING;

	/*
	 * Determine if the IOCTL context caller is requesting the stopping of
	 * this command for LUN shutdown purposes.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (remove_from_lists)
			target_remove_from_state_list(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->transport_lun_stop_comp);
		return 1;
	}

	if (remove_from_lists) {
		target_remove_from_state_list(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the handoff to FE.
		 */
		cmd->se_lun = NULL;
	}

	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
			__func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->t_transport_stop_comp);
		return 1;
	}

	cmd->transport_state &= ~CMD_T_ACTIVE;
	if (remove_from_lists) {
		/*
		 * Some fabric modules like tcm_loop can release their
		 * internally allocated I/O reference and the struct se_cmd now.
		 *
		 * Fabric modules are expected to return '1' here if the
		 * se_cmd being passed is released at this point,
		 * or zero if not being released.
		 */
		if (cmd->se_tfo->check_stop_free != NULL) {
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
			return cmd->se_tfo->check_stop_free(cmd);
		}
	}

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, true, false);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;

	if (!lun || !cmd->lun_ref_active)
		return;

	percpu_ref_put(&lun->lun_ref);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove)
		transport_put_cmd(cmd);
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd,
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}

/*
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd
 */
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return NULL;

	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;

	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return cmd->sense_buffer;
}

void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	struct se_device *dev = cmd->se_dev;
	int success = (scsi_status == GOOD);
	unsigned long flags;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~CMD_T_BUSY;

	if (dev && dev->transport->transport_complete) {
		dev->transport->transport_complete(cmd,
				cmd->t_data_sg,
				transport_get_sense_buffer(cmd));
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
			success = 1;
	}

	/*
	 * See if we are waiting to complete for an exception condition.
	 */
	if (cmd->transport_state & CMD_T_REQUEST_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->task_stop_comp);
		return;
	}

	if (!success)
		cmd->transport_state |= CMD_T_FAILED;

	/*
	 * Check for the case where an explicit ABORT_TASK has been received,
	 * and transport_wait_for_tasks() will be waiting for completion.
	 */
	if (cmd->transport_state & CMD_T_ABORTED &&
	    cmd->transport_state & CMD_T_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->t_transport_stop_comp);
		return;
	} else if (cmd->transport_state & CMD_T_FAILED) {
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);
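
/*
 * Backend usage sketch: subsystem plugins signal I/O completion by calling
 * target_complete_cmd() from their completion context, e.g.
 *
 *	target_complete_cmd(cmd, GOOD);
 *
 * on success, or
 *
 *	target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
 *
 * on failure. Status and data are then queued back to the fabric from the
 * target_completion_wq work items initialized above.
 */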
"WRITE_PENDING" 718 : "UNKNOWN"); 719 720 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 721 transport_write_pending_qf(cmd); 722 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) 723 transport_complete_qf(cmd); 724 } 725 } 726 727 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 728 { 729 switch (cmd->data_direction) { 730 case DMA_NONE: 731 return "NONE"; 732 case DMA_FROM_DEVICE: 733 return "READ"; 734 case DMA_TO_DEVICE: 735 return "WRITE"; 736 case DMA_BIDIRECTIONAL: 737 return "BIDI"; 738 default: 739 break; 740 } 741 742 return "UNKNOWN"; 743 } 744 745 void transport_dump_dev_state( 746 struct se_device *dev, 747 char *b, 748 int *bl) 749 { 750 *bl += sprintf(b + *bl, "Status: "); 751 if (dev->export_count) 752 *bl += sprintf(b + *bl, "ACTIVATED"); 753 else 754 *bl += sprintf(b + *bl, "DEACTIVATED"); 755 756 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); 757 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", 758 dev->dev_attrib.block_size, 759 dev->dev_attrib.hw_max_sectors); 760 *bl += sprintf(b + *bl, " "); 761 } 762 763 void transport_dump_vpd_proto_id( 764 struct t10_vpd *vpd, 765 unsigned char *p_buf, 766 int p_buf_len) 767 { 768 unsigned char buf[VPD_TMP_BUF_SIZE]; 769 int len; 770 771 memset(buf, 0, VPD_TMP_BUF_SIZE); 772 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 773 774 switch (vpd->protocol_identifier) { 775 case 0x00: 776 sprintf(buf+len, "Fibre Channel\n"); 777 break; 778 case 0x10: 779 sprintf(buf+len, "Parallel SCSI\n"); 780 break; 781 case 0x20: 782 sprintf(buf+len, "SSA\n"); 783 break; 784 case 0x30: 785 sprintf(buf+len, "IEEE 1394\n"); 786 break; 787 case 0x40: 788 sprintf(buf+len, "SCSI Remote Direct Memory Access" 789 " Protocol\n"); 790 break; 791 case 0x50: 792 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 793 break; 794 case 0x60: 795 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 796 break; 797 case 0x70: 798 sprintf(buf+len, "Automation/Drive Interface Transport" 799 " Protocol\n"); 800 break; 801 case 0x80: 802 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 803 break; 804 default: 805 sprintf(buf+len, "Unknown 0x%02x\n", 806 vpd->protocol_identifier); 807 break; 808 } 809 810 if (p_buf) 811 strncpy(p_buf, buf, p_buf_len); 812 else 813 pr_debug("%s", buf); 814 } 815 816 void 817 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 818 { 819 /* 820 * Check if the Protocol Identifier Valid (PIV) bit is set.. 

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
			vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
			vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		snprintf(buf, sizeof(buf),
			"T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		snprintf(buf, sizeof(buf),
			"T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		snprintf(buf, sizeof(buf),
			"T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
					hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
					hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
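
/*
 * The transport_set_vpd_*() helpers above are intended to be applied in
 * sequence to a single designation descriptor of an INQUIRY EVPD 0x83
 * response (sketch; page_83 points at the descriptor being parsed):
 *
 *	transport_set_vpd_proto_id(vpd, page_83);
 *	transport_set_vpd_assoc(vpd, page_83);
 *	transport_set_vpd_ident_type(vpd, page_83);
 *	transport_set_vpd_ident(vpd, page_83);
 */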

sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
				cmd->data_length, size, cmd->t_task_cdb[0]);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			pr_err("Rejecting underflow/overflow WRITE data\n");
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_CDB.
		 */
		if (dev->dev_attrib.block_size != 512) {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * For the overflow case keep the existing fabric provided
		 * ->data_length. Otherwise for the underflow case, reset
		 * ->data_length to the smaller SCSI expected data transfer
		 * length.
		 */
		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
			cmd->data_length = size;
		}
	}

	return 0;
}

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_lun_node);
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->transport_lun_fe_stop_comp);
	init_completion(&cmd->transport_lun_stop_comp);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	init_completion(&cmd->task_stop_comp);
	spin_lock_init(&cmd->t_state_lock);
	cmd->transport_state = CMD_T_DEV_ACTIVE;

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
		return 0;

	if (cmd->sam_task_attr == MSG_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA emulation is not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Used to determine when ORDERED commands should go from
	 * Dormant to Active status.
	 */
	cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
	smp_mb__after_atomic_inc();
	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
			cmd->se_ordered_id, cmd->sam_task_attr,
			dev->transport->name);
	return 0;
}

sense_reason_t
target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;

	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now.. Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			return TCM_OUT_OF_RESOURCES;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	trace_target_sequencer_start(cmd);

	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	ret = target_scsi3_ua_check(cmd);
	if (ret)
		return ret;

	ret = target_alua_state_check(cmd);
	if (ret)
		return ret;

	ret = target_check_reservation(cmd);
	if (ret) {
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		return ret;
	}

	ret = dev->transport->parse_cdb(cmd);
	if (ret)
		return ret;

	ret = transport_check_alloc_task_attr(cmd);
	if (ret)
		return ret;

	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;

	spin_lock(&cmd->se_lun->lun_sep_lock);
	if (cmd->se_lun->lun_sep)
		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
	spin_unlock(&cmd->se_lun->lun_sep_lock);
	return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be called from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	sense_reason_t ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
				" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
	 * outstanding descriptors are handled correctly during shutdown via
	 * transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret)
		transport_generic_request_failure(cmd, ret);
	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
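
/*
 * Condensed view of the submission path above (sketch; error handling and
 * sess_cmd_list bookkeeping omitted). target_submit_cmd_map_sgls() below
 * drives essentially this sequence for fabric callers:
 *
 *	transport_init_se_cmd(se_cmd, tfo, se_sess, length, dir, attr, sense);
 *	transport_lookup_cmd_lun(se_cmd, unpacked_lun);
 *	target_setup_cmd_from_cdb(se_cmd, cdb);
 *	transport_handle_cdb_direct(se_cmd);
 */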

sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using"
			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		return TCM_INVALID_CDB_FIELD;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;

	if (sgl_bidi && sgl_bidi_count) {
		cmd->t_bidi_data_sg = sgl_bidi;
		cmd->t_bidi_data_nents = sgl_bidi_count;
	}
	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}

/*
 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
 * se_cmd + use pre-allocated SGL memory.
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 * @sgl: struct scatterlist memory for unidirectional mapping
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 */
int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags,
		struct scatterlist *sgl, u32 sgl_count,
		struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
	struct se_portal_group *se_tpg;
	sense_reason_t rc;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation.  From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);
	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
	if (ret)
		return ret;
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
		target_put_sess_cmd(se_sess, se_cmd);
		return 0;
	}

	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd, rc);
		return 0;
	}
	/*
	 * When a non zero sgl_count has been passed perform SGL passthrough
	 * mapping for pre-allocated fabric memory instead of having target
	 * core perform an internal SGL allocation..
	 */
	if (sgl_count != 0) {
		BUG_ON(!sgl);

		/*
		 * A work-around for tcm_loop as some userspace code via
		 * scsi-generic do not memset their associated read buffers,
		 * so go ahead and do that here for type non-data CDBs.  Also
		 * note that this is currently guaranteed to be a single SGL
		 * for this case by target core in target_setup_cmd_from_cdb()
		 * -> transport_generic_cmd_sequencer().
		 */
		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
		     se_cmd->data_direction == DMA_FROM_DEVICE) {
			unsigned char *buf = NULL;

			if (sgl)
				buf = kmap(sg_page(sgl)) + sgl->offset;

			if (buf) {
				memset(buf, 0, sgl->length);
				kunmap(sg_page(sgl));
			}
		}

		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
				sgl_bidi, sgl_bidi_count);
		if (rc != 0) {
			transport_generic_request_failure(se_cmd, rc);
			return 0;
		}
	}
	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);

/*
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 *
 * It also assumes internal target core SGL memory allocation.
 */
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
			unpacked_lun, data_length, task_attr, data_dir,
			flags, NULL, 0, NULL, 0);
}
EXPORT_SYMBOL(target_submit_cmd);
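
/*
 * Example fabric submission (sketch; the fcmd fields are hypothetical):
 * a frontend that lets target-core allocate the data SGLs simply does
 *
 *	target_submit_cmd(&fcmd->se_cmd, se_sess, fcmd->cdb, fcmd->sense_buf,
 *			  fcmd->unpacked_lun, fcmd->data_len, MSG_SIMPLE_TAG,
 *			  fcmd->data_dir, 0);
 *
 * from process context once the CDB and expected data length are known.
 */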

static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);

	transport_cmd_check_stop_to_fabric(se_cmd);
}

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *                     for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 **/

int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u32 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, unsigned int tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, MSG_SIMPLE_TAG, sense);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

	/* See target_submit_cmd for commentary */
	ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
	if (ret) {
		core_tmr_release_req(se_cmd->se_tmr_req);
		return ret;
	}

	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
	if (ret) {
		/*
		 * For callback during failure handling, push this work off
		 * to process context with TMR_LUN_DOES_NOT_EXIST status.
		 */
		INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
		schedule_work(&se_cmd->work);
		return 0;
	}
	transport_generic_handle_tmr(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_tmr);
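
/*
 * Example TMR submission (sketch; fcmd fields are hypothetical): aborting
 * an outstanding command by its task tag could look like
 *
 *	target_submit_tmr(&fcmd->se_cmd, se_sess, NULL, unpacked_lun,
 *			  fabric_tmr_ptr, TMR_ABORT_TASK, GFP_KERNEL,
 *			  ref_tag, TARGET_SCF_ACK_KREF);
 *
 * with the eventual response delivered through ->queue_tm_rsp().
 */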

/*
 * If the cmd is active, request it to be stopped and sleep until it
 * has completed.
 */
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
{
	bool was_active = false;

	if (cmd->transport_state & CMD_T_BUSY) {
		cmd->transport_state |= CMD_T_REQUEST_STOP;
		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

		pr_debug("cmd %p waiting to complete\n", cmd);
		wait_for_completion(&cmd->task_stop_comp);
		pr_debug("cmd %p stopped successfully\n", cmd);

		spin_lock_irqsave(&cmd->t_state_lock, *flags);
		cmd->transport_state &= ~CMD_T_REQUEST_STOP;
		cmd->transport_state &= ~CMD_T_BUSY;
		was_active = true;
	}

	return was_active;
}

/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
void transport_generic_request_failure(struct se_cmd *cmd,
		sense_reason_t sense_reason)
{
	int ret = 0;

	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->t_task_cdb[0]);
	pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
		cmd->se_tfo->get_cmd_state(cmd),
		cmd->t_state, sense_reason);
	pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
		(cmd->transport_state & CMD_T_ACTIVE) != 0,
		(cmd->transport_state & CMD_T_STOP) != 0,
		(cmd->transport_state & CMD_T_SENT) != 0);

	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	transport_complete_task_attr(cmd);
	/*
	 * Handle special case for COMPARE_AND_WRITE failure, where the
	 * callback is expected to drop the per device ->caw_mutex.
	 */
	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
	     cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd);

	switch (sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_PARAMETER_LIST_LENGTH_ERROR:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_ADDRESS_OUT_OF_RANGE:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
		break;
	case TCM_OUT_OF_RESOURCES:
		sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		break;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2)
			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
				cmd->orig_fe_lun, 0x2C,
				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);

		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		goto check_stop;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0], sense_reason);
		sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}

	ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

check_stop:
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
EXPORT_SYMBOL(transport_generic_request_failure);

void __target_execute_cmd(struct se_cmd *cmd)
{
	sense_reason_t ret;

	if (cmd->execute_cmd) {
		ret = cmd->execute_cmd(cmd);
		if (ret) {
			spin_lock_irq(&cmd->t_state_lock);
			cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
			spin_unlock_irq(&cmd->t_state_lock);

			transport_generic_request_failure(cmd, ret);
		}
	}
}

static bool target_handle_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
		return false;

	/*
	 * Check for the existence of HEAD_OF_QUEUE, which is allowed to
	 * jump the passed struct se_cmd to the front of the queue and
	 * execute without delay.
	 */
	switch (cmd->sam_task_attr) {
	case MSG_HEAD_TAG:
		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
			 "se_ordered_id: %u\n",
			 cmd->t_task_cdb[0], cmd->se_ordered_id);
		return false;
	case MSG_ORDERED_TAG:
		atomic_inc(&dev->dev_ordered_sync);
		smp_mb__after_atomic_inc();

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
			 " se_ordered_id: %u\n",
			 cmd->t_task_cdb[0], cmd->se_ordered_id);

		/*
		 * Execute an ORDERED command if no other older commands
		 * exist that need to be completed first.
		 */
		if (!atomic_read(&dev->simple_cmds))
			return false;
		break;
	default:
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc(&dev->simple_cmds);
		smp_mb__after_atomic_inc();
		break;
	}

	if (atomic_read(&dev->dev_ordered_sync) == 0)
		return false;

	spin_lock(&dev->delayed_cmd_lock);
	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
	spin_unlock(&dev->delayed_cmd_lock);

	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
		" delayed CMD list, se_ordered_id: %u\n",
		cmd->t_task_cdb[0], cmd->sam_task_attr,
		cmd->se_ordered_id);
	return true;
}

void target_execute_cmd(struct se_cmd *cmd)
{
	/*
	 * If the received CDB has already been aborted, stop processing it here.
	 */
	if (transport_check_aborted_status(cmd, 1)) {
		complete(&cmd->transport_lun_stop_comp);
		return;
	}

	/*
	 * Determine if the IOCTL context caller is requesting the stopping of
	 * this command for LUN shutdown purposes.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_ACTIVE;
		spin_unlock_irq(&cmd->t_state_lock);
		complete(&cmd->transport_lun_stop_comp);
		return;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
			__func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irq(&cmd->t_state_lock);
		complete(&cmd->t_transport_stop_comp);
		return;
	}

	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	if (target_handle_task_attr(cmd)) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
		spin_unlock_irq(&cmd->t_state_lock);
		return;
	}

	__target_execute_cmd(cmd);
}
EXPORT_SYMBOL(target_execute_cmd);
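
/*
 * Frontend note (sketch): for WRITE commands target-core first invokes the
 * fabric's ->write_pending() callback to solicit the data-out payload; once
 * all data has been received, the fabric kicks execution with
 *
 *	target_execute_cmd(se_cmd);
 *
 * READ and non-data commands reach target_execute_cmd() directly from
 * transport_generic_new_cmd().
 */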

/*
 * Process all commands up to the last received ORDERED task attribute which
 * requires another blocking boundary
 */
static void target_restart_delayed_cmds(struct se_device *dev)
{
	for (;;) {
		struct se_cmd *cmd;

		spin_lock(&dev->delayed_cmd_lock);
		if (list_empty(&dev->delayed_cmd_list)) {
			spin_unlock(&dev->delayed_cmd_lock);
			break;
		}

		cmd = list_entry(dev->delayed_cmd_list.next,
				 struct se_cmd, se_delayed_node);
		list_del(&cmd->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		__target_execute_cmd(cmd);

		if (cmd->sam_task_attr == MSG_ORDERED_TAG)
			break;
	}
}

/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
		return;

	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
		atomic_dec(&dev->simple_cmds);
		smp_mb__after_atomic_dec();
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for"
			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		atomic_dec(&dev->dev_ordered_sync);
		smp_mb__after_atomic_dec();

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
	}

	target_restart_delayed_cmds(dev);
}

static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	transport_complete_task_attr(cmd);

	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret)
			goto out;
	}

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->se_cmd_flags & SCF_BIDI) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret < 0)
				break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

out:
	if (ret < 0) {
		transport_handle_queue_full(cmd, cmd->se_dev);
		return;
	}
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}

static void transport_handle_queue_full(
	struct se_cmd *cmd,
	struct se_device *dev)
{
	spin_lock_irq(&dev->qf_cmd_lock);
	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
	atomic_inc(&dev->dev_qf_count);
	smp_mb__after_atomic_inc();
	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

	schedule_work(&cmd->se_dev->qf_work_queue);
}
static void target_complete_ok_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	int ret;

	/*
	 * Check if we need to move delayed/dormant tasks from cmds on the
	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
	 * Attribute.
	 */
	transport_complete_task_attr(cmd);

	/*
	 * Check to schedule QUEUE_FULL work, or execute an existing
	 * cmd->transport_qf_callback()
	 */
	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
		schedule_work(&cmd->se_dev->qf_work_queue);

	/*
	 * Check if we need to send a sense buffer from
	 * the struct se_cmd in question.
	 */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		WARN_ON(!cmd->scsi_status);
		ret = transport_send_check_condition_and_sense(
					cmd, 0, 1);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;

		transport_lun_remove_cmd(cmd);
		transport_cmd_check_stop_to_fabric(cmd);
		return;
	}
	/*
	 * Check for a callback, used by, amongst other things,
	 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
	 */
	if (cmd->transport_complete_callback) {
		sense_reason_t rc;

		rc = cmd->transport_complete_callback(cmd);
		if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
			return;
		} else if (rc) {
			ret = transport_send_check_condition_and_sense(cmd,
						rc, 0);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);

		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	case DMA_TO_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
					cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);
		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */
		if (cmd->se_cmd_flags & SCF_BIDI) {
			spin_lock(&cmd->se_lun->lun_sep_lock);
			if (cmd->se_lun->lun_sep) {
				cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
			}
			spin_unlock(&cmd->se_lun->lun_sep_lock);
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
		" data_direction: %d\n", cmd, cmd->data_direction);
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}

static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}
static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
{
	/*
	 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
	 * emulation, and free + reset pointers if necessary.
	 */
	if (!cmd->t_data_sg_orig)
		return;

	kfree(cmd->t_data_sg);
	cmd->t_data_sg = cmd->t_data_sg_orig;
	cmd->t_data_sg_orig = NULL;
	cmd->t_data_nents = cmd->t_data_nents_orig;
	cmd->t_data_nents_orig = 0;
}

static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
		transport_reset_sgl_orig(cmd);
		return;
	}
	transport_reset_sgl_orig(cmd);

	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}

/**
 * transport_release_cmd - free a command
 * @cmd: command to free
 *
 * This routine unconditionally frees a command; any reference counting
 * or list removal must be done by the caller.
 */
static int transport_release_cmd(struct se_cmd *cmd)
{
	BUG_ON(!cmd->se_tfo);

	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		core_tmr_release_req(cmd->se_tmr_req);
	if (cmd->t_task_cdb != cmd->__t_task_cdb)
		kfree(cmd->t_task_cdb);
	/*
	 * If this cmd has been setup with target_get_sess_cmd(), drop
	 * the kref and call ->release_cmd() in kref callback.
	 */
	return target_put_sess_cmd(cmd->se_sess, cmd);
}

/**
 * transport_put_cmd - release a reference to a command
 * @cmd: command to release
 *
 * This routine releases our reference to the command and frees it if possible.
 */
static int transport_put_cmd(struct se_cmd *cmd)
{
	transport_free_pages(cmd);
	return transport_release_cmd(cmd);
}

void *transport_kmap_data_sg(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;
	struct page **pages;
	int i;

	/*
	 * We need to take into account a possible offset here for fabrics like
	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
	 */
	if (!cmd->t_data_nents)
		return NULL;

	BUG_ON(!sg);
	if (cmd->t_data_nents == 1)
		return kmap(sg_page(sg)) + sg->offset;

	/* >1 page. use vmap */
	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
	if (!pages)
		return NULL;

	/* convert sg[] to pages[] */
	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
		pages[i] = sg_page(sg);
	}

	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!cmd->t_data_vmap)
		return NULL;

	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
EXPORT_SYMBOL(transport_kmap_data_sg);

void transport_kunmap_data_sg(struct se_cmd *cmd)
{
	if (!cmd->t_data_nents) {
		return;
	} else if (cmd->t_data_nents == 1) {
		kunmap(sg_page(cmd->t_data_sg));
		return;
	}

	vunmap(cmd->t_data_vmap);
	cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);
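/*
 * Illustrative sketch (not built): the typical pattern for CDB emulation
 * code that needs a linear view of the payload.  The mapping can fail, so
 * the NULL return must be handled; 'example_payload' is a hypothetical
 * source buffer.
 */
#if 0
static sense_reason_t example_fill_payload(struct se_cmd *cmd,
					   const void *example_payload,
					   u32 len)
{
	unsigned char *buf;

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	memcpy(buf, example_payload, min_t(u32, len, cmd->data_length));
	transport_kunmap_data_sg(cmd);
	return 0;
}
#endif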
int
target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
		 bool zero_page)
{
	struct scatterlist *sg;
	struct page *page;
	gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
	unsigned int nent;
	int i = 0;

	nent = DIV_ROUND_UP(length, PAGE_SIZE);
	sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, nent);

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);
		page = alloc_page(GFP_KERNEL | zero_flag);
		if (!page)
			goto out;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	*sgl = sg;
	*nents = nent;
	return 0;

out:
	while (i > 0) {
		i--;
		__free_page(sg_page(&sg[i]));
	}
	kfree(sg);
	return -ENOMEM;
}
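/*
 * Worked example for the allocator above: a request for length == 9000
 * bytes with PAGE_SIZE == 4096 yields nent == 3 entries of 4096, 4096 and
 * 808 bytes.  Illustrative sketch (not built) of the allocate/release
 * pairing with the file-local transport_free_sgl() helper:
 */
#if 0
static int example_alloc_bounce(struct scatterlist **sgl, unsigned int *nents)
{
	int ret;

	/* zero_page == true clears the backing pages via __GFP_ZERO */
	ret = target_alloc_sgl(sgl, nents, 9000, true);
	if (ret < 0)
		return ret;

	/* ... use the scatterlist ... */
	transport_free_sgl(*sgl, *nents);
	return 0;
}
#endif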
/*
 * Allocate any required resources to execute the command.  For writes we
 * might not have the payload yet, so notify the fabric via a call to
 * ->write_pending instead.  Otherwise place it on the execution queue.
 */
sense_reason_t
transport_generic_new_cmd(struct se_cmd *cmd)
{
	int ret = 0;

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {
		bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);

		if ((cmd->se_cmd_flags & SCF_BIDI) ||
		    (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
			u32 bidi_length;

			if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
				bidi_length = cmd->t_task_nolb *
					      cmd->se_dev->dev_attrib.block_size;
			else
				bidi_length = cmd->data_length;

			ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
					       &cmd->t_bidi_data_nents,
					       bidi_length, zero_flag);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
				       cmd->data_length, zero_flag);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * If this command is not a write we can execute it right here,
	 * for write buffers we need to notify the fabric driver first
	 * and let it call back once the write buffers are ready.
	 */
	target_add_to_state_list(cmd);
	if (cmd->data_direction != DMA_TO_DEVICE) {
		target_execute_cmd(cmd);
		return 0;
	}
	transport_cmd_check_stop(cmd, false, true);

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

	/* fabric drivers should only return -EAGAIN or -ENOMEM as error */
	WARN_ON(ret);

	return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
	transport_handle_queue_full(cmd, cmd->se_dev);
	return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);

static void transport_write_pending_qf(struct se_cmd *cmd)
{
	int ret;

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev);
	}
}

int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	unsigned long flags;
	int ret = 0;

	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
			transport_wait_for_tasks(cmd);

		ret = transport_release_cmd(cmd);
	} else {
		if (wait_for_tasks)
			transport_wait_for_tasks(cmd);
		/*
		 * Handle WRITE failure case where transport_generic_new_cmd()
		 * has already added se_cmd to state_list, but fabric has
		 * failed command before I/O submission.
		 */
		if (cmd->state_active) {
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			target_remove_from_state_list(cmd);
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		}

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

		ret = transport_put_cmd(cmd);
	}
	return ret;
}
EXPORT_SYMBOL(transport_generic_free_cmd);
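/*
 * Illustrative sketch (not built): a fabric driver's response-completion
 * handler typically drops its command reference here once the response has
 * been acknowledged on the wire.  wait_for_tasks == 0 is the common fast
 * path; 1 is used on exception paths that must quiesce the command first.
 */
#if 0
static void example_fabric_response_done(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
}
#endif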
/* target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to add
 * @ack_kref:	Signal that the fabric will perform an extra
 *		target_put_sess_cmd() as its acknowledgement
 */
int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
			bool ack_kref)
{
	unsigned long flags;
	int ret = 0;

	kref_init(&se_cmd->cmd_kref);
	/*
	 * Add a second kref if the fabric caller is expecting to handle
	 * fabric acknowledgement that requires two target_put_sess_cmd()
	 * invocations before se_cmd descriptor release.
	 */
	if (ack_kref) {
		kref_get(&se_cmd->cmd_kref);
		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
	}

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (se_sess->sess_tearing_down) {
		ret = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
out:
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
	return ret;
}
EXPORT_SYMBOL(target_get_sess_cmd);

static void target_release_cmd_kref(struct kref *kref)
{
	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
	struct se_session *se_sess = se_cmd->se_sess;

	if (list_empty(&se_cmd->se_cmd_list)) {
		spin_unlock(&se_sess->sess_cmd_lock);
		se_cmd->se_tfo->release_cmd(se_cmd);
		return;
	}
	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
		spin_unlock(&se_sess->sess_cmd_lock);
		complete(&se_cmd->cmd_wait_comp);
		return;
	}
	list_del(&se_cmd->se_cmd_list);
	spin_unlock(&se_sess->sess_cmd_lock);

	se_cmd->se_tfo->release_cmd(se_cmd);
}

/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to drop
 */
int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
{
	return kref_put_spinlock_irqsave(&se_cmd->cmd_kref,
			target_release_cmd_kref, &se_sess->sess_cmd_lock);
}
EXPORT_SYMBOL(target_put_sess_cmd);

/* target_sess_cmd_list_set_waiting - Flag all commands in
 * sess_cmd_list to complete cmd_wait_comp.  Set
 * sess_tearing_down so no more commands are queued.
 * @se_sess:	session to flag
 */
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
	struct se_cmd *se_cmd;
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (se_sess->sess_tearing_down) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		return;
	}
	se_sess->sess_tearing_down = 1;
	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);

	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
		se_cmd->cmd_wait_set = 1;

	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);

/* target_wait_for_sess_cmds - Wait for outstanding descriptors
 * @se_sess:	session to wait for active I/O
 */
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	unsigned long flags;

	list_for_each_entry_safe(se_cmd, tmp_cmd,
			&se_sess->sess_wait_list, se_cmd_list) {
		list_del(&se_cmd->se_cmd_list);

		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
			" %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));

		wait_for_completion(&se_cmd->cmd_wait_comp);
		pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
			" fabric state: %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));

		se_cmd->se_tfo->release_cmd(se_cmd);
	}

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	WARN_ON(!list_empty(&se_sess->sess_cmd_list));
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
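/*
 * Illustrative sketch (not built): session shutdown pairs the two calls
 * above.  Marking the session as tearing down first prevents new commands
 * from joining sess_cmd_list, then the wait reaps every descriptor that is
 * still holding a kref.
 */
#if 0
static void example_fabric_close_session(struct se_session *se_sess)
{
	target_sess_cmd_list_set_waiting(se_sess);
	target_wait_for_sess_cmds(se_sess);
	/* ... then deregister and free the session ... */
}
#endif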
/* transport_lun_wait_for_tasks():
 *
 * Called from ConfigFS context to stop the passed struct se_cmd to allow
 * a struct se_lun to be successfully shut down.
 */
static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * If the frontend has already requested this struct se_cmd to
	 * be stopped, we can safely ignore this struct se_cmd.
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_STOP) {
		cmd->transport_state &= ~CMD_T_LUN_STOP;

		pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
			 cmd->se_tfo->get_task_tag(cmd));
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		transport_cmd_check_stop(cmd, false, false);
		return -EPERM;
	}
	cmd->transport_state |= CMD_T_LUN_FE_STOP;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	// XXX: audit task_flags checks.
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if ((cmd->transport_state & CMD_T_BUSY) &&
	    (cmd->transport_state & CMD_T_SENT)) {
		if (!target_stop_cmd(cmd, &flags))
			ret++;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	pr_debug("ConfigFS: cmd: %p stop tasks ret:"
		" %d\n", cmd, ret);
	if (!ret) {
		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
			 cmd->se_tfo->get_task_tag(cmd));
		wait_for_completion(&cmd->transport_lun_stop_comp);
		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
			 cmd->se_tfo->get_task_tag(cmd));
	}

	return 0;
}
static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct se_cmd *cmd = NULL;
	unsigned long lun_flags, cmd_flags;
	/*
	 * Do exception processing and return CHECK_CONDITION status to the
	 * Initiator Port.
	 */
	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	while (!list_empty(&lun->lun_cmd_list)) {
		cmd = list_first_entry(&lun->lun_cmd_list,
				       struct se_cmd, se_lun_node);
		list_del_init(&cmd->se_lun_node);

		spin_lock(&cmd->t_state_lock);
		pr_debug("SE_LUN[%d] - Setting cmd->transport"
			"_lun_stop for ITT: 0x%08x\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));
		cmd->transport_state |= CMD_T_LUN_STOP;
		spin_unlock(&cmd->t_state_lock);

		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);

		if (!cmd->se_lun) {
			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
			BUG();
		}
		/*
		 * If the Storage engine still owns the iscsi_cmd_t, determine
		 * and/or stop its context.
		 */
		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}

		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
			"_wait_for_tasks(): SUCCESS\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
			goto check_cond;
		}
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		target_remove_from_state_list(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);

		/*
		 * The Storage engine stopped this struct se_cmd before it was
		 * sent to the fabric frontend for delivery back to the
		 * Initiator Node.  Return this SCSI CDB back with a
		 * CHECK_CONDITION status.
		 */
check_cond:
		transport_send_check_condition_and_sense(cmd,
				TCM_NON_EXISTENT_LUN, 0);
		/*
		 * If the fabric frontend is waiting for this iscsi_cmd_t to
		 * be released, notify the waiting thread now that LU has
		 * finished accessing it.
		 */
		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
			pr_debug("SE_LUN[%d] - Detected FE stop for"
				" struct se_cmd: %p ITT: 0x%08x\n",
				lun->unpacked_lun,
				cmd, cmd->se_tfo->get_task_tag(cmd));

			spin_unlock_irqrestore(&cmd->t_state_lock,
					cmd_flags);
			transport_cmd_check_stop(cmd, false, false);
			complete(&cmd->transport_lun_fe_stop_comp);
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}
		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
}

static int transport_clear_lun_ref_thread(void *p)
{
	struct se_lun *lun = p;

	percpu_ref_kill(&lun->lun_ref);

	wait_for_completion(&lun->lun_ref_comp);
	complete(&lun->lun_shutdown_comp);

	return 0;
}

int transport_clear_lun_ref(struct se_lun *lun)
{
	struct task_struct *kt;

	kt = kthread_run(transport_clear_lun_ref_thread, lun,
			"tcm_cl_%u", lun->unpacked_lun);
	if (IS_ERR(kt)) {
		pr_err("Unable to start clear_lun thread\n");
		return PTR_ERR(kt);
	}
	wait_for_completion(&lun->lun_shutdown_comp);

	return 0;
}
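/*
 * Illustrative note on the pattern above: percpu_ref_kill() switches
 * lun->lun_ref into atomic mode and drops the initial reference; once the
 * last outstanding reference is released, the ref's release callback
 * (defined elsewhere in the target core) completes lun_ref_comp.  The
 * kthread, and therefore transport_clear_lun_ref(), only returns after all
 * in-flight I/O referencing the LUN has drained.
 */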
/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd:	command to wait
 *
 * Called from frontend fabric context to wait for storage engine
 * to pause and/or release frontend generated struct se_cmd.
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}
	/*
	 * If we are already stopped due to an external event (i.e. LUN
	 * shutdown), sleep until the connection can have the passed struct
	 * se_cmd back.  cmd->transport_lun_fe_stop_comp will be completed by
	 * __transport_clear_lun_from_sessions() once the ConfigFS context
	 * caller has completed its operation on the struct se_cmd.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("wait_for_tasks: Stopping"
			" wait_for_completion(&cmd->transport_lun_fe"
			"_stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));
		/*
		 * There is a special case for WRITES where a FE exception +
		 * LUN shutdown means ConfigFS context is still sleeping on
		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
		 * We go ahead and up transport_lun_stop_comp just to be sure
		 * here.
		 */
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->transport_lun_stop_comp);
		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
		spin_lock_irqsave(&cmd->t_state_lock, flags);

		target_remove_from_state_list(cmd);
		/*
		 * At this point the frontend who was the originator of this
		 * struct se_cmd owns the structure again, and it can be
		 * released through normal means below.
		 */
		pr_debug("wait_for_tasks: Stopped"
			" wait_for_completion(&cmd->transport_lun_fe_"
			"stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_LUN_STOP;
	}

	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	cmd->transport_state |= CMD_T_STOP;

	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
		" i_state: %d, t_state: %d, CMD_T_STOP\n",
		cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wait_for_completion(&cmd->t_transport_stop_comp);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion("
		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
		cmd->se_tfo->get_task_tag(cmd));

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return true;
}
EXPORT_SYMBOL(transport_wait_for_tasks);
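/*
 * Illustrative sketch (not built): a fabric exception path, e.g. a
 * connection reset, typically quiesces each outstanding descriptor through
 * the handshake above before releasing it.  The wait_for_tasks argument to
 * transport_generic_free_cmd() drives transport_wait_for_tasks() directly.
 */
#if 0
static void example_fabric_abort_cmd(struct se_cmd *se_cmd)
{
	/* wait_for_tasks == 1 invokes transport_wait_for_tasks() first */
	transport_generic_free_cmd(se_cmd, 1);
}
#endif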
static int transport_get_sense_codes(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	*asc = cmd->scsi_asc;
	*ascq = cmd->scsi_ascq;

	return 0;
}

int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
		sense_reason_t reason, int from_transport)
{
	unsigned char *buffer = cmd->sense_buffer;
	unsigned long flags;
	u8 asc = 0, ascq = 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!reason && from_transport)
		goto after_reason;

	if (!from_transport)
		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;

	/*
	 * Actual SENSE DATA, see SPC-3 7.23.2.  SPC_SENSE_KEY_OFFSET uses
	 * SENSE KEY values from include/scsi/scsi.h
	 */
	switch (reason) {
	case TCM_NO_SENSE:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* Not Ready */
		buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
		/* NO ADDITIONAL SENSE INFORMATION */
		buffer[SPC_ASC_KEY_OFFSET] = 0;
		buffer[SPC_ASCQ_KEY_OFFSET] = 0;
		break;
	case TCM_NON_EXISTENT_LUN:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT NOT SUPPORTED */
		buffer[SPC_ASC_KEY_OFFSET] = 0x25;
		break;
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_SECTOR_COUNT_TOO_MANY:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID COMMAND OPERATION CODE */
		buffer[SPC_ASC_KEY_OFFSET] = 0x20;
		break;
	case TCM_UNKNOWN_MODE_PAGE:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_CHECK_CONDITION_ABORT_CMD:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* BUS DEVICE RESET FUNCTION OCCURRED */
		buffer[SPC_ASC_KEY_OFFSET] = 0x29;
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
		break;
	case TCM_INCORRECT_AMOUNT_OF_DATA:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
		/* NOT ENOUGH UNSOLICITED DATA */
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x0d;
		break;
	case TCM_INVALID_CDB_FIELD:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_INVALID_PARAMETER_LIST:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN PARAMETER LIST */
		buffer[SPC_ASC_KEY_OFFSET] = 0x26;
		break;
	case TCM_PARAMETER_LIST_LENGTH_ERROR:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* PARAMETER LIST LENGTH ERROR */
		buffer[SPC_ASC_KEY_OFFSET] = 0x1a;
		break;
	case TCM_UNEXPECTED_UNSOLICITED_DATA:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
		/* UNEXPECTED_UNSOLICITED_DATA */
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x0c;
		break;
	case TCM_SERVICE_CRC_ERROR:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* PROTOCOL SERVICE CRC ERROR */
		buffer[SPC_ASC_KEY_OFFSET] = 0x47;
		/* N/A */
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x05;
		break;
	case TCM_SNACK_REJECTED:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* READ ERROR */
		buffer[SPC_ASC_KEY_OFFSET] = 0x11;
		/* FAILED RETRANSMISSION REQUEST */
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x13;
		break;
	case TCM_WRITE_PROTECTED:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* DATA PROTECT */
		buffer[SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
		/* WRITE PROTECTED */
		buffer[SPC_ASC_KEY_OFFSET] = 0x27;
		break;
	case TCM_ADDRESS_OUT_OF_RANGE:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL BLOCK ADDRESS OUT OF RANGE */
		buffer[SPC_ASC_KEY_OFFSET] = 0x21;
		break;
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* UNIT ATTENTION */
		buffer[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		buffer[SPC_ASC_KEY_OFFSET] = asc;
		buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* Not Ready */
		buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[SPC_ASC_KEY_OFFSET] = asc;
		buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_MISCOMPARE_VERIFY:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		buffer[SPC_SENSE_KEY_OFFSET] = MISCOMPARE;
		/* MISCOMPARE DURING VERIFY OPERATION */
		buffer[SPC_ASC_KEY_OFFSET] = 0x1d;
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/*
		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
		 * Solaris initiators.  Returning NOT READY instead means the
		 * operations will be retried a finite number of times and we
		 * can survive intermittent errors.
		 */
		buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[SPC_ASC_KEY_OFFSET] = 0x08;
		break;
	}
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

after_reason:
	trace_target_cmd_complete(cmd);
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);

int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	if (!(cmd->transport_state & CMD_T_ABORTED))
		return 0;

	if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
		return 1;

	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
		 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));

	cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
	trace_target_cmd_complete(cmd);
	cmd->se_tfo->queue_status(cmd);

	return 1;
}
EXPORT_SYMBOL(transport_check_aborted_status);
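/*
 * Worked example of the fixed-format sense data built above: for
 * TCM_INVALID_CDB_FIELD the initiator sees response code 0x70 (current
 * error), SENSE KEY 0x5 (ILLEGAL REQUEST), ADDITIONAL SENSE LENGTH 10,
 * and ASC/ASCQ 0x24/0x00 (INVALID FIELD IN CDB), with the command itself
 * completing as SAM_STAT_CHECK_CONDITION.
 */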
void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			cmd->transport_state |= CMD_T_ABORTED;
			smp_mb__after_atomic_inc();
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;

	transport_lun_remove_cmd(cmd);

	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
		cmd->se_tfo->get_task_tag(cmd));

	trace_target_cmd_complete(cmd);
	cmd->se_tfo->queue_status(cmd);
}

static void target_tmr_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		core_tmr_abort_task(dev, tmr, cmd->se_sess);
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
				tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
}

int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	INIT_WORK(&cmd->work, target_tmr_work);
	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
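/*
 * Illustrative sketch (not built): once a fabric has allocated and
 * initialized cmd->se_tmr_req through its TMR setup path, handing the
 * descriptor to transport_generic_handle_tmr() defers execution to the
 * device's tmr_wq, where target_tmr_work() above runs and eventually
 * calls ->queue_tm_rsp().
 */
#if 0
static int example_fabric_submit_lun_reset(struct se_cmd *se_cmd)
{
	/* se_cmd->se_tmr_req->function == TMR_LUN_RESET was set up earlier */
	return transport_generic_handle_tmr(se_cmd);
}
#endif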