1 /******************************************************************************* 2 * Filename: target_core_transport.c 3 * 4 * This file contains the Generic Target Engine Core. 5 * 6 * (c) Copyright 2002-2013 Datera, Inc. 7 * 8 * Nicholas A. Bellinger <nab@kernel.org> 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2 of the License, or 13 * (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; if not, write to the Free Software 22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 23 * 24 ******************************************************************************/ 25 26 #include <linux/net.h> 27 #include <linux/delay.h> 28 #include <linux/string.h> 29 #include <linux/timer.h> 30 #include <linux/slab.h> 31 #include <linux/blkdev.h> 32 #include <linux/spinlock.h> 33 #include <linux/kthread.h> 34 #include <linux/in.h> 35 #include <linux/cdrom.h> 36 #include <linux/module.h> 37 #include <linux/ratelimit.h> 38 #include <asm/unaligned.h> 39 #include <net/sock.h> 40 #include <net/tcp.h> 41 #include <scsi/scsi.h> 42 #include <scsi/scsi_cmnd.h> 43 #include <scsi/scsi_tcq.h> 44 45 #include <target/target_core_base.h> 46 #include <target/target_core_backend.h> 47 #include <target/target_core_fabric.h> 48 #include <target/target_core_configfs.h> 49 50 #include "target_core_internal.h" 51 #include "target_core_alua.h" 52 #include "target_core_pr.h" 53 #include "target_core_ua.h" 54 55 #define CREATE_TRACE_POINTS 56 #include <trace/events/target.h> 57 58 static struct workqueue_struct *target_completion_wq; 59 static struct kmem_cache *se_sess_cache; 60 struct kmem_cache *se_ua_cache; 61 struct kmem_cache *t10_pr_reg_cache; 62 struct kmem_cache *t10_alua_lu_gp_cache; 63 struct kmem_cache *t10_alua_lu_gp_mem_cache; 64 struct kmem_cache *t10_alua_tg_pt_gp_cache; 65 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; 66 67 static void transport_complete_task_attr(struct se_cmd *cmd); 68 static void transport_handle_queue_full(struct se_cmd *cmd, 69 struct se_device *dev); 70 static int transport_put_cmd(struct se_cmd *cmd); 71 static void target_complete_ok_work(struct work_struct *work); 72 73 int init_se_kmem_caches(void) 74 { 75 se_sess_cache = kmem_cache_create("se_sess_cache", 76 sizeof(struct se_session), __alignof__(struct se_session), 77 0, NULL); 78 if (!se_sess_cache) { 79 pr_err("kmem_cache_create() for struct se_session" 80 " failed\n"); 81 goto out; 82 } 83 se_ua_cache = kmem_cache_create("se_ua_cache", 84 sizeof(struct se_ua), __alignof__(struct se_ua), 85 0, NULL); 86 if (!se_ua_cache) { 87 pr_err("kmem_cache_create() for struct se_ua failed\n"); 88 goto out_free_sess_cache; 89 } 90 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", 91 sizeof(struct t10_pr_registration), 92 __alignof__(struct t10_pr_registration), 0, NULL); 93 if (!t10_pr_reg_cache) { 94 pr_err("kmem_cache_create() for struct t10_pr_registration" 95 " failed\n"); 96 goto out_free_ua_cache; 97 } 98 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", 99 sizeof(struct t10_alua_lu_gp), 
__alignof__(struct t10_alua_lu_gp), 100 0, NULL); 101 if (!t10_alua_lu_gp_cache) { 102 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" 103 " failed\n"); 104 goto out_free_pr_reg_cache; 105 } 106 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", 107 sizeof(struct t10_alua_lu_gp_member), 108 __alignof__(struct t10_alua_lu_gp_member), 0, NULL); 109 if (!t10_alua_lu_gp_mem_cache) { 110 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" 111 "cache failed\n"); 112 goto out_free_lu_gp_cache; 113 } 114 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", 115 sizeof(struct t10_alua_tg_pt_gp), 116 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); 117 if (!t10_alua_tg_pt_gp_cache) { 118 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 119 "cache failed\n"); 120 goto out_free_lu_gp_mem_cache; 121 } 122 t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( 123 "t10_alua_tg_pt_gp_mem_cache", 124 sizeof(struct t10_alua_tg_pt_gp_member), 125 __alignof__(struct t10_alua_tg_pt_gp_member), 126 0, NULL); 127 if (!t10_alua_tg_pt_gp_mem_cache) { 128 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 129 "mem_t failed\n"); 130 goto out_free_tg_pt_gp_cache; 131 } 132 133 target_completion_wq = alloc_workqueue("target_completion", 134 WQ_MEM_RECLAIM, 0); 135 if (!target_completion_wq) 136 goto out_free_tg_pt_gp_mem_cache; 137 138 return 0; 139 140 out_free_tg_pt_gp_mem_cache: 141 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 142 out_free_tg_pt_gp_cache: 143 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 144 out_free_lu_gp_mem_cache: 145 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 146 out_free_lu_gp_cache: 147 kmem_cache_destroy(t10_alua_lu_gp_cache); 148 out_free_pr_reg_cache: 149 kmem_cache_destroy(t10_pr_reg_cache); 150 out_free_ua_cache: 151 kmem_cache_destroy(se_ua_cache); 152 out_free_sess_cache: 153 kmem_cache_destroy(se_sess_cache); 154 out: 155 return -ENOMEM; 156 } 157 158 void release_se_kmem_caches(void) 159 { 160 destroy_workqueue(target_completion_wq); 161 kmem_cache_destroy(se_sess_cache); 162 kmem_cache_destroy(se_ua_cache); 163 kmem_cache_destroy(t10_pr_reg_cache); 164 kmem_cache_destroy(t10_alua_lu_gp_cache); 165 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 166 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 167 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 168 } 169 170 /* This code ensures unique mib indexes are handed out. 
*/ 171 static DEFINE_SPINLOCK(scsi_mib_index_lock); 172 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; 173 174 /* 175 * Allocate a new row index for the entry type specified 176 */ 177 u32 scsi_get_new_index(scsi_index_t type) 178 { 179 u32 new_index; 180 181 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); 182 183 spin_lock(&scsi_mib_index_lock); 184 new_index = ++scsi_mib_index[type]; 185 spin_unlock(&scsi_mib_index_lock); 186 187 return new_index; 188 } 189 190 void transport_subsystem_check_init(void) 191 { 192 int ret; 193 static int sub_api_initialized; 194 195 if (sub_api_initialized) 196 return; 197 198 ret = request_module("target_core_iblock"); 199 if (ret != 0) 200 pr_err("Unable to load target_core_iblock\n"); 201 202 ret = request_module("target_core_file"); 203 if (ret != 0) 204 pr_err("Unable to load target_core_file\n"); 205 206 ret = request_module("target_core_pscsi"); 207 if (ret != 0) 208 pr_err("Unable to load target_core_pscsi\n"); 209 210 sub_api_initialized = 1; 211 } 212 213 struct se_session *transport_init_session(void) 214 { 215 struct se_session *se_sess; 216 217 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 218 if (!se_sess) { 219 pr_err("Unable to allocate struct se_session from" 220 " se_sess_cache\n"); 221 return ERR_PTR(-ENOMEM); 222 } 223 INIT_LIST_HEAD(&se_sess->sess_list); 224 INIT_LIST_HEAD(&se_sess->sess_acl_list); 225 INIT_LIST_HEAD(&se_sess->sess_cmd_list); 226 INIT_LIST_HEAD(&se_sess->sess_wait_list); 227 spin_lock_init(&se_sess->sess_cmd_lock); 228 kref_init(&se_sess->sess_kref); 229 230 return se_sess; 231 } 232 EXPORT_SYMBOL(transport_init_session); 233 234 int transport_alloc_session_tags(struct se_session *se_sess, 235 unsigned int tag_num, unsigned int tag_size) 236 { 237 int rc; 238 239 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, GFP_KERNEL); 240 if (!se_sess->sess_cmd_map) { 241 pr_err("Unable to allocate se_sess->sess_cmd_map\n"); 242 return -ENOMEM; 243 } 244 245 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num); 246 if (rc < 0) { 247 pr_err("Unable to init se_sess->sess_tag_pool," 248 " tag_num: %u\n", tag_num); 249 kfree(se_sess->sess_cmd_map); 250 se_sess->sess_cmd_map = NULL; 251 return -ENOMEM; 252 } 253 254 return 0; 255 } 256 EXPORT_SYMBOL(transport_alloc_session_tags); 257 258 struct se_session *transport_init_session_tags(unsigned int tag_num, 259 unsigned int tag_size) 260 { 261 struct se_session *se_sess; 262 int rc; 263 264 se_sess = transport_init_session(); 265 if (IS_ERR(se_sess)) 266 return se_sess; 267 268 rc = transport_alloc_session_tags(se_sess, tag_num, tag_size); 269 if (rc < 0) { 270 transport_free_session(se_sess); 271 return ERR_PTR(-ENOMEM); 272 } 273 274 return se_sess; 275 } 276 EXPORT_SYMBOL(transport_init_session_tags); 277 278 /* 279 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called. 280 */ 281 void __transport_register_session( 282 struct se_portal_group *se_tpg, 283 struct se_node_acl *se_nacl, 284 struct se_session *se_sess, 285 void *fabric_sess_ptr) 286 { 287 unsigned char buf[PR_REG_ISID_LEN]; 288 289 se_sess->se_tpg = se_tpg; 290 se_sess->fabric_sess_ptr = fabric_sess_ptr; 291 /* 292 * Used by struct se_node_acl's under ConfigFS to locate active se_session-t 293 * 294 * Only set for struct se_session's that will actually be moving I/O. 295 * eg: *NOT* discovery sessions. 296 */ 297 if (se_nacl) { 298 /* 299 * If the fabric module supports an ISID based TransportID, 300 * save this value in binary from the fabric I_T Nexus now. 
301 */ 302 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { 303 memset(&buf[0], 0, PR_REG_ISID_LEN); 304 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, 305 &buf[0], PR_REG_ISID_LEN); 306 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); 307 } 308 kref_get(&se_nacl->acl_kref); 309 310 spin_lock_irq(&se_nacl->nacl_sess_lock); 311 /* 312 * The se_nacl->nacl_sess pointer will be set to the 313 * last active I_T Nexus for each struct se_node_acl. 314 */ 315 se_nacl->nacl_sess = se_sess; 316 317 list_add_tail(&se_sess->sess_acl_list, 318 &se_nacl->acl_sess_list); 319 spin_unlock_irq(&se_nacl->nacl_sess_lock); 320 } 321 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 322 323 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 324 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); 325 } 326 EXPORT_SYMBOL(__transport_register_session); 327 328 void transport_register_session( 329 struct se_portal_group *se_tpg, 330 struct se_node_acl *se_nacl, 331 struct se_session *se_sess, 332 void *fabric_sess_ptr) 333 { 334 unsigned long flags; 335 336 spin_lock_irqsave(&se_tpg->session_lock, flags); 337 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); 338 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 339 } 340 EXPORT_SYMBOL(transport_register_session); 341 342 static void target_release_session(struct kref *kref) 343 { 344 struct se_session *se_sess = container_of(kref, 345 struct se_session, sess_kref); 346 struct se_portal_group *se_tpg = se_sess->se_tpg; 347 348 se_tpg->se_tpg_tfo->close_session(se_sess); 349 } 350 351 void target_get_session(struct se_session *se_sess) 352 { 353 kref_get(&se_sess->sess_kref); 354 } 355 EXPORT_SYMBOL(target_get_session); 356 357 void target_put_session(struct se_session *se_sess) 358 { 359 struct se_portal_group *tpg = se_sess->se_tpg; 360 361 if (tpg->se_tpg_tfo->put_session != NULL) { 362 tpg->se_tpg_tfo->put_session(se_sess); 363 return; 364 } 365 kref_put(&se_sess->sess_kref, target_release_session); 366 } 367 EXPORT_SYMBOL(target_put_session); 368 369 static void target_complete_nacl(struct kref *kref) 370 { 371 struct se_node_acl *nacl = container_of(kref, 372 struct se_node_acl, acl_kref); 373 374 complete(&nacl->acl_free_comp); 375 } 376 377 void target_put_nacl(struct se_node_acl *nacl) 378 { 379 kref_put(&nacl->acl_kref, target_complete_nacl); 380 } 381 382 void transport_deregister_session_configfs(struct se_session *se_sess) 383 { 384 struct se_node_acl *se_nacl; 385 unsigned long flags; 386 /* 387 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 388 */ 389 se_nacl = se_sess->se_node_acl; 390 if (se_nacl) { 391 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 392 if (se_nacl->acl_stop == 0) 393 list_del(&se_sess->sess_acl_list); 394 /* 395 * If the session list is empty, then clear the pointer. 396 * Otherwise, set the struct se_session pointer from the tail 397 * element of the per struct se_node_acl active session list. 
398 */ 399 if (list_empty(&se_nacl->acl_sess_list)) 400 se_nacl->nacl_sess = NULL; 401 else { 402 se_nacl->nacl_sess = container_of( 403 se_nacl->acl_sess_list.prev, 404 struct se_session, sess_acl_list); 405 } 406 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 407 } 408 } 409 EXPORT_SYMBOL(transport_deregister_session_configfs); 410 411 void transport_free_session(struct se_session *se_sess) 412 { 413 if (se_sess->sess_cmd_map) { 414 percpu_ida_destroy(&se_sess->sess_tag_pool); 415 kfree(se_sess->sess_cmd_map); 416 } 417 kmem_cache_free(se_sess_cache, se_sess); 418 } 419 EXPORT_SYMBOL(transport_free_session); 420 421 void transport_deregister_session(struct se_session *se_sess) 422 { 423 struct se_portal_group *se_tpg = se_sess->se_tpg; 424 struct target_core_fabric_ops *se_tfo; 425 struct se_node_acl *se_nacl; 426 unsigned long flags; 427 bool comp_nacl = true; 428 429 if (!se_tpg) { 430 transport_free_session(se_sess); 431 return; 432 } 433 se_tfo = se_tpg->se_tpg_tfo; 434 435 spin_lock_irqsave(&se_tpg->session_lock, flags); 436 list_del(&se_sess->sess_list); 437 se_sess->se_tpg = NULL; 438 se_sess->fabric_sess_ptr = NULL; 439 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 440 441 /* 442 * Determine if we need to do extra work for this initiator node's 443 * struct se_node_acl if it had been previously dynamically generated. 444 */ 445 se_nacl = se_sess->se_node_acl; 446 447 spin_lock_irqsave(&se_tpg->acl_node_lock, flags); 448 if (se_nacl && se_nacl->dynamic_node_acl) { 449 if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 450 list_del(&se_nacl->acl_list); 451 se_tpg->num_node_acls--; 452 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); 453 core_tpg_wait_for_nacl_pr_ref(se_nacl); 454 core_free_device_list_for_node(se_nacl, se_tpg); 455 se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl); 456 457 comp_nacl = false; 458 spin_lock_irqsave(&se_tpg->acl_node_lock, flags); 459 } 460 } 461 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); 462 463 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 464 se_tpg->se_tpg_tfo->get_fabric_name()); 465 /* 466 * If last kref is dropping now for an explict NodeACL, awake sleeping 467 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 468 * removal context. 469 */ 470 if (se_nacl && comp_nacl == true) 471 target_put_nacl(se_nacl); 472 473 transport_free_session(se_sess); 474 } 475 EXPORT_SYMBOL(transport_deregister_session); 476 477 /* 478 * Called with cmd->t_state_lock held. 479 */ 480 static void target_remove_from_state_list(struct se_cmd *cmd) 481 { 482 struct se_device *dev = cmd->se_dev; 483 unsigned long flags; 484 485 if (!dev) 486 return; 487 488 if (cmd->transport_state & CMD_T_BUSY) 489 return; 490 491 spin_lock_irqsave(&dev->execute_task_lock, flags); 492 if (cmd->state_active) { 493 list_del(&cmd->state_list); 494 cmd->state_active = false; 495 } 496 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 497 } 498 499 static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, 500 bool write_pending) 501 { 502 unsigned long flags; 503 504 spin_lock_irqsave(&cmd->t_state_lock, flags); 505 if (write_pending) 506 cmd->t_state = TRANSPORT_WRITE_PENDING; 507 508 /* 509 * Determine if IOCTL context caller in requesting the stopping of this 510 * command for LUN shutdown purposes. 
511 */ 512 if (cmd->transport_state & CMD_T_LUN_STOP) { 513 pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", 514 __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); 515 516 cmd->transport_state &= ~CMD_T_ACTIVE; 517 if (remove_from_lists) 518 target_remove_from_state_list(cmd); 519 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 520 521 complete(&cmd->transport_lun_stop_comp); 522 return 1; 523 } 524 525 if (remove_from_lists) { 526 target_remove_from_state_list(cmd); 527 528 /* 529 * Clear struct se_cmd->se_lun before the handoff to FE. 530 */ 531 cmd->se_lun = NULL; 532 } 533 534 /* 535 * Determine if frontend context caller is requesting the stopping of 536 * this command for frontend exceptions. 537 */ 538 if (cmd->transport_state & CMD_T_STOP) { 539 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", 540 __func__, __LINE__, 541 cmd->se_tfo->get_task_tag(cmd)); 542 543 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 544 545 complete(&cmd->t_transport_stop_comp); 546 return 1; 547 } 548 549 cmd->transport_state &= ~CMD_T_ACTIVE; 550 if (remove_from_lists) { 551 /* 552 * Some fabric modules like tcm_loop can release 553 * their internally allocated I/O reference now and 554 * struct se_cmd now. 555 * 556 * Fabric modules are expected to return '1' here if the 557 * se_cmd being passed is released at this point, 558 * or zero if not being released. 559 */ 560 if (cmd->se_tfo->check_stop_free != NULL) { 561 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 562 return cmd->se_tfo->check_stop_free(cmd); 563 } 564 } 565 566 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 567 return 0; 568 } 569 570 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) 571 { 572 return transport_cmd_check_stop(cmd, true, false); 573 } 574 575 static void transport_lun_remove_cmd(struct se_cmd *cmd) 576 { 577 struct se_lun *lun = cmd->se_lun; 578 unsigned long flags; 579 580 if (!lun) 581 return; 582 583 spin_lock_irqsave(&lun->lun_cmd_lock, flags); 584 if (!list_empty(&cmd->se_lun_node)) 585 list_del_init(&cmd->se_lun_node); 586 spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); 587 } 588 589 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 590 { 591 if (transport_cmd_check_stop_to_fabric(cmd)) 592 return; 593 if (remove) 594 transport_put_cmd(cmd); 595 } 596 597 static void target_complete_failure_work(struct work_struct *work) 598 { 599 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 600 601 transport_generic_request_failure(cmd, 602 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 603 } 604 605 /* 606 * Used when asking transport to copy Sense Data from the underlying 607 * Linux/SCSI struct scsi_cmnd 608 */ 609 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) 610 { 611 struct se_device *dev = cmd->se_dev; 612 613 WARN_ON(!cmd->se_lun); 614 615 if (!dev) 616 return NULL; 617 618 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) 619 return NULL; 620 621 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 622 623 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", 624 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); 625 return cmd->sense_buffer; 626 } 627 628 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 629 { 630 struct se_device *dev = cmd->se_dev; 631 int success = scsi_status == GOOD; 632 unsigned long flags; 633 634 cmd->scsi_status = scsi_status; 635 636 637 spin_lock_irqsave(&cmd->t_state_lock, flags); 638 cmd->transport_state &= ~CMD_T_BUSY; 639 640 if (dev && dev->transport->transport_complete) 
{ 641 dev->transport->transport_complete(cmd, 642 cmd->t_data_sg, 643 transport_get_sense_buffer(cmd)); 644 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 645 success = 1; 646 } 647 648 /* 649 * See if we are waiting to complete for an exception condition. 650 */ 651 if (cmd->transport_state & CMD_T_REQUEST_STOP) { 652 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 653 complete(&cmd->task_stop_comp); 654 return; 655 } 656 657 if (!success) 658 cmd->transport_state |= CMD_T_FAILED; 659 660 /* 661 * Check for case where an explict ABORT_TASK has been received 662 * and transport_wait_for_tasks() will be waiting for completion.. 663 */ 664 if (cmd->transport_state & CMD_T_ABORTED && 665 cmd->transport_state & CMD_T_STOP) { 666 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 667 complete(&cmd->t_transport_stop_comp); 668 return; 669 } else if (cmd->transport_state & CMD_T_FAILED) { 670 INIT_WORK(&cmd->work, target_complete_failure_work); 671 } else { 672 INIT_WORK(&cmd->work, target_complete_ok_work); 673 } 674 675 cmd->t_state = TRANSPORT_COMPLETE; 676 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 677 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 678 679 queue_work(target_completion_wq, &cmd->work); 680 } 681 EXPORT_SYMBOL(target_complete_cmd); 682 683 static void target_add_to_state_list(struct se_cmd *cmd) 684 { 685 struct se_device *dev = cmd->se_dev; 686 unsigned long flags; 687 688 spin_lock_irqsave(&dev->execute_task_lock, flags); 689 if (!cmd->state_active) { 690 list_add_tail(&cmd->state_list, &dev->state_list); 691 cmd->state_active = true; 692 } 693 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 694 } 695 696 /* 697 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status 698 */ 699 static void transport_write_pending_qf(struct se_cmd *cmd); 700 static void transport_complete_qf(struct se_cmd *cmd); 701 702 void target_qf_do_work(struct work_struct *work) 703 { 704 struct se_device *dev = container_of(work, struct se_device, 705 qf_work_queue); 706 LIST_HEAD(qf_cmd_list); 707 struct se_cmd *cmd, *cmd_tmp; 708 709 spin_lock_irq(&dev->qf_cmd_lock); 710 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); 711 spin_unlock_irq(&dev->qf_cmd_lock); 712 713 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 714 list_del(&cmd->se_qf_node); 715 atomic_dec(&dev->dev_qf_count); 716 smp_mb__after_atomic_dec(); 717 718 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 719 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 720 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : 721 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? 
"WRITE_PENDING" 722 : "UNKNOWN"); 723 724 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 725 transport_write_pending_qf(cmd); 726 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) 727 transport_complete_qf(cmd); 728 } 729 } 730 731 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 732 { 733 switch (cmd->data_direction) { 734 case DMA_NONE: 735 return "NONE"; 736 case DMA_FROM_DEVICE: 737 return "READ"; 738 case DMA_TO_DEVICE: 739 return "WRITE"; 740 case DMA_BIDIRECTIONAL: 741 return "BIDI"; 742 default: 743 break; 744 } 745 746 return "UNKNOWN"; 747 } 748 749 void transport_dump_dev_state( 750 struct se_device *dev, 751 char *b, 752 int *bl) 753 { 754 *bl += sprintf(b + *bl, "Status: "); 755 if (dev->export_count) 756 *bl += sprintf(b + *bl, "ACTIVATED"); 757 else 758 *bl += sprintf(b + *bl, "DEACTIVATED"); 759 760 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); 761 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", 762 dev->dev_attrib.block_size, 763 dev->dev_attrib.hw_max_sectors); 764 *bl += sprintf(b + *bl, " "); 765 } 766 767 void transport_dump_vpd_proto_id( 768 struct t10_vpd *vpd, 769 unsigned char *p_buf, 770 int p_buf_len) 771 { 772 unsigned char buf[VPD_TMP_BUF_SIZE]; 773 int len; 774 775 memset(buf, 0, VPD_TMP_BUF_SIZE); 776 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 777 778 switch (vpd->protocol_identifier) { 779 case 0x00: 780 sprintf(buf+len, "Fibre Channel\n"); 781 break; 782 case 0x10: 783 sprintf(buf+len, "Parallel SCSI\n"); 784 break; 785 case 0x20: 786 sprintf(buf+len, "SSA\n"); 787 break; 788 case 0x30: 789 sprintf(buf+len, "IEEE 1394\n"); 790 break; 791 case 0x40: 792 sprintf(buf+len, "SCSI Remote Direct Memory Access" 793 " Protocol\n"); 794 break; 795 case 0x50: 796 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 797 break; 798 case 0x60: 799 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 800 break; 801 case 0x70: 802 sprintf(buf+len, "Automation/Drive Interface Transport" 803 " Protocol\n"); 804 break; 805 case 0x80: 806 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 807 break; 808 default: 809 sprintf(buf+len, "Unknown 0x%02x\n", 810 vpd->protocol_identifier); 811 break; 812 } 813 814 if (p_buf) 815 strncpy(p_buf, buf, p_buf_len); 816 else 817 pr_debug("%s", buf); 818 } 819 820 void 821 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 822 { 823 /* 824 * Check if the Protocol Identifier Valid (PIV) bit is set.. 
825 * 826 * from spc3r23.pdf section 7.5.1 827 */ 828 if (page_83[1] & 0x80) { 829 vpd->protocol_identifier = (page_83[0] & 0xf0); 830 vpd->protocol_identifier_set = 1; 831 transport_dump_vpd_proto_id(vpd, NULL, 0); 832 } 833 } 834 EXPORT_SYMBOL(transport_set_vpd_proto_id); 835 836 int transport_dump_vpd_assoc( 837 struct t10_vpd *vpd, 838 unsigned char *p_buf, 839 int p_buf_len) 840 { 841 unsigned char buf[VPD_TMP_BUF_SIZE]; 842 int ret = 0; 843 int len; 844 845 memset(buf, 0, VPD_TMP_BUF_SIZE); 846 len = sprintf(buf, "T10 VPD Identifier Association: "); 847 848 switch (vpd->association) { 849 case 0x00: 850 sprintf(buf+len, "addressed logical unit\n"); 851 break; 852 case 0x10: 853 sprintf(buf+len, "target port\n"); 854 break; 855 case 0x20: 856 sprintf(buf+len, "SCSI target device\n"); 857 break; 858 default: 859 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); 860 ret = -EINVAL; 861 break; 862 } 863 864 if (p_buf) 865 strncpy(p_buf, buf, p_buf_len); 866 else 867 pr_debug("%s", buf); 868 869 return ret; 870 } 871 872 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) 873 { 874 /* 875 * The VPD identification association.. 876 * 877 * from spc3r23.pdf Section 7.6.3.1 Table 297 878 */ 879 vpd->association = (page_83[1] & 0x30); 880 return transport_dump_vpd_assoc(vpd, NULL, 0); 881 } 882 EXPORT_SYMBOL(transport_set_vpd_assoc); 883 884 int transport_dump_vpd_ident_type( 885 struct t10_vpd *vpd, 886 unsigned char *p_buf, 887 int p_buf_len) 888 { 889 unsigned char buf[VPD_TMP_BUF_SIZE]; 890 int ret = 0; 891 int len; 892 893 memset(buf, 0, VPD_TMP_BUF_SIZE); 894 len = sprintf(buf, "T10 VPD Identifier Type: "); 895 896 switch (vpd->device_identifier_type) { 897 case 0x00: 898 sprintf(buf+len, "Vendor specific\n"); 899 break; 900 case 0x01: 901 sprintf(buf+len, "T10 Vendor ID based\n"); 902 break; 903 case 0x02: 904 sprintf(buf+len, "EUI-64 based\n"); 905 break; 906 case 0x03: 907 sprintf(buf+len, "NAA\n"); 908 break; 909 case 0x04: 910 sprintf(buf+len, "Relative target port identifier\n"); 911 break; 912 case 0x08: 913 sprintf(buf+len, "SCSI name string\n"); 914 break; 915 default: 916 sprintf(buf+len, "Unsupported: 0x%02x\n", 917 vpd->device_identifier_type); 918 ret = -EINVAL; 919 break; 920 } 921 922 if (p_buf) { 923 if (p_buf_len < strlen(buf)+1) 924 return -EINVAL; 925 strncpy(p_buf, buf, p_buf_len); 926 } else { 927 pr_debug("%s", buf); 928 } 929 930 return ret; 931 } 932 933 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) 934 { 935 /* 936 * The VPD identifier type.. 
937 * 938 * from spc3r23.pdf Section 7.6.3.1 Table 298 939 */ 940 vpd->device_identifier_type = (page_83[1] & 0x0f); 941 return transport_dump_vpd_ident_type(vpd, NULL, 0); 942 } 943 EXPORT_SYMBOL(transport_set_vpd_ident_type); 944 945 int transport_dump_vpd_ident( 946 struct t10_vpd *vpd, 947 unsigned char *p_buf, 948 int p_buf_len) 949 { 950 unsigned char buf[VPD_TMP_BUF_SIZE]; 951 int ret = 0; 952 953 memset(buf, 0, VPD_TMP_BUF_SIZE); 954 955 switch (vpd->device_identifier_code_set) { 956 case 0x01: /* Binary */ 957 snprintf(buf, sizeof(buf), 958 "T10 VPD Binary Device Identifier: %s\n", 959 &vpd->device_identifier[0]); 960 break; 961 case 0x02: /* ASCII */ 962 snprintf(buf, sizeof(buf), 963 "T10 VPD ASCII Device Identifier: %s\n", 964 &vpd->device_identifier[0]); 965 break; 966 case 0x03: /* UTF-8 */ 967 snprintf(buf, sizeof(buf), 968 "T10 VPD UTF-8 Device Identifier: %s\n", 969 &vpd->device_identifier[0]); 970 break; 971 default: 972 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" 973 " 0x%02x", vpd->device_identifier_code_set); 974 ret = -EINVAL; 975 break; 976 } 977 978 if (p_buf) 979 strncpy(p_buf, buf, p_buf_len); 980 else 981 pr_debug("%s", buf); 982 983 return ret; 984 } 985 986 int 987 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) 988 { 989 static const char hex_str[] = "0123456789abcdef"; 990 int j = 0, i = 4; /* offset to start of the identifier */ 991 992 /* 993 * The VPD Code Set (encoding) 994 * 995 * from spc3r23.pdf Section 7.6.3.1 Table 296 996 */ 997 vpd->device_identifier_code_set = (page_83[0] & 0x0f); 998 switch (vpd->device_identifier_code_set) { 999 case 0x01: /* Binary */ 1000 vpd->device_identifier[j++] = 1001 hex_str[vpd->device_identifier_type]; 1002 while (i < (4 + page_83[3])) { 1003 vpd->device_identifier[j++] = 1004 hex_str[(page_83[i] & 0xf0) >> 4]; 1005 vpd->device_identifier[j++] = 1006 hex_str[page_83[i] & 0x0f]; 1007 i++; 1008 } 1009 break; 1010 case 0x02: /* ASCII */ 1011 case 0x03: /* UTF-8 */ 1012 while (i < (4 + page_83[3])) 1013 vpd->device_identifier[j++] = page_83[i++]; 1014 break; 1015 default: 1016 break; 1017 } 1018 1019 return transport_dump_vpd_ident(vpd, NULL, 0); 1020 } 1021 EXPORT_SYMBOL(transport_set_vpd_ident); 1022 1023 sense_reason_t 1024 target_cmd_size_check(struct se_cmd *cmd, unsigned int size) 1025 { 1026 struct se_device *dev = cmd->se_dev; 1027 1028 if (cmd->unknown_data_length) { 1029 cmd->data_length = size; 1030 } else if (size != cmd->data_length) { 1031 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" 1032 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 1033 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 1034 cmd->data_length, size, cmd->t_task_cdb[0]); 1035 1036 if (cmd->data_direction == DMA_TO_DEVICE) { 1037 pr_err("Rejecting underflow/overflow" 1038 " WRITE data\n"); 1039 return TCM_INVALID_CDB_FIELD; 1040 } 1041 /* 1042 * Reject READ_* or WRITE_* with overflow/underflow for 1043 * type SCF_SCSI_DATA_CDB. 1044 */ 1045 if (dev->dev_attrib.block_size != 512) { 1046 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 1047 " CDB on non 512-byte sector setup subsystem" 1048 " plugin: %s\n", dev->transport->name); 1049 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ 1050 return TCM_INVALID_CDB_FIELD; 1051 } 1052 /* 1053 * For the overflow case keep the existing fabric provided 1054 * ->data_length. Otherwise for the underflow case, reset 1055 * ->data_length to the smaller SCSI expected data transfer 1056 * length. 
1057 */ 1058 if (size > cmd->data_length) { 1059 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; 1060 cmd->residual_count = (size - cmd->data_length); 1061 } else { 1062 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1063 cmd->residual_count = (cmd->data_length - size); 1064 cmd->data_length = size; 1065 } 1066 } 1067 1068 return 0; 1069 1070 } 1071 1072 /* 1073 * Used by fabric modules containing a local struct se_cmd within their 1074 * fabric dependent per I/O descriptor. 1075 */ 1076 void transport_init_se_cmd( 1077 struct se_cmd *cmd, 1078 struct target_core_fabric_ops *tfo, 1079 struct se_session *se_sess, 1080 u32 data_length, 1081 int data_direction, 1082 int task_attr, 1083 unsigned char *sense_buffer) 1084 { 1085 INIT_LIST_HEAD(&cmd->se_lun_node); 1086 INIT_LIST_HEAD(&cmd->se_delayed_node); 1087 INIT_LIST_HEAD(&cmd->se_qf_node); 1088 INIT_LIST_HEAD(&cmd->se_cmd_list); 1089 INIT_LIST_HEAD(&cmd->state_list); 1090 init_completion(&cmd->transport_lun_fe_stop_comp); 1091 init_completion(&cmd->transport_lun_stop_comp); 1092 init_completion(&cmd->t_transport_stop_comp); 1093 init_completion(&cmd->cmd_wait_comp); 1094 init_completion(&cmd->task_stop_comp); 1095 spin_lock_init(&cmd->t_state_lock); 1096 cmd->transport_state = CMD_T_DEV_ACTIVE; 1097 1098 cmd->se_tfo = tfo; 1099 cmd->se_sess = se_sess; 1100 cmd->data_length = data_length; 1101 cmd->data_direction = data_direction; 1102 cmd->sam_task_attr = task_attr; 1103 cmd->sense_buffer = sense_buffer; 1104 1105 cmd->state_active = false; 1106 } 1107 EXPORT_SYMBOL(transport_init_se_cmd); 1108 1109 static sense_reason_t 1110 transport_check_alloc_task_attr(struct se_cmd *cmd) 1111 { 1112 struct se_device *dev = cmd->se_dev; 1113 1114 /* 1115 * Check if SAM Task Attribute emulation is enabled for this 1116 * struct se_device storage object 1117 */ 1118 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1119 return 0; 1120 1121 if (cmd->sam_task_attr == MSG_ACA_TAG) { 1122 pr_debug("SAM Task Attribute ACA" 1123 " emulation is not supported\n"); 1124 return TCM_INVALID_CDB_FIELD; 1125 } 1126 /* 1127 * Used to determine when ORDERED commands should go from 1128 * Dormant to Active status. 1129 */ 1130 cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id); 1131 smp_mb__after_atomic_inc(); 1132 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", 1133 cmd->se_ordered_id, cmd->sam_task_attr, 1134 dev->transport->name); 1135 return 0; 1136 } 1137 1138 sense_reason_t 1139 target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) 1140 { 1141 struct se_device *dev = cmd->se_dev; 1142 sense_reason_t ret; 1143 1144 /* 1145 * Ensure that the received CDB is less than the max (252 + 8) bytes 1146 * for VARIABLE_LENGTH_CMD 1147 */ 1148 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { 1149 pr_err("Received SCSI CDB with command_size: %d that" 1150 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1151 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1152 return TCM_INVALID_CDB_FIELD; 1153 } 1154 /* 1155 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, 1156 * allocate the additional extended CDB buffer now.. Otherwise 1157 * setup the pointer from __t_task_cdb to t_task_cdb. 
1158 */ 1159 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { 1160 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), 1161 GFP_KERNEL); 1162 if (!cmd->t_task_cdb) { 1163 pr_err("Unable to allocate cmd->t_task_cdb" 1164 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1165 scsi_command_size(cdb), 1166 (unsigned long)sizeof(cmd->__t_task_cdb)); 1167 return TCM_OUT_OF_RESOURCES; 1168 } 1169 } else 1170 cmd->t_task_cdb = &cmd->__t_task_cdb[0]; 1171 /* 1172 * Copy the original CDB into cmd-> 1173 */ 1174 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); 1175 1176 trace_target_sequencer_start(cmd); 1177 1178 /* 1179 * Check for an existing UNIT ATTENTION condition 1180 */ 1181 ret = target_scsi3_ua_check(cmd); 1182 if (ret) 1183 return ret; 1184 1185 ret = target_alua_state_check(cmd); 1186 if (ret) 1187 return ret; 1188 1189 ret = target_check_reservation(cmd); 1190 if (ret) { 1191 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1192 return ret; 1193 } 1194 1195 ret = dev->transport->parse_cdb(cmd); 1196 if (ret) 1197 return ret; 1198 1199 ret = transport_check_alloc_task_attr(cmd); 1200 if (ret) 1201 return ret; 1202 1203 cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 1204 1205 spin_lock(&cmd->se_lun->lun_sep_lock); 1206 if (cmd->se_lun->lun_sep) 1207 cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; 1208 spin_unlock(&cmd->se_lun->lun_sep_lock); 1209 return 0; 1210 } 1211 EXPORT_SYMBOL(target_setup_cmd_from_cdb); 1212 1213 /* 1214 * Used by fabric module frontends to queue tasks directly. 1215 * Many only be used from process context only 1216 */ 1217 int transport_handle_cdb_direct( 1218 struct se_cmd *cmd) 1219 { 1220 sense_reason_t ret; 1221 1222 if (!cmd->se_lun) { 1223 dump_stack(); 1224 pr_err("cmd->se_lun is NULL\n"); 1225 return -EINVAL; 1226 } 1227 if (in_interrupt()) { 1228 dump_stack(); 1229 pr_err("transport_generic_handle_cdb cannot be called" 1230 " from interrupt context\n"); 1231 return -EINVAL; 1232 } 1233 /* 1234 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that 1235 * outstanding descriptors are handled correctly during shutdown via 1236 * transport_wait_for_tasks() 1237 * 1238 * Also, we don't take cmd->t_state_lock here as we only expect 1239 * this to be called for initial descriptor submission. 1240 */ 1241 cmd->t_state = TRANSPORT_NEW_CMD; 1242 cmd->transport_state |= CMD_T_ACTIVE; 1243 1244 /* 1245 * transport_generic_new_cmd() is already handling QUEUE_FULL, 1246 * so follow TRANSPORT_NEW_CMD processing thread context usage 1247 * and call transport_generic_request_failure() if necessary.. 1248 */ 1249 ret = transport_generic_new_cmd(cmd); 1250 if (ret) 1251 transport_generic_request_failure(cmd, ret); 1252 return 0; 1253 } 1254 EXPORT_SYMBOL(transport_handle_cdb_direct); 1255 1256 sense_reason_t 1257 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 1258 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1259 { 1260 if (!sgl || !sgl_count) 1261 return 0; 1262 1263 /* 1264 * Reject SCSI data overflow with map_mem_to_cmd() as incoming 1265 * scatterlists already have been set to follow what the fabric 1266 * passes for the original expected data transfer length. 
1267 */ 1268 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1269 pr_warn("Rejecting SCSI DATA overflow for fabric using" 1270 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); 1271 return TCM_INVALID_CDB_FIELD; 1272 } 1273 1274 cmd->t_data_sg = sgl; 1275 cmd->t_data_nents = sgl_count; 1276 1277 if (sgl_bidi && sgl_bidi_count) { 1278 cmd->t_bidi_data_sg = sgl_bidi; 1279 cmd->t_bidi_data_nents = sgl_bidi_count; 1280 } 1281 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1282 return 0; 1283 } 1284 1285 /* 1286 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized 1287 * se_cmd + use pre-allocated SGL memory. 1288 * 1289 * @se_cmd: command descriptor to submit 1290 * @se_sess: associated se_sess for endpoint 1291 * @cdb: pointer to SCSI CDB 1292 * @sense: pointer to SCSI sense buffer 1293 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1294 * @data_length: fabric expected data transfer length 1295 * @task_addr: SAM task attribute 1296 * @data_dir: DMA data direction 1297 * @flags: flags for command submission from target_sc_flags_tables 1298 * @sgl: struct scatterlist memory for unidirectional mapping 1299 * @sgl_count: scatterlist count for unidirectional mapping 1300 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping 1301 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping 1302 * 1303 * Returns non zero to signal active I/O shutdown failure. All other 1304 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1305 * but still return zero here. 1306 * 1307 * This may only be called from process context, and also currently 1308 * assumes internal allocation of fabric payload buffer by target-core. 1309 */ 1310 int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess, 1311 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, 1312 u32 data_length, int task_attr, int data_dir, int flags, 1313 struct scatterlist *sgl, u32 sgl_count, 1314 struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1315 { 1316 struct se_portal_group *se_tpg; 1317 sense_reason_t rc; 1318 int ret; 1319 1320 se_tpg = se_sess->se_tpg; 1321 BUG_ON(!se_tpg); 1322 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); 1323 BUG_ON(in_interrupt()); 1324 /* 1325 * Initialize se_cmd for target operation. From this point 1326 * exceptions are handled by sending exception status via 1327 * target_core_fabric_ops->queue_status() callback 1328 */ 1329 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1330 data_length, data_dir, task_attr, sense); 1331 if (flags & TARGET_SCF_UNKNOWN_SIZE) 1332 se_cmd->unknown_data_length = 1; 1333 /* 1334 * Obtain struct se_cmd->cmd_kref reference and add new cmd to 1335 * se_sess->sess_cmd_list. A second kref_get here is necessary 1336 * for fabrics using TARGET_SCF_ACK_KREF that expect a second 1337 * kref_put() to happen during fabric packet acknowledgement. 
1338 */ 1339 ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); 1340 if (ret) 1341 return ret; 1342 /* 1343 * Signal bidirectional data payloads to target-core 1344 */ 1345 if (flags & TARGET_SCF_BIDI_OP) 1346 se_cmd->se_cmd_flags |= SCF_BIDI; 1347 /* 1348 * Locate se_lun pointer and attach it to struct se_cmd 1349 */ 1350 rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun); 1351 if (rc) { 1352 transport_send_check_condition_and_sense(se_cmd, rc, 0); 1353 target_put_sess_cmd(se_sess, se_cmd); 1354 return 0; 1355 } 1356 1357 rc = target_setup_cmd_from_cdb(se_cmd, cdb); 1358 if (rc != 0) { 1359 transport_generic_request_failure(se_cmd, rc); 1360 return 0; 1361 } 1362 /* 1363 * When a non zero sgl_count has been passed perform SGL passthrough 1364 * mapping for pre-allocated fabric memory instead of having target 1365 * core perform an internal SGL allocation.. 1366 */ 1367 if (sgl_count != 0) { 1368 BUG_ON(!sgl); 1369 1370 /* 1371 * A work-around for tcm_loop as some userspace code via 1372 * scsi-generic do not memset their associated read buffers, 1373 * so go ahead and do that here for type non-data CDBs. Also 1374 * note that this is currently guaranteed to be a single SGL 1375 * for this case by target core in target_setup_cmd_from_cdb() 1376 * -> transport_generic_cmd_sequencer(). 1377 */ 1378 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && 1379 se_cmd->data_direction == DMA_FROM_DEVICE) { 1380 unsigned char *buf = NULL; 1381 1382 if (sgl) 1383 buf = kmap(sg_page(sgl)) + sgl->offset; 1384 1385 if (buf) { 1386 memset(buf, 0, sgl->length); 1387 kunmap(sg_page(sgl)); 1388 } 1389 } 1390 1391 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count, 1392 sgl_bidi, sgl_bidi_count); 1393 if (rc != 0) { 1394 transport_generic_request_failure(se_cmd, rc); 1395 return 0; 1396 } 1397 } 1398 /* 1399 * Check if we need to delay processing because of ALUA 1400 * Active/NonOptimized primary access state.. 1401 */ 1402 core_alua_check_nonop_delay(se_cmd); 1403 1404 transport_handle_cdb_direct(se_cmd); 1405 return 0; 1406 } 1407 EXPORT_SYMBOL(target_submit_cmd_map_sgls); 1408 1409 /* 1410 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd 1411 * 1412 * @se_cmd: command descriptor to submit 1413 * @se_sess: associated se_sess for endpoint 1414 * @cdb: pointer to SCSI CDB 1415 * @sense: pointer to SCSI sense buffer 1416 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1417 * @data_length: fabric expected data transfer length 1418 * @task_addr: SAM task attribute 1419 * @data_dir: DMA data direction 1420 * @flags: flags for command submission from target_sc_flags_tables 1421 * 1422 * Returns non zero to signal active I/O shutdown failure. All other 1423 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1424 * but still return zero here. 1425 * 1426 * This may only be called from process context, and also currently 1427 * assumes internal allocation of fabric payload buffer by target-core. 1428 * 1429 * It also assumes interal target core SGL memory allocation. 
1430 */ 1431 int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, 1432 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, 1433 u32 data_length, int task_attr, int data_dir, int flags) 1434 { 1435 return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, 1436 unpacked_lun, data_length, task_attr, data_dir, 1437 flags, NULL, 0, NULL, 0); 1438 } 1439 EXPORT_SYMBOL(target_submit_cmd); 1440 1441 static void target_complete_tmr_failure(struct work_struct *work) 1442 { 1443 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); 1444 1445 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; 1446 se_cmd->se_tfo->queue_tm_rsp(se_cmd); 1447 1448 transport_cmd_check_stop_to_fabric(se_cmd); 1449 } 1450 1451 /** 1452 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd 1453 * for TMR CDBs 1454 * 1455 * @se_cmd: command descriptor to submit 1456 * @se_sess: associated se_sess for endpoint 1457 * @sense: pointer to SCSI sense buffer 1458 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1459 * @fabric_context: fabric context for TMR req 1460 * @tm_type: Type of TM request 1461 * @gfp: gfp type for caller 1462 * @tag: referenced task tag for TMR_ABORT_TASK 1463 * @flags: submit cmd flags 1464 * 1465 * Callable from all contexts. 1466 **/ 1467 1468 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 1469 unsigned char *sense, u32 unpacked_lun, 1470 void *fabric_tmr_ptr, unsigned char tm_type, 1471 gfp_t gfp, unsigned int tag, int flags) 1472 { 1473 struct se_portal_group *se_tpg; 1474 int ret; 1475 1476 se_tpg = se_sess->se_tpg; 1477 BUG_ON(!se_tpg); 1478 1479 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1480 0, DMA_NONE, MSG_SIMPLE_TAG, sense); 1481 /* 1482 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req 1483 * allocation failure. 1484 */ 1485 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); 1486 if (ret < 0) 1487 return -ENOMEM; 1488 1489 if (tm_type == TMR_ABORT_TASK) 1490 se_cmd->se_tmr_req->ref_task_tag = tag; 1491 1492 /* See target_submit_cmd for commentary */ 1493 ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); 1494 if (ret) { 1495 core_tmr_release_req(se_cmd->se_tmr_req); 1496 return ret; 1497 } 1498 1499 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); 1500 if (ret) { 1501 /* 1502 * For callback during failure handling, push this work off 1503 * to process context with TMR_LUN_DOES_NOT_EXIST status. 1504 */ 1505 INIT_WORK(&se_cmd->work, target_complete_tmr_failure); 1506 schedule_work(&se_cmd->work); 1507 return 0; 1508 } 1509 transport_generic_handle_tmr(se_cmd); 1510 return 0; 1511 } 1512 EXPORT_SYMBOL(target_submit_tmr); 1513 1514 /* 1515 * If the cmd is active, request it to be stopped and sleep until it 1516 * has completed. 
1517 */ 1518 bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) 1519 { 1520 bool was_active = false; 1521 1522 if (cmd->transport_state & CMD_T_BUSY) { 1523 cmd->transport_state |= CMD_T_REQUEST_STOP; 1524 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 1525 1526 pr_debug("cmd %p waiting to complete\n", cmd); 1527 wait_for_completion(&cmd->task_stop_comp); 1528 pr_debug("cmd %p stopped successfully\n", cmd); 1529 1530 spin_lock_irqsave(&cmd->t_state_lock, *flags); 1531 cmd->transport_state &= ~CMD_T_REQUEST_STOP; 1532 cmd->transport_state &= ~CMD_T_BUSY; 1533 was_active = true; 1534 } 1535 1536 return was_active; 1537 } 1538 1539 /* 1540 * Handle SAM-esque emulation for generic transport request failures. 1541 */ 1542 void transport_generic_request_failure(struct se_cmd *cmd, 1543 sense_reason_t sense_reason) 1544 { 1545 int ret = 0; 1546 1547 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" 1548 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), 1549 cmd->t_task_cdb[0]); 1550 pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n", 1551 cmd->se_tfo->get_cmd_state(cmd), 1552 cmd->t_state, sense_reason); 1553 pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", 1554 (cmd->transport_state & CMD_T_ACTIVE) != 0, 1555 (cmd->transport_state & CMD_T_STOP) != 0, 1556 (cmd->transport_state & CMD_T_SENT) != 0); 1557 1558 /* 1559 * For SAM Task Attribute emulation for failed struct se_cmd 1560 */ 1561 transport_complete_task_attr(cmd); 1562 /* 1563 * Handle special case for COMPARE_AND_WRITE failure, where the 1564 * callback is expected to drop the per device ->caw_mutex. 1565 */ 1566 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 1567 cmd->transport_complete_callback) 1568 cmd->transport_complete_callback(cmd); 1569 1570 switch (sense_reason) { 1571 case TCM_NON_EXISTENT_LUN: 1572 case TCM_UNSUPPORTED_SCSI_OPCODE: 1573 case TCM_INVALID_CDB_FIELD: 1574 case TCM_INVALID_PARAMETER_LIST: 1575 case TCM_PARAMETER_LIST_LENGTH_ERROR: 1576 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 1577 case TCM_UNKNOWN_MODE_PAGE: 1578 case TCM_WRITE_PROTECTED: 1579 case TCM_ADDRESS_OUT_OF_RANGE: 1580 case TCM_CHECK_CONDITION_ABORT_CMD: 1581 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1582 case TCM_CHECK_CONDITION_NOT_READY: 1583 break; 1584 case TCM_OUT_OF_RESOURCES: 1585 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1586 break; 1587 case TCM_RESERVATION_CONFLICT: 1588 /* 1589 * No SENSE Data payload for this case, set SCSI Status 1590 * and queue the response to $FABRIC_MOD. 1591 * 1592 * Uses linux/include/scsi/scsi.h SAM status codes defs 1593 */ 1594 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1595 /* 1596 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 1597 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 1598 * CONFLICT STATUS. 
1599 * 1600 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1601 */ 1602 if (cmd->se_sess && 1603 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) 1604 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, 1605 cmd->orig_fe_lun, 0x2C, 1606 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1607 1608 trace_target_cmd_complete(cmd); 1609 ret = cmd->se_tfo-> queue_status(cmd); 1610 if (ret == -EAGAIN || ret == -ENOMEM) 1611 goto queue_full; 1612 goto check_stop; 1613 default: 1614 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 1615 cmd->t_task_cdb[0], sense_reason); 1616 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1617 break; 1618 } 1619 1620 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); 1621 if (ret == -EAGAIN || ret == -ENOMEM) 1622 goto queue_full; 1623 1624 check_stop: 1625 transport_lun_remove_cmd(cmd); 1626 if (!transport_cmd_check_stop_to_fabric(cmd)) 1627 ; 1628 return; 1629 1630 queue_full: 1631 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 1632 transport_handle_queue_full(cmd, cmd->se_dev); 1633 } 1634 EXPORT_SYMBOL(transport_generic_request_failure); 1635 1636 void __target_execute_cmd(struct se_cmd *cmd) 1637 { 1638 sense_reason_t ret; 1639 1640 if (cmd->execute_cmd) { 1641 ret = cmd->execute_cmd(cmd); 1642 if (ret) { 1643 spin_lock_irq(&cmd->t_state_lock); 1644 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); 1645 spin_unlock_irq(&cmd->t_state_lock); 1646 1647 transport_generic_request_failure(cmd, ret); 1648 } 1649 } 1650 } 1651 1652 static bool target_handle_task_attr(struct se_cmd *cmd) 1653 { 1654 struct se_device *dev = cmd->se_dev; 1655 1656 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1657 return false; 1658 1659 /* 1660 * Check for the existence of HEAD_OF_QUEUE, and if true return 1 1661 * to allow the passed struct se_cmd list of tasks to the front of the list. 1662 */ 1663 switch (cmd->sam_task_attr) { 1664 case MSG_HEAD_TAG: 1665 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, " 1666 "se_ordered_id: %u\n", 1667 cmd->t_task_cdb[0], cmd->se_ordered_id); 1668 return false; 1669 case MSG_ORDERED_TAG: 1670 atomic_inc(&dev->dev_ordered_sync); 1671 smp_mb__after_atomic_inc(); 1672 1673 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " 1674 " se_ordered_id: %u\n", 1675 cmd->t_task_cdb[0], cmd->se_ordered_id); 1676 1677 /* 1678 * Execute an ORDERED command if no other older commands 1679 * exist that need to be completed first. 1680 */ 1681 if (!atomic_read(&dev->simple_cmds)) 1682 return false; 1683 break; 1684 default: 1685 /* 1686 * For SIMPLE and UNTAGGED Task Attribute commands 1687 */ 1688 atomic_inc(&dev->simple_cmds); 1689 smp_mb__after_atomic_inc(); 1690 break; 1691 } 1692 1693 if (atomic_read(&dev->dev_ordered_sync) == 0) 1694 return false; 1695 1696 spin_lock(&dev->delayed_cmd_lock); 1697 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); 1698 spin_unlock(&dev->delayed_cmd_lock); 1699 1700 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" 1701 " delayed CMD list, se_ordered_id: %u\n", 1702 cmd->t_task_cdb[0], cmd->sam_task_attr, 1703 cmd->se_ordered_id); 1704 return true; 1705 } 1706 1707 void target_execute_cmd(struct se_cmd *cmd) 1708 { 1709 /* 1710 * If the received CDB has aleady been aborted stop processing it here. 1711 */ 1712 if (transport_check_aborted_status(cmd, 1)) { 1713 complete(&cmd->transport_lun_stop_comp); 1714 return; 1715 } 1716 1717 /* 1718 * Determine if IOCTL context caller in requesting the stopping of this 1719 * command for LUN shutdown purposes. 
1720 */ 1721 spin_lock_irq(&cmd->t_state_lock); 1722 if (cmd->transport_state & CMD_T_LUN_STOP) { 1723 pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", 1724 __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); 1725 1726 cmd->transport_state &= ~CMD_T_ACTIVE; 1727 spin_unlock_irq(&cmd->t_state_lock); 1728 complete(&cmd->transport_lun_stop_comp); 1729 return; 1730 } 1731 /* 1732 * Determine if frontend context caller is requesting the stopping of 1733 * this command for frontend exceptions. 1734 */ 1735 if (cmd->transport_state & CMD_T_STOP) { 1736 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", 1737 __func__, __LINE__, 1738 cmd->se_tfo->get_task_tag(cmd)); 1739 1740 spin_unlock_irq(&cmd->t_state_lock); 1741 complete(&cmd->t_transport_stop_comp); 1742 return; 1743 } 1744 1745 cmd->t_state = TRANSPORT_PROCESSING; 1746 cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; 1747 spin_unlock_irq(&cmd->t_state_lock); 1748 1749 if (target_handle_task_attr(cmd)) { 1750 spin_lock_irq(&cmd->t_state_lock); 1751 cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT; 1752 spin_unlock_irq(&cmd->t_state_lock); 1753 return; 1754 } 1755 1756 __target_execute_cmd(cmd); 1757 } 1758 EXPORT_SYMBOL(target_execute_cmd); 1759 1760 /* 1761 * Process all commands up to the last received ORDERED task attribute which 1762 * requires another blocking boundary 1763 */ 1764 static void target_restart_delayed_cmds(struct se_device *dev) 1765 { 1766 for (;;) { 1767 struct se_cmd *cmd; 1768 1769 spin_lock(&dev->delayed_cmd_lock); 1770 if (list_empty(&dev->delayed_cmd_list)) { 1771 spin_unlock(&dev->delayed_cmd_lock); 1772 break; 1773 } 1774 1775 cmd = list_entry(dev->delayed_cmd_list.next, 1776 struct se_cmd, se_delayed_node); 1777 list_del(&cmd->se_delayed_node); 1778 spin_unlock(&dev->delayed_cmd_lock); 1779 1780 __target_execute_cmd(cmd); 1781 1782 if (cmd->sam_task_attr == MSG_ORDERED_TAG) 1783 break; 1784 } 1785 } 1786 1787 /* 1788 * Called from I/O completion to determine which dormant/delayed 1789 * and ordered cmds need to have their tasks added to the execution queue. 
1790 */ 1791 static void transport_complete_task_attr(struct se_cmd *cmd) 1792 { 1793 struct se_device *dev = cmd->se_dev; 1794 1795 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1796 return; 1797 1798 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { 1799 atomic_dec(&dev->simple_cmds); 1800 smp_mb__after_atomic_dec(); 1801 dev->dev_cur_ordered_id++; 1802 pr_debug("Incremented dev->dev_cur_ordered_id: %u for" 1803 " SIMPLE: %u\n", dev->dev_cur_ordered_id, 1804 cmd->se_ordered_id); 1805 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { 1806 dev->dev_cur_ordered_id++; 1807 pr_debug("Incremented dev_cur_ordered_id: %u for" 1808 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, 1809 cmd->se_ordered_id); 1810 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 1811 atomic_dec(&dev->dev_ordered_sync); 1812 smp_mb__after_atomic_dec(); 1813 1814 dev->dev_cur_ordered_id++; 1815 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" 1816 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); 1817 } 1818 1819 target_restart_delayed_cmds(dev); 1820 } 1821 1822 static void transport_complete_qf(struct se_cmd *cmd) 1823 { 1824 int ret = 0; 1825 1826 transport_complete_task_attr(cmd); 1827 1828 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 1829 trace_target_cmd_complete(cmd); 1830 ret = cmd->se_tfo->queue_status(cmd); 1831 if (ret) 1832 goto out; 1833 } 1834 1835 switch (cmd->data_direction) { 1836 case DMA_FROM_DEVICE: 1837 trace_target_cmd_complete(cmd); 1838 ret = cmd->se_tfo->queue_data_in(cmd); 1839 break; 1840 case DMA_TO_DEVICE: 1841 if (cmd->se_cmd_flags & SCF_BIDI) { 1842 ret = cmd->se_tfo->queue_data_in(cmd); 1843 if (ret < 0) 1844 break; 1845 } 1846 /* Fall through for DMA_TO_DEVICE */ 1847 case DMA_NONE: 1848 trace_target_cmd_complete(cmd); 1849 ret = cmd->se_tfo->queue_status(cmd); 1850 break; 1851 default: 1852 break; 1853 } 1854 1855 out: 1856 if (ret < 0) { 1857 transport_handle_queue_full(cmd, cmd->se_dev); 1858 return; 1859 } 1860 transport_lun_remove_cmd(cmd); 1861 transport_cmd_check_stop_to_fabric(cmd); 1862 } 1863 1864 static void transport_handle_queue_full( 1865 struct se_cmd *cmd, 1866 struct se_device *dev) 1867 { 1868 spin_lock_irq(&dev->qf_cmd_lock); 1869 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 1870 atomic_inc(&dev->dev_qf_count); 1871 smp_mb__after_atomic_inc(); 1872 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 1873 1874 schedule_work(&cmd->se_dev->qf_work_queue); 1875 } 1876 1877 static void target_complete_ok_work(struct work_struct *work) 1878 { 1879 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 1880 int ret; 1881 1882 /* 1883 * Check if we need to move delayed/dormant tasks from cmds on the 1884 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 1885 * Attribute. 1886 */ 1887 transport_complete_task_attr(cmd); 1888 1889 /* 1890 * Check to schedule QUEUE_FULL work, or execute an existing 1891 * cmd->transport_qf_callback() 1892 */ 1893 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 1894 schedule_work(&cmd->se_dev->qf_work_queue); 1895 1896 /* 1897 * Check if we need to send a sense buffer from 1898 * the struct se_cmd in question. 
1899 */ 1900 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 1901 WARN_ON(!cmd->scsi_status); 1902 ret = transport_send_check_condition_and_sense( 1903 cmd, 0, 1); 1904 if (ret == -EAGAIN || ret == -ENOMEM) 1905 goto queue_full; 1906 1907 transport_lun_remove_cmd(cmd); 1908 transport_cmd_check_stop_to_fabric(cmd); 1909 return; 1910 } 1911 /* 1912 * Check for a callback, used by amongst other things 1913 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. 1914 */ 1915 if (cmd->transport_complete_callback) { 1916 sense_reason_t rc; 1917 1918 rc = cmd->transport_complete_callback(cmd); 1919 if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { 1920 return; 1921 } else if (rc) { 1922 ret = transport_send_check_condition_and_sense(cmd, 1923 rc, 0); 1924 if (ret == -EAGAIN || ret == -ENOMEM) 1925 goto queue_full; 1926 1927 transport_lun_remove_cmd(cmd); 1928 transport_cmd_check_stop_to_fabric(cmd); 1929 return; 1930 } 1931 } 1932 1933 switch (cmd->data_direction) { 1934 case DMA_FROM_DEVICE: 1935 spin_lock(&cmd->se_lun->lun_sep_lock); 1936 if (cmd->se_lun->lun_sep) { 1937 cmd->se_lun->lun_sep->sep_stats.tx_data_octets += 1938 cmd->data_length; 1939 } 1940 spin_unlock(&cmd->se_lun->lun_sep_lock); 1941 1942 trace_target_cmd_complete(cmd); 1943 ret = cmd->se_tfo->queue_data_in(cmd); 1944 if (ret == -EAGAIN || ret == -ENOMEM) 1945 goto queue_full; 1946 break; 1947 case DMA_TO_DEVICE: 1948 spin_lock(&cmd->se_lun->lun_sep_lock); 1949 if (cmd->se_lun->lun_sep) { 1950 cmd->se_lun->lun_sep->sep_stats.rx_data_octets += 1951 cmd->data_length; 1952 } 1953 spin_unlock(&cmd->se_lun->lun_sep_lock); 1954 /* 1955 * Check if we need to send READ payload for BIDI-COMMAND 1956 */ 1957 if (cmd->se_cmd_flags & SCF_BIDI) { 1958 spin_lock(&cmd->se_lun->lun_sep_lock); 1959 if (cmd->se_lun->lun_sep) { 1960 cmd->se_lun->lun_sep->sep_stats.tx_data_octets += 1961 cmd->data_length; 1962 } 1963 spin_unlock(&cmd->se_lun->lun_sep_lock); 1964 ret = cmd->se_tfo->queue_data_in(cmd); 1965 if (ret == -EAGAIN || ret == -ENOMEM) 1966 goto queue_full; 1967 break; 1968 } 1969 /* Fall through for DMA_TO_DEVICE */ 1970 case DMA_NONE: 1971 trace_target_cmd_complete(cmd); 1972 ret = cmd->se_tfo->queue_status(cmd); 1973 if (ret == -EAGAIN || ret == -ENOMEM) 1974 goto queue_full; 1975 break; 1976 default: 1977 break; 1978 } 1979 1980 transport_lun_remove_cmd(cmd); 1981 transport_cmd_check_stop_to_fabric(cmd); 1982 return; 1983 1984 queue_full: 1985 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 1986 " data_direction: %d\n", cmd, cmd->data_direction); 1987 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 1988 transport_handle_queue_full(cmd, cmd->se_dev); 1989 } 1990 1991 static inline void transport_free_sgl(struct scatterlist *sgl, int nents) 1992 { 1993 struct scatterlist *sg; 1994 int count; 1995 1996 for_each_sg(sgl, sg, nents, count) 1997 __free_page(sg_page(sg)); 1998 1999 kfree(sgl); 2000 } 2001 2002 static inline void transport_reset_sgl_orig(struct se_cmd *cmd) 2003 { 2004 /* 2005 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE 2006 * emulation, and free + reset pointers if necessary.. 
2007 */ 2008 if (!cmd->t_data_sg_orig) 2009 return; 2010 2011 kfree(cmd->t_data_sg); 2012 cmd->t_data_sg = cmd->t_data_sg_orig; 2013 cmd->t_data_sg_orig = NULL; 2014 cmd->t_data_nents = cmd->t_data_nents_orig; 2015 cmd->t_data_nents_orig = 0; 2016 } 2017 2018 static inline void transport_free_pages(struct se_cmd *cmd) 2019 { 2020 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { 2021 transport_reset_sgl_orig(cmd); 2022 return; 2023 } 2024 transport_reset_sgl_orig(cmd); 2025 2026 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2027 cmd->t_data_sg = NULL; 2028 cmd->t_data_nents = 0; 2029 2030 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 2031 cmd->t_bidi_data_sg = NULL; 2032 cmd->t_bidi_data_nents = 0; 2033 } 2034 2035 /** 2036 * transport_release_cmd - free a command 2037 * @cmd: command to free 2038 * 2039 * This routine unconditionally frees a command, and reference counting 2040 * or list removal must be done in the caller. 2041 */ 2042 static int transport_release_cmd(struct se_cmd *cmd) 2043 { 2044 BUG_ON(!cmd->se_tfo); 2045 2046 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 2047 core_tmr_release_req(cmd->se_tmr_req); 2048 if (cmd->t_task_cdb != cmd->__t_task_cdb) 2049 kfree(cmd->t_task_cdb); 2050 /* 2051 * If this cmd has been setup with target_get_sess_cmd(), drop 2052 * the kref and call ->release_cmd() in kref callback. 2053 */ 2054 return target_put_sess_cmd(cmd->se_sess, cmd); 2055 } 2056 2057 /** 2058 * transport_put_cmd - release a reference to a command 2059 * @cmd: command to release 2060 * 2061 * This routine releases our reference to the command and frees it if possible. 2062 */ 2063 static int transport_put_cmd(struct se_cmd *cmd) 2064 { 2065 transport_free_pages(cmd); 2066 return transport_release_cmd(cmd); 2067 } 2068 2069 void *transport_kmap_data_sg(struct se_cmd *cmd) 2070 { 2071 struct scatterlist *sg = cmd->t_data_sg; 2072 struct page **pages; 2073 int i; 2074 2075 /* 2076 * We need to take into account a possible offset here for fabrics like 2077 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 2078 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 2079 */ 2080 if (!cmd->t_data_nents) 2081 return NULL; 2082 2083 BUG_ON(!sg); 2084 if (cmd->t_data_nents == 1) 2085 return kmap(sg_page(sg)) + sg->offset; 2086 2087 /* >1 page. use vmap */ 2088 pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL); 2089 if (!pages) 2090 return NULL; 2091 2092 /* convert sg[] to pages[] */ 2093 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 2094 pages[i] = sg_page(sg); 2095 } 2096 2097 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 2098 kfree(pages); 2099 if (!cmd->t_data_vmap) 2100 return NULL; 2101 2102 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 2103 } 2104 EXPORT_SYMBOL(transport_kmap_data_sg); 2105 2106 void transport_kunmap_data_sg(struct se_cmd *cmd) 2107 { 2108 if (!cmd->t_data_nents) { 2109 return; 2110 } else if (cmd->t_data_nents == 1) { 2111 kunmap(sg_page(cmd->t_data_sg)); 2112 return; 2113 } 2114 2115 vunmap(cmd->t_data_vmap); 2116 cmd->t_data_vmap = NULL; 2117 } 2118 EXPORT_SYMBOL(transport_kunmap_data_sg); 2119 2120 int 2121 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, 2122 bool zero_page) 2123 { 2124 struct scatterlist *sg; 2125 struct page *page; 2126 gfp_t zero_flag = (zero_page) ? 
__GFP_ZERO : 0; 2127 unsigned int nent; 2128 int i = 0; 2129 2130 nent = DIV_ROUND_UP(length, PAGE_SIZE); 2131 sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL); 2132 if (!sg) 2133 return -ENOMEM; 2134 2135 sg_init_table(sg, nent); 2136 2137 while (length) { 2138 u32 page_len = min_t(u32, length, PAGE_SIZE); 2139 page = alloc_page(GFP_KERNEL | zero_flag); 2140 if (!page) 2141 goto out; 2142 2143 sg_set_page(&sg[i], page, page_len, 0); 2144 length -= page_len; 2145 i++; 2146 } 2147 *sgl = sg; 2148 *nents = nent; 2149 return 0; 2150 2151 out: 2152 while (i > 0) { 2153 i--; 2154 __free_page(sg_page(&sg[i])); 2155 } 2156 kfree(sg); 2157 return -ENOMEM; 2158 } 2159 2160 /* 2161 * Allocate any required resources to execute the command. For writes we 2162 * might not have the payload yet, so notify the fabric via a call to 2163 * ->write_pending instead. Otherwise place it on the execution queue. 2164 */ 2165 sense_reason_t 2166 transport_generic_new_cmd(struct se_cmd *cmd) 2167 { 2168 int ret = 0; 2169 2170 /* 2171 * Determine is the TCM fabric module has already allocated physical 2172 * memory, and is directly calling transport_generic_map_mem_to_cmd() 2173 * beforehand. 2174 */ 2175 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 2176 cmd->data_length) { 2177 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); 2178 2179 if ((cmd->se_cmd_flags & SCF_BIDI) || 2180 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { 2181 u32 bidi_length; 2182 2183 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) 2184 bidi_length = cmd->t_task_nolb * 2185 cmd->se_dev->dev_attrib.block_size; 2186 else 2187 bidi_length = cmd->data_length; 2188 2189 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2190 &cmd->t_bidi_data_nents, 2191 bidi_length, zero_flag); 2192 if (ret < 0) 2193 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2194 } 2195 2196 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2197 cmd->data_length, zero_flag); 2198 if (ret < 0) 2199 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2200 } 2201 /* 2202 * If this command is not a write we can execute it right here, 2203 * for write buffers we need to notify the fabric driver first 2204 * and let it call back once the write buffers are ready. 2205 */ 2206 target_add_to_state_list(cmd); 2207 if (cmd->data_direction != DMA_TO_DEVICE) { 2208 target_execute_cmd(cmd); 2209 return 0; 2210 } 2211 transport_cmd_check_stop(cmd, false, true); 2212 2213 ret = cmd->se_tfo->write_pending(cmd); 2214 if (ret == -EAGAIN || ret == -ENOMEM) 2215 goto queue_full; 2216 2217 /* fabric drivers should only return -EAGAIN or -ENOMEM as error */ 2218 WARN_ON(ret); 2219 2220 return (!ret) ? 
		0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
	transport_handle_queue_full(cmd, cmd->se_dev);
	return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);

static void transport_write_pending_qf(struct se_cmd *cmd)
{
	int ret;

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev);
	}
}

int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	unsigned long flags;
	int ret = 0;

	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
			transport_wait_for_tasks(cmd);

		ret = transport_release_cmd(cmd);
	} else {
		if (wait_for_tasks)
			transport_wait_for_tasks(cmd);
		/*
		 * Handle WRITE failure case where transport_generic_new_cmd()
		 * has already added se_cmd to state_list, but fabric has
		 * failed command before I/O submission.
		 */
		if (cmd->state_active) {
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			target_remove_from_state_list(cmd);
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		}

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

		ret = transport_put_cmd(cmd);
	}
	return ret;
}
EXPORT_SYMBOL(transport_generic_free_cmd);

/* target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_sess:  session to reference
 * @se_cmd:   command descriptor to add
 * @ack_kref: Signal that the fabric will perform its own final
 *            acknowledgement target_put_sess_cmd()
 */
int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
			bool ack_kref)
{
	unsigned long flags;
	int ret = 0;

	kref_init(&se_cmd->cmd_kref);
	/*
	 * Add a second kref if the fabric caller is expecting to handle
	 * fabric acknowledgement that requires two target_put_sess_cmd()
	 * invocations before se_cmd descriptor release.
2291 */ 2292 if (ack_kref == true) { 2293 kref_get(&se_cmd->cmd_kref); 2294 se_cmd->se_cmd_flags |= SCF_ACK_KREF; 2295 } 2296 2297 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2298 if (se_sess->sess_tearing_down) { 2299 ret = -ESHUTDOWN; 2300 goto out; 2301 } 2302 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2303 out: 2304 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2305 return ret; 2306 } 2307 EXPORT_SYMBOL(target_get_sess_cmd); 2308 2309 static void target_release_cmd_kref(struct kref *kref) 2310 { 2311 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2312 struct se_session *se_sess = se_cmd->se_sess; 2313 2314 if (list_empty(&se_cmd->se_cmd_list)) { 2315 spin_unlock(&se_sess->sess_cmd_lock); 2316 se_cmd->se_tfo->release_cmd(se_cmd); 2317 return; 2318 } 2319 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { 2320 spin_unlock(&se_sess->sess_cmd_lock); 2321 complete(&se_cmd->cmd_wait_comp); 2322 return; 2323 } 2324 list_del(&se_cmd->se_cmd_list); 2325 spin_unlock(&se_sess->sess_cmd_lock); 2326 2327 se_cmd->se_tfo->release_cmd(se_cmd); 2328 } 2329 2330 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put 2331 * @se_sess: session to reference 2332 * @se_cmd: command descriptor to drop 2333 */ 2334 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) 2335 { 2336 return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref, 2337 &se_sess->sess_cmd_lock); 2338 } 2339 EXPORT_SYMBOL(target_put_sess_cmd); 2340 2341 /* target_sess_cmd_list_set_waiting - Flag all commands in 2342 * sess_cmd_list to complete cmd_wait_comp. Set 2343 * sess_tearing_down so no more commands are queued. 2344 * @se_sess: session to flag 2345 */ 2346 void target_sess_cmd_list_set_waiting(struct se_session *se_sess) 2347 { 2348 struct se_cmd *se_cmd; 2349 unsigned long flags; 2350 2351 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2352 if (se_sess->sess_tearing_down) { 2353 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2354 return; 2355 } 2356 se_sess->sess_tearing_down = 1; 2357 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); 2358 2359 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) 2360 se_cmd->cmd_wait_set = 1; 2361 2362 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2363 } 2364 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); 2365 2366 /* target_wait_for_sess_cmds - Wait for outstanding descriptors 2367 * @se_sess: session to wait for active I/O 2368 */ 2369 void target_wait_for_sess_cmds(struct se_session *se_sess) 2370 { 2371 struct se_cmd *se_cmd, *tmp_cmd; 2372 unsigned long flags; 2373 2374 list_for_each_entry_safe(se_cmd, tmp_cmd, 2375 &se_sess->sess_wait_list, se_cmd_list) { 2376 list_del(&se_cmd->se_cmd_list); 2377 2378 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" 2379 " %d\n", se_cmd, se_cmd->t_state, 2380 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2381 2382 wait_for_completion(&se_cmd->cmd_wait_comp); 2383 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" 2384 " fabric state: %d\n", se_cmd, se_cmd->t_state, 2385 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2386 2387 se_cmd->se_tfo->release_cmd(se_cmd); 2388 } 2389 2390 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2391 WARN_ON(!list_empty(&se_sess->sess_cmd_list)); 2392 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2393 2394 } 2395 EXPORT_SYMBOL(target_wait_for_sess_cmds); 2396 2397 /* transport_lun_wait_for_tasks(): 2398 * 2399 * Called from ConfigFS 
context to stop the passed struct se_cmd to allow 2400 * an struct se_lun to be successfully shutdown. 2401 */ 2402 static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) 2403 { 2404 unsigned long flags; 2405 int ret = 0; 2406 2407 /* 2408 * If the frontend has already requested this struct se_cmd to 2409 * be stopped, we can safely ignore this struct se_cmd. 2410 */ 2411 spin_lock_irqsave(&cmd->t_state_lock, flags); 2412 if (cmd->transport_state & CMD_T_STOP) { 2413 cmd->transport_state &= ~CMD_T_LUN_STOP; 2414 2415 pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n", 2416 cmd->se_tfo->get_task_tag(cmd)); 2417 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2418 transport_cmd_check_stop(cmd, false, false); 2419 return -EPERM; 2420 } 2421 cmd->transport_state |= CMD_T_LUN_FE_STOP; 2422 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2423 2424 // XXX: audit task_flags checks. 2425 spin_lock_irqsave(&cmd->t_state_lock, flags); 2426 if ((cmd->transport_state & CMD_T_BUSY) && 2427 (cmd->transport_state & CMD_T_SENT)) { 2428 if (!target_stop_cmd(cmd, &flags)) 2429 ret++; 2430 } 2431 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2432 2433 pr_debug("ConfigFS: cmd: %p stop tasks ret:" 2434 " %d\n", cmd, ret); 2435 if (!ret) { 2436 pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", 2437 cmd->se_tfo->get_task_tag(cmd)); 2438 wait_for_completion(&cmd->transport_lun_stop_comp); 2439 pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", 2440 cmd->se_tfo->get_task_tag(cmd)); 2441 } 2442 2443 return 0; 2444 } 2445 2446 static void __transport_clear_lun_from_sessions(struct se_lun *lun) 2447 { 2448 struct se_cmd *cmd = NULL; 2449 unsigned long lun_flags, cmd_flags; 2450 /* 2451 * Do exception processing and return CHECK_CONDITION status to the 2452 * Initiator Port. 2453 */ 2454 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 2455 while (!list_empty(&lun->lun_cmd_list)) { 2456 cmd = list_first_entry(&lun->lun_cmd_list, 2457 struct se_cmd, se_lun_node); 2458 list_del_init(&cmd->se_lun_node); 2459 2460 spin_lock(&cmd->t_state_lock); 2461 pr_debug("SE_LUN[%d] - Setting cmd->transport" 2462 "_lun_stop for ITT: 0x%08x\n", 2463 cmd->se_lun->unpacked_lun, 2464 cmd->se_tfo->get_task_tag(cmd)); 2465 cmd->transport_state |= CMD_T_LUN_STOP; 2466 spin_unlock(&cmd->t_state_lock); 2467 2468 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); 2469 2470 if (!cmd->se_lun) { 2471 pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n", 2472 cmd->se_tfo->get_task_tag(cmd), 2473 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); 2474 BUG(); 2475 } 2476 /* 2477 * If the Storage engine still owns the iscsi_cmd_t, determine 2478 * and/or stop its context. 
2479 */ 2480 pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport" 2481 "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, 2482 cmd->se_tfo->get_task_tag(cmd)); 2483 2484 if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { 2485 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 2486 continue; 2487 } 2488 2489 pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun" 2490 "_wait_for_tasks(): SUCCESS\n", 2491 cmd->se_lun->unpacked_lun, 2492 cmd->se_tfo->get_task_tag(cmd)); 2493 2494 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); 2495 if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) { 2496 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); 2497 goto check_cond; 2498 } 2499 cmd->transport_state &= ~CMD_T_DEV_ACTIVE; 2500 target_remove_from_state_list(cmd); 2501 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); 2502 2503 /* 2504 * The Storage engine stopped this struct se_cmd before it was 2505 * send to the fabric frontend for delivery back to the 2506 * Initiator Node. Return this SCSI CDB back with an 2507 * CHECK_CONDITION status. 2508 */ 2509 check_cond: 2510 transport_send_check_condition_and_sense(cmd, 2511 TCM_NON_EXISTENT_LUN, 0); 2512 /* 2513 * If the fabric frontend is waiting for this iscsi_cmd_t to 2514 * be released, notify the waiting thread now that LU has 2515 * finished accessing it. 2516 */ 2517 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); 2518 if (cmd->transport_state & CMD_T_LUN_FE_STOP) { 2519 pr_debug("SE_LUN[%d] - Detected FE stop for" 2520 " struct se_cmd: %p ITT: 0x%08x\n", 2521 lun->unpacked_lun, 2522 cmd, cmd->se_tfo->get_task_tag(cmd)); 2523 2524 spin_unlock_irqrestore(&cmd->t_state_lock, 2525 cmd_flags); 2526 transport_cmd_check_stop(cmd, false, false); 2527 complete(&cmd->transport_lun_fe_stop_comp); 2528 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 2529 continue; 2530 } 2531 pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", 2532 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); 2533 2534 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); 2535 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 2536 } 2537 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); 2538 } 2539 2540 static int transport_clear_lun_thread(void *p) 2541 { 2542 struct se_lun *lun = p; 2543 2544 __transport_clear_lun_from_sessions(lun); 2545 complete(&lun->lun_shutdown_comp); 2546 2547 return 0; 2548 } 2549 2550 int transport_clear_lun_from_sessions(struct se_lun *lun) 2551 { 2552 struct task_struct *kt; 2553 2554 kt = kthread_run(transport_clear_lun_thread, lun, 2555 "tcm_cl_%u", lun->unpacked_lun); 2556 if (IS_ERR(kt)) { 2557 pr_err("Unable to start clear_lun thread\n"); 2558 return PTR_ERR(kt); 2559 } 2560 wait_for_completion(&lun->lun_shutdown_comp); 2561 2562 return 0; 2563 } 2564 2565 /** 2566 * transport_wait_for_tasks - wait for completion to occur 2567 * @cmd: command to wait 2568 * 2569 * Called from frontend fabric context to wait for storage engine 2570 * to pause and/or release frontend generated struct se_cmd. 
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}
	/*
	 * If we are already stopped due to an external event (i.e. LUN
	 * shutdown), sleep until the connection can have the passed struct
	 * se_cmd back.  cmd->transport_lun_fe_stop_comp will be completed by
	 * __transport_clear_lun_from_sessions() once the ConfigFS context
	 * caller has finished its operation on the struct se_cmd.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("wait_for_tasks: Stopping"
			" wait_for_completion(&cmd->t_tasktransport_lun_fe"
			"_stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));
		/*
		 * There is a special case for WRITEs where a FE exception +
		 * LUN shutdown means the ConfigFS context is still sleeping
		 * on transport_lun_stop_comp in transport_lun_wait_for_tasks().
		 * We go ahead and complete transport_lun_stop_comp just to be
		 * sure here.
		 */
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->transport_lun_stop_comp);
		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
		spin_lock_irqsave(&cmd->t_state_lock, flags);

		target_remove_from_state_list(cmd);
		/*
		 * At this point the frontend that originated this struct
		 * se_cmd owns the structure again, and it can be released
		 * through normal means below.
2617 */ 2618 pr_debug("wait_for_tasks: Stopped" 2619 " wait_for_completion(&cmd->t_tasktransport_lun_fe_" 2620 "stop_comp); for ITT: 0x%08x\n", 2621 cmd->se_tfo->get_task_tag(cmd)); 2622 2623 cmd->transport_state &= ~CMD_T_LUN_STOP; 2624 } 2625 2626 if (!(cmd->transport_state & CMD_T_ACTIVE)) { 2627 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2628 return false; 2629 } 2630 2631 cmd->transport_state |= CMD_T_STOP; 2632 2633 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" 2634 " i_state: %d, t_state: %d, CMD_T_STOP\n", 2635 cmd, cmd->se_tfo->get_task_tag(cmd), 2636 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); 2637 2638 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2639 2640 wait_for_completion(&cmd->t_transport_stop_comp); 2641 2642 spin_lock_irqsave(&cmd->t_state_lock, flags); 2643 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 2644 2645 pr_debug("wait_for_tasks: Stopped wait_for_completion(" 2646 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", 2647 cmd->se_tfo->get_task_tag(cmd)); 2648 2649 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2650 2651 return true; 2652 } 2653 EXPORT_SYMBOL(transport_wait_for_tasks); 2654 2655 static int transport_get_sense_codes( 2656 struct se_cmd *cmd, 2657 u8 *asc, 2658 u8 *ascq) 2659 { 2660 *asc = cmd->scsi_asc; 2661 *ascq = cmd->scsi_ascq; 2662 2663 return 0; 2664 } 2665 2666 int 2667 transport_send_check_condition_and_sense(struct se_cmd *cmd, 2668 sense_reason_t reason, int from_transport) 2669 { 2670 unsigned char *buffer = cmd->sense_buffer; 2671 unsigned long flags; 2672 u8 asc = 0, ascq = 0; 2673 2674 spin_lock_irqsave(&cmd->t_state_lock, flags); 2675 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 2676 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2677 return 0; 2678 } 2679 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 2680 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2681 2682 if (!reason && from_transport) 2683 goto after_reason; 2684 2685 if (!from_transport) 2686 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 2687 2688 /* 2689 * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses 2690 * SENSE KEY values from include/scsi/scsi.h 2691 */ 2692 switch (reason) { 2693 case TCM_NO_SENSE: 2694 /* CURRENT ERROR */ 2695 buffer[0] = 0x70; 2696 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2697 /* Not Ready */ 2698 buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY; 2699 /* NO ADDITIONAL SENSE INFORMATION */ 2700 buffer[SPC_ASC_KEY_OFFSET] = 0; 2701 buffer[SPC_ASCQ_KEY_OFFSET] = 0; 2702 break; 2703 case TCM_NON_EXISTENT_LUN: 2704 /* CURRENT ERROR */ 2705 buffer[0] = 0x70; 2706 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2707 /* ILLEGAL REQUEST */ 2708 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2709 /* LOGICAL UNIT NOT SUPPORTED */ 2710 buffer[SPC_ASC_KEY_OFFSET] = 0x25; 2711 break; 2712 case TCM_UNSUPPORTED_SCSI_OPCODE: 2713 case TCM_SECTOR_COUNT_TOO_MANY: 2714 /* CURRENT ERROR */ 2715 buffer[0] = 0x70; 2716 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2717 /* ILLEGAL REQUEST */ 2718 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2719 /* INVALID COMMAND OPERATION CODE */ 2720 buffer[SPC_ASC_KEY_OFFSET] = 0x20; 2721 break; 2722 case TCM_UNKNOWN_MODE_PAGE: 2723 /* CURRENT ERROR */ 2724 buffer[0] = 0x70; 2725 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2726 /* ILLEGAL REQUEST */ 2727 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2728 /* INVALID FIELD IN CDB */ 2729 buffer[SPC_ASC_KEY_OFFSET] = 0x24; 2730 break; 2731 case TCM_CHECK_CONDITION_ABORT_CMD: 2732 /* CURRENT ERROR */ 2733 buffer[0] = 0x70; 2734 
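		/*
		 * Response code 0x70 selects current-error, fixed format
		 * sense data; the additional sense length, sense key, ASC
		 * and ASCQ bytes that follow are written at the SPC-defined
		 * SPC_*_OFFSET positions used throughout this switch.
		 */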
buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2735 /* ABORTED COMMAND */ 2736 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2737 /* BUS DEVICE RESET FUNCTION OCCURRED */ 2738 buffer[SPC_ASC_KEY_OFFSET] = 0x29; 2739 buffer[SPC_ASCQ_KEY_OFFSET] = 0x03; 2740 break; 2741 case TCM_INCORRECT_AMOUNT_OF_DATA: 2742 /* CURRENT ERROR */ 2743 buffer[0] = 0x70; 2744 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2745 /* ABORTED COMMAND */ 2746 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2747 /* WRITE ERROR */ 2748 buffer[SPC_ASC_KEY_OFFSET] = 0x0c; 2749 /* NOT ENOUGH UNSOLICITED DATA */ 2750 buffer[SPC_ASCQ_KEY_OFFSET] = 0x0d; 2751 break; 2752 case TCM_INVALID_CDB_FIELD: 2753 /* CURRENT ERROR */ 2754 buffer[0] = 0x70; 2755 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2756 /* ILLEGAL REQUEST */ 2757 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2758 /* INVALID FIELD IN CDB */ 2759 buffer[SPC_ASC_KEY_OFFSET] = 0x24; 2760 break; 2761 case TCM_INVALID_PARAMETER_LIST: 2762 /* CURRENT ERROR */ 2763 buffer[0] = 0x70; 2764 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2765 /* ILLEGAL REQUEST */ 2766 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2767 /* INVALID FIELD IN PARAMETER LIST */ 2768 buffer[SPC_ASC_KEY_OFFSET] = 0x26; 2769 break; 2770 case TCM_PARAMETER_LIST_LENGTH_ERROR: 2771 /* CURRENT ERROR */ 2772 buffer[0] = 0x70; 2773 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2774 /* ILLEGAL REQUEST */ 2775 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2776 /* PARAMETER LIST LENGTH ERROR */ 2777 buffer[SPC_ASC_KEY_OFFSET] = 0x1a; 2778 break; 2779 case TCM_UNEXPECTED_UNSOLICITED_DATA: 2780 /* CURRENT ERROR */ 2781 buffer[0] = 0x70; 2782 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2783 /* ABORTED COMMAND */ 2784 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2785 /* WRITE ERROR */ 2786 buffer[SPC_ASC_KEY_OFFSET] = 0x0c; 2787 /* UNEXPECTED_UNSOLICITED_DATA */ 2788 buffer[SPC_ASCQ_KEY_OFFSET] = 0x0c; 2789 break; 2790 case TCM_SERVICE_CRC_ERROR: 2791 /* CURRENT ERROR */ 2792 buffer[0] = 0x70; 2793 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2794 /* ABORTED COMMAND */ 2795 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2796 /* PROTOCOL SERVICE CRC ERROR */ 2797 buffer[SPC_ASC_KEY_OFFSET] = 0x47; 2798 /* N/A */ 2799 buffer[SPC_ASCQ_KEY_OFFSET] = 0x05; 2800 break; 2801 case TCM_SNACK_REJECTED: 2802 /* CURRENT ERROR */ 2803 buffer[0] = 0x70; 2804 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2805 /* ABORTED COMMAND */ 2806 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2807 /* READ ERROR */ 2808 buffer[SPC_ASC_KEY_OFFSET] = 0x11; 2809 /* FAILED RETRANSMISSION REQUEST */ 2810 buffer[SPC_ASCQ_KEY_OFFSET] = 0x13; 2811 break; 2812 case TCM_WRITE_PROTECTED: 2813 /* CURRENT ERROR */ 2814 buffer[0] = 0x70; 2815 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2816 /* DATA PROTECT */ 2817 buffer[SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; 2818 /* WRITE PROTECTED */ 2819 buffer[SPC_ASC_KEY_OFFSET] = 0x27; 2820 break; 2821 case TCM_ADDRESS_OUT_OF_RANGE: 2822 /* CURRENT ERROR */ 2823 buffer[0] = 0x70; 2824 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2825 /* ILLEGAL REQUEST */ 2826 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2827 /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ 2828 buffer[SPC_ASC_KEY_OFFSET] = 0x21; 2829 break; 2830 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 2831 /* CURRENT ERROR */ 2832 buffer[0] = 0x70; 2833 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2834 /* UNIT ATTENTION */ 2835 buffer[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; 2836 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); 2837 buffer[SPC_ASC_KEY_OFFSET] = asc; 2838 buffer[SPC_ASCQ_KEY_OFFSET] = ascq; 2839 break; 
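	/*
	 * Unlike the hardcoded ASC/ASCQ pairs above, the NOT READY case
	 * below pulls its values from cmd->scsi_asc/cmd->scsi_ascq via
	 * transport_get_sense_codes(); those fields are expected to have
	 * been filled in by whoever raised TCM_CHECK_CONDITION_NOT_READY,
	 * e.g. the ALUA state checks.
	 */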
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* Not Ready */
		buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[SPC_ASC_KEY_OFFSET] = asc;
		buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_MISCOMPARE_VERIFY:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		buffer[SPC_SENSE_KEY_OFFSET] = MISCOMPARE;
		/* MISCOMPARE DURING VERIFY OPERATION */
		buffer[SPC_ASC_KEY_OFFSET] = 0x1d;
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/*
		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
		 * Solaris initiators.  Returning NOT READY instead means the
		 * operations will be retried a finite number of times and we
		 * can survive intermittent errors.
		 */
		buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[SPC_ASC_KEY_OFFSET] = 0x08;
		break;
	}
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

after_reason:
	trace_target_cmd_complete(cmd);
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);

int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	if (!(cmd->transport_state & CMD_T_ABORTED))
		return 0;

	if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
		return 1;

	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
		 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));

	cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
	trace_target_cmd_complete(cmd);
	cmd->se_tfo->queue_status(cmd);

	return 1;
}
EXPORT_SYMBOL(transport_check_aborted_status);

void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to the fabric module by transport_check_aborted_status().
 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			cmd->transport_state |= CMD_T_ABORTED;
			smp_mb__after_atomic_inc();
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;

	transport_lun_remove_cmd(cmd);

	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
		cmd->se_tfo->get_task_tag(cmd));

	trace_target_cmd_complete(cmd);
	cmd->se_tfo->queue_status(cmd);
}

static void target_tmr_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		core_tmr_abort_task(dev, tmr, cmd->se_sess);
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
		       tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
}

int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	INIT_WORK(&cmd->work, target_tmr_work);
	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
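
/*
 * Illustrative sketch only, kept out of the build: the expected pairing of
 * the session shutdown helpers exported above, roughly as a fabric module
 * would use them when tearing down an I_T nexus.  The function name
 * my_fabric_close_session() is hypothetical and not part of this file.
 */
#if 0
static void my_fabric_close_session(struct se_session *se_sess)
{
	/*
	 * Flag every outstanding descriptor and mark the session as tearing
	 * down, so no new commands are added to sess_cmd_list ...
	 */
	target_sess_cmd_list_set_waiting(se_sess);
	/*
	 * ... then block until each flagged descriptor has completed its
	 * cmd_wait_comp and been released via target_put_sess_cmd().
	 */
	target_wait_for_sess_cmds(se_sess);

	transport_deregister_session(se_sess);
}
#endif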