1 /******************************************************************************* 2 * Filename: target_core_transport.c 3 * 4 * This file contains the Generic Target Engine Core. 5 * 6 * (c) Copyright 2002-2013 Datera, Inc. 7 * 8 * Nicholas A. Bellinger <nab@kernel.org> 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2 of the License, or 13 * (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; if not, write to the Free Software 22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 23 * 24 ******************************************************************************/ 25 26 #include <linux/net.h> 27 #include <linux/delay.h> 28 #include <linux/string.h> 29 #include <linux/timer.h> 30 #include <linux/slab.h> 31 #include <linux/spinlock.h> 32 #include <linux/kthread.h> 33 #include <linux/in.h> 34 #include <linux/cdrom.h> 35 #include <linux/module.h> 36 #include <linux/ratelimit.h> 37 #include <asm/unaligned.h> 38 #include <net/sock.h> 39 #include <net/tcp.h> 40 #include <scsi/scsi.h> 41 #include <scsi/scsi_cmnd.h> 42 #include <scsi/scsi_tcq.h> 43 44 #include <target/target_core_base.h> 45 #include <target/target_core_backend.h> 46 #include <target/target_core_fabric.h> 47 #include <target/target_core_configfs.h> 48 49 #include "target_core_internal.h" 50 #include "target_core_alua.h" 51 #include "target_core_pr.h" 52 #include "target_core_ua.h" 53 54 #define CREATE_TRACE_POINTS 55 #include <trace/events/target.h> 56 57 static struct workqueue_struct *target_completion_wq; 58 static struct kmem_cache *se_sess_cache; 59 struct kmem_cache *se_ua_cache; 60 struct kmem_cache *t10_pr_reg_cache; 61 struct kmem_cache *t10_alua_lu_gp_cache; 62 struct kmem_cache *t10_alua_lu_gp_mem_cache; 63 struct kmem_cache *t10_alua_tg_pt_gp_cache; 64 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; 65 66 static void transport_complete_task_attr(struct se_cmd *cmd); 67 static void transport_handle_queue_full(struct se_cmd *cmd, 68 struct se_device *dev); 69 static int transport_put_cmd(struct se_cmd *cmd); 70 static void target_complete_ok_work(struct work_struct *work); 71 72 int init_se_kmem_caches(void) 73 { 74 se_sess_cache = kmem_cache_create("se_sess_cache", 75 sizeof(struct se_session), __alignof__(struct se_session), 76 0, NULL); 77 if (!se_sess_cache) { 78 pr_err("kmem_cache_create() for struct se_session" 79 " failed\n"); 80 goto out; 81 } 82 se_ua_cache = kmem_cache_create("se_ua_cache", 83 sizeof(struct se_ua), __alignof__(struct se_ua), 84 0, NULL); 85 if (!se_ua_cache) { 86 pr_err("kmem_cache_create() for struct se_ua failed\n"); 87 goto out_free_sess_cache; 88 } 89 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", 90 sizeof(struct t10_pr_registration), 91 __alignof__(struct t10_pr_registration), 0, NULL); 92 if (!t10_pr_reg_cache) { 93 pr_err("kmem_cache_create() for struct t10_pr_registration" 94 " failed\n"); 95 goto out_free_ua_cache; 96 } 97 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", 98 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), 
99 0, NULL); 100 if (!t10_alua_lu_gp_cache) { 101 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" 102 " failed\n"); 103 goto out_free_pr_reg_cache; 104 } 105 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", 106 sizeof(struct t10_alua_lu_gp_member), 107 __alignof__(struct t10_alua_lu_gp_member), 0, NULL); 108 if (!t10_alua_lu_gp_mem_cache) { 109 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" 110 "cache failed\n"); 111 goto out_free_lu_gp_cache; 112 } 113 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", 114 sizeof(struct t10_alua_tg_pt_gp), 115 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); 116 if (!t10_alua_tg_pt_gp_cache) { 117 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 118 "cache failed\n"); 119 goto out_free_lu_gp_mem_cache; 120 } 121 t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( 122 "t10_alua_tg_pt_gp_mem_cache", 123 sizeof(struct t10_alua_tg_pt_gp_member), 124 __alignof__(struct t10_alua_tg_pt_gp_member), 125 0, NULL); 126 if (!t10_alua_tg_pt_gp_mem_cache) { 127 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 128 "mem_t failed\n"); 129 goto out_free_tg_pt_gp_cache; 130 } 131 132 target_completion_wq = alloc_workqueue("target_completion", 133 WQ_MEM_RECLAIM, 0); 134 if (!target_completion_wq) 135 goto out_free_tg_pt_gp_mem_cache; 136 137 return 0; 138 139 out_free_tg_pt_gp_mem_cache: 140 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 141 out_free_tg_pt_gp_cache: 142 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 143 out_free_lu_gp_mem_cache: 144 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 145 out_free_lu_gp_cache: 146 kmem_cache_destroy(t10_alua_lu_gp_cache); 147 out_free_pr_reg_cache: 148 kmem_cache_destroy(t10_pr_reg_cache); 149 out_free_ua_cache: 150 kmem_cache_destroy(se_ua_cache); 151 out_free_sess_cache: 152 kmem_cache_destroy(se_sess_cache); 153 out: 154 return -ENOMEM; 155 } 156 157 void release_se_kmem_caches(void) 158 { 159 destroy_workqueue(target_completion_wq); 160 kmem_cache_destroy(se_sess_cache); 161 kmem_cache_destroy(se_ua_cache); 162 kmem_cache_destroy(t10_pr_reg_cache); 163 kmem_cache_destroy(t10_alua_lu_gp_cache); 164 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 165 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 166 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 167 } 168 169 /* This code ensures unique mib indexes are handed out. 
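 *
 * Illustrative usage only (not lifted from a specific fabric driver): a
 * fabric module that wants a unique instance index for its statistics
 * would typically do something like
 *
 *	sess->session_index = scsi_get_new_index(SCSI_INST_INDEX);
 *
 * where the scsi_index_t argument selects one of the counters below.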
*/ 170 static DEFINE_SPINLOCK(scsi_mib_index_lock); 171 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; 172 173 /* 174 * Allocate a new row index for the entry type specified 175 */ 176 u32 scsi_get_new_index(scsi_index_t type) 177 { 178 u32 new_index; 179 180 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); 181 182 spin_lock(&scsi_mib_index_lock); 183 new_index = ++scsi_mib_index[type]; 184 spin_unlock(&scsi_mib_index_lock); 185 186 return new_index; 187 } 188 189 void transport_subsystem_check_init(void) 190 { 191 int ret; 192 static int sub_api_initialized; 193 194 if (sub_api_initialized) 195 return; 196 197 ret = request_module("target_core_iblock"); 198 if (ret != 0) 199 pr_err("Unable to load target_core_iblock\n"); 200 201 ret = request_module("target_core_file"); 202 if (ret != 0) 203 pr_err("Unable to load target_core_file\n"); 204 205 ret = request_module("target_core_pscsi"); 206 if (ret != 0) 207 pr_err("Unable to load target_core_pscsi\n"); 208 209 sub_api_initialized = 1; 210 } 211 212 struct se_session *transport_init_session(void) 213 { 214 struct se_session *se_sess; 215 216 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 217 if (!se_sess) { 218 pr_err("Unable to allocate struct se_session from" 219 " se_sess_cache\n"); 220 return ERR_PTR(-ENOMEM); 221 } 222 INIT_LIST_HEAD(&se_sess->sess_list); 223 INIT_LIST_HEAD(&se_sess->sess_acl_list); 224 INIT_LIST_HEAD(&se_sess->sess_cmd_list); 225 INIT_LIST_HEAD(&se_sess->sess_wait_list); 226 spin_lock_init(&se_sess->sess_cmd_lock); 227 kref_init(&se_sess->sess_kref); 228 229 return se_sess; 230 } 231 EXPORT_SYMBOL(transport_init_session); 232 233 int transport_alloc_session_tags(struct se_session *se_sess, 234 unsigned int tag_num, unsigned int tag_size) 235 { 236 int rc; 237 238 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, 239 GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 240 if (!se_sess->sess_cmd_map) { 241 se_sess->sess_cmd_map = vzalloc(tag_num * tag_size); 242 if (!se_sess->sess_cmd_map) { 243 pr_err("Unable to allocate se_sess->sess_cmd_map\n"); 244 return -ENOMEM; 245 } 246 } 247 248 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num); 249 if (rc < 0) { 250 pr_err("Unable to init se_sess->sess_tag_pool," 251 " tag_num: %u\n", tag_num); 252 if (is_vmalloc_addr(se_sess->sess_cmd_map)) 253 vfree(se_sess->sess_cmd_map); 254 else 255 kfree(se_sess->sess_cmd_map); 256 se_sess->sess_cmd_map = NULL; 257 return -ENOMEM; 258 } 259 260 return 0; 261 } 262 EXPORT_SYMBOL(transport_alloc_session_tags); 263 264 struct se_session *transport_init_session_tags(unsigned int tag_num, 265 unsigned int tag_size) 266 { 267 struct se_session *se_sess; 268 int rc; 269 270 se_sess = transport_init_session(); 271 if (IS_ERR(se_sess)) 272 return se_sess; 273 274 rc = transport_alloc_session_tags(se_sess, tag_num, tag_size); 275 if (rc < 0) { 276 transport_free_session(se_sess); 277 return ERR_PTR(-ENOMEM); 278 } 279 280 return se_sess; 281 } 282 EXPORT_SYMBOL(transport_init_session_tags); 283 284 /* 285 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called. 
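 *
 * Rough sketch of the expected calling pattern (fabric specifics vary):
 * most fabric drivers allocate the session and then use the locking
 * wrapper transport_register_session() below rather than calling this
 * function directly, e.g.
 *
 *	se_sess = transport_init_session();
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_ptr);
 *
 * where fabric_ptr is the driver's own per-session context.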
286 */ 287 void __transport_register_session( 288 struct se_portal_group *se_tpg, 289 struct se_node_acl *se_nacl, 290 struct se_session *se_sess, 291 void *fabric_sess_ptr) 292 { 293 unsigned char buf[PR_REG_ISID_LEN]; 294 295 se_sess->se_tpg = se_tpg; 296 se_sess->fabric_sess_ptr = fabric_sess_ptr; 297 /* 298 * Used by struct se_node_acl's under ConfigFS to locate active se_session-t 299 * 300 * Only set for struct se_session's that will actually be moving I/O. 301 * eg: *NOT* discovery sessions. 302 */ 303 if (se_nacl) { 304 /* 305 * If the fabric module supports an ISID based TransportID, 306 * save this value in binary from the fabric I_T Nexus now. 307 */ 308 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { 309 memset(&buf[0], 0, PR_REG_ISID_LEN); 310 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, 311 &buf[0], PR_REG_ISID_LEN); 312 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); 313 } 314 kref_get(&se_nacl->acl_kref); 315 316 spin_lock_irq(&se_nacl->nacl_sess_lock); 317 /* 318 * The se_nacl->nacl_sess pointer will be set to the 319 * last active I_T Nexus for each struct se_node_acl. 320 */ 321 se_nacl->nacl_sess = se_sess; 322 323 list_add_tail(&se_sess->sess_acl_list, 324 &se_nacl->acl_sess_list); 325 spin_unlock_irq(&se_nacl->nacl_sess_lock); 326 } 327 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 328 329 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 330 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); 331 } 332 EXPORT_SYMBOL(__transport_register_session); 333 334 void transport_register_session( 335 struct se_portal_group *se_tpg, 336 struct se_node_acl *se_nacl, 337 struct se_session *se_sess, 338 void *fabric_sess_ptr) 339 { 340 unsigned long flags; 341 342 spin_lock_irqsave(&se_tpg->session_lock, flags); 343 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); 344 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 345 } 346 EXPORT_SYMBOL(transport_register_session); 347 348 static void target_release_session(struct kref *kref) 349 { 350 struct se_session *se_sess = container_of(kref, 351 struct se_session, sess_kref); 352 struct se_portal_group *se_tpg = se_sess->se_tpg; 353 354 se_tpg->se_tpg_tfo->close_session(se_sess); 355 } 356 357 void target_get_session(struct se_session *se_sess) 358 { 359 kref_get(&se_sess->sess_kref); 360 } 361 EXPORT_SYMBOL(target_get_session); 362 363 void target_put_session(struct se_session *se_sess) 364 { 365 struct se_portal_group *tpg = se_sess->se_tpg; 366 367 if (tpg->se_tpg_tfo->put_session != NULL) { 368 tpg->se_tpg_tfo->put_session(se_sess); 369 return; 370 } 371 kref_put(&se_sess->sess_kref, target_release_session); 372 } 373 EXPORT_SYMBOL(target_put_session); 374 375 static void target_complete_nacl(struct kref *kref) 376 { 377 struct se_node_acl *nacl = container_of(kref, 378 struct se_node_acl, acl_kref); 379 380 complete(&nacl->acl_free_comp); 381 } 382 383 void target_put_nacl(struct se_node_acl *nacl) 384 { 385 kref_put(&nacl->acl_kref, target_complete_nacl); 386 } 387 388 void transport_deregister_session_configfs(struct se_session *se_sess) 389 { 390 struct se_node_acl *se_nacl; 391 unsigned long flags; 392 /* 393 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 394 */ 395 se_nacl = se_sess->se_node_acl; 396 if (se_nacl) { 397 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 398 if (se_nacl->acl_stop == 0) 399 list_del(&se_sess->sess_acl_list); 400 /* 401 * If the session list is empty, then clear the 
pointer. 402 * Otherwise, set the struct se_session pointer from the tail 403 * element of the per struct se_node_acl active session list. 404 */ 405 if (list_empty(&se_nacl->acl_sess_list)) 406 se_nacl->nacl_sess = NULL; 407 else { 408 se_nacl->nacl_sess = container_of( 409 se_nacl->acl_sess_list.prev, 410 struct se_session, sess_acl_list); 411 } 412 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 413 } 414 } 415 EXPORT_SYMBOL(transport_deregister_session_configfs); 416 417 void transport_free_session(struct se_session *se_sess) 418 { 419 if (se_sess->sess_cmd_map) { 420 percpu_ida_destroy(&se_sess->sess_tag_pool); 421 if (is_vmalloc_addr(se_sess->sess_cmd_map)) 422 vfree(se_sess->sess_cmd_map); 423 else 424 kfree(se_sess->sess_cmd_map); 425 } 426 kmem_cache_free(se_sess_cache, se_sess); 427 } 428 EXPORT_SYMBOL(transport_free_session); 429 430 void transport_deregister_session(struct se_session *se_sess) 431 { 432 struct se_portal_group *se_tpg = se_sess->se_tpg; 433 struct target_core_fabric_ops *se_tfo; 434 struct se_node_acl *se_nacl; 435 unsigned long flags; 436 bool comp_nacl = true; 437 438 if (!se_tpg) { 439 transport_free_session(se_sess); 440 return; 441 } 442 se_tfo = se_tpg->se_tpg_tfo; 443 444 spin_lock_irqsave(&se_tpg->session_lock, flags); 445 list_del(&se_sess->sess_list); 446 se_sess->se_tpg = NULL; 447 se_sess->fabric_sess_ptr = NULL; 448 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 449 450 /* 451 * Determine if we need to do extra work for this initiator node's 452 * struct se_node_acl if it had been previously dynamically generated. 453 */ 454 se_nacl = se_sess->se_node_acl; 455 456 spin_lock_irqsave(&se_tpg->acl_node_lock, flags); 457 if (se_nacl && se_nacl->dynamic_node_acl) { 458 if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 459 list_del(&se_nacl->acl_list); 460 se_tpg->num_node_acls--; 461 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); 462 core_tpg_wait_for_nacl_pr_ref(se_nacl); 463 core_free_device_list_for_node(se_nacl, se_tpg); 464 se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl); 465 466 comp_nacl = false; 467 spin_lock_irqsave(&se_tpg->acl_node_lock, flags); 468 } 469 } 470 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); 471 472 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 473 se_tpg->se_tpg_tfo->get_fabric_name()); 474 /* 475 * If last kref is dropping now for an explicit NodeACL, awake sleeping 476 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 477 * removal context. 478 */ 479 if (se_nacl && comp_nacl == true) 480 target_put_nacl(se_nacl); 481 482 transport_free_session(se_sess); 483 } 484 EXPORT_SYMBOL(transport_deregister_session); 485 486 /* 487 * Called with cmd->t_state_lock held. 
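 * (Its counterpart, target_add_to_state_list(), appears further below;
 * both helpers take dev->execute_task_lock internally, so the comment
 * above only describes what is expected of this function's callers.)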
488 */ 489 static void target_remove_from_state_list(struct se_cmd *cmd) 490 { 491 struct se_device *dev = cmd->se_dev; 492 unsigned long flags; 493 494 if (!dev) 495 return; 496 497 if (cmd->transport_state & CMD_T_BUSY) 498 return; 499 500 spin_lock_irqsave(&dev->execute_task_lock, flags); 501 if (cmd->state_active) { 502 list_del(&cmd->state_list); 503 cmd->state_active = false; 504 } 505 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 506 } 507 508 static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, 509 bool write_pending) 510 { 511 unsigned long flags; 512 513 spin_lock_irqsave(&cmd->t_state_lock, flags); 514 if (write_pending) 515 cmd->t_state = TRANSPORT_WRITE_PENDING; 516 517 if (remove_from_lists) { 518 target_remove_from_state_list(cmd); 519 520 /* 521 * Clear struct se_cmd->se_lun before the handoff to FE. 522 */ 523 cmd->se_lun = NULL; 524 } 525 526 /* 527 * Determine if frontend context caller is requesting the stopping of 528 * this command for frontend exceptions. 529 */ 530 if (cmd->transport_state & CMD_T_STOP) { 531 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", 532 __func__, __LINE__, 533 cmd->se_tfo->get_task_tag(cmd)); 534 535 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 536 537 complete(&cmd->t_transport_stop_comp); 538 return 1; 539 } 540 541 cmd->transport_state &= ~CMD_T_ACTIVE; 542 if (remove_from_lists) { 543 /* 544 * Some fabric modules like tcm_loop can release 545 * their internally allocated I/O reference now and 546 * struct se_cmd now. 547 * 548 * Fabric modules are expected to return '1' here if the 549 * se_cmd being passed is released at this point, 550 * or zero if not being released. 551 */ 552 if (cmd->se_tfo->check_stop_free != NULL) { 553 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 554 return cmd->se_tfo->check_stop_free(cmd); 555 } 556 } 557 558 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 559 return 0; 560 } 561 562 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) 563 { 564 return transport_cmd_check_stop(cmd, true, false); 565 } 566 567 static void transport_lun_remove_cmd(struct se_cmd *cmd) 568 { 569 struct se_lun *lun = cmd->se_lun; 570 571 if (!lun || !cmd->lun_ref_active) 572 return; 573 574 percpu_ref_put(&lun->lun_ref); 575 } 576 577 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 578 { 579 if (transport_cmd_check_stop_to_fabric(cmd)) 580 return; 581 if (remove) 582 transport_put_cmd(cmd); 583 } 584 585 static void target_complete_failure_work(struct work_struct *work) 586 { 587 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 588 589 transport_generic_request_failure(cmd, 590 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 591 } 592 593 /* 594 * Used when asking transport to copy Sense Data from the underlying 595 * Linux/SCSI struct scsi_cmnd 596 */ 597 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) 598 { 599 struct se_device *dev = cmd->se_dev; 600 601 WARN_ON(!cmd->se_lun); 602 603 if (!dev) 604 return NULL; 605 606 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) 607 return NULL; 608 609 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 610 611 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", 612 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); 613 return cmd->sense_buffer; 614 } 615 616 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 617 { 618 struct se_device *dev = cmd->se_dev; 619 int success = scsi_status == GOOD; 620 unsigned long flags; 621 622 cmd->scsi_status = 
scsi_status; 623 624 625 spin_lock_irqsave(&cmd->t_state_lock, flags); 626 cmd->transport_state &= ~CMD_T_BUSY; 627 628 if (dev && dev->transport->transport_complete) { 629 dev->transport->transport_complete(cmd, 630 cmd->t_data_sg, 631 transport_get_sense_buffer(cmd)); 632 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 633 success = 1; 634 } 635 636 /* 637 * See if we are waiting to complete for an exception condition. 638 */ 639 if (cmd->transport_state & CMD_T_REQUEST_STOP) { 640 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 641 complete(&cmd->task_stop_comp); 642 return; 643 } 644 645 if (!success) 646 cmd->transport_state |= CMD_T_FAILED; 647 648 /* 649 * Check for case where an explicit ABORT_TASK has been received 650 * and transport_wait_for_tasks() will be waiting for completion.. 651 */ 652 if (cmd->transport_state & CMD_T_ABORTED && 653 cmd->transport_state & CMD_T_STOP) { 654 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 655 complete(&cmd->t_transport_stop_comp); 656 return; 657 } else if (cmd->transport_state & CMD_T_FAILED) { 658 INIT_WORK(&cmd->work, target_complete_failure_work); 659 } else { 660 INIT_WORK(&cmd->work, target_complete_ok_work); 661 } 662 663 cmd->t_state = TRANSPORT_COMPLETE; 664 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 665 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 666 667 queue_work(target_completion_wq, &cmd->work); 668 } 669 EXPORT_SYMBOL(target_complete_cmd); 670 671 static void target_add_to_state_list(struct se_cmd *cmd) 672 { 673 struct se_device *dev = cmd->se_dev; 674 unsigned long flags; 675 676 spin_lock_irqsave(&dev->execute_task_lock, flags); 677 if (!cmd->state_active) { 678 list_add_tail(&cmd->state_list, &dev->state_list); 679 cmd->state_active = true; 680 } 681 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 682 } 683 684 /* 685 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status 686 */ 687 static void transport_write_pending_qf(struct se_cmd *cmd); 688 static void transport_complete_qf(struct se_cmd *cmd); 689 690 void target_qf_do_work(struct work_struct *work) 691 { 692 struct se_device *dev = container_of(work, struct se_device, 693 qf_work_queue); 694 LIST_HEAD(qf_cmd_list); 695 struct se_cmd *cmd, *cmd_tmp; 696 697 spin_lock_irq(&dev->qf_cmd_lock); 698 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); 699 spin_unlock_irq(&dev->qf_cmd_lock); 700 701 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 702 list_del(&cmd->se_qf_node); 703 atomic_dec(&dev->dev_qf_count); 704 smp_mb__after_atomic_dec(); 705 706 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 707 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 708 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : 709 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? 
"WRITE_PENDING" 710 : "UNKNOWN"); 711 712 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 713 transport_write_pending_qf(cmd); 714 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) 715 transport_complete_qf(cmd); 716 } 717 } 718 719 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 720 { 721 switch (cmd->data_direction) { 722 case DMA_NONE: 723 return "NONE"; 724 case DMA_FROM_DEVICE: 725 return "READ"; 726 case DMA_TO_DEVICE: 727 return "WRITE"; 728 case DMA_BIDIRECTIONAL: 729 return "BIDI"; 730 default: 731 break; 732 } 733 734 return "UNKNOWN"; 735 } 736 737 void transport_dump_dev_state( 738 struct se_device *dev, 739 char *b, 740 int *bl) 741 { 742 *bl += sprintf(b + *bl, "Status: "); 743 if (dev->export_count) 744 *bl += sprintf(b + *bl, "ACTIVATED"); 745 else 746 *bl += sprintf(b + *bl, "DEACTIVATED"); 747 748 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); 749 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", 750 dev->dev_attrib.block_size, 751 dev->dev_attrib.hw_max_sectors); 752 *bl += sprintf(b + *bl, " "); 753 } 754 755 void transport_dump_vpd_proto_id( 756 struct t10_vpd *vpd, 757 unsigned char *p_buf, 758 int p_buf_len) 759 { 760 unsigned char buf[VPD_TMP_BUF_SIZE]; 761 int len; 762 763 memset(buf, 0, VPD_TMP_BUF_SIZE); 764 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 765 766 switch (vpd->protocol_identifier) { 767 case 0x00: 768 sprintf(buf+len, "Fibre Channel\n"); 769 break; 770 case 0x10: 771 sprintf(buf+len, "Parallel SCSI\n"); 772 break; 773 case 0x20: 774 sprintf(buf+len, "SSA\n"); 775 break; 776 case 0x30: 777 sprintf(buf+len, "IEEE 1394\n"); 778 break; 779 case 0x40: 780 sprintf(buf+len, "SCSI Remote Direct Memory Access" 781 " Protocol\n"); 782 break; 783 case 0x50: 784 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 785 break; 786 case 0x60: 787 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 788 break; 789 case 0x70: 790 sprintf(buf+len, "Automation/Drive Interface Transport" 791 " Protocol\n"); 792 break; 793 case 0x80: 794 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 795 break; 796 default: 797 sprintf(buf+len, "Unknown 0x%02x\n", 798 vpd->protocol_identifier); 799 break; 800 } 801 802 if (p_buf) 803 strncpy(p_buf, buf, p_buf_len); 804 else 805 pr_debug("%s", buf); 806 } 807 808 void 809 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 810 { 811 /* 812 * Check if the Protocol Identifier Valid (PIV) bit is set.. 
813 * 814 * from spc3r23.pdf section 7.5.1 815 */ 816 if (page_83[1] & 0x80) { 817 vpd->protocol_identifier = (page_83[0] & 0xf0); 818 vpd->protocol_identifier_set = 1; 819 transport_dump_vpd_proto_id(vpd, NULL, 0); 820 } 821 } 822 EXPORT_SYMBOL(transport_set_vpd_proto_id); 823 824 int transport_dump_vpd_assoc( 825 struct t10_vpd *vpd, 826 unsigned char *p_buf, 827 int p_buf_len) 828 { 829 unsigned char buf[VPD_TMP_BUF_SIZE]; 830 int ret = 0; 831 int len; 832 833 memset(buf, 0, VPD_TMP_BUF_SIZE); 834 len = sprintf(buf, "T10 VPD Identifier Association: "); 835 836 switch (vpd->association) { 837 case 0x00: 838 sprintf(buf+len, "addressed logical unit\n"); 839 break; 840 case 0x10: 841 sprintf(buf+len, "target port\n"); 842 break; 843 case 0x20: 844 sprintf(buf+len, "SCSI target device\n"); 845 break; 846 default: 847 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); 848 ret = -EINVAL; 849 break; 850 } 851 852 if (p_buf) 853 strncpy(p_buf, buf, p_buf_len); 854 else 855 pr_debug("%s", buf); 856 857 return ret; 858 } 859 860 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) 861 { 862 /* 863 * The VPD identification association.. 864 * 865 * from spc3r23.pdf Section 7.6.3.1 Table 297 866 */ 867 vpd->association = (page_83[1] & 0x30); 868 return transport_dump_vpd_assoc(vpd, NULL, 0); 869 } 870 EXPORT_SYMBOL(transport_set_vpd_assoc); 871 872 int transport_dump_vpd_ident_type( 873 struct t10_vpd *vpd, 874 unsigned char *p_buf, 875 int p_buf_len) 876 { 877 unsigned char buf[VPD_TMP_BUF_SIZE]; 878 int ret = 0; 879 int len; 880 881 memset(buf, 0, VPD_TMP_BUF_SIZE); 882 len = sprintf(buf, "T10 VPD Identifier Type: "); 883 884 switch (vpd->device_identifier_type) { 885 case 0x00: 886 sprintf(buf+len, "Vendor specific\n"); 887 break; 888 case 0x01: 889 sprintf(buf+len, "T10 Vendor ID based\n"); 890 break; 891 case 0x02: 892 sprintf(buf+len, "EUI-64 based\n"); 893 break; 894 case 0x03: 895 sprintf(buf+len, "NAA\n"); 896 break; 897 case 0x04: 898 sprintf(buf+len, "Relative target port identifier\n"); 899 break; 900 case 0x08: 901 sprintf(buf+len, "SCSI name string\n"); 902 break; 903 default: 904 sprintf(buf+len, "Unsupported: 0x%02x\n", 905 vpd->device_identifier_type); 906 ret = -EINVAL; 907 break; 908 } 909 910 if (p_buf) { 911 if (p_buf_len < strlen(buf)+1) 912 return -EINVAL; 913 strncpy(p_buf, buf, p_buf_len); 914 } else { 915 pr_debug("%s", buf); 916 } 917 918 return ret; 919 } 920 921 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) 922 { 923 /* 924 * The VPD identifier type.. 
925 * 926 * from spc3r23.pdf Section 7.6.3.1 Table 298 927 */ 928 vpd->device_identifier_type = (page_83[1] & 0x0f); 929 return transport_dump_vpd_ident_type(vpd, NULL, 0); 930 } 931 EXPORT_SYMBOL(transport_set_vpd_ident_type); 932 933 int transport_dump_vpd_ident( 934 struct t10_vpd *vpd, 935 unsigned char *p_buf, 936 int p_buf_len) 937 { 938 unsigned char buf[VPD_TMP_BUF_SIZE]; 939 int ret = 0; 940 941 memset(buf, 0, VPD_TMP_BUF_SIZE); 942 943 switch (vpd->device_identifier_code_set) { 944 case 0x01: /* Binary */ 945 snprintf(buf, sizeof(buf), 946 "T10 VPD Binary Device Identifier: %s\n", 947 &vpd->device_identifier[0]); 948 break; 949 case 0x02: /* ASCII */ 950 snprintf(buf, sizeof(buf), 951 "T10 VPD ASCII Device Identifier: %s\n", 952 &vpd->device_identifier[0]); 953 break; 954 case 0x03: /* UTF-8 */ 955 snprintf(buf, sizeof(buf), 956 "T10 VPD UTF-8 Device Identifier: %s\n", 957 &vpd->device_identifier[0]); 958 break; 959 default: 960 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" 961 " 0x%02x", vpd->device_identifier_code_set); 962 ret = -EINVAL; 963 break; 964 } 965 966 if (p_buf) 967 strncpy(p_buf, buf, p_buf_len); 968 else 969 pr_debug("%s", buf); 970 971 return ret; 972 } 973 974 int 975 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) 976 { 977 static const char hex_str[] = "0123456789abcdef"; 978 int j = 0, i = 4; /* offset to start of the identifier */ 979 980 /* 981 * The VPD Code Set (encoding) 982 * 983 * from spc3r23.pdf Section 7.6.3.1 Table 296 984 */ 985 vpd->device_identifier_code_set = (page_83[0] & 0x0f); 986 switch (vpd->device_identifier_code_set) { 987 case 0x01: /* Binary */ 988 vpd->device_identifier[j++] = 989 hex_str[vpd->device_identifier_type]; 990 while (i < (4 + page_83[3])) { 991 vpd->device_identifier[j++] = 992 hex_str[(page_83[i] & 0xf0) >> 4]; 993 vpd->device_identifier[j++] = 994 hex_str[page_83[i] & 0x0f]; 995 i++; 996 } 997 break; 998 case 0x02: /* ASCII */ 999 case 0x03: /* UTF-8 */ 1000 while (i < (4 + page_83[3])) 1001 vpd->device_identifier[j++] = page_83[i++]; 1002 break; 1003 default: 1004 break; 1005 } 1006 1007 return transport_dump_vpd_ident(vpd, NULL, 0); 1008 } 1009 EXPORT_SYMBOL(transport_set_vpd_ident); 1010 1011 sense_reason_t 1012 target_cmd_size_check(struct se_cmd *cmd, unsigned int size) 1013 { 1014 struct se_device *dev = cmd->se_dev; 1015 1016 if (cmd->unknown_data_length) { 1017 cmd->data_length = size; 1018 } else if (size != cmd->data_length) { 1019 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" 1020 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 1021 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 1022 cmd->data_length, size, cmd->t_task_cdb[0]); 1023 1024 if (cmd->data_direction == DMA_TO_DEVICE) { 1025 pr_err("Rejecting underflow/overflow" 1026 " WRITE data\n"); 1027 return TCM_INVALID_CDB_FIELD; 1028 } 1029 /* 1030 * Reject READ_* or WRITE_* with overflow/underflow for 1031 * type SCF_SCSI_DATA_CDB. 1032 */ 1033 if (dev->dev_attrib.block_size != 512) { 1034 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 1035 " CDB on non 512-byte sector setup subsystem" 1036 " plugin: %s\n", dev->transport->name); 1037 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ 1038 return TCM_INVALID_CDB_FIELD; 1039 } 1040 /* 1041 * For the overflow case keep the existing fabric provided 1042 * ->data_length. Otherwise for the underflow case, reset 1043 * ->data_length to the smaller SCSI expected data transfer 1044 * length. 
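 *
 * Numeric example mirroring the checks below: with a CDB-derived size of
 * 4096 and a fabric data_length of 2048 the OVERFLOW bit is set,
 * residual_count becomes 2048 and data_length stays 2048; with size 2048
 * and data_length 4096 the UNDERFLOW bit is set, residual_count becomes
 * 2048 and data_length is reduced to 2048.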
1045 */ 1046 if (size > cmd->data_length) { 1047 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; 1048 cmd->residual_count = (size - cmd->data_length); 1049 } else { 1050 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1051 cmd->residual_count = (cmd->data_length - size); 1052 cmd->data_length = size; 1053 } 1054 } 1055 1056 return 0; 1057 1058 } 1059 1060 /* 1061 * Used by fabric modules containing a local struct se_cmd within their 1062 * fabric dependent per I/O descriptor. 1063 */ 1064 void transport_init_se_cmd( 1065 struct se_cmd *cmd, 1066 struct target_core_fabric_ops *tfo, 1067 struct se_session *se_sess, 1068 u32 data_length, 1069 int data_direction, 1070 int task_attr, 1071 unsigned char *sense_buffer) 1072 { 1073 INIT_LIST_HEAD(&cmd->se_delayed_node); 1074 INIT_LIST_HEAD(&cmd->se_qf_node); 1075 INIT_LIST_HEAD(&cmd->se_cmd_list); 1076 INIT_LIST_HEAD(&cmd->state_list); 1077 init_completion(&cmd->t_transport_stop_comp); 1078 init_completion(&cmd->cmd_wait_comp); 1079 init_completion(&cmd->task_stop_comp); 1080 spin_lock_init(&cmd->t_state_lock); 1081 cmd->transport_state = CMD_T_DEV_ACTIVE; 1082 1083 cmd->se_tfo = tfo; 1084 cmd->se_sess = se_sess; 1085 cmd->data_length = data_length; 1086 cmd->data_direction = data_direction; 1087 cmd->sam_task_attr = task_attr; 1088 cmd->sense_buffer = sense_buffer; 1089 1090 cmd->state_active = false; 1091 } 1092 EXPORT_SYMBOL(transport_init_se_cmd); 1093 1094 static sense_reason_t 1095 transport_check_alloc_task_attr(struct se_cmd *cmd) 1096 { 1097 struct se_device *dev = cmd->se_dev; 1098 1099 /* 1100 * Check if SAM Task Attribute emulation is enabled for this 1101 * struct se_device storage object 1102 */ 1103 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1104 return 0; 1105 1106 if (cmd->sam_task_attr == MSG_ACA_TAG) { 1107 pr_debug("SAM Task Attribute ACA" 1108 " emulation is not supported\n"); 1109 return TCM_INVALID_CDB_FIELD; 1110 } 1111 /* 1112 * Used to determine when ORDERED commands should go from 1113 * Dormant to Active status. 1114 */ 1115 cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id); 1116 smp_mb__after_atomic_inc(); 1117 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", 1118 cmd->se_ordered_id, cmd->sam_task_attr, 1119 dev->transport->name); 1120 return 0; 1121 } 1122 1123 sense_reason_t 1124 target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) 1125 { 1126 struct se_device *dev = cmd->se_dev; 1127 sense_reason_t ret; 1128 1129 /* 1130 * Ensure that the received CDB is less than the max (252 + 8) bytes 1131 * for VARIABLE_LENGTH_CMD 1132 */ 1133 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { 1134 pr_err("Received SCSI CDB with command_size: %d that" 1135 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1136 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1137 return TCM_INVALID_CDB_FIELD; 1138 } 1139 /* 1140 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, 1141 * allocate the additional extended CDB buffer now.. Otherwise 1142 * setup the pointer from __t_task_cdb to t_task_cdb. 
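 *
 * For example, a fixed 16-byte CDB fits in the embedded __t_task_cdb[]
 * array, while anything larger than sizeof(cmd->__t_task_cdb) (up to
 * SCSI_MAX_VARLEN_CDB_SIZE) takes the kzalloc() path below.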
1143 */ 1144 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { 1145 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), 1146 GFP_KERNEL); 1147 if (!cmd->t_task_cdb) { 1148 pr_err("Unable to allocate cmd->t_task_cdb" 1149 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1150 scsi_command_size(cdb), 1151 (unsigned long)sizeof(cmd->__t_task_cdb)); 1152 return TCM_OUT_OF_RESOURCES; 1153 } 1154 } else 1155 cmd->t_task_cdb = &cmd->__t_task_cdb[0]; 1156 /* 1157 * Copy the original CDB into cmd-> 1158 */ 1159 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); 1160 1161 trace_target_sequencer_start(cmd); 1162 1163 /* 1164 * Check for an existing UNIT ATTENTION condition 1165 */ 1166 ret = target_scsi3_ua_check(cmd); 1167 if (ret) 1168 return ret; 1169 1170 ret = target_alua_state_check(cmd); 1171 if (ret) 1172 return ret; 1173 1174 ret = target_check_reservation(cmd); 1175 if (ret) { 1176 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1177 return ret; 1178 } 1179 1180 ret = dev->transport->parse_cdb(cmd); 1181 if (ret) 1182 return ret; 1183 1184 ret = transport_check_alloc_task_attr(cmd); 1185 if (ret) 1186 return ret; 1187 1188 cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 1189 1190 spin_lock(&cmd->se_lun->lun_sep_lock); 1191 if (cmd->se_lun->lun_sep) 1192 cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; 1193 spin_unlock(&cmd->se_lun->lun_sep_lock); 1194 return 0; 1195 } 1196 EXPORT_SYMBOL(target_setup_cmd_from_cdb); 1197 1198 /* 1199 * Used by fabric module frontends to queue tasks directly. 1200 * Many only be used from process context only 1201 */ 1202 int transport_handle_cdb_direct( 1203 struct se_cmd *cmd) 1204 { 1205 sense_reason_t ret; 1206 1207 if (!cmd->se_lun) { 1208 dump_stack(); 1209 pr_err("cmd->se_lun is NULL\n"); 1210 return -EINVAL; 1211 } 1212 if (in_interrupt()) { 1213 dump_stack(); 1214 pr_err("transport_generic_handle_cdb cannot be called" 1215 " from interrupt context\n"); 1216 return -EINVAL; 1217 } 1218 /* 1219 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that 1220 * outstanding descriptors are handled correctly during shutdown via 1221 * transport_wait_for_tasks() 1222 * 1223 * Also, we don't take cmd->t_state_lock here as we only expect 1224 * this to be called for initial descriptor submission. 1225 */ 1226 cmd->t_state = TRANSPORT_NEW_CMD; 1227 cmd->transport_state |= CMD_T_ACTIVE; 1228 1229 /* 1230 * transport_generic_new_cmd() is already handling QUEUE_FULL, 1231 * so follow TRANSPORT_NEW_CMD processing thread context usage 1232 * and call transport_generic_request_failure() if necessary.. 1233 */ 1234 ret = transport_generic_new_cmd(cmd); 1235 if (ret) 1236 transport_generic_request_failure(cmd, ret); 1237 return 0; 1238 } 1239 EXPORT_SYMBOL(transport_handle_cdb_direct); 1240 1241 sense_reason_t 1242 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 1243 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1244 { 1245 if (!sgl || !sgl_count) 1246 return 0; 1247 1248 /* 1249 * Reject SCSI data overflow with map_mem_to_cmd() as incoming 1250 * scatterlists already have been set to follow what the fabric 1251 * passes for the original expected data transfer length. 
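 *
 * This is the path reached by SGL-passing fabrics such as tcm_loop via
 * target_submit_cmd_map_sgls() below, so the overflow case is rejected
 * here rather than silently truncated.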
1252 */ 1253 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1254 pr_warn("Rejecting SCSI DATA overflow for fabric using" 1255 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); 1256 return TCM_INVALID_CDB_FIELD; 1257 } 1258 1259 cmd->t_data_sg = sgl; 1260 cmd->t_data_nents = sgl_count; 1261 1262 if (sgl_bidi && sgl_bidi_count) { 1263 cmd->t_bidi_data_sg = sgl_bidi; 1264 cmd->t_bidi_data_nents = sgl_bidi_count; 1265 } 1266 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1267 return 0; 1268 } 1269 1270 /* 1271 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized 1272 * se_cmd + use pre-allocated SGL memory. 1273 * 1274 * @se_cmd: command descriptor to submit 1275 * @se_sess: associated se_sess for endpoint 1276 * @cdb: pointer to SCSI CDB 1277 * @sense: pointer to SCSI sense buffer 1278 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1279 * @data_length: fabric expected data transfer length 1280 * @task_addr: SAM task attribute 1281 * @data_dir: DMA data direction 1282 * @flags: flags for command submission from target_sc_flags_tables 1283 * @sgl: struct scatterlist memory for unidirectional mapping 1284 * @sgl_count: scatterlist count for unidirectional mapping 1285 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping 1286 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping 1287 * 1288 * Returns non zero to signal active I/O shutdown failure. All other 1289 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1290 * but still return zero here. 1291 * 1292 * This may only be called from process context, and also currently 1293 * assumes internal allocation of fabric payload buffer by target-core. 1294 */ 1295 int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess, 1296 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, 1297 u32 data_length, int task_attr, int data_dir, int flags, 1298 struct scatterlist *sgl, u32 sgl_count, 1299 struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1300 { 1301 struct se_portal_group *se_tpg; 1302 sense_reason_t rc; 1303 int ret; 1304 1305 se_tpg = se_sess->se_tpg; 1306 BUG_ON(!se_tpg); 1307 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); 1308 BUG_ON(in_interrupt()); 1309 /* 1310 * Initialize se_cmd for target operation. From this point 1311 * exceptions are handled by sending exception status via 1312 * target_core_fabric_ops->queue_status() callback 1313 */ 1314 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1315 data_length, data_dir, task_attr, sense); 1316 if (flags & TARGET_SCF_UNKNOWN_SIZE) 1317 se_cmd->unknown_data_length = 1; 1318 /* 1319 * Obtain struct se_cmd->cmd_kref reference and add new cmd to 1320 * se_sess->sess_cmd_list. A second kref_get here is necessary 1321 * for fabrics using TARGET_SCF_ACK_KREF that expect a second 1322 * kref_put() to happen during fabric packet acknowledgement. 
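 *
 * In practice a fabric passing TARGET_SCF_ACK_KREF is expected to
 * balance that extra reference with its own target_put_sess_cmd() once
 * the initiator has acknowledged the response.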
1323 */ 1324 ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); 1325 if (ret) 1326 return ret; 1327 /* 1328 * Signal bidirectional data payloads to target-core 1329 */ 1330 if (flags & TARGET_SCF_BIDI_OP) 1331 se_cmd->se_cmd_flags |= SCF_BIDI; 1332 /* 1333 * Locate se_lun pointer and attach it to struct se_cmd 1334 */ 1335 rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun); 1336 if (rc) { 1337 transport_send_check_condition_and_sense(se_cmd, rc, 0); 1338 target_put_sess_cmd(se_sess, se_cmd); 1339 return 0; 1340 } 1341 1342 rc = target_setup_cmd_from_cdb(se_cmd, cdb); 1343 if (rc != 0) { 1344 transport_generic_request_failure(se_cmd, rc); 1345 return 0; 1346 } 1347 /* 1348 * When a non zero sgl_count has been passed perform SGL passthrough 1349 * mapping for pre-allocated fabric memory instead of having target 1350 * core perform an internal SGL allocation.. 1351 */ 1352 if (sgl_count != 0) { 1353 BUG_ON(!sgl); 1354 1355 /* 1356 * A work-around for tcm_loop as some userspace code via 1357 * scsi-generic do not memset their associated read buffers, 1358 * so go ahead and do that here for type non-data CDBs. Also 1359 * note that this is currently guaranteed to be a single SGL 1360 * for this case by target core in target_setup_cmd_from_cdb() 1361 * -> transport_generic_cmd_sequencer(). 1362 */ 1363 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && 1364 se_cmd->data_direction == DMA_FROM_DEVICE) { 1365 unsigned char *buf = NULL; 1366 1367 if (sgl) 1368 buf = kmap(sg_page(sgl)) + sgl->offset; 1369 1370 if (buf) { 1371 memset(buf, 0, sgl->length); 1372 kunmap(sg_page(sgl)); 1373 } 1374 } 1375 1376 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count, 1377 sgl_bidi, sgl_bidi_count); 1378 if (rc != 0) { 1379 transport_generic_request_failure(se_cmd, rc); 1380 return 0; 1381 } 1382 } 1383 /* 1384 * Check if we need to delay processing because of ALUA 1385 * Active/NonOptimized primary access state.. 1386 */ 1387 core_alua_check_nonop_delay(se_cmd); 1388 1389 transport_handle_cdb_direct(se_cmd); 1390 return 0; 1391 } 1392 EXPORT_SYMBOL(target_submit_cmd_map_sgls); 1393 1394 /* 1395 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd 1396 * 1397 * @se_cmd: command descriptor to submit 1398 * @se_sess: associated se_sess for endpoint 1399 * @cdb: pointer to SCSI CDB 1400 * @sense: pointer to SCSI sense buffer 1401 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1402 * @data_length: fabric expected data transfer length 1403 * @task_addr: SAM task attribute 1404 * @data_dir: DMA data direction 1405 * @flags: flags for command submission from target_sc_flags_tables 1406 * 1407 * Returns non zero to signal active I/O shutdown failure. All other 1408 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1409 * but still return zero here. 1410 * 1411 * This may only be called from process context, and also currently 1412 * assumes internal allocation of fabric payload buffer by target-core. 1413 * 1414 * It also assumes interal target core SGL memory allocation. 
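 *
 * A minimal illustrative call from a fabric driver (the fabric-side
 * names are placeholders):
 *
 *	target_submit_cmd(&fcmd->se_cmd, se_sess, cdb, sense_buf,
 *			  unpacked_lun, transfer_len, MSG_SIMPLE_TAG,
 *			  DMA_FROM_DEVICE, TARGET_SCF_ACK_KREF);
 *
 * with completion then arriving through the fabric's queue_data_in() and
 * queue_status() callbacks.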
1415 */ 1416 int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, 1417 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, 1418 u32 data_length, int task_attr, int data_dir, int flags) 1419 { 1420 return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, 1421 unpacked_lun, data_length, task_attr, data_dir, 1422 flags, NULL, 0, NULL, 0); 1423 } 1424 EXPORT_SYMBOL(target_submit_cmd); 1425 1426 static void target_complete_tmr_failure(struct work_struct *work) 1427 { 1428 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); 1429 1430 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; 1431 se_cmd->se_tfo->queue_tm_rsp(se_cmd); 1432 1433 transport_cmd_check_stop_to_fabric(se_cmd); 1434 } 1435 1436 /** 1437 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd 1438 * for TMR CDBs 1439 * 1440 * @se_cmd: command descriptor to submit 1441 * @se_sess: associated se_sess for endpoint 1442 * @sense: pointer to SCSI sense buffer 1443 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1444 * @fabric_context: fabric context for TMR req 1445 * @tm_type: Type of TM request 1446 * @gfp: gfp type for caller 1447 * @tag: referenced task tag for TMR_ABORT_TASK 1448 * @flags: submit cmd flags 1449 * 1450 * Callable from all contexts. 1451 **/ 1452 1453 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 1454 unsigned char *sense, u32 unpacked_lun, 1455 void *fabric_tmr_ptr, unsigned char tm_type, 1456 gfp_t gfp, unsigned int tag, int flags) 1457 { 1458 struct se_portal_group *se_tpg; 1459 int ret; 1460 1461 se_tpg = se_sess->se_tpg; 1462 BUG_ON(!se_tpg); 1463 1464 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1465 0, DMA_NONE, MSG_SIMPLE_TAG, sense); 1466 /* 1467 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req 1468 * allocation failure. 1469 */ 1470 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); 1471 if (ret < 0) 1472 return -ENOMEM; 1473 1474 if (tm_type == TMR_ABORT_TASK) 1475 se_cmd->se_tmr_req->ref_task_tag = tag; 1476 1477 /* See target_submit_cmd for commentary */ 1478 ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); 1479 if (ret) { 1480 core_tmr_release_req(se_cmd->se_tmr_req); 1481 return ret; 1482 } 1483 1484 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); 1485 if (ret) { 1486 /* 1487 * For callback during failure handling, push this work off 1488 * to process context with TMR_LUN_DOES_NOT_EXIST status. 1489 */ 1490 INIT_WORK(&se_cmd->work, target_complete_tmr_failure); 1491 schedule_work(&se_cmd->work); 1492 return 0; 1493 } 1494 transport_generic_handle_tmr(se_cmd); 1495 return 0; 1496 } 1497 EXPORT_SYMBOL(target_submit_tmr); 1498 1499 /* 1500 * If the cmd is active, request it to be stopped and sleep until it 1501 * has completed. 
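 *
 * The caller is expected to hold cmd->t_state_lock, taken with the
 * irqsave flags passed in; the lock is dropped across the wait and
 * re-acquired before returning.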
1502 */ 1503 bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) 1504 { 1505 bool was_active = false; 1506 1507 if (cmd->transport_state & CMD_T_BUSY) { 1508 cmd->transport_state |= CMD_T_REQUEST_STOP; 1509 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 1510 1511 pr_debug("cmd %p waiting to complete\n", cmd); 1512 wait_for_completion(&cmd->task_stop_comp); 1513 pr_debug("cmd %p stopped successfully\n", cmd); 1514 1515 spin_lock_irqsave(&cmd->t_state_lock, *flags); 1516 cmd->transport_state &= ~CMD_T_REQUEST_STOP; 1517 cmd->transport_state &= ~CMD_T_BUSY; 1518 was_active = true; 1519 } 1520 1521 return was_active; 1522 } 1523 1524 /* 1525 * Handle SAM-esque emulation for generic transport request failures. 1526 */ 1527 void transport_generic_request_failure(struct se_cmd *cmd, 1528 sense_reason_t sense_reason) 1529 { 1530 int ret = 0; 1531 1532 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" 1533 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), 1534 cmd->t_task_cdb[0]); 1535 pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n", 1536 cmd->se_tfo->get_cmd_state(cmd), 1537 cmd->t_state, sense_reason); 1538 pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", 1539 (cmd->transport_state & CMD_T_ACTIVE) != 0, 1540 (cmd->transport_state & CMD_T_STOP) != 0, 1541 (cmd->transport_state & CMD_T_SENT) != 0); 1542 1543 /* 1544 * For SAM Task Attribute emulation for failed struct se_cmd 1545 */ 1546 transport_complete_task_attr(cmd); 1547 /* 1548 * Handle special case for COMPARE_AND_WRITE failure, where the 1549 * callback is expected to drop the per device ->caw_mutex. 1550 */ 1551 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 1552 cmd->transport_complete_callback) 1553 cmd->transport_complete_callback(cmd); 1554 1555 switch (sense_reason) { 1556 case TCM_NON_EXISTENT_LUN: 1557 case TCM_UNSUPPORTED_SCSI_OPCODE: 1558 case TCM_INVALID_CDB_FIELD: 1559 case TCM_INVALID_PARAMETER_LIST: 1560 case TCM_PARAMETER_LIST_LENGTH_ERROR: 1561 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 1562 case TCM_UNKNOWN_MODE_PAGE: 1563 case TCM_WRITE_PROTECTED: 1564 case TCM_ADDRESS_OUT_OF_RANGE: 1565 case TCM_CHECK_CONDITION_ABORT_CMD: 1566 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1567 case TCM_CHECK_CONDITION_NOT_READY: 1568 break; 1569 case TCM_OUT_OF_RESOURCES: 1570 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1571 break; 1572 case TCM_RESERVATION_CONFLICT: 1573 /* 1574 * No SENSE Data payload for this case, set SCSI Status 1575 * and queue the response to $FABRIC_MOD. 1576 * 1577 * Uses linux/include/scsi/scsi.h SAM status codes defs 1578 */ 1579 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1580 /* 1581 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 1582 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 1583 * CONFLICT STATUS. 
1584 * 1585 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1586 */ 1587 if (cmd->se_sess && 1588 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) 1589 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, 1590 cmd->orig_fe_lun, 0x2C, 1591 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1592 1593 trace_target_cmd_complete(cmd); 1594 ret = cmd->se_tfo-> queue_status(cmd); 1595 if (ret == -EAGAIN || ret == -ENOMEM) 1596 goto queue_full; 1597 goto check_stop; 1598 default: 1599 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 1600 cmd->t_task_cdb[0], sense_reason); 1601 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1602 break; 1603 } 1604 1605 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); 1606 if (ret == -EAGAIN || ret == -ENOMEM) 1607 goto queue_full; 1608 1609 check_stop: 1610 transport_lun_remove_cmd(cmd); 1611 if (!transport_cmd_check_stop_to_fabric(cmd)) 1612 ; 1613 return; 1614 1615 queue_full: 1616 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 1617 transport_handle_queue_full(cmd, cmd->se_dev); 1618 } 1619 EXPORT_SYMBOL(transport_generic_request_failure); 1620 1621 void __target_execute_cmd(struct se_cmd *cmd) 1622 { 1623 sense_reason_t ret; 1624 1625 if (cmd->execute_cmd) { 1626 ret = cmd->execute_cmd(cmd); 1627 if (ret) { 1628 spin_lock_irq(&cmd->t_state_lock); 1629 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); 1630 spin_unlock_irq(&cmd->t_state_lock); 1631 1632 transport_generic_request_failure(cmd, ret); 1633 } 1634 } 1635 } 1636 1637 static bool target_handle_task_attr(struct se_cmd *cmd) 1638 { 1639 struct se_device *dev = cmd->se_dev; 1640 1641 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1642 return false; 1643 1644 /* 1645 * Check for the existence of HEAD_OF_QUEUE, and if true return 1 1646 * to allow the passed struct se_cmd list of tasks to the front of the list. 1647 */ 1648 switch (cmd->sam_task_attr) { 1649 case MSG_HEAD_TAG: 1650 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, " 1651 "se_ordered_id: %u\n", 1652 cmd->t_task_cdb[0], cmd->se_ordered_id); 1653 return false; 1654 case MSG_ORDERED_TAG: 1655 atomic_inc(&dev->dev_ordered_sync); 1656 smp_mb__after_atomic_inc(); 1657 1658 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " 1659 " se_ordered_id: %u\n", 1660 cmd->t_task_cdb[0], cmd->se_ordered_id); 1661 1662 /* 1663 * Execute an ORDERED command if no other older commands 1664 * exist that need to be completed first. 1665 */ 1666 if (!atomic_read(&dev->simple_cmds)) 1667 return false; 1668 break; 1669 default: 1670 /* 1671 * For SIMPLE and UNTAGGED Task Attribute commands 1672 */ 1673 atomic_inc(&dev->simple_cmds); 1674 smp_mb__after_atomic_inc(); 1675 break; 1676 } 1677 1678 if (atomic_read(&dev->dev_ordered_sync) == 0) 1679 return false; 1680 1681 spin_lock(&dev->delayed_cmd_lock); 1682 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); 1683 spin_unlock(&dev->delayed_cmd_lock); 1684 1685 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" 1686 " delayed CMD list, se_ordered_id: %u\n", 1687 cmd->t_task_cdb[0], cmd->sam_task_attr, 1688 cmd->se_ordered_id); 1689 return true; 1690 } 1691 1692 void target_execute_cmd(struct se_cmd *cmd) 1693 { 1694 /* 1695 * If the received CDB has aleady been aborted stop processing it here. 1696 */ 1697 if (transport_check_aborted_status(cmd, 1)) 1698 return; 1699 1700 /* 1701 * Determine if frontend context caller is requesting the stopping of 1702 * this command for frontend exceptions. 
 */
	spin_lock_irq(&cmd->t_state_lock);
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
			__func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irq(&cmd->t_state_lock);
		complete(&cmd->t_transport_stop_comp);
		return;
	}

	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	if (target_handle_task_attr(cmd)) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
		spin_unlock_irq(&cmd->t_state_lock);
		return;
	}

	__target_execute_cmd(cmd);
}
EXPORT_SYMBOL(target_execute_cmd);

/*
 * Process all commands up to the last received ORDERED task attribute which
 * requires another blocking boundary
 */
static void target_restart_delayed_cmds(struct se_device *dev)
{
	for (;;) {
		struct se_cmd *cmd;

		spin_lock(&dev->delayed_cmd_lock);
		if (list_empty(&dev->delayed_cmd_list)) {
			spin_unlock(&dev->delayed_cmd_lock);
			break;
		}

		cmd = list_entry(dev->delayed_cmd_list.next,
				 struct se_cmd, se_delayed_node);
		list_del(&cmd->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		__target_execute_cmd(cmd);

		if (cmd->sam_task_attr == MSG_ORDERED_TAG)
			break;
	}
}

/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
		return;

	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
		atomic_dec(&dev->simple_cmds);
		smp_mb__after_atomic_dec();
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for"
			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		atomic_dec(&dev->dev_ordered_sync);
		smp_mb__after_atomic_dec();

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
	}

	target_restart_delayed_cmds(dev);
}

static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	transport_complete_task_attr(cmd);

	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret)
			goto out;
	}

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->se_cmd_flags & SCF_BIDI) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret < 0)
				break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

out:
	if (ret < 0) {
transport_handle_queue_full(cmd, cmd->se_dev); 1828 return; 1829 } 1830 transport_lun_remove_cmd(cmd); 1831 transport_cmd_check_stop_to_fabric(cmd); 1832 } 1833 1834 static void transport_handle_queue_full( 1835 struct se_cmd *cmd, 1836 struct se_device *dev) 1837 { 1838 spin_lock_irq(&dev->qf_cmd_lock); 1839 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 1840 atomic_inc(&dev->dev_qf_count); 1841 smp_mb__after_atomic_inc(); 1842 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 1843 1844 schedule_work(&cmd->se_dev->qf_work_queue); 1845 } 1846 1847 static void target_complete_ok_work(struct work_struct *work) 1848 { 1849 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 1850 int ret; 1851 1852 /* 1853 * Check if we need to move delayed/dormant tasks from cmds on the 1854 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 1855 * Attribute. 1856 */ 1857 transport_complete_task_attr(cmd); 1858 1859 /* 1860 * Check to schedule QUEUE_FULL work, or execute an existing 1861 * cmd->transport_qf_callback() 1862 */ 1863 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 1864 schedule_work(&cmd->se_dev->qf_work_queue); 1865 1866 /* 1867 * Check if we need to send a sense buffer from 1868 * the struct se_cmd in question. 1869 */ 1870 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 1871 WARN_ON(!cmd->scsi_status); 1872 ret = transport_send_check_condition_and_sense( 1873 cmd, 0, 1); 1874 if (ret == -EAGAIN || ret == -ENOMEM) 1875 goto queue_full; 1876 1877 transport_lun_remove_cmd(cmd); 1878 transport_cmd_check_stop_to_fabric(cmd); 1879 return; 1880 } 1881 /* 1882 * Check for a callback, used by amongst other things 1883 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. 1884 */ 1885 if (cmd->transport_complete_callback) { 1886 sense_reason_t rc; 1887 1888 rc = cmd->transport_complete_callback(cmd); 1889 if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { 1890 return; 1891 } else if (rc) { 1892 ret = transport_send_check_condition_and_sense(cmd, 1893 rc, 0); 1894 if (ret == -EAGAIN || ret == -ENOMEM) 1895 goto queue_full; 1896 1897 transport_lun_remove_cmd(cmd); 1898 transport_cmd_check_stop_to_fabric(cmd); 1899 return; 1900 } 1901 } 1902 1903 switch (cmd->data_direction) { 1904 case DMA_FROM_DEVICE: 1905 spin_lock(&cmd->se_lun->lun_sep_lock); 1906 if (cmd->se_lun->lun_sep) { 1907 cmd->se_lun->lun_sep->sep_stats.tx_data_octets += 1908 cmd->data_length; 1909 } 1910 spin_unlock(&cmd->se_lun->lun_sep_lock); 1911 1912 trace_target_cmd_complete(cmd); 1913 ret = cmd->se_tfo->queue_data_in(cmd); 1914 if (ret == -EAGAIN || ret == -ENOMEM) 1915 goto queue_full; 1916 break; 1917 case DMA_TO_DEVICE: 1918 spin_lock(&cmd->se_lun->lun_sep_lock); 1919 if (cmd->se_lun->lun_sep) { 1920 cmd->se_lun->lun_sep->sep_stats.rx_data_octets += 1921 cmd->data_length; 1922 } 1923 spin_unlock(&cmd->se_lun->lun_sep_lock); 1924 /* 1925 * Check if we need to send READ payload for BIDI-COMMAND 1926 */ 1927 if (cmd->se_cmd_flags & SCF_BIDI) { 1928 spin_lock(&cmd->se_lun->lun_sep_lock); 1929 if (cmd->se_lun->lun_sep) { 1930 cmd->se_lun->lun_sep->sep_stats.tx_data_octets += 1931 cmd->data_length; 1932 } 1933 spin_unlock(&cmd->se_lun->lun_sep_lock); 1934 ret = cmd->se_tfo->queue_data_in(cmd); 1935 if (ret == -EAGAIN || ret == -ENOMEM) 1936 goto queue_full; 1937 break; 1938 } 1939 /* Fall through for DMA_TO_DEVICE */ 1940 case DMA_NONE: 1941 trace_target_cmd_complete(cmd); 1942 ret = cmd->se_tfo->queue_status(cmd); 1943 if (ret == -EAGAIN || ret == -ENOMEM) 1944 goto queue_full; 1945 break; 1946 
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
		" data_direction: %d\n", cmd, cmd->data_direction);
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}

static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}

static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
{
	/*
	 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
	 * emulation, and free + reset pointers if necessary.
	 */
	if (!cmd->t_data_sg_orig)
		return;

	kfree(cmd->t_data_sg);
	cmd->t_data_sg = cmd->t_data_sg_orig;
	cmd->t_data_sg_orig = NULL;
	cmd->t_data_nents = cmd->t_data_nents_orig;
	cmd->t_data_nents_orig = 0;
}

static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
		transport_reset_sgl_orig(cmd);
		return;
	}
	transport_reset_sgl_orig(cmd);

	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}

/**
 * transport_release_cmd - free a command
 * @cmd: command to free
 *
 * This routine unconditionally frees a command; reference counting
 * or list removal must be done by the caller.
 */
static int transport_release_cmd(struct se_cmd *cmd)
{
	BUG_ON(!cmd->se_tfo);

	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		core_tmr_release_req(cmd->se_tmr_req);
	if (cmd->t_task_cdb != cmd->__t_task_cdb)
		kfree(cmd->t_task_cdb);
	/*
	 * If this cmd has been setup with target_get_sess_cmd(), drop
	 * the kref and call ->release_cmd() in kref callback.
	 */
	return target_put_sess_cmd(cmd->se_sess, cmd);
}

/**
 * transport_put_cmd - release a reference to a command
 * @cmd: command to release
 *
 * This routine releases our reference to the command and frees it if possible.
 */
static int transport_put_cmd(struct se_cmd *cmd)
{
	transport_free_pages(cmd);
	return transport_release_cmd(cmd);
}

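/**
 * transport_kmap_data_sg - map a command's data scatterlist into the kernel
 * @cmd: command whose t_data_sg payload should be mapped
 *
 * Returns a virtually contiguous mapping of the command's data buffer:
 * a kmap() of the single backing page for a one-entry scatterlist,
 * otherwise a vmap() of all backing pages.  Returns NULL when the command
 * carries no data or the mapping cannot be set up.  Callers must release
 * the mapping with transport_kunmap_data_sg().
 */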
void *transport_kmap_data_sg(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;
	struct page **pages;
	int i;

	/*
	 * We need to take into account a possible offset here for fabrics like
	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
	 */
	if (!cmd->t_data_nents)
		return NULL;

	BUG_ON(!sg);
	if (cmd->t_data_nents == 1)
		return kmap(sg_page(sg)) + sg->offset;

	/* >1 page. use vmap */
	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
	if (!pages)
		return NULL;

	/* convert sg[] to pages[] */
	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
		pages[i] = sg_page(sg);
	}

	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!cmd->t_data_vmap)
		return NULL;

	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
EXPORT_SYMBOL(transport_kmap_data_sg);

void transport_kunmap_data_sg(struct se_cmd *cmd)
{
	if (!cmd->t_data_nents) {
		return;
	} else if (cmd->t_data_nents == 1) {
		kunmap(sg_page(cmd->t_data_sg));
		return;
	}

	vunmap(cmd->t_data_vmap);
	cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);

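/**
 * target_alloc_sgl - allocate a page-backed scatterlist of a given length
 * @sgl: returned scatterlist array
 * @nents: returned number of scatterlist entries
 * @length: number of bytes to cover, one page per entry
 * @zero_page: if true, allocate the backing pages with __GFP_ZERO
 *
 * Returns 0 on success or -ENOMEM, in which case any partially allocated
 * pages have already been freed again.  The result is typically released
 * through transport_free_sgl() via transport_free_pages().
 */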
int
target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
		 bool zero_page)
{
	struct scatterlist *sg;
	struct page *page;
	gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
	unsigned int nent;
	int i = 0;

	nent = DIV_ROUND_UP(length, PAGE_SIZE);
	sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, nent);

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);
		page = alloc_page(GFP_KERNEL | zero_flag);
		if (!page)
			goto out;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	*sgl = sg;
	*nents = nent;
	return 0;

out:
	while (i > 0) {
		i--;
		__free_page(sg_page(&sg[i]));
	}
	kfree(sg);
	return -ENOMEM;
}

/*
 * Allocate any required resources to execute the command.  For writes we
 * might not have the payload yet, so notify the fabric via a call to
 * ->write_pending instead.  Otherwise place it on the execution queue.
 */
sense_reason_t
transport_generic_new_cmd(struct se_cmd *cmd)
{
	int ret = 0;

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {
		bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);

		if ((cmd->se_cmd_flags & SCF_BIDI) ||
		    (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
			u32 bidi_length;

			if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
				bidi_length = cmd->t_task_nolb *
					      cmd->se_dev->dev_attrib.block_size;
			else
				bidi_length = cmd->data_length;

			ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
					       &cmd->t_bidi_data_nents,
					       bidi_length, zero_flag);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
				       cmd->data_length, zero_flag);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * If this command is not a write we can execute it right here,
	 * for write buffers we need to notify the fabric driver first
	 * and let it call back once the write buffers are ready.
	 */
	target_add_to_state_list(cmd);
	if (cmd->data_direction != DMA_TO_DEVICE) {
		target_execute_cmd(cmd);
		return 0;
	}
	transport_cmd_check_stop(cmd, false, true);

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

	/* fabric drivers should only return -EAGAIN or -ENOMEM as error */
	WARN_ON(ret);

	return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
	transport_handle_queue_full(cmd, cmd->se_dev);
	return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);

static void transport_write_pending_qf(struct se_cmd *cmd)
{
	int ret;

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev);
	}
}

int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	unsigned long flags;
	int ret = 0;

	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
			transport_wait_for_tasks(cmd);

		ret = transport_release_cmd(cmd);
	} else {
		if (wait_for_tasks)
			transport_wait_for_tasks(cmd);
		/*
		 * Handle WRITE failure case where transport_generic_new_cmd()
		 * has already added se_cmd to state_list, but fabric has
		 * failed command before I/O submission.
		 */
		if (cmd->state_active) {
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			target_remove_from_state_list(cmd);
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		}

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

		ret = transport_put_cmd(cmd);
	}
	return ret;
}
EXPORT_SYMBOL(transport_generic_free_cmd);

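/*
 * Per-session command reference tracking
 *
 * target_get_sess_cmd() below adds a descriptor to the session's
 * sess_cmd_list and initializes its kref; target_put_sess_cmd() drops a
 * reference and invokes ->release_cmd() from the kref release callback.
 * When ack_kref is set the descriptor carries two references, so both the
 * normal completion path (via transport_generic_free_cmd() ->
 * transport_release_cmd()) and the fabric acknowledgement path must call
 * target_put_sess_cmd() before the descriptor is released.
 */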
2261 */ 2262 if (ack_kref == true) { 2263 kref_get(&se_cmd->cmd_kref); 2264 se_cmd->se_cmd_flags |= SCF_ACK_KREF; 2265 } 2266 2267 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2268 if (se_sess->sess_tearing_down) { 2269 ret = -ESHUTDOWN; 2270 goto out; 2271 } 2272 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2273 out: 2274 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2275 return ret; 2276 } 2277 EXPORT_SYMBOL(target_get_sess_cmd); 2278 2279 static void target_release_cmd_kref(struct kref *kref) 2280 { 2281 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2282 struct se_session *se_sess = se_cmd->se_sess; 2283 2284 if (list_empty(&se_cmd->se_cmd_list)) { 2285 spin_unlock(&se_sess->sess_cmd_lock); 2286 se_cmd->se_tfo->release_cmd(se_cmd); 2287 return; 2288 } 2289 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { 2290 spin_unlock(&se_sess->sess_cmd_lock); 2291 complete(&se_cmd->cmd_wait_comp); 2292 return; 2293 } 2294 list_del(&se_cmd->se_cmd_list); 2295 spin_unlock(&se_sess->sess_cmd_lock); 2296 2297 se_cmd->se_tfo->release_cmd(se_cmd); 2298 } 2299 2300 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put 2301 * @se_sess: session to reference 2302 * @se_cmd: command descriptor to drop 2303 */ 2304 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) 2305 { 2306 return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref, 2307 &se_sess->sess_cmd_lock); 2308 } 2309 EXPORT_SYMBOL(target_put_sess_cmd); 2310 2311 /* target_sess_cmd_list_set_waiting - Flag all commands in 2312 * sess_cmd_list to complete cmd_wait_comp. Set 2313 * sess_tearing_down so no more commands are queued. 2314 * @se_sess: session to flag 2315 */ 2316 void target_sess_cmd_list_set_waiting(struct se_session *se_sess) 2317 { 2318 struct se_cmd *se_cmd; 2319 unsigned long flags; 2320 2321 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2322 if (se_sess->sess_tearing_down) { 2323 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2324 return; 2325 } 2326 se_sess->sess_tearing_down = 1; 2327 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); 2328 2329 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) 2330 se_cmd->cmd_wait_set = 1; 2331 2332 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2333 } 2334 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); 2335 2336 /* target_wait_for_sess_cmds - Wait for outstanding descriptors 2337 * @se_sess: session to wait for active I/O 2338 */ 2339 void target_wait_for_sess_cmds(struct se_session *se_sess) 2340 { 2341 struct se_cmd *se_cmd, *tmp_cmd; 2342 unsigned long flags; 2343 2344 list_for_each_entry_safe(se_cmd, tmp_cmd, 2345 &se_sess->sess_wait_list, se_cmd_list) { 2346 list_del(&se_cmd->se_cmd_list); 2347 2348 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" 2349 " %d\n", se_cmd, se_cmd->t_state, 2350 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2351 2352 wait_for_completion(&se_cmd->cmd_wait_comp); 2353 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" 2354 " fabric state: %d\n", se_cmd, se_cmd->t_state, 2355 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2356 2357 se_cmd->se_tfo->release_cmd(se_cmd); 2358 } 2359 2360 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2361 WARN_ON(!list_empty(&se_sess->sess_cmd_list)); 2362 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2363 2364 } 2365 EXPORT_SYMBOL(target_wait_for_sess_cmds); 2366 2367 static int transport_clear_lun_ref_thread(void *p) 2368 { 2369 struct se_lun 
static int transport_clear_lun_ref_thread(void *p)
{
	struct se_lun *lun = p;

	percpu_ref_kill(&lun->lun_ref);

	wait_for_completion(&lun->lun_ref_comp);
	complete(&lun->lun_shutdown_comp);

	return 0;
}

int transport_clear_lun_ref(struct se_lun *lun)
{
	struct task_struct *kt;

	kt = kthread_run(transport_clear_lun_ref_thread, lun,
			"tcm_cl_%u", lun->unpacked_lun);
	if (IS_ERR(kt)) {
		pr_err("Unable to start clear_lun thread\n");
		return PTR_ERR(kt);
	}
	wait_for_completion(&lun->lun_shutdown_comp);

	return 0;
}

/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd:	command to wait on
 *
 * Called from frontend fabric context to wait for storage engine
 * to pause and/or release frontend generated struct se_cmd.
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	cmd->transport_state |= CMD_T_STOP;

	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
		" i_state: %d, t_state: %d, CMD_T_STOP\n",
		cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wait_for_completion(&cmd->t_transport_stop_comp);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion("
		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
		cmd->se_tfo->get_task_tag(cmd));

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return true;
}
EXPORT_SYMBOL(transport_wait_for_tasks);

static int transport_get_sense_codes(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	*asc = cmd->scsi_asc;
	*ascq = cmd->scsi_ascq;

	return 0;
}

int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
		sense_reason_t reason, int from_transport)
{
	unsigned char *buffer = cmd->sense_buffer;
	unsigned long flags;
	u8 asc = 0, ascq = 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!reason && from_transport)
		goto after_reason;

	if (!from_transport)
		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;

	/*
	 * Actual SENSE DATA, see SPC-3 7.23.2.  SPC_SENSE_KEY_OFFSET uses
	 * SENSE KEY values from include/scsi/scsi.h
	 */
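	/*
	 * Every case below builds fixed format sense data: response code
	 * 0x70 (current error) with ADDITIONAL SENSE LENGTH 10, i.e. an
	 * 18 byte sense buffer.
	 */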
	switch (reason) {
	case TCM_NO_SENSE:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* Not Ready */
		buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
		/* NO ADDITIONAL SENSE INFORMATION */
		buffer[SPC_ASC_KEY_OFFSET] = 0;
		buffer[SPC_ASCQ_KEY_OFFSET] = 0;
		break;
	case TCM_NON_EXISTENT_LUN:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT NOT SUPPORTED */
		buffer[SPC_ASC_KEY_OFFSET] = 0x25;
		break;
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_SECTOR_COUNT_TOO_MANY:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID COMMAND OPERATION CODE */
		buffer[SPC_ASC_KEY_OFFSET] = 0x20;
		break;
	case TCM_UNKNOWN_MODE_PAGE:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_CHECK_CONDITION_ABORT_CMD:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* BUS DEVICE RESET FUNCTION OCCURRED */
		buffer[SPC_ASC_KEY_OFFSET] = 0x29;
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
		break;
	case TCM_INCORRECT_AMOUNT_OF_DATA:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
		/* NOT ENOUGH UNSOLICITED DATA */
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x0d;
		break;
	case TCM_INVALID_CDB_FIELD:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_INVALID_PARAMETER_LIST:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN PARAMETER LIST */
		buffer[SPC_ASC_KEY_OFFSET] = 0x26;
		break;
	case TCM_PARAMETER_LIST_LENGTH_ERROR:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* PARAMETER LIST LENGTH ERROR */
		buffer[SPC_ASC_KEY_OFFSET] = 0x1a;
		break;
	case TCM_UNEXPECTED_UNSOLICITED_DATA:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
		/* UNEXPECTED_UNSOLICITED_DATA */
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x0c;
		break;
	case TCM_SERVICE_CRC_ERROR:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* PROTOCOL SERVICE CRC ERROR */
		buffer[SPC_ASC_KEY_OFFSET] = 0x47;
		/* N/A */
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x05;
		break;
	case TCM_SNACK_REJECTED:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* READ ERROR */
		buffer[SPC_ASC_KEY_OFFSET] = 0x11;
		/* FAILED RETRANSMISSION REQUEST */
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x13;
		break;
	case TCM_WRITE_PROTECTED:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* DATA PROTECT */
		buffer[SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
		/* WRITE PROTECTED */
		buffer[SPC_ASC_KEY_OFFSET] = 0x27;
		break;
	case TCM_ADDRESS_OUT_OF_RANGE:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL BLOCK ADDRESS OUT OF RANGE */
		buffer[SPC_ASC_KEY_OFFSET] = 0x21;
		break;
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* UNIT ATTENTION */
		buffer[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		buffer[SPC_ASC_KEY_OFFSET] = asc;
		buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* Not Ready */
		buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[SPC_ASC_KEY_OFFSET] = asc;
		buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_MISCOMPARE_VERIFY:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		buffer[SPC_SENSE_KEY_OFFSET] = MISCOMPARE;
		/* MISCOMPARE DURING VERIFY OPERATION */
		buffer[SPC_ASC_KEY_OFFSET] = 0x1d;
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/*
		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
		 * Solaris initiators.  Returning NOT READY instead means the
		 * operations will be retried a finite number of times and we
		 * can survive intermittent errors.
		 */
		buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[SPC_ASC_KEY_OFFSET] = 0x08;
		break;
	}
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

after_reason:
	trace_target_cmd_complete(cmd);
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);

int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	if (!(cmd->transport_state & CMD_T_ABORTED))
		return 0;

	if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
		return 1;

	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
		 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));

	cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
	trace_target_cmd_complete(cmd);
	cmd->se_tfo->queue_status(cmd);

	return 1;
}
EXPORT_SYMBOL(transport_check_aborted_status);

void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			cmd->transport_state |= CMD_T_ABORTED;
			smp_mb__after_atomic_inc();
			return;
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;

	transport_lun_remove_cmd(cmd);

	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
		cmd->se_tfo->get_task_tag(cmd));

	trace_target_cmd_complete(cmd);
	cmd->se_tfo->queue_status(cmd);
}

static void target_tmr_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		core_tmr_abort_task(dev, tmr, cmd->se_sess);
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
				tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
}

int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	INIT_WORK(&cmd->work, target_tmr_work);
	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
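
/*
 * Usage note (illustrative, not a normative interface description): fabric
 * modules typically do not call transport_generic_handle_tmr() directly.
 * The common path is target_submit_tmr(), which initializes the se_cmd and
 * its se_tmr_req and then queues the TMR work here.  The response is
 * delivered back to the fabric asynchronously through ->queue_tm_rsp()
 * once target_tmr_work() above has run on the device's tmr_wq.
 */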