1 /******************************************************************************* 2 * Filename: target_core_transport.c 3 * 4 * This file contains the Generic Target Engine Core. 5 * 6 * (c) Copyright 2002-2013 Datera, Inc. 7 * 8 * Nicholas A. Bellinger <nab@kernel.org> 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2 of the License, or 13 * (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; if not, write to the Free Software 22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 23 * 24 ******************************************************************************/ 25 26 #include <linux/net.h> 27 #include <linux/delay.h> 28 #include <linux/string.h> 29 #include <linux/timer.h> 30 #include <linux/slab.h> 31 #include <linux/spinlock.h> 32 #include <linux/kthread.h> 33 #include <linux/in.h> 34 #include <linux/cdrom.h> 35 #include <linux/module.h> 36 #include <linux/ratelimit.h> 37 #include <asm/unaligned.h> 38 #include <net/sock.h> 39 #include <net/tcp.h> 40 #include <scsi/scsi.h> 41 #include <scsi/scsi_cmnd.h> 42 #include <scsi/scsi_tcq.h> 43 44 #include <target/target_core_base.h> 45 #include <target/target_core_backend.h> 46 #include <target/target_core_fabric.h> 47 #include <target/target_core_configfs.h> 48 49 #include "target_core_internal.h" 50 #include "target_core_alua.h" 51 #include "target_core_pr.h" 52 #include "target_core_ua.h" 53 54 #define CREATE_TRACE_POINTS 55 #include <trace/events/target.h> 56 57 static struct workqueue_struct *target_completion_wq; 58 static struct kmem_cache *se_sess_cache; 59 struct kmem_cache *se_ua_cache; 60 struct kmem_cache *t10_pr_reg_cache; 61 struct kmem_cache *t10_alua_lu_gp_cache; 62 struct kmem_cache *t10_alua_lu_gp_mem_cache; 63 struct kmem_cache *t10_alua_tg_pt_gp_cache; 64 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; 65 66 static void transport_complete_task_attr(struct se_cmd *cmd); 67 static void transport_handle_queue_full(struct se_cmd *cmd, 68 struct se_device *dev); 69 static int transport_put_cmd(struct se_cmd *cmd); 70 static void target_complete_ok_work(struct work_struct *work); 71 72 int init_se_kmem_caches(void) 73 { 74 se_sess_cache = kmem_cache_create("se_sess_cache", 75 sizeof(struct se_session), __alignof__(struct se_session), 76 0, NULL); 77 if (!se_sess_cache) { 78 pr_err("kmem_cache_create() for struct se_session" 79 " failed\n"); 80 goto out; 81 } 82 se_ua_cache = kmem_cache_create("se_ua_cache", 83 sizeof(struct se_ua), __alignof__(struct se_ua), 84 0, NULL); 85 if (!se_ua_cache) { 86 pr_err("kmem_cache_create() for struct se_ua failed\n"); 87 goto out_free_sess_cache; 88 } 89 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", 90 sizeof(struct t10_pr_registration), 91 __alignof__(struct t10_pr_registration), 0, NULL); 92 if (!t10_pr_reg_cache) { 93 pr_err("kmem_cache_create() for struct t10_pr_registration" 94 " failed\n"); 95 goto out_free_ua_cache; 96 } 97 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", 98 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), 99 0, NULL); 100 if (!t10_alua_lu_gp_cache) { 101 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" 102 " failed\n"); 103 goto out_free_pr_reg_cache; 104 } 105 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", 106 sizeof(struct t10_alua_lu_gp_member), 107 __alignof__(struct t10_alua_lu_gp_member), 0, NULL); 108 if (!t10_alua_lu_gp_mem_cache) { 109 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" 110 "cache failed\n"); 111 goto out_free_lu_gp_cache; 112 } 113 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", 114 sizeof(struct t10_alua_tg_pt_gp), 115 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); 116 if (!t10_alua_tg_pt_gp_cache) { 117 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 118 "cache failed\n"); 119 goto out_free_lu_gp_mem_cache; 120 } 121 t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( 122 "t10_alua_tg_pt_gp_mem_cache", 123 sizeof(struct t10_alua_tg_pt_gp_member), 124 __alignof__(struct t10_alua_tg_pt_gp_member), 125 0, NULL); 126 if (!t10_alua_tg_pt_gp_mem_cache) { 127 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 128 "mem_t failed\n"); 129 goto out_free_tg_pt_gp_cache; 130 } 131 132 target_completion_wq = alloc_workqueue("target_completion", 133 WQ_MEM_RECLAIM, 0); 134 if (!target_completion_wq) 135 goto out_free_tg_pt_gp_mem_cache; 136 137 return 0; 138 139 out_free_tg_pt_gp_mem_cache: 140 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 141 out_free_tg_pt_gp_cache: 142 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 143 out_free_lu_gp_mem_cache: 144 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 145 out_free_lu_gp_cache: 146 kmem_cache_destroy(t10_alua_lu_gp_cache); 147 out_free_pr_reg_cache: 148 kmem_cache_destroy(t10_pr_reg_cache); 149 out_free_ua_cache: 150 kmem_cache_destroy(se_ua_cache); 151 out_free_sess_cache: 152 kmem_cache_destroy(se_sess_cache); 153 out: 154 return -ENOMEM; 155 } 156 157 void release_se_kmem_caches(void) 158 { 159 destroy_workqueue(target_completion_wq); 160 kmem_cache_destroy(se_sess_cache); 161 kmem_cache_destroy(se_ua_cache); 162 kmem_cache_destroy(t10_pr_reg_cache); 163 kmem_cache_destroy(t10_alua_lu_gp_cache); 164 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 165 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 166 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 167 } 168 169 /* This code ensures unique mib indexes are handed out. */ 170 static DEFINE_SPINLOCK(scsi_mib_index_lock); 171 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; 172 173 /* 174 * Allocate a new row index for the entry type specified 175 */ 176 u32 scsi_get_new_index(scsi_index_t type) 177 { 178 u32 new_index; 179 180 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); 181 182 spin_lock(&scsi_mib_index_lock); 183 new_index = ++scsi_mib_index[type]; 184 spin_unlock(&scsi_mib_index_lock); 185 186 return new_index; 187 } 188 189 void transport_subsystem_check_init(void) 190 { 191 int ret; 192 static int sub_api_initialized; 193 194 if (sub_api_initialized) 195 return; 196 197 ret = request_module("target_core_iblock"); 198 if (ret != 0) 199 pr_err("Unable to load target_core_iblock\n"); 200 201 ret = request_module("target_core_file"); 202 if (ret != 0) 203 pr_err("Unable to load target_core_file\n"); 204 205 ret = request_module("target_core_pscsi"); 206 if (ret != 0) 207 pr_err("Unable to load target_core_pscsi\n"); 208 209 sub_api_initialized = 1; 210 } 211 212 struct se_session *transport_init_session(void) 213 { 214 struct se_session *se_sess; 215 216 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 217 if (!se_sess) { 218 pr_err("Unable to allocate struct se_session from" 219 " se_sess_cache\n"); 220 return ERR_PTR(-ENOMEM); 221 } 222 INIT_LIST_HEAD(&se_sess->sess_list); 223 INIT_LIST_HEAD(&se_sess->sess_acl_list); 224 INIT_LIST_HEAD(&se_sess->sess_cmd_list); 225 INIT_LIST_HEAD(&se_sess->sess_wait_list); 226 spin_lock_init(&se_sess->sess_cmd_lock); 227 kref_init(&se_sess->sess_kref); 228 229 return se_sess; 230 } 231 EXPORT_SYMBOL(transport_init_session); 232 233 int transport_alloc_session_tags(struct se_session *se_sess, 234 unsigned int tag_num, unsigned int tag_size) 235 { 236 int rc; 237 238 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, GFP_KERNEL); 239 if (!se_sess->sess_cmd_map) { 240 pr_err("Unable to allocate se_sess->sess_cmd_map\n"); 241 return -ENOMEM; 242 } 243 244 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num); 245 if (rc < 0) { 246 pr_err("Unable to init se_sess->sess_tag_pool," 247 " tag_num: %u\n", tag_num); 248 kfree(se_sess->sess_cmd_map); 249 se_sess->sess_cmd_map = NULL; 250 return -ENOMEM; 251 } 252 253 return 0; 254 } 255 EXPORT_SYMBOL(transport_alloc_session_tags); 256 257 struct se_session *transport_init_session_tags(unsigned int tag_num, 258 unsigned int tag_size) 259 { 260 struct se_session *se_sess; 261 int rc; 262 263 se_sess = transport_init_session(); 264 if (IS_ERR(se_sess)) 265 return se_sess; 266 267 rc = transport_alloc_session_tags(se_sess, tag_num, tag_size); 268 if (rc < 0) { 269 transport_free_session(se_sess); 270 return ERR_PTR(-ENOMEM); 271 } 272 273 return se_sess; 274 } 275 EXPORT_SYMBOL(transport_init_session_tags); 276 277 /* 278 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called. 279 */ 280 void __transport_register_session( 281 struct se_portal_group *se_tpg, 282 struct se_node_acl *se_nacl, 283 struct se_session *se_sess, 284 void *fabric_sess_ptr) 285 { 286 unsigned char buf[PR_REG_ISID_LEN]; 287 288 se_sess->se_tpg = se_tpg; 289 se_sess->fabric_sess_ptr = fabric_sess_ptr; 290 /* 291 * Used by struct se_node_acl's under ConfigFS to locate active se_session-t 292 * 293 * Only set for struct se_session's that will actually be moving I/O. 294 * eg: *NOT* discovery sessions. 295 */ 296 if (se_nacl) { 297 /* 298 * If the fabric module supports an ISID based TransportID, 299 * save this value in binary from the fabric I_T Nexus now. 300 */ 301 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { 302 memset(&buf[0], 0, PR_REG_ISID_LEN); 303 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, 304 &buf[0], PR_REG_ISID_LEN); 305 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); 306 } 307 kref_get(&se_nacl->acl_kref); 308 309 spin_lock_irq(&se_nacl->nacl_sess_lock); 310 /* 311 * The se_nacl->nacl_sess pointer will be set to the 312 * last active I_T Nexus for each struct se_node_acl. 313 */ 314 se_nacl->nacl_sess = se_sess; 315 316 list_add_tail(&se_sess->sess_acl_list, 317 &se_nacl->acl_sess_list); 318 spin_unlock_irq(&se_nacl->nacl_sess_lock); 319 } 320 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 321 322 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 323 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); 324 } 325 EXPORT_SYMBOL(__transport_register_session); 326 327 void transport_register_session( 328 struct se_portal_group *se_tpg, 329 struct se_node_acl *se_nacl, 330 struct se_session *se_sess, 331 void *fabric_sess_ptr) 332 { 333 unsigned long flags; 334 335 spin_lock_irqsave(&se_tpg->session_lock, flags); 336 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); 337 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 338 } 339 EXPORT_SYMBOL(transport_register_session); 340 341 static void target_release_session(struct kref *kref) 342 { 343 struct se_session *se_sess = container_of(kref, 344 struct se_session, sess_kref); 345 struct se_portal_group *se_tpg = se_sess->se_tpg; 346 347 se_tpg->se_tpg_tfo->close_session(se_sess); 348 } 349 350 void target_get_session(struct se_session *se_sess) 351 { 352 kref_get(&se_sess->sess_kref); 353 } 354 EXPORT_SYMBOL(target_get_session); 355 356 void target_put_session(struct se_session *se_sess) 357 { 358 struct se_portal_group *tpg = se_sess->se_tpg; 359 360 if (tpg->se_tpg_tfo->put_session != NULL) { 361 tpg->se_tpg_tfo->put_session(se_sess); 362 return; 363 } 364 kref_put(&se_sess->sess_kref, target_release_session); 365 } 366 EXPORT_SYMBOL(target_put_session); 367 368 static void target_complete_nacl(struct kref *kref) 369 { 370 struct se_node_acl *nacl = container_of(kref, 371 struct se_node_acl, acl_kref); 372 373 complete(&nacl->acl_free_comp); 374 } 375 376 void target_put_nacl(struct se_node_acl *nacl) 377 { 378 kref_put(&nacl->acl_kref, target_complete_nacl); 379 } 380 381 void transport_deregister_session_configfs(struct se_session *se_sess) 382 { 383 struct se_node_acl *se_nacl; 384 unsigned long flags; 385 /* 386 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 387 */ 388 se_nacl = se_sess->se_node_acl; 389 if (se_nacl) { 390 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 391 if (se_nacl->acl_stop == 0) 392 list_del(&se_sess->sess_acl_list); 393 /* 394 * If the session list is empty, then clear the pointer. 395 * Otherwise, set the struct se_session pointer from the tail 396 * element of the per struct se_node_acl active session list. 397 */ 398 if (list_empty(&se_nacl->acl_sess_list)) 399 se_nacl->nacl_sess = NULL; 400 else { 401 se_nacl->nacl_sess = container_of( 402 se_nacl->acl_sess_list.prev, 403 struct se_session, sess_acl_list); 404 } 405 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 406 } 407 } 408 EXPORT_SYMBOL(transport_deregister_session_configfs); 409 410 void transport_free_session(struct se_session *se_sess) 411 { 412 if (se_sess->sess_cmd_map) { 413 percpu_ida_destroy(&se_sess->sess_tag_pool); 414 kfree(se_sess->sess_cmd_map); 415 } 416 kmem_cache_free(se_sess_cache, se_sess); 417 } 418 EXPORT_SYMBOL(transport_free_session); 419 420 void transport_deregister_session(struct se_session *se_sess) 421 { 422 struct se_portal_group *se_tpg = se_sess->se_tpg; 423 struct target_core_fabric_ops *se_tfo; 424 struct se_node_acl *se_nacl; 425 unsigned long flags; 426 bool comp_nacl = true; 427 428 if (!se_tpg) { 429 transport_free_session(se_sess); 430 return; 431 } 432 se_tfo = se_tpg->se_tpg_tfo; 433 434 spin_lock_irqsave(&se_tpg->session_lock, flags); 435 list_del(&se_sess->sess_list); 436 se_sess->se_tpg = NULL; 437 se_sess->fabric_sess_ptr = NULL; 438 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 439 440 /* 441 * Determine if we need to do extra work for this initiator node's 442 * struct se_node_acl if it had been previously dynamically generated. 443 */ 444 se_nacl = se_sess->se_node_acl; 445 446 spin_lock_irqsave(&se_tpg->acl_node_lock, flags); 447 if (se_nacl && se_nacl->dynamic_node_acl) { 448 if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 449 list_del(&se_nacl->acl_list); 450 se_tpg->num_node_acls--; 451 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); 452 core_tpg_wait_for_nacl_pr_ref(se_nacl); 453 core_free_device_list_for_node(se_nacl, se_tpg); 454 se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl); 455 456 comp_nacl = false; 457 spin_lock_irqsave(&se_tpg->acl_node_lock, flags); 458 } 459 } 460 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); 461 462 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 463 se_tpg->se_tpg_tfo->get_fabric_name()); 464 /* 465 * If last kref is dropping now for an explict NodeACL, awake sleeping 466 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 467 * removal context. 468 */ 469 if (se_nacl && comp_nacl == true) 470 target_put_nacl(se_nacl); 471 472 transport_free_session(se_sess); 473 } 474 EXPORT_SYMBOL(transport_deregister_session); 475 476 /* 477 * Called with cmd->t_state_lock held. 478 */ 479 static void target_remove_from_state_list(struct se_cmd *cmd) 480 { 481 struct se_device *dev = cmd->se_dev; 482 unsigned long flags; 483 484 if (!dev) 485 return; 486 487 if (cmd->transport_state & CMD_T_BUSY) 488 return; 489 490 spin_lock_irqsave(&dev->execute_task_lock, flags); 491 if (cmd->state_active) { 492 list_del(&cmd->state_list); 493 cmd->state_active = false; 494 } 495 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 496 } 497 498 static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, 499 bool write_pending) 500 { 501 unsigned long flags; 502 503 spin_lock_irqsave(&cmd->t_state_lock, flags); 504 if (write_pending) 505 cmd->t_state = TRANSPORT_WRITE_PENDING; 506 507 if (remove_from_lists) { 508 target_remove_from_state_list(cmd); 509 510 /* 511 * Clear struct se_cmd->se_lun before the handoff to FE. 512 */ 513 cmd->se_lun = NULL; 514 } 515 516 /* 517 * Determine if frontend context caller is requesting the stopping of 518 * this command for frontend exceptions. 519 */ 520 if (cmd->transport_state & CMD_T_STOP) { 521 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", 522 __func__, __LINE__, 523 cmd->se_tfo->get_task_tag(cmd)); 524 525 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 526 527 complete(&cmd->t_transport_stop_comp); 528 return 1; 529 } 530 531 cmd->transport_state &= ~CMD_T_ACTIVE; 532 if (remove_from_lists) { 533 /* 534 * Some fabric modules like tcm_loop can release 535 * their internally allocated I/O reference now and 536 * struct se_cmd now. 537 * 538 * Fabric modules are expected to return '1' here if the 539 * se_cmd being passed is released at this point, 540 * or zero if not being released. 541 */ 542 if (cmd->se_tfo->check_stop_free != NULL) { 543 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 544 return cmd->se_tfo->check_stop_free(cmd); 545 } 546 } 547 548 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 549 return 0; 550 } 551 552 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) 553 { 554 return transport_cmd_check_stop(cmd, true, false); 555 } 556 557 static void transport_lun_remove_cmd(struct se_cmd *cmd) 558 { 559 struct se_lun *lun = cmd->se_lun; 560 561 if (!lun || !cmd->lun_ref_active) 562 return; 563 564 percpu_ref_put(&lun->lun_ref); 565 } 566 567 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 568 { 569 if (transport_cmd_check_stop_to_fabric(cmd)) 570 return; 571 if (remove) 572 transport_put_cmd(cmd); 573 } 574 575 static void target_complete_failure_work(struct work_struct *work) 576 { 577 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 578 579 transport_generic_request_failure(cmd, 580 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 581 } 582 583 /* 584 * Used when asking transport to copy Sense Data from the underlying 585 * Linux/SCSI struct scsi_cmnd 586 */ 587 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) 588 { 589 struct se_device *dev = cmd->se_dev; 590 591 WARN_ON(!cmd->se_lun); 592 593 if (!dev) 594 return NULL; 595 596 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) 597 return NULL; 598 599 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 600 601 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", 602 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); 603 return cmd->sense_buffer; 604 } 605 606 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 607 { 608 struct se_device *dev = cmd->se_dev; 609 int success = scsi_status == GOOD; 610 unsigned long flags; 611 612 cmd->scsi_status = scsi_status; 613 614 615 spin_lock_irqsave(&cmd->t_state_lock, flags); 616 cmd->transport_state &= ~CMD_T_BUSY; 617 618 if (dev && dev->transport->transport_complete) { 619 dev->transport->transport_complete(cmd, 620 cmd->t_data_sg, 621 transport_get_sense_buffer(cmd)); 622 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 623 success = 1; 624 } 625 626 /* 627 * See if we are waiting to complete for an exception condition. 628 */ 629 if (cmd->transport_state & CMD_T_REQUEST_STOP) { 630 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 631 complete(&cmd->task_stop_comp); 632 return; 633 } 634 635 if (!success) 636 cmd->transport_state |= CMD_T_FAILED; 637 638 /* 639 * Check for case where an explict ABORT_TASK has been received 640 * and transport_wait_for_tasks() will be waiting for completion.. 641 */ 642 if (cmd->transport_state & CMD_T_ABORTED && 643 cmd->transport_state & CMD_T_STOP) { 644 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 645 complete(&cmd->t_transport_stop_comp); 646 return; 647 } else if (cmd->transport_state & CMD_T_FAILED) { 648 INIT_WORK(&cmd->work, target_complete_failure_work); 649 } else { 650 INIT_WORK(&cmd->work, target_complete_ok_work); 651 } 652 653 cmd->t_state = TRANSPORT_COMPLETE; 654 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 655 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 656 657 queue_work(target_completion_wq, &cmd->work); 658 } 659 EXPORT_SYMBOL(target_complete_cmd); 660 661 static void target_add_to_state_list(struct se_cmd *cmd) 662 { 663 struct se_device *dev = cmd->se_dev; 664 unsigned long flags; 665 666 spin_lock_irqsave(&dev->execute_task_lock, flags); 667 if (!cmd->state_active) { 668 list_add_tail(&cmd->state_list, &dev->state_list); 669 cmd->state_active = true; 670 } 671 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 672 } 673 674 /* 675 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status 676 */ 677 static void transport_write_pending_qf(struct se_cmd *cmd); 678 static void transport_complete_qf(struct se_cmd *cmd); 679 680 void target_qf_do_work(struct work_struct *work) 681 { 682 struct se_device *dev = container_of(work, struct se_device, 683 qf_work_queue); 684 LIST_HEAD(qf_cmd_list); 685 struct se_cmd *cmd, *cmd_tmp; 686 687 spin_lock_irq(&dev->qf_cmd_lock); 688 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); 689 spin_unlock_irq(&dev->qf_cmd_lock); 690 691 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 692 list_del(&cmd->se_qf_node); 693 atomic_dec(&dev->dev_qf_count); 694 smp_mb__after_atomic_dec(); 695 696 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 697 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 698 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : 699 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" 700 : "UNKNOWN"); 701 702 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 703 transport_write_pending_qf(cmd); 704 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) 705 transport_complete_qf(cmd); 706 } 707 } 708 709 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 710 { 711 switch (cmd->data_direction) { 712 case DMA_NONE: 713 return "NONE"; 714 case DMA_FROM_DEVICE: 715 return "READ"; 716 case DMA_TO_DEVICE: 717 return "WRITE"; 718 case DMA_BIDIRECTIONAL: 719 return "BIDI"; 720 default: 721 break; 722 } 723 724 return "UNKNOWN"; 725 } 726 727 void transport_dump_dev_state( 728 struct se_device *dev, 729 char *b, 730 int *bl) 731 { 732 *bl += sprintf(b + *bl, "Status: "); 733 if (dev->export_count) 734 *bl += sprintf(b + *bl, "ACTIVATED"); 735 else 736 *bl += sprintf(b + *bl, "DEACTIVATED"); 737 738 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); 739 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", 740 dev->dev_attrib.block_size, 741 dev->dev_attrib.hw_max_sectors); 742 *bl += sprintf(b + *bl, " "); 743 } 744 745 void transport_dump_vpd_proto_id( 746 struct t10_vpd *vpd, 747 unsigned char *p_buf, 748 int p_buf_len) 749 { 750 unsigned char buf[VPD_TMP_BUF_SIZE]; 751 int len; 752 753 memset(buf, 0, VPD_TMP_BUF_SIZE); 754 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 755 756 switch (vpd->protocol_identifier) { 757 case 0x00: 758 sprintf(buf+len, "Fibre Channel\n"); 759 break; 760 case 0x10: 761 sprintf(buf+len, "Parallel SCSI\n"); 762 break; 763 case 0x20: 764 sprintf(buf+len, "SSA\n"); 765 break; 766 case 0x30: 767 sprintf(buf+len, "IEEE 1394\n"); 768 break; 769 case 0x40: 770 sprintf(buf+len, "SCSI Remote Direct Memory Access" 771 " Protocol\n"); 772 break; 773 case 0x50: 774 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 775 break; 776 case 0x60: 777 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 778 break; 779 case 0x70: 780 sprintf(buf+len, "Automation/Drive Interface Transport" 781 " Protocol\n"); 782 break; 783 case 0x80: 784 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 785 break; 786 default: 787 sprintf(buf+len, "Unknown 0x%02x\n", 788 vpd->protocol_identifier); 789 break; 790 } 791 792 if (p_buf) 793 strncpy(p_buf, buf, p_buf_len); 794 else 795 pr_debug("%s", buf); 796 } 797 798 void 799 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 800 { 801 /* 802 * Check if the Protocol Identifier Valid (PIV) bit is set.. 803 * 804 * from spc3r23.pdf section 7.5.1 805 */ 806 if (page_83[1] & 0x80) { 807 vpd->protocol_identifier = (page_83[0] & 0xf0); 808 vpd->protocol_identifier_set = 1; 809 transport_dump_vpd_proto_id(vpd, NULL, 0); 810 } 811 } 812 EXPORT_SYMBOL(transport_set_vpd_proto_id); 813 814 int transport_dump_vpd_assoc( 815 struct t10_vpd *vpd, 816 unsigned char *p_buf, 817 int p_buf_len) 818 { 819 unsigned char buf[VPD_TMP_BUF_SIZE]; 820 int ret = 0; 821 int len; 822 823 memset(buf, 0, VPD_TMP_BUF_SIZE); 824 len = sprintf(buf, "T10 VPD Identifier Association: "); 825 826 switch (vpd->association) { 827 case 0x00: 828 sprintf(buf+len, "addressed logical unit\n"); 829 break; 830 case 0x10: 831 sprintf(buf+len, "target port\n"); 832 break; 833 case 0x20: 834 sprintf(buf+len, "SCSI target device\n"); 835 break; 836 default: 837 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); 838 ret = -EINVAL; 839 break; 840 } 841 842 if (p_buf) 843 strncpy(p_buf, buf, p_buf_len); 844 else 845 pr_debug("%s", buf); 846 847 return ret; 848 } 849 850 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) 851 { 852 /* 853 * The VPD identification association.. 854 * 855 * from spc3r23.pdf Section 7.6.3.1 Table 297 856 */ 857 vpd->association = (page_83[1] & 0x30); 858 return transport_dump_vpd_assoc(vpd, NULL, 0); 859 } 860 EXPORT_SYMBOL(transport_set_vpd_assoc); 861 862 int transport_dump_vpd_ident_type( 863 struct t10_vpd *vpd, 864 unsigned char *p_buf, 865 int p_buf_len) 866 { 867 unsigned char buf[VPD_TMP_BUF_SIZE]; 868 int ret = 0; 869 int len; 870 871 memset(buf, 0, VPD_TMP_BUF_SIZE); 872 len = sprintf(buf, "T10 VPD Identifier Type: "); 873 874 switch (vpd->device_identifier_type) { 875 case 0x00: 876 sprintf(buf+len, "Vendor specific\n"); 877 break; 878 case 0x01: 879 sprintf(buf+len, "T10 Vendor ID based\n"); 880 break; 881 case 0x02: 882 sprintf(buf+len, "EUI-64 based\n"); 883 break; 884 case 0x03: 885 sprintf(buf+len, "NAA\n"); 886 break; 887 case 0x04: 888 sprintf(buf+len, "Relative target port identifier\n"); 889 break; 890 case 0x08: 891 sprintf(buf+len, "SCSI name string\n"); 892 break; 893 default: 894 sprintf(buf+len, "Unsupported: 0x%02x\n", 895 vpd->device_identifier_type); 896 ret = -EINVAL; 897 break; 898 } 899 900 if (p_buf) { 901 if (p_buf_len < strlen(buf)+1) 902 return -EINVAL; 903 strncpy(p_buf, buf, p_buf_len); 904 } else { 905 pr_debug("%s", buf); 906 } 907 908 return ret; 909 } 910 911 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) 912 { 913 /* 914 * The VPD identifier type.. 915 * 916 * from spc3r23.pdf Section 7.6.3.1 Table 298 917 */ 918 vpd->device_identifier_type = (page_83[1] & 0x0f); 919 return transport_dump_vpd_ident_type(vpd, NULL, 0); 920 } 921 EXPORT_SYMBOL(transport_set_vpd_ident_type); 922 923 int transport_dump_vpd_ident( 924 struct t10_vpd *vpd, 925 unsigned char *p_buf, 926 int p_buf_len) 927 { 928 unsigned char buf[VPD_TMP_BUF_SIZE]; 929 int ret = 0; 930 931 memset(buf, 0, VPD_TMP_BUF_SIZE); 932 933 switch (vpd->device_identifier_code_set) { 934 case 0x01: /* Binary */ 935 snprintf(buf, sizeof(buf), 936 "T10 VPD Binary Device Identifier: %s\n", 937 &vpd->device_identifier[0]); 938 break; 939 case 0x02: /* ASCII */ 940 snprintf(buf, sizeof(buf), 941 "T10 VPD ASCII Device Identifier: %s\n", 942 &vpd->device_identifier[0]); 943 break; 944 case 0x03: /* UTF-8 */ 945 snprintf(buf, sizeof(buf), 946 "T10 VPD UTF-8 Device Identifier: %s\n", 947 &vpd->device_identifier[0]); 948 break; 949 default: 950 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" 951 " 0x%02x", vpd->device_identifier_code_set); 952 ret = -EINVAL; 953 break; 954 } 955 956 if (p_buf) 957 strncpy(p_buf, buf, p_buf_len); 958 else 959 pr_debug("%s", buf); 960 961 return ret; 962 } 963 964 int 965 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) 966 { 967 static const char hex_str[] = "0123456789abcdef"; 968 int j = 0, i = 4; /* offset to start of the identifier */ 969 970 /* 971 * The VPD Code Set (encoding) 972 * 973 * from spc3r23.pdf Section 7.6.3.1 Table 296 974 */ 975 vpd->device_identifier_code_set = (page_83[0] & 0x0f); 976 switch (vpd->device_identifier_code_set) { 977 case 0x01: /* Binary */ 978 vpd->device_identifier[j++] = 979 hex_str[vpd->device_identifier_type]; 980 while (i < (4 + page_83[3])) { 981 vpd->device_identifier[j++] = 982 hex_str[(page_83[i] & 0xf0) >> 4]; 983 vpd->device_identifier[j++] = 984 hex_str[page_83[i] & 0x0f]; 985 i++; 986 } 987 break; 988 case 0x02: /* ASCII */ 989 case 0x03: /* UTF-8 */ 990 while (i < (4 + page_83[3])) 991 vpd->device_identifier[j++] = page_83[i++]; 992 break; 993 default: 994 break; 995 } 996 997 return transport_dump_vpd_ident(vpd, NULL, 0); 998 } 999 EXPORT_SYMBOL(transport_set_vpd_ident); 1000 1001 sense_reason_t 1002 target_cmd_size_check(struct se_cmd *cmd, unsigned int size) 1003 { 1004 struct se_device *dev = cmd->se_dev; 1005 1006 if (cmd->unknown_data_length) { 1007 cmd->data_length = size; 1008 } else if (size != cmd->data_length) { 1009 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" 1010 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 1011 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 1012 cmd->data_length, size, cmd->t_task_cdb[0]); 1013 1014 if (cmd->data_direction == DMA_TO_DEVICE) { 1015 pr_err("Rejecting underflow/overflow" 1016 " WRITE data\n"); 1017 return TCM_INVALID_CDB_FIELD; 1018 } 1019 /* 1020 * Reject READ_* or WRITE_* with overflow/underflow for 1021 * type SCF_SCSI_DATA_CDB. 1022 */ 1023 if (dev->dev_attrib.block_size != 512) { 1024 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 1025 " CDB on non 512-byte sector setup subsystem" 1026 " plugin: %s\n", dev->transport->name); 1027 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ 1028 return TCM_INVALID_CDB_FIELD; 1029 } 1030 /* 1031 * For the overflow case keep the existing fabric provided 1032 * ->data_length. Otherwise for the underflow case, reset 1033 * ->data_length to the smaller SCSI expected data transfer 1034 * length. 1035 */ 1036 if (size > cmd->data_length) { 1037 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; 1038 cmd->residual_count = (size - cmd->data_length); 1039 } else { 1040 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1041 cmd->residual_count = (cmd->data_length - size); 1042 cmd->data_length = size; 1043 } 1044 } 1045 1046 return 0; 1047 1048 } 1049 1050 /* 1051 * Used by fabric modules containing a local struct se_cmd within their 1052 * fabric dependent per I/O descriptor. 1053 */ 1054 void transport_init_se_cmd( 1055 struct se_cmd *cmd, 1056 struct target_core_fabric_ops *tfo, 1057 struct se_session *se_sess, 1058 u32 data_length, 1059 int data_direction, 1060 int task_attr, 1061 unsigned char *sense_buffer) 1062 { 1063 INIT_LIST_HEAD(&cmd->se_delayed_node); 1064 INIT_LIST_HEAD(&cmd->se_qf_node); 1065 INIT_LIST_HEAD(&cmd->se_cmd_list); 1066 INIT_LIST_HEAD(&cmd->state_list); 1067 init_completion(&cmd->t_transport_stop_comp); 1068 init_completion(&cmd->cmd_wait_comp); 1069 init_completion(&cmd->task_stop_comp); 1070 spin_lock_init(&cmd->t_state_lock); 1071 cmd->transport_state = CMD_T_DEV_ACTIVE; 1072 1073 cmd->se_tfo = tfo; 1074 cmd->se_sess = se_sess; 1075 cmd->data_length = data_length; 1076 cmd->data_direction = data_direction; 1077 cmd->sam_task_attr = task_attr; 1078 cmd->sense_buffer = sense_buffer; 1079 1080 cmd->state_active = false; 1081 } 1082 EXPORT_SYMBOL(transport_init_se_cmd); 1083 1084 static sense_reason_t 1085 transport_check_alloc_task_attr(struct se_cmd *cmd) 1086 { 1087 struct se_device *dev = cmd->se_dev; 1088 1089 /* 1090 * Check if SAM Task Attribute emulation is enabled for this 1091 * struct se_device storage object 1092 */ 1093 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1094 return 0; 1095 1096 if (cmd->sam_task_attr == MSG_ACA_TAG) { 1097 pr_debug("SAM Task Attribute ACA" 1098 " emulation is not supported\n"); 1099 return TCM_INVALID_CDB_FIELD; 1100 } 1101 /* 1102 * Used to determine when ORDERED commands should go from 1103 * Dormant to Active status. 1104 */ 1105 cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id); 1106 smp_mb__after_atomic_inc(); 1107 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", 1108 cmd->se_ordered_id, cmd->sam_task_attr, 1109 dev->transport->name); 1110 return 0; 1111 } 1112 1113 sense_reason_t 1114 target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) 1115 { 1116 struct se_device *dev = cmd->se_dev; 1117 sense_reason_t ret; 1118 1119 /* 1120 * Ensure that the received CDB is less than the max (252 + 8) bytes 1121 * for VARIABLE_LENGTH_CMD 1122 */ 1123 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { 1124 pr_err("Received SCSI CDB with command_size: %d that" 1125 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1126 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1127 return TCM_INVALID_CDB_FIELD; 1128 } 1129 /* 1130 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, 1131 * allocate the additional extended CDB buffer now.. Otherwise 1132 * setup the pointer from __t_task_cdb to t_task_cdb. 1133 */ 1134 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { 1135 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), 1136 GFP_KERNEL); 1137 if (!cmd->t_task_cdb) { 1138 pr_err("Unable to allocate cmd->t_task_cdb" 1139 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1140 scsi_command_size(cdb), 1141 (unsigned long)sizeof(cmd->__t_task_cdb)); 1142 return TCM_OUT_OF_RESOURCES; 1143 } 1144 } else 1145 cmd->t_task_cdb = &cmd->__t_task_cdb[0]; 1146 /* 1147 * Copy the original CDB into cmd-> 1148 */ 1149 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); 1150 1151 trace_target_sequencer_start(cmd); 1152 1153 /* 1154 * Check for an existing UNIT ATTENTION condition 1155 */ 1156 ret = target_scsi3_ua_check(cmd); 1157 if (ret) 1158 return ret; 1159 1160 ret = target_alua_state_check(cmd); 1161 if (ret) 1162 return ret; 1163 1164 ret = target_check_reservation(cmd); 1165 if (ret) { 1166 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1167 return ret; 1168 } 1169 1170 ret = dev->transport->parse_cdb(cmd); 1171 if (ret) 1172 return ret; 1173 1174 ret = transport_check_alloc_task_attr(cmd); 1175 if (ret) 1176 return ret; 1177 1178 cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 1179 1180 spin_lock(&cmd->se_lun->lun_sep_lock); 1181 if (cmd->se_lun->lun_sep) 1182 cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; 1183 spin_unlock(&cmd->se_lun->lun_sep_lock); 1184 return 0; 1185 } 1186 EXPORT_SYMBOL(target_setup_cmd_from_cdb); 1187 1188 /* 1189 * Used by fabric module frontends to queue tasks directly. 1190 * Many only be used from process context only 1191 */ 1192 int transport_handle_cdb_direct( 1193 struct se_cmd *cmd) 1194 { 1195 sense_reason_t ret; 1196 1197 if (!cmd->se_lun) { 1198 dump_stack(); 1199 pr_err("cmd->se_lun is NULL\n"); 1200 return -EINVAL; 1201 } 1202 if (in_interrupt()) { 1203 dump_stack(); 1204 pr_err("transport_generic_handle_cdb cannot be called" 1205 " from interrupt context\n"); 1206 return -EINVAL; 1207 } 1208 /* 1209 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that 1210 * outstanding descriptors are handled correctly during shutdown via 1211 * transport_wait_for_tasks() 1212 * 1213 * Also, we don't take cmd->t_state_lock here as we only expect 1214 * this to be called for initial descriptor submission. 1215 */ 1216 cmd->t_state = TRANSPORT_NEW_CMD; 1217 cmd->transport_state |= CMD_T_ACTIVE; 1218 1219 /* 1220 * transport_generic_new_cmd() is already handling QUEUE_FULL, 1221 * so follow TRANSPORT_NEW_CMD processing thread context usage 1222 * and call transport_generic_request_failure() if necessary.. 1223 */ 1224 ret = transport_generic_new_cmd(cmd); 1225 if (ret) 1226 transport_generic_request_failure(cmd, ret); 1227 return 0; 1228 } 1229 EXPORT_SYMBOL(transport_handle_cdb_direct); 1230 1231 sense_reason_t 1232 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 1233 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1234 { 1235 if (!sgl || !sgl_count) 1236 return 0; 1237 1238 /* 1239 * Reject SCSI data overflow with map_mem_to_cmd() as incoming 1240 * scatterlists already have been set to follow what the fabric 1241 * passes for the original expected data transfer length. 1242 */ 1243 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1244 pr_warn("Rejecting SCSI DATA overflow for fabric using" 1245 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); 1246 return TCM_INVALID_CDB_FIELD; 1247 } 1248 1249 cmd->t_data_sg = sgl; 1250 cmd->t_data_nents = sgl_count; 1251 1252 if (sgl_bidi && sgl_bidi_count) { 1253 cmd->t_bidi_data_sg = sgl_bidi; 1254 cmd->t_bidi_data_nents = sgl_bidi_count; 1255 } 1256 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1257 return 0; 1258 } 1259 1260 /* 1261 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized 1262 * se_cmd + use pre-allocated SGL memory. 1263 * 1264 * @se_cmd: command descriptor to submit 1265 * @se_sess: associated se_sess for endpoint 1266 * @cdb: pointer to SCSI CDB 1267 * @sense: pointer to SCSI sense buffer 1268 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1269 * @data_length: fabric expected data transfer length 1270 * @task_addr: SAM task attribute 1271 * @data_dir: DMA data direction 1272 * @flags: flags for command submission from target_sc_flags_tables 1273 * @sgl: struct scatterlist memory for unidirectional mapping 1274 * @sgl_count: scatterlist count for unidirectional mapping 1275 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping 1276 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping 1277 * 1278 * Returns non zero to signal active I/O shutdown failure. All other 1279 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1280 * but still return zero here. 1281 * 1282 * This may only be called from process context, and also currently 1283 * assumes internal allocation of fabric payload buffer by target-core. 1284 */ 1285 int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess, 1286 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, 1287 u32 data_length, int task_attr, int data_dir, int flags, 1288 struct scatterlist *sgl, u32 sgl_count, 1289 struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1290 { 1291 struct se_portal_group *se_tpg; 1292 sense_reason_t rc; 1293 int ret; 1294 1295 se_tpg = se_sess->se_tpg; 1296 BUG_ON(!se_tpg); 1297 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); 1298 BUG_ON(in_interrupt()); 1299 /* 1300 * Initialize se_cmd for target operation. From this point 1301 * exceptions are handled by sending exception status via 1302 * target_core_fabric_ops->queue_status() callback 1303 */ 1304 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1305 data_length, data_dir, task_attr, sense); 1306 if (flags & TARGET_SCF_UNKNOWN_SIZE) 1307 se_cmd->unknown_data_length = 1; 1308 /* 1309 * Obtain struct se_cmd->cmd_kref reference and add new cmd to 1310 * se_sess->sess_cmd_list. A second kref_get here is necessary 1311 * for fabrics using TARGET_SCF_ACK_KREF that expect a second 1312 * kref_put() to happen during fabric packet acknowledgement. 1313 */ 1314 ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); 1315 if (ret) 1316 return ret; 1317 /* 1318 * Signal bidirectional data payloads to target-core 1319 */ 1320 if (flags & TARGET_SCF_BIDI_OP) 1321 se_cmd->se_cmd_flags |= SCF_BIDI; 1322 /* 1323 * Locate se_lun pointer and attach it to struct se_cmd 1324 */ 1325 rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun); 1326 if (rc) { 1327 transport_send_check_condition_and_sense(se_cmd, rc, 0); 1328 target_put_sess_cmd(se_sess, se_cmd); 1329 return 0; 1330 } 1331 1332 rc = target_setup_cmd_from_cdb(se_cmd, cdb); 1333 if (rc != 0) { 1334 transport_generic_request_failure(se_cmd, rc); 1335 return 0; 1336 } 1337 /* 1338 * When a non zero sgl_count has been passed perform SGL passthrough 1339 * mapping for pre-allocated fabric memory instead of having target 1340 * core perform an internal SGL allocation.. 1341 */ 1342 if (sgl_count != 0) { 1343 BUG_ON(!sgl); 1344 1345 /* 1346 * A work-around for tcm_loop as some userspace code via 1347 * scsi-generic do not memset their associated read buffers, 1348 * so go ahead and do that here for type non-data CDBs. Also 1349 * note that this is currently guaranteed to be a single SGL 1350 * for this case by target core in target_setup_cmd_from_cdb() 1351 * -> transport_generic_cmd_sequencer(). 1352 */ 1353 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && 1354 se_cmd->data_direction == DMA_FROM_DEVICE) { 1355 unsigned char *buf = NULL; 1356 1357 if (sgl) 1358 buf = kmap(sg_page(sgl)) + sgl->offset; 1359 1360 if (buf) { 1361 memset(buf, 0, sgl->length); 1362 kunmap(sg_page(sgl)); 1363 } 1364 } 1365 1366 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count, 1367 sgl_bidi, sgl_bidi_count); 1368 if (rc != 0) { 1369 transport_generic_request_failure(se_cmd, rc); 1370 return 0; 1371 } 1372 } 1373 /* 1374 * Check if we need to delay processing because of ALUA 1375 * Active/NonOptimized primary access state.. 1376 */ 1377 core_alua_check_nonop_delay(se_cmd); 1378 1379 transport_handle_cdb_direct(se_cmd); 1380 return 0; 1381 } 1382 EXPORT_SYMBOL(target_submit_cmd_map_sgls); 1383 1384 /* 1385 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd 1386 * 1387 * @se_cmd: command descriptor to submit 1388 * @se_sess: associated se_sess for endpoint 1389 * @cdb: pointer to SCSI CDB 1390 * @sense: pointer to SCSI sense buffer 1391 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1392 * @data_length: fabric expected data transfer length 1393 * @task_addr: SAM task attribute 1394 * @data_dir: DMA data direction 1395 * @flags: flags for command submission from target_sc_flags_tables 1396 * 1397 * Returns non zero to signal active I/O shutdown failure. All other 1398 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1399 * but still return zero here. 1400 * 1401 * This may only be called from process context, and also currently 1402 * assumes internal allocation of fabric payload buffer by target-core. 1403 * 1404 * It also assumes interal target core SGL memory allocation. 1405 */ 1406 int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, 1407 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, 1408 u32 data_length, int task_attr, int data_dir, int flags) 1409 { 1410 return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, 1411 unpacked_lun, data_length, task_attr, data_dir, 1412 flags, NULL, 0, NULL, 0); 1413 } 1414 EXPORT_SYMBOL(target_submit_cmd); 1415 1416 static void target_complete_tmr_failure(struct work_struct *work) 1417 { 1418 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); 1419 1420 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; 1421 se_cmd->se_tfo->queue_tm_rsp(se_cmd); 1422 1423 transport_cmd_check_stop_to_fabric(se_cmd); 1424 } 1425 1426 /** 1427 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd 1428 * for TMR CDBs 1429 * 1430 * @se_cmd: command descriptor to submit 1431 * @se_sess: associated se_sess for endpoint 1432 * @sense: pointer to SCSI sense buffer 1433 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1434 * @fabric_context: fabric context for TMR req 1435 * @tm_type: Type of TM request 1436 * @gfp: gfp type for caller 1437 * @tag: referenced task tag for TMR_ABORT_TASK 1438 * @flags: submit cmd flags 1439 * 1440 * Callable from all contexts. 1441 **/ 1442 1443 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 1444 unsigned char *sense, u32 unpacked_lun, 1445 void *fabric_tmr_ptr, unsigned char tm_type, 1446 gfp_t gfp, unsigned int tag, int flags) 1447 { 1448 struct se_portal_group *se_tpg; 1449 int ret; 1450 1451 se_tpg = se_sess->se_tpg; 1452 BUG_ON(!se_tpg); 1453 1454 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1455 0, DMA_NONE, MSG_SIMPLE_TAG, sense); 1456 /* 1457 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req 1458 * allocation failure. 1459 */ 1460 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); 1461 if (ret < 0) 1462 return -ENOMEM; 1463 1464 if (tm_type == TMR_ABORT_TASK) 1465 se_cmd->se_tmr_req->ref_task_tag = tag; 1466 1467 /* See target_submit_cmd for commentary */ 1468 ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); 1469 if (ret) { 1470 core_tmr_release_req(se_cmd->se_tmr_req); 1471 return ret; 1472 } 1473 1474 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); 1475 if (ret) { 1476 /* 1477 * For callback during failure handling, push this work off 1478 * to process context with TMR_LUN_DOES_NOT_EXIST status. 1479 */ 1480 INIT_WORK(&se_cmd->work, target_complete_tmr_failure); 1481 schedule_work(&se_cmd->work); 1482 return 0; 1483 } 1484 transport_generic_handle_tmr(se_cmd); 1485 return 0; 1486 } 1487 EXPORT_SYMBOL(target_submit_tmr); 1488 1489 /* 1490 * If the cmd is active, request it to be stopped and sleep until it 1491 * has completed. 1492 */ 1493 bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) 1494 { 1495 bool was_active = false; 1496 1497 if (cmd->transport_state & CMD_T_BUSY) { 1498 cmd->transport_state |= CMD_T_REQUEST_STOP; 1499 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 1500 1501 pr_debug("cmd %p waiting to complete\n", cmd); 1502 wait_for_completion(&cmd->task_stop_comp); 1503 pr_debug("cmd %p stopped successfully\n", cmd); 1504 1505 spin_lock_irqsave(&cmd->t_state_lock, *flags); 1506 cmd->transport_state &= ~CMD_T_REQUEST_STOP; 1507 cmd->transport_state &= ~CMD_T_BUSY; 1508 was_active = true; 1509 } 1510 1511 return was_active; 1512 } 1513 1514 /* 1515 * Handle SAM-esque emulation for generic transport request failures. 1516 */ 1517 void transport_generic_request_failure(struct se_cmd *cmd, 1518 sense_reason_t sense_reason) 1519 { 1520 int ret = 0; 1521 1522 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" 1523 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), 1524 cmd->t_task_cdb[0]); 1525 pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n", 1526 cmd->se_tfo->get_cmd_state(cmd), 1527 cmd->t_state, sense_reason); 1528 pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", 1529 (cmd->transport_state & CMD_T_ACTIVE) != 0, 1530 (cmd->transport_state & CMD_T_STOP) != 0, 1531 (cmd->transport_state & CMD_T_SENT) != 0); 1532 1533 /* 1534 * For SAM Task Attribute emulation for failed struct se_cmd 1535 */ 1536 transport_complete_task_attr(cmd); 1537 /* 1538 * Handle special case for COMPARE_AND_WRITE failure, where the 1539 * callback is expected to drop the per device ->caw_mutex. 1540 */ 1541 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 1542 cmd->transport_complete_callback) 1543 cmd->transport_complete_callback(cmd); 1544 1545 switch (sense_reason) { 1546 case TCM_NON_EXISTENT_LUN: 1547 case TCM_UNSUPPORTED_SCSI_OPCODE: 1548 case TCM_INVALID_CDB_FIELD: 1549 case TCM_INVALID_PARAMETER_LIST: 1550 case TCM_PARAMETER_LIST_LENGTH_ERROR: 1551 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 1552 case TCM_UNKNOWN_MODE_PAGE: 1553 case TCM_WRITE_PROTECTED: 1554 case TCM_ADDRESS_OUT_OF_RANGE: 1555 case TCM_CHECK_CONDITION_ABORT_CMD: 1556 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1557 case TCM_CHECK_CONDITION_NOT_READY: 1558 break; 1559 case TCM_OUT_OF_RESOURCES: 1560 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1561 break; 1562 case TCM_RESERVATION_CONFLICT: 1563 /* 1564 * No SENSE Data payload for this case, set SCSI Status 1565 * and queue the response to $FABRIC_MOD. 1566 * 1567 * Uses linux/include/scsi/scsi.h SAM status codes defs 1568 */ 1569 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1570 /* 1571 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 1572 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 1573 * CONFLICT STATUS. 1574 * 1575 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1576 */ 1577 if (cmd->se_sess && 1578 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) 1579 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, 1580 cmd->orig_fe_lun, 0x2C, 1581 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1582 1583 trace_target_cmd_complete(cmd); 1584 ret = cmd->se_tfo-> queue_status(cmd); 1585 if (ret == -EAGAIN || ret == -ENOMEM) 1586 goto queue_full; 1587 goto check_stop; 1588 default: 1589 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 1590 cmd->t_task_cdb[0], sense_reason); 1591 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1592 break; 1593 } 1594 1595 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); 1596 if (ret == -EAGAIN || ret == -ENOMEM) 1597 goto queue_full; 1598 1599 check_stop: 1600 transport_lun_remove_cmd(cmd); 1601 if (!transport_cmd_check_stop_to_fabric(cmd)) 1602 ; 1603 return; 1604 1605 queue_full: 1606 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 1607 transport_handle_queue_full(cmd, cmd->se_dev); 1608 } 1609 EXPORT_SYMBOL(transport_generic_request_failure); 1610 1611 void __target_execute_cmd(struct se_cmd *cmd) 1612 { 1613 sense_reason_t ret; 1614 1615 if (cmd->execute_cmd) { 1616 ret = cmd->execute_cmd(cmd); 1617 if (ret) { 1618 spin_lock_irq(&cmd->t_state_lock); 1619 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); 1620 spin_unlock_irq(&cmd->t_state_lock); 1621 1622 transport_generic_request_failure(cmd, ret); 1623 } 1624 } 1625 } 1626 1627 static bool target_handle_task_attr(struct se_cmd *cmd) 1628 { 1629 struct se_device *dev = cmd->se_dev; 1630 1631 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1632 return false; 1633 1634 /* 1635 * Check for the existence of HEAD_OF_QUEUE, and if true return 1 1636 * to allow the passed struct se_cmd list of tasks to the front of the list. 1637 */ 1638 switch (cmd->sam_task_attr) { 1639 case MSG_HEAD_TAG: 1640 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, " 1641 "se_ordered_id: %u\n", 1642 cmd->t_task_cdb[0], cmd->se_ordered_id); 1643 return false; 1644 case MSG_ORDERED_TAG: 1645 atomic_inc(&dev->dev_ordered_sync); 1646 smp_mb__after_atomic_inc(); 1647 1648 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " 1649 " se_ordered_id: %u\n", 1650 cmd->t_task_cdb[0], cmd->se_ordered_id); 1651 1652 /* 1653 * Execute an ORDERED command if no other older commands 1654 * exist that need to be completed first. 1655 */ 1656 if (!atomic_read(&dev->simple_cmds)) 1657 return false; 1658 break; 1659 default: 1660 /* 1661 * For SIMPLE and UNTAGGED Task Attribute commands 1662 */ 1663 atomic_inc(&dev->simple_cmds); 1664 smp_mb__after_atomic_inc(); 1665 break; 1666 } 1667 1668 if (atomic_read(&dev->dev_ordered_sync) == 0) 1669 return false; 1670 1671 spin_lock(&dev->delayed_cmd_lock); 1672 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); 1673 spin_unlock(&dev->delayed_cmd_lock); 1674 1675 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" 1676 " delayed CMD list, se_ordered_id: %u\n", 1677 cmd->t_task_cdb[0], cmd->sam_task_attr, 1678 cmd->se_ordered_id); 1679 return true; 1680 } 1681 1682 void target_execute_cmd(struct se_cmd *cmd) 1683 { 1684 /* 1685 * If the received CDB has aleady been aborted stop processing it here. 1686 */ 1687 if (transport_check_aborted_status(cmd, 1)) 1688 return; 1689 1690 /* 1691 * Determine if frontend context caller is requesting the stopping of 1692 * this command for frontend exceptions. 1693 */ 1694 spin_lock_irq(&cmd->t_state_lock); 1695 if (cmd->transport_state & CMD_T_STOP) { 1696 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", 1697 __func__, __LINE__, 1698 cmd->se_tfo->get_task_tag(cmd)); 1699 1700 spin_unlock_irq(&cmd->t_state_lock); 1701 complete(&cmd->t_transport_stop_comp); 1702 return; 1703 } 1704 1705 cmd->t_state = TRANSPORT_PROCESSING; 1706 cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; 1707 spin_unlock_irq(&cmd->t_state_lock); 1708 1709 if (target_handle_task_attr(cmd)) { 1710 spin_lock_irq(&cmd->t_state_lock); 1711 cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT; 1712 spin_unlock_irq(&cmd->t_state_lock); 1713 return; 1714 } 1715 1716 __target_execute_cmd(cmd); 1717 } 1718 EXPORT_SYMBOL(target_execute_cmd); 1719 1720 /* 1721 * Process all commands up to the last received ORDERED task attribute which 1722 * requires another blocking boundary 1723 */ 1724 static void target_restart_delayed_cmds(struct se_device *dev) 1725 { 1726 for (;;) { 1727 struct se_cmd *cmd; 1728 1729 spin_lock(&dev->delayed_cmd_lock); 1730 if (list_empty(&dev->delayed_cmd_list)) { 1731 spin_unlock(&dev->delayed_cmd_lock); 1732 break; 1733 } 1734 1735 cmd = list_entry(dev->delayed_cmd_list.next, 1736 struct se_cmd, se_delayed_node); 1737 list_del(&cmd->se_delayed_node); 1738 spin_unlock(&dev->delayed_cmd_lock); 1739 1740 __target_execute_cmd(cmd); 1741 1742 if (cmd->sam_task_attr == MSG_ORDERED_TAG) 1743 break; 1744 } 1745 } 1746 1747 /* 1748 * Called from I/O completion to determine which dormant/delayed 1749 * and ordered cmds need to have their tasks added to the execution queue. 1750 */ 1751 static void transport_complete_task_attr(struct se_cmd *cmd) 1752 { 1753 struct se_device *dev = cmd->se_dev; 1754 1755 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1756 return; 1757 1758 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { 1759 atomic_dec(&dev->simple_cmds); 1760 smp_mb__after_atomic_dec(); 1761 dev->dev_cur_ordered_id++; 1762 pr_debug("Incremented dev->dev_cur_ordered_id: %u for" 1763 " SIMPLE: %u\n", dev->dev_cur_ordered_id, 1764 cmd->se_ordered_id); 1765 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { 1766 dev->dev_cur_ordered_id++; 1767 pr_debug("Incremented dev_cur_ordered_id: %u for" 1768 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, 1769 cmd->se_ordered_id); 1770 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 1771 atomic_dec(&dev->dev_ordered_sync); 1772 smp_mb__after_atomic_dec(); 1773 1774 dev->dev_cur_ordered_id++; 1775 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" 1776 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); 1777 } 1778 1779 target_restart_delayed_cmds(dev); 1780 } 1781 1782 static void transport_complete_qf(struct se_cmd *cmd) 1783 { 1784 int ret = 0; 1785 1786 transport_complete_task_attr(cmd); 1787 1788 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 1789 trace_target_cmd_complete(cmd); 1790 ret = cmd->se_tfo->queue_status(cmd); 1791 if (ret) 1792 goto out; 1793 } 1794 1795 switch (cmd->data_direction) { 1796 case DMA_FROM_DEVICE: 1797 trace_target_cmd_complete(cmd); 1798 ret = cmd->se_tfo->queue_data_in(cmd); 1799 break; 1800 case DMA_TO_DEVICE: 1801 if (cmd->se_cmd_flags & SCF_BIDI) { 1802 ret = cmd->se_tfo->queue_data_in(cmd); 1803 if (ret < 0) 1804 break; 1805 } 1806 /* Fall through for DMA_TO_DEVICE */ 1807 case DMA_NONE: 1808 trace_target_cmd_complete(cmd); 1809 ret = cmd->se_tfo->queue_status(cmd); 1810 break; 1811 default: 1812 break; 1813 } 1814 1815 out: 1816 if (ret < 0) { 1817 transport_handle_queue_full(cmd, cmd->se_dev); 1818 return; 1819 } 1820 transport_lun_remove_cmd(cmd); 1821 transport_cmd_check_stop_to_fabric(cmd); 1822 } 1823 1824 static void transport_handle_queue_full( 1825 struct se_cmd *cmd, 1826 struct se_device *dev) 1827 { 1828 spin_lock_irq(&dev->qf_cmd_lock); 1829 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 1830 atomic_inc(&dev->dev_qf_count); 1831 smp_mb__after_atomic_inc(); 1832 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 1833 1834 schedule_work(&cmd->se_dev->qf_work_queue); 1835 } 1836 1837 static void target_complete_ok_work(struct work_struct *work) 1838 { 1839 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 1840 int ret; 1841 1842 /* 1843 * Check if we need to move delayed/dormant tasks from cmds on the 1844 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 1845 * Attribute. 1846 */ 1847 transport_complete_task_attr(cmd); 1848 1849 /* 1850 * Check to schedule QUEUE_FULL work, or execute an existing 1851 * cmd->transport_qf_callback() 1852 */ 1853 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 1854 schedule_work(&cmd->se_dev->qf_work_queue); 1855 1856 /* 1857 * Check if we need to send a sense buffer from 1858 * the struct se_cmd in question. 1859 */ 1860 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 1861 WARN_ON(!cmd->scsi_status); 1862 ret = transport_send_check_condition_and_sense( 1863 cmd, 0, 1); 1864 if (ret == -EAGAIN || ret == -ENOMEM) 1865 goto queue_full; 1866 1867 transport_lun_remove_cmd(cmd); 1868 transport_cmd_check_stop_to_fabric(cmd); 1869 return; 1870 } 1871 /* 1872 * Check for a callback, used by amongst other things 1873 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. 1874 */ 1875 if (cmd->transport_complete_callback) { 1876 sense_reason_t rc; 1877 1878 rc = cmd->transport_complete_callback(cmd); 1879 if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { 1880 return; 1881 } else if (rc) { 1882 ret = transport_send_check_condition_and_sense(cmd, 1883 rc, 0); 1884 if (ret == -EAGAIN || ret == -ENOMEM) 1885 goto queue_full; 1886 1887 transport_lun_remove_cmd(cmd); 1888 transport_cmd_check_stop_to_fabric(cmd); 1889 return; 1890 } 1891 } 1892 1893 switch (cmd->data_direction) { 1894 case DMA_FROM_DEVICE: 1895 spin_lock(&cmd->se_lun->lun_sep_lock); 1896 if (cmd->se_lun->lun_sep) { 1897 cmd->se_lun->lun_sep->sep_stats.tx_data_octets += 1898 cmd->data_length; 1899 } 1900 spin_unlock(&cmd->se_lun->lun_sep_lock); 1901 1902 trace_target_cmd_complete(cmd); 1903 ret = cmd->se_tfo->queue_data_in(cmd); 1904 if (ret == -EAGAIN || ret == -ENOMEM) 1905 goto queue_full; 1906 break; 1907 case DMA_TO_DEVICE: 1908 spin_lock(&cmd->se_lun->lun_sep_lock); 1909 if (cmd->se_lun->lun_sep) { 1910 cmd->se_lun->lun_sep->sep_stats.rx_data_octets += 1911 cmd->data_length; 1912 } 1913 spin_unlock(&cmd->se_lun->lun_sep_lock); 1914 /* 1915 * Check if we need to send READ payload for BIDI-COMMAND 1916 */ 1917 if (cmd->se_cmd_flags & SCF_BIDI) { 1918 spin_lock(&cmd->se_lun->lun_sep_lock); 1919 if (cmd->se_lun->lun_sep) { 1920 cmd->se_lun->lun_sep->sep_stats.tx_data_octets += 1921 cmd->data_length; 1922 } 1923 spin_unlock(&cmd->se_lun->lun_sep_lock); 1924 ret = cmd->se_tfo->queue_data_in(cmd); 1925 if (ret == -EAGAIN || ret == -ENOMEM) 1926 goto queue_full; 1927 break; 1928 } 1929 /* Fall through for DMA_TO_DEVICE */ 1930 case DMA_NONE: 1931 trace_target_cmd_complete(cmd); 1932 ret = cmd->se_tfo->queue_status(cmd); 1933 if (ret == -EAGAIN || ret == -ENOMEM) 1934 goto queue_full; 1935 break; 1936 default: 1937 break; 1938 } 1939 1940 transport_lun_remove_cmd(cmd); 1941 transport_cmd_check_stop_to_fabric(cmd); 1942 return; 1943 1944 queue_full: 1945 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 1946 " data_direction: %d\n", cmd, cmd->data_direction); 1947 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 1948 transport_handle_queue_full(cmd, cmd->se_dev); 1949 } 1950 1951 static inline void transport_free_sgl(struct scatterlist *sgl, int nents) 1952 { 1953 struct scatterlist *sg; 1954 int count; 1955 1956 for_each_sg(sgl, sg, nents, count) 1957 __free_page(sg_page(sg)); 1958 1959 kfree(sgl); 1960 } 1961 1962 static inline void transport_reset_sgl_orig(struct se_cmd *cmd) 1963 { 1964 /* 1965 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE 1966 * emulation, and free + reset pointers if necessary.. 1967 */ 1968 if (!cmd->t_data_sg_orig) 1969 return; 1970 1971 kfree(cmd->t_data_sg); 1972 cmd->t_data_sg = cmd->t_data_sg_orig; 1973 cmd->t_data_sg_orig = NULL; 1974 cmd->t_data_nents = cmd->t_data_nents_orig; 1975 cmd->t_data_nents_orig = 0; 1976 } 1977 1978 static inline void transport_free_pages(struct se_cmd *cmd) 1979 { 1980 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { 1981 transport_reset_sgl_orig(cmd); 1982 return; 1983 } 1984 transport_reset_sgl_orig(cmd); 1985 1986 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 1987 cmd->t_data_sg = NULL; 1988 cmd->t_data_nents = 0; 1989 1990 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 1991 cmd->t_bidi_data_sg = NULL; 1992 cmd->t_bidi_data_nents = 0; 1993 } 1994 1995 /** 1996 * transport_release_cmd - free a command 1997 * @cmd: command to free 1998 * 1999 * This routine unconditionally frees a command, and reference counting 2000 * or list removal must be done in the caller. 2001 */ 2002 static int transport_release_cmd(struct se_cmd *cmd) 2003 { 2004 BUG_ON(!cmd->se_tfo); 2005 2006 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 2007 core_tmr_release_req(cmd->se_tmr_req); 2008 if (cmd->t_task_cdb != cmd->__t_task_cdb) 2009 kfree(cmd->t_task_cdb); 2010 /* 2011 * If this cmd has been setup with target_get_sess_cmd(), drop 2012 * the kref and call ->release_cmd() in kref callback. 2013 */ 2014 return target_put_sess_cmd(cmd->se_sess, cmd); 2015 } 2016 2017 /** 2018 * transport_put_cmd - release a reference to a command 2019 * @cmd: command to release 2020 * 2021 * This routine releases our reference to the command and frees it if possible. 2022 */ 2023 static int transport_put_cmd(struct se_cmd *cmd) 2024 { 2025 transport_free_pages(cmd); 2026 return transport_release_cmd(cmd); 2027 } 2028 2029 void *transport_kmap_data_sg(struct se_cmd *cmd) 2030 { 2031 struct scatterlist *sg = cmd->t_data_sg; 2032 struct page **pages; 2033 int i; 2034 2035 /* 2036 * We need to take into account a possible offset here for fabrics like 2037 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 2038 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 2039 */ 2040 if (!cmd->t_data_nents) 2041 return NULL; 2042 2043 BUG_ON(!sg); 2044 if (cmd->t_data_nents == 1) 2045 return kmap(sg_page(sg)) + sg->offset; 2046 2047 /* >1 page. use vmap */ 2048 pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL); 2049 if (!pages) 2050 return NULL; 2051 2052 /* convert sg[] to pages[] */ 2053 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 2054 pages[i] = sg_page(sg); 2055 } 2056 2057 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 2058 kfree(pages); 2059 if (!cmd->t_data_vmap) 2060 return NULL; 2061 2062 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 2063 } 2064 EXPORT_SYMBOL(transport_kmap_data_sg); 2065 2066 void transport_kunmap_data_sg(struct se_cmd *cmd) 2067 { 2068 if (!cmd->t_data_nents) { 2069 return; 2070 } else if (cmd->t_data_nents == 1) { 2071 kunmap(sg_page(cmd->t_data_sg)); 2072 return; 2073 } 2074 2075 vunmap(cmd->t_data_vmap); 2076 cmd->t_data_vmap = NULL; 2077 } 2078 EXPORT_SYMBOL(transport_kunmap_data_sg); 2079 2080 int 2081 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, 2082 bool zero_page) 2083 { 2084 struct scatterlist *sg; 2085 struct page *page; 2086 gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0; 2087 unsigned int nent; 2088 int i = 0; 2089 2090 nent = DIV_ROUND_UP(length, PAGE_SIZE); 2091 sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL); 2092 if (!sg) 2093 return -ENOMEM; 2094 2095 sg_init_table(sg, nent); 2096 2097 while (length) { 2098 u32 page_len = min_t(u32, length, PAGE_SIZE); 2099 page = alloc_page(GFP_KERNEL | zero_flag); 2100 if (!page) 2101 goto out; 2102 2103 sg_set_page(&sg[i], page, page_len, 0); 2104 length -= page_len; 2105 i++; 2106 } 2107 *sgl = sg; 2108 *nents = nent; 2109 return 0; 2110 2111 out: 2112 while (i > 0) { 2113 i--; 2114 __free_page(sg_page(&sg[i])); 2115 } 2116 kfree(sg); 2117 return -ENOMEM; 2118 } 2119 2120 /* 2121 * Allocate any required resources to execute the command. For writes we 2122 * might not have the payload yet, so notify the fabric via a call to 2123 * ->write_pending instead. Otherwise place it on the execution queue. 2124 */ 2125 sense_reason_t 2126 transport_generic_new_cmd(struct se_cmd *cmd) 2127 { 2128 int ret = 0; 2129 2130 /* 2131 * Determine is the TCM fabric module has already allocated physical 2132 * memory, and is directly calling transport_generic_map_mem_to_cmd() 2133 * beforehand. 2134 */ 2135 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 2136 cmd->data_length) { 2137 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); 2138 2139 if ((cmd->se_cmd_flags & SCF_BIDI) || 2140 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { 2141 u32 bidi_length; 2142 2143 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) 2144 bidi_length = cmd->t_task_nolb * 2145 cmd->se_dev->dev_attrib.block_size; 2146 else 2147 bidi_length = cmd->data_length; 2148 2149 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2150 &cmd->t_bidi_data_nents, 2151 bidi_length, zero_flag); 2152 if (ret < 0) 2153 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2154 } 2155 2156 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2157 cmd->data_length, zero_flag); 2158 if (ret < 0) 2159 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2160 } 2161 /* 2162 * If this command is not a write we can execute it right here, 2163 * for write buffers we need to notify the fabric driver first 2164 * and let it call back once the write buffers are ready. 2165 */ 2166 target_add_to_state_list(cmd); 2167 if (cmd->data_direction != DMA_TO_DEVICE) { 2168 target_execute_cmd(cmd); 2169 return 0; 2170 } 2171 transport_cmd_check_stop(cmd, false, true); 2172 2173 ret = cmd->se_tfo->write_pending(cmd); 2174 if (ret == -EAGAIN || ret == -ENOMEM) 2175 goto queue_full; 2176 2177 /* fabric drivers should only return -EAGAIN or -ENOMEM as error */ 2178 WARN_ON(ret); 2179 2180 return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2181 2182 queue_full: 2183 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 2184 cmd->t_state = TRANSPORT_COMPLETE_QF_WP; 2185 transport_handle_queue_full(cmd, cmd->se_dev); 2186 return 0; 2187 } 2188 EXPORT_SYMBOL(transport_generic_new_cmd); 2189 2190 static void transport_write_pending_qf(struct se_cmd *cmd) 2191 { 2192 int ret; 2193 2194 ret = cmd->se_tfo->write_pending(cmd); 2195 if (ret == -EAGAIN || ret == -ENOMEM) { 2196 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", 2197 cmd); 2198 transport_handle_queue_full(cmd, cmd->se_dev); 2199 } 2200 } 2201 2202 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2203 { 2204 unsigned long flags; 2205 int ret = 0; 2206 2207 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { 2208 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2209 transport_wait_for_tasks(cmd); 2210 2211 ret = transport_release_cmd(cmd); 2212 } else { 2213 if (wait_for_tasks) 2214 transport_wait_for_tasks(cmd); 2215 /* 2216 * Handle WRITE failure case where transport_generic_new_cmd() 2217 * has already added se_cmd to state_list, but fabric has 2218 * failed command before I/O submission. 2219 */ 2220 if (cmd->state_active) { 2221 spin_lock_irqsave(&cmd->t_state_lock, flags); 2222 target_remove_from_state_list(cmd); 2223 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2224 } 2225 2226 if (cmd->se_lun) 2227 transport_lun_remove_cmd(cmd); 2228 2229 ret = transport_put_cmd(cmd); 2230 } 2231 return ret; 2232 } 2233 EXPORT_SYMBOL(transport_generic_free_cmd); 2234 2235 /* target_get_sess_cmd - Add command to active ->sess_cmd_list 2236 * @se_sess: session to reference 2237 * @se_cmd: command descriptor to add 2238 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() 2239 */ 2240 int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, 2241 bool ack_kref) 2242 { 2243 unsigned long flags; 2244 int ret = 0; 2245 2246 kref_init(&se_cmd->cmd_kref); 2247 /* 2248 * Add a second kref if the fabric caller is expecting to handle 2249 * fabric acknowledgement that requires two target_put_sess_cmd() 2250 * invocations before se_cmd descriptor release. 2251 */ 2252 if (ack_kref == true) { 2253 kref_get(&se_cmd->cmd_kref); 2254 se_cmd->se_cmd_flags |= SCF_ACK_KREF; 2255 } 2256 2257 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2258 if (se_sess->sess_tearing_down) { 2259 ret = -ESHUTDOWN; 2260 goto out; 2261 } 2262 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2263 out: 2264 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2265 return ret; 2266 } 2267 EXPORT_SYMBOL(target_get_sess_cmd); 2268 2269 static void target_release_cmd_kref(struct kref *kref) 2270 { 2271 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2272 struct se_session *se_sess = se_cmd->se_sess; 2273 2274 if (list_empty(&se_cmd->se_cmd_list)) { 2275 spin_unlock(&se_sess->sess_cmd_lock); 2276 se_cmd->se_tfo->release_cmd(se_cmd); 2277 return; 2278 } 2279 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { 2280 spin_unlock(&se_sess->sess_cmd_lock); 2281 complete(&se_cmd->cmd_wait_comp); 2282 return; 2283 } 2284 list_del(&se_cmd->se_cmd_list); 2285 spin_unlock(&se_sess->sess_cmd_lock); 2286 2287 se_cmd->se_tfo->release_cmd(se_cmd); 2288 } 2289 2290 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put 2291 * @se_sess: session to reference 2292 * @se_cmd: command descriptor to drop 2293 */ 2294 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) 2295 { 2296 return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref, 2297 &se_sess->sess_cmd_lock); 2298 } 2299 EXPORT_SYMBOL(target_put_sess_cmd); 2300 2301 /* target_sess_cmd_list_set_waiting - Flag all commands in 2302 * sess_cmd_list to complete cmd_wait_comp. Set 2303 * sess_tearing_down so no more commands are queued. 2304 * @se_sess: session to flag 2305 */ 2306 void target_sess_cmd_list_set_waiting(struct se_session *se_sess) 2307 { 2308 struct se_cmd *se_cmd; 2309 unsigned long flags; 2310 2311 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2312 if (se_sess->sess_tearing_down) { 2313 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2314 return; 2315 } 2316 se_sess->sess_tearing_down = 1; 2317 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); 2318 2319 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) 2320 se_cmd->cmd_wait_set = 1; 2321 2322 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2323 } 2324 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); 2325 2326 /* target_wait_for_sess_cmds - Wait for outstanding descriptors 2327 * @se_sess: session to wait for active I/O 2328 */ 2329 void target_wait_for_sess_cmds(struct se_session *se_sess) 2330 { 2331 struct se_cmd *se_cmd, *tmp_cmd; 2332 unsigned long flags; 2333 2334 list_for_each_entry_safe(se_cmd, tmp_cmd, 2335 &se_sess->sess_wait_list, se_cmd_list) { 2336 list_del(&se_cmd->se_cmd_list); 2337 2338 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" 2339 " %d\n", se_cmd, se_cmd->t_state, 2340 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2341 2342 wait_for_completion(&se_cmd->cmd_wait_comp); 2343 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" 2344 " fabric state: %d\n", se_cmd, se_cmd->t_state, 2345 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2346 2347 se_cmd->se_tfo->release_cmd(se_cmd); 2348 } 2349 2350 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2351 WARN_ON(!list_empty(&se_sess->sess_cmd_list)); 2352 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2353 2354 } 2355 EXPORT_SYMBOL(target_wait_for_sess_cmds); 2356 2357 static int transport_clear_lun_ref_thread(void *p) 2358 { 2359 struct se_lun *lun = p; 2360 2361 percpu_ref_kill(&lun->lun_ref); 2362 2363 wait_for_completion(&lun->lun_ref_comp); 2364 complete(&lun->lun_shutdown_comp); 2365 2366 return 0; 2367 } 2368 2369 int transport_clear_lun_ref(struct se_lun *lun) 2370 { 2371 struct task_struct *kt; 2372 2373 kt = kthread_run(transport_clear_lun_ref_thread, lun, 2374 "tcm_cl_%u", lun->unpacked_lun); 2375 if (IS_ERR(kt)) { 2376 pr_err("Unable to start clear_lun thread\n"); 2377 return PTR_ERR(kt); 2378 } 2379 wait_for_completion(&lun->lun_shutdown_comp); 2380 2381 return 0; 2382 } 2383 2384 /** 2385 * transport_wait_for_tasks - wait for completion to occur 2386 * @cmd: command to wait 2387 * 2388 * Called from frontend fabric context to wait for storage engine 2389 * to pause and/or release frontend generated struct se_cmd. 2390 */ 2391 bool transport_wait_for_tasks(struct se_cmd *cmd) 2392 { 2393 unsigned long flags; 2394 2395 spin_lock_irqsave(&cmd->t_state_lock, flags); 2396 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 2397 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2398 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2399 return false; 2400 } 2401 2402 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 2403 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2404 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2405 return false; 2406 } 2407 2408 if (!(cmd->transport_state & CMD_T_ACTIVE)) { 2409 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2410 return false; 2411 } 2412 2413 cmd->transport_state |= CMD_T_STOP; 2414 2415 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" 2416 " i_state: %d, t_state: %d, CMD_T_STOP\n", 2417 cmd, cmd->se_tfo->get_task_tag(cmd), 2418 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); 2419 2420 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2421 2422 wait_for_completion(&cmd->t_transport_stop_comp); 2423 2424 spin_lock_irqsave(&cmd->t_state_lock, flags); 2425 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 2426 2427 pr_debug("wait_for_tasks: Stopped wait_for_completion(" 2428 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", 2429 cmd->se_tfo->get_task_tag(cmd)); 2430 2431 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2432 2433 return true; 2434 } 2435 EXPORT_SYMBOL(transport_wait_for_tasks); 2436 2437 static int transport_get_sense_codes( 2438 struct se_cmd *cmd, 2439 u8 *asc, 2440 u8 *ascq) 2441 { 2442 *asc = cmd->scsi_asc; 2443 *ascq = cmd->scsi_ascq; 2444 2445 return 0; 2446 } 2447 2448 int 2449 transport_send_check_condition_and_sense(struct se_cmd *cmd, 2450 sense_reason_t reason, int from_transport) 2451 { 2452 unsigned char *buffer = cmd->sense_buffer; 2453 unsigned long flags; 2454 u8 asc = 0, ascq = 0; 2455 2456 spin_lock_irqsave(&cmd->t_state_lock, flags); 2457 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 2458 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2459 return 0; 2460 } 2461 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 2462 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2463 2464 if (!reason && from_transport) 2465 goto after_reason; 2466 2467 if (!from_transport) 2468 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 2469 2470 /* 2471 * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses 2472 * SENSE KEY values from include/scsi/scsi.h 2473 */ 2474 switch (reason) { 2475 case TCM_NO_SENSE: 2476 /* CURRENT ERROR */ 2477 buffer[0] = 0x70; 2478 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2479 /* Not Ready */ 2480 buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY; 2481 /* NO ADDITIONAL SENSE INFORMATION */ 2482 buffer[SPC_ASC_KEY_OFFSET] = 0; 2483 buffer[SPC_ASCQ_KEY_OFFSET] = 0; 2484 break; 2485 case TCM_NON_EXISTENT_LUN: 2486 /* CURRENT ERROR */ 2487 buffer[0] = 0x70; 2488 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2489 /* ILLEGAL REQUEST */ 2490 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2491 /* LOGICAL UNIT NOT SUPPORTED */ 2492 buffer[SPC_ASC_KEY_OFFSET] = 0x25; 2493 break; 2494 case TCM_UNSUPPORTED_SCSI_OPCODE: 2495 case TCM_SECTOR_COUNT_TOO_MANY: 2496 /* CURRENT ERROR */ 2497 buffer[0] = 0x70; 2498 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2499 /* ILLEGAL REQUEST */ 2500 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2501 /* INVALID COMMAND OPERATION CODE */ 2502 buffer[SPC_ASC_KEY_OFFSET] = 0x20; 2503 break; 2504 case TCM_UNKNOWN_MODE_PAGE: 2505 /* CURRENT ERROR */ 2506 buffer[0] = 0x70; 2507 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2508 /* ILLEGAL REQUEST */ 2509 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2510 /* INVALID FIELD IN CDB */ 2511 buffer[SPC_ASC_KEY_OFFSET] = 0x24; 2512 break; 2513 case TCM_CHECK_CONDITION_ABORT_CMD: 2514 /* CURRENT ERROR */ 2515 buffer[0] = 0x70; 2516 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2517 /* ABORTED COMMAND */ 2518 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2519 /* BUS DEVICE RESET FUNCTION OCCURRED */ 2520 buffer[SPC_ASC_KEY_OFFSET] = 0x29; 2521 buffer[SPC_ASCQ_KEY_OFFSET] = 0x03; 2522 break; 2523 case TCM_INCORRECT_AMOUNT_OF_DATA: 2524 /* CURRENT ERROR */ 2525 buffer[0] = 0x70; 2526 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2527 /* ABORTED COMMAND */ 2528 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2529 /* WRITE ERROR */ 2530 buffer[SPC_ASC_KEY_OFFSET] = 0x0c; 2531 /* NOT ENOUGH UNSOLICITED DATA */ 2532 buffer[SPC_ASCQ_KEY_OFFSET] = 0x0d; 2533 break; 2534 case TCM_INVALID_CDB_FIELD: 2535 /* CURRENT ERROR */ 2536 buffer[0] = 0x70; 2537 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2538 /* ILLEGAL REQUEST */ 2539 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2540 /* INVALID FIELD IN CDB */ 2541 buffer[SPC_ASC_KEY_OFFSET] = 0x24; 2542 break; 2543 case TCM_INVALID_PARAMETER_LIST: 2544 /* CURRENT ERROR */ 2545 buffer[0] = 0x70; 2546 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2547 /* ILLEGAL REQUEST */ 2548 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2549 /* INVALID FIELD IN PARAMETER LIST */ 2550 buffer[SPC_ASC_KEY_OFFSET] = 0x26; 2551 break; 2552 case TCM_PARAMETER_LIST_LENGTH_ERROR: 2553 /* CURRENT ERROR */ 2554 buffer[0] = 0x70; 2555 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2556 /* ILLEGAL REQUEST */ 2557 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2558 /* PARAMETER LIST LENGTH ERROR */ 2559 buffer[SPC_ASC_KEY_OFFSET] = 0x1a; 2560 break; 2561 case TCM_UNEXPECTED_UNSOLICITED_DATA: 2562 /* CURRENT ERROR */ 2563 buffer[0] = 0x70; 2564 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2565 /* ABORTED COMMAND */ 2566 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2567 /* WRITE ERROR */ 2568 buffer[SPC_ASC_KEY_OFFSET] = 0x0c; 2569 /* UNEXPECTED_UNSOLICITED_DATA */ 2570 buffer[SPC_ASCQ_KEY_OFFSET] = 0x0c; 2571 break; 2572 case TCM_SERVICE_CRC_ERROR: 2573 /* CURRENT ERROR */ 2574 buffer[0] = 0x70; 2575 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2576 /* ABORTED COMMAND */ 2577 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2578 /* PROTOCOL SERVICE CRC ERROR */ 2579 buffer[SPC_ASC_KEY_OFFSET] = 0x47; 2580 /* N/A */ 2581 buffer[SPC_ASCQ_KEY_OFFSET] = 0x05; 2582 break; 2583 case TCM_SNACK_REJECTED: 2584 /* CURRENT ERROR */ 2585 buffer[0] = 0x70; 2586 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2587 /* ABORTED COMMAND */ 2588 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2589 /* READ ERROR */ 2590 buffer[SPC_ASC_KEY_OFFSET] = 0x11; 2591 /* FAILED RETRANSMISSION REQUEST */ 2592 buffer[SPC_ASCQ_KEY_OFFSET] = 0x13; 2593 break; 2594 case TCM_WRITE_PROTECTED: 2595 /* CURRENT ERROR */ 2596 buffer[0] = 0x70; 2597 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2598 /* DATA PROTECT */ 2599 buffer[SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; 2600 /* WRITE PROTECTED */ 2601 buffer[SPC_ASC_KEY_OFFSET] = 0x27; 2602 break; 2603 case TCM_ADDRESS_OUT_OF_RANGE: 2604 /* CURRENT ERROR */ 2605 buffer[0] = 0x70; 2606 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2607 /* ILLEGAL REQUEST */ 2608 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2609 /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ 2610 buffer[SPC_ASC_KEY_OFFSET] = 0x21; 2611 break; 2612 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 2613 /* CURRENT ERROR */ 2614 buffer[0] = 0x70; 2615 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2616 /* UNIT ATTENTION */ 2617 buffer[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; 2618 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); 2619 buffer[SPC_ASC_KEY_OFFSET] = asc; 2620 buffer[SPC_ASCQ_KEY_OFFSET] = ascq; 2621 break; 2622 case TCM_CHECK_CONDITION_NOT_READY: 2623 /* CURRENT ERROR */ 2624 buffer[0] = 0x70; 2625 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2626 /* Not Ready */ 2627 buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY; 2628 transport_get_sense_codes(cmd, &asc, &ascq); 2629 buffer[SPC_ASC_KEY_OFFSET] = asc; 2630 buffer[SPC_ASCQ_KEY_OFFSET] = ascq; 2631 break; 2632 case TCM_MISCOMPARE_VERIFY: 2633 /* CURRENT ERROR */ 2634 buffer[0] = 0x70; 2635 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2636 buffer[SPC_SENSE_KEY_OFFSET] = MISCOMPARE; 2637 /* MISCOMPARE DURING VERIFY OPERATION */ 2638 buffer[SPC_ASC_KEY_OFFSET] = 0x1d; 2639 buffer[SPC_ASCQ_KEY_OFFSET] = 0x00; 2640 break; 2641 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 2642 default: 2643 /* CURRENT ERROR */ 2644 buffer[0] = 0x70; 2645 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2646 /* 2647 * Returning ILLEGAL REQUEST would cause immediate IO errors on 2648 * Solaris initiators. Returning NOT READY instead means the 2649 * operations will be retried a finite number of times and we 2650 * can survive intermittent errors. 2651 */ 2652 buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY; 2653 /* LOGICAL UNIT COMMUNICATION FAILURE */ 2654 buffer[SPC_ASC_KEY_OFFSET] = 0x08; 2655 break; 2656 } 2657 /* 2658 * This code uses linux/include/scsi/scsi.h SAM status codes! 2659 */ 2660 cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 2661 /* 2662 * Automatically padded, this value is encoded in the fabric's 2663 * data_length response PDU containing the SCSI defined sense data. 2664 */ 2665 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 2666 2667 after_reason: 2668 trace_target_cmd_complete(cmd); 2669 return cmd->se_tfo->queue_status(cmd); 2670 } 2671 EXPORT_SYMBOL(transport_send_check_condition_and_sense); 2672 2673 int transport_check_aborted_status(struct se_cmd *cmd, int send_status) 2674 { 2675 if (!(cmd->transport_state & CMD_T_ABORTED)) 2676 return 0; 2677 2678 if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) 2679 return 1; 2680 2681 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n", 2682 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); 2683 2684 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; 2685 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 2686 trace_target_cmd_complete(cmd); 2687 cmd->se_tfo->queue_status(cmd); 2688 2689 return 1; 2690 } 2691 EXPORT_SYMBOL(transport_check_aborted_status); 2692 2693 void transport_send_task_abort(struct se_cmd *cmd) 2694 { 2695 unsigned long flags; 2696 2697 spin_lock_irqsave(&cmd->t_state_lock, flags); 2698 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) { 2699 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2700 return; 2701 } 2702 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2703 2704 /* 2705 * If there are still expected incoming fabric WRITEs, we wait 2706 * until until they have completed before sending a TASK_ABORTED 2707 * response. This response with TASK_ABORTED status will be 2708 * queued back to fabric module by transport_check_aborted_status(). 2709 */ 2710 if (cmd->data_direction == DMA_TO_DEVICE) { 2711 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 2712 cmd->transport_state |= CMD_T_ABORTED; 2713 smp_mb__after_atomic_inc(); 2714 return; 2715 } 2716 } 2717 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 2718 2719 transport_lun_remove_cmd(cmd); 2720 2721 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," 2722 " ITT: 0x%08x\n", cmd->t_task_cdb[0], 2723 cmd->se_tfo->get_task_tag(cmd)); 2724 2725 trace_target_cmd_complete(cmd); 2726 cmd->se_tfo->queue_status(cmd); 2727 } 2728 2729 static void target_tmr_work(struct work_struct *work) 2730 { 2731 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2732 struct se_device *dev = cmd->se_dev; 2733 struct se_tmr_req *tmr = cmd->se_tmr_req; 2734 int ret; 2735 2736 switch (tmr->function) { 2737 case TMR_ABORT_TASK: 2738 core_tmr_abort_task(dev, tmr, cmd->se_sess); 2739 break; 2740 case TMR_ABORT_TASK_SET: 2741 case TMR_CLEAR_ACA: 2742 case TMR_CLEAR_TASK_SET: 2743 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; 2744 break; 2745 case TMR_LUN_RESET: 2746 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); 2747 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : 2748 TMR_FUNCTION_REJECTED; 2749 break; 2750 case TMR_TARGET_WARM_RESET: 2751 tmr->response = TMR_FUNCTION_REJECTED; 2752 break; 2753 case TMR_TARGET_COLD_RESET: 2754 tmr->response = TMR_FUNCTION_REJECTED; 2755 break; 2756 default: 2757 pr_err("Uknown TMR function: 0x%02x.\n", 2758 tmr->function); 2759 tmr->response = TMR_FUNCTION_REJECTED; 2760 break; 2761 } 2762 2763 cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 2764 cmd->se_tfo->queue_tm_rsp(cmd); 2765 2766 transport_cmd_check_stop_to_fabric(cmd); 2767 } 2768 2769 int transport_generic_handle_tmr( 2770 struct se_cmd *cmd) 2771 { 2772 INIT_WORK(&cmd->work, target_tmr_work); 2773 queue_work(cmd->se_dev->tmr_wq, &cmd->work); 2774 return 0; 2775 } 2776 EXPORT_SYMBOL(transport_generic_handle_tmr); 2777