/*******************************************************************************
 * Filename: target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp),
			__alignof__(struct t10_alua_lu_gp), 0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}

/* This code ensures unique mib indexes are handed out.
*/ 181 static DEFINE_SPINLOCK(scsi_mib_index_lock); 182 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; 183 184 /* 185 * Allocate a new row index for the entry type specified 186 */ 187 u32 scsi_get_new_index(scsi_index_t type) 188 { 189 u32 new_index; 190 191 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); 192 193 spin_lock(&scsi_mib_index_lock); 194 new_index = ++scsi_mib_index[type]; 195 spin_unlock(&scsi_mib_index_lock); 196 197 return new_index; 198 } 199 200 void transport_subsystem_check_init(void) 201 { 202 int ret; 203 static int sub_api_initialized; 204 205 if (sub_api_initialized) 206 return; 207 208 ret = request_module("target_core_iblock"); 209 if (ret != 0) 210 pr_err("Unable to load target_core_iblock\n"); 211 212 ret = request_module("target_core_file"); 213 if (ret != 0) 214 pr_err("Unable to load target_core_file\n"); 215 216 ret = request_module("target_core_pscsi"); 217 if (ret != 0) 218 pr_err("Unable to load target_core_pscsi\n"); 219 220 ret = request_module("target_core_user"); 221 if (ret != 0) 222 pr_err("Unable to load target_core_user\n"); 223 224 sub_api_initialized = 1; 225 } 226 227 struct se_session *transport_init_session(enum target_prot_op sup_prot_ops) 228 { 229 struct se_session *se_sess; 230 231 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 232 if (!se_sess) { 233 pr_err("Unable to allocate struct se_session from" 234 " se_sess_cache\n"); 235 return ERR_PTR(-ENOMEM); 236 } 237 INIT_LIST_HEAD(&se_sess->sess_list); 238 INIT_LIST_HEAD(&se_sess->sess_acl_list); 239 INIT_LIST_HEAD(&se_sess->sess_cmd_list); 240 INIT_LIST_HEAD(&se_sess->sess_wait_list); 241 spin_lock_init(&se_sess->sess_cmd_lock); 242 kref_init(&se_sess->sess_kref); 243 se_sess->sup_prot_ops = sup_prot_ops; 244 245 return se_sess; 246 } 247 EXPORT_SYMBOL(transport_init_session); 248 249 int transport_alloc_session_tags(struct se_session *se_sess, 250 unsigned int tag_num, unsigned int tag_size) 251 { 252 int rc; 253 254 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, 255 GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 256 if (!se_sess->sess_cmd_map) { 257 se_sess->sess_cmd_map = vzalloc(tag_num * tag_size); 258 if (!se_sess->sess_cmd_map) { 259 pr_err("Unable to allocate se_sess->sess_cmd_map\n"); 260 return -ENOMEM; 261 } 262 } 263 264 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num); 265 if (rc < 0) { 266 pr_err("Unable to init se_sess->sess_tag_pool," 267 " tag_num: %u\n", tag_num); 268 kvfree(se_sess->sess_cmd_map); 269 se_sess->sess_cmd_map = NULL; 270 return -ENOMEM; 271 } 272 273 return 0; 274 } 275 EXPORT_SYMBOL(transport_alloc_session_tags); 276 277 struct se_session *transport_init_session_tags(unsigned int tag_num, 278 unsigned int tag_size, 279 enum target_prot_op sup_prot_ops) 280 { 281 struct se_session *se_sess; 282 int rc; 283 284 se_sess = transport_init_session(sup_prot_ops); 285 if (IS_ERR(se_sess)) 286 return se_sess; 287 288 rc = transport_alloc_session_tags(se_sess, tag_num, tag_size); 289 if (rc < 0) { 290 transport_free_session(se_sess); 291 return ERR_PTR(-ENOMEM); 292 } 293 294 return se_sess; 295 } 296 EXPORT_SYMBOL(transport_init_session_tags); 297 298 /* 299 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called. 
300 */ 301 void __transport_register_session( 302 struct se_portal_group *se_tpg, 303 struct se_node_acl *se_nacl, 304 struct se_session *se_sess, 305 void *fabric_sess_ptr) 306 { 307 const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo; 308 unsigned char buf[PR_REG_ISID_LEN]; 309 310 se_sess->se_tpg = se_tpg; 311 se_sess->fabric_sess_ptr = fabric_sess_ptr; 312 /* 313 * Used by struct se_node_acl's under ConfigFS to locate active se_session-t 314 * 315 * Only set for struct se_session's that will actually be moving I/O. 316 * eg: *NOT* discovery sessions. 317 */ 318 if (se_nacl) { 319 /* 320 * 321 * Determine if fabric allows for T10-PI feature bits exposed to 322 * initiators for device backends with !dev->dev_attrib.pi_prot_type. 323 * 324 * If so, then always save prot_type on a per se_node_acl node 325 * basis and re-instate the previous sess_prot_type to avoid 326 * disabling PI from below any previously initiator side 327 * registered LUNs. 328 */ 329 if (se_nacl->saved_prot_type) 330 se_sess->sess_prot_type = se_nacl->saved_prot_type; 331 else if (tfo->tpg_check_prot_fabric_only) 332 se_sess->sess_prot_type = se_nacl->saved_prot_type = 333 tfo->tpg_check_prot_fabric_only(se_tpg); 334 /* 335 * If the fabric module supports an ISID based TransportID, 336 * save this value in binary from the fabric I_T Nexus now. 337 */ 338 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { 339 memset(&buf[0], 0, PR_REG_ISID_LEN); 340 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, 341 &buf[0], PR_REG_ISID_LEN); 342 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); 343 } 344 345 spin_lock_irq(&se_nacl->nacl_sess_lock); 346 /* 347 * The se_nacl->nacl_sess pointer will be set to the 348 * last active I_T Nexus for each struct se_node_acl. 349 */ 350 se_nacl->nacl_sess = se_sess; 351 352 list_add_tail(&se_sess->sess_acl_list, 353 &se_nacl->acl_sess_list); 354 spin_unlock_irq(&se_nacl->nacl_sess_lock); 355 } 356 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 357 358 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 359 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); 360 } 361 EXPORT_SYMBOL(__transport_register_session); 362 363 void transport_register_session( 364 struct se_portal_group *se_tpg, 365 struct se_node_acl *se_nacl, 366 struct se_session *se_sess, 367 void *fabric_sess_ptr) 368 { 369 unsigned long flags; 370 371 spin_lock_irqsave(&se_tpg->session_lock, flags); 372 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); 373 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 374 } 375 EXPORT_SYMBOL(transport_register_session); 376 377 static void target_release_session(struct kref *kref) 378 { 379 struct se_session *se_sess = container_of(kref, 380 struct se_session, sess_kref); 381 struct se_portal_group *se_tpg = se_sess->se_tpg; 382 383 se_tpg->se_tpg_tfo->close_session(se_sess); 384 } 385 386 int target_get_session(struct se_session *se_sess) 387 { 388 return kref_get_unless_zero(&se_sess->sess_kref); 389 } 390 EXPORT_SYMBOL(target_get_session); 391 392 void target_put_session(struct se_session *se_sess) 393 { 394 kref_put(&se_sess->sess_kref, target_release_session); 395 } 396 EXPORT_SYMBOL(target_put_session); 397 398 ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) 399 { 400 struct se_session *se_sess; 401 ssize_t len = 0; 402 403 spin_lock_bh(&se_tpg->session_lock); 404 list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { 405 if (!se_sess->se_node_acl) 406 
continue; 407 if (!se_sess->se_node_acl->dynamic_node_acl) 408 continue; 409 if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE) 410 break; 411 412 len += snprintf(page + len, PAGE_SIZE - len, "%s\n", 413 se_sess->se_node_acl->initiatorname); 414 len += 1; /* Include NULL terminator */ 415 } 416 spin_unlock_bh(&se_tpg->session_lock); 417 418 return len; 419 } 420 EXPORT_SYMBOL(target_show_dynamic_sessions); 421 422 static void target_complete_nacl(struct kref *kref) 423 { 424 struct se_node_acl *nacl = container_of(kref, 425 struct se_node_acl, acl_kref); 426 427 complete(&nacl->acl_free_comp); 428 } 429 430 void target_put_nacl(struct se_node_acl *nacl) 431 { 432 kref_put(&nacl->acl_kref, target_complete_nacl); 433 } 434 EXPORT_SYMBOL(target_put_nacl); 435 436 void transport_deregister_session_configfs(struct se_session *se_sess) 437 { 438 struct se_node_acl *se_nacl; 439 unsigned long flags; 440 /* 441 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 442 */ 443 se_nacl = se_sess->se_node_acl; 444 if (se_nacl) { 445 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 446 if (se_nacl->acl_stop == 0) 447 list_del(&se_sess->sess_acl_list); 448 /* 449 * If the session list is empty, then clear the pointer. 450 * Otherwise, set the struct se_session pointer from the tail 451 * element of the per struct se_node_acl active session list. 452 */ 453 if (list_empty(&se_nacl->acl_sess_list)) 454 se_nacl->nacl_sess = NULL; 455 else { 456 se_nacl->nacl_sess = container_of( 457 se_nacl->acl_sess_list.prev, 458 struct se_session, sess_acl_list); 459 } 460 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 461 } 462 } 463 EXPORT_SYMBOL(transport_deregister_session_configfs); 464 465 void transport_free_session(struct se_session *se_sess) 466 { 467 struct se_node_acl *se_nacl = se_sess->se_node_acl; 468 /* 469 * Drop the se_node_acl->nacl_kref obtained from within 470 * core_tpg_get_initiator_node_acl(). 471 */ 472 if (se_nacl) { 473 se_sess->se_node_acl = NULL; 474 target_put_nacl(se_nacl); 475 } 476 if (se_sess->sess_cmd_map) { 477 percpu_ida_destroy(&se_sess->sess_tag_pool); 478 kvfree(se_sess->sess_cmd_map); 479 } 480 kmem_cache_free(se_sess_cache, se_sess); 481 } 482 EXPORT_SYMBOL(transport_free_session); 483 484 void transport_deregister_session(struct se_session *se_sess) 485 { 486 struct se_portal_group *se_tpg = se_sess->se_tpg; 487 const struct target_core_fabric_ops *se_tfo; 488 struct se_node_acl *se_nacl; 489 unsigned long flags; 490 bool drop_nacl = false; 491 492 if (!se_tpg) { 493 transport_free_session(se_sess); 494 return; 495 } 496 se_tfo = se_tpg->se_tpg_tfo; 497 498 spin_lock_irqsave(&se_tpg->session_lock, flags); 499 list_del(&se_sess->sess_list); 500 se_sess->se_tpg = NULL; 501 se_sess->fabric_sess_ptr = NULL; 502 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 503 504 /* 505 * Determine if we need to do extra work for this initiator node's 506 * struct se_node_acl if it had been previously dynamically generated. 
507 */ 508 se_nacl = se_sess->se_node_acl; 509 510 mutex_lock(&se_tpg->acl_node_mutex); 511 if (se_nacl && se_nacl->dynamic_node_acl) { 512 if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 513 list_del(&se_nacl->acl_list); 514 drop_nacl = true; 515 } 516 } 517 mutex_unlock(&se_tpg->acl_node_mutex); 518 519 if (drop_nacl) { 520 core_tpg_wait_for_nacl_pr_ref(se_nacl); 521 core_free_device_list_for_node(se_nacl, se_tpg); 522 se_sess->se_node_acl = NULL; 523 kfree(se_nacl); 524 } 525 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 526 se_tpg->se_tpg_tfo->get_fabric_name()); 527 /* 528 * If last kref is dropping now for an explicit NodeACL, awake sleeping 529 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 530 * removal context from within transport_free_session() code. 531 */ 532 533 transport_free_session(se_sess); 534 } 535 EXPORT_SYMBOL(transport_deregister_session); 536 537 static void target_remove_from_state_list(struct se_cmd *cmd) 538 { 539 struct se_device *dev = cmd->se_dev; 540 unsigned long flags; 541 542 if (!dev) 543 return; 544 545 if (cmd->transport_state & CMD_T_BUSY) 546 return; 547 548 spin_lock_irqsave(&dev->execute_task_lock, flags); 549 if (cmd->state_active) { 550 list_del(&cmd->state_list); 551 cmd->state_active = false; 552 } 553 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 554 } 555 556 static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, 557 bool write_pending) 558 { 559 unsigned long flags; 560 561 if (remove_from_lists) { 562 target_remove_from_state_list(cmd); 563 564 /* 565 * Clear struct se_cmd->se_lun before the handoff to FE. 566 */ 567 cmd->se_lun = NULL; 568 } 569 570 spin_lock_irqsave(&cmd->t_state_lock, flags); 571 if (write_pending) 572 cmd->t_state = TRANSPORT_WRITE_PENDING; 573 574 /* 575 * Determine if frontend context caller is requesting the stopping of 576 * this command for frontend exceptions. 577 */ 578 if (cmd->transport_state & CMD_T_STOP) { 579 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 580 __func__, __LINE__, cmd->tag); 581 582 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 583 584 complete_all(&cmd->t_transport_stop_comp); 585 return 1; 586 } 587 588 cmd->transport_state &= ~CMD_T_ACTIVE; 589 if (remove_from_lists) { 590 /* 591 * Some fabric modules like tcm_loop can release 592 * their internally allocated I/O reference now and 593 * struct se_cmd now. 594 * 595 * Fabric modules are expected to return '1' here if the 596 * se_cmd being passed is released at this point, 597 * or zero if not being released. 
598 */ 599 if (cmd->se_tfo->check_stop_free != NULL) { 600 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 601 return cmd->se_tfo->check_stop_free(cmd); 602 } 603 } 604 605 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 606 return 0; 607 } 608 609 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) 610 { 611 return transport_cmd_check_stop(cmd, true, false); 612 } 613 614 static void transport_lun_remove_cmd(struct se_cmd *cmd) 615 { 616 struct se_lun *lun = cmd->se_lun; 617 618 if (!lun) 619 return; 620 621 if (cmpxchg(&cmd->lun_ref_active, true, false)) 622 percpu_ref_put(&lun->lun_ref); 623 } 624 625 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 626 { 627 bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); 628 629 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) 630 transport_lun_remove_cmd(cmd); 631 /* 632 * Allow the fabric driver to unmap any resources before 633 * releasing the descriptor via TFO->release_cmd() 634 */ 635 if (remove) 636 cmd->se_tfo->aborted_task(cmd); 637 638 if (transport_cmd_check_stop_to_fabric(cmd)) 639 return; 640 if (remove && ack_kref) 641 transport_put_cmd(cmd); 642 } 643 644 static void target_complete_failure_work(struct work_struct *work) 645 { 646 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 647 648 transport_generic_request_failure(cmd, 649 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 650 } 651 652 /* 653 * Used when asking transport to copy Sense Data from the underlying 654 * Linux/SCSI struct scsi_cmnd 655 */ 656 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) 657 { 658 struct se_device *dev = cmd->se_dev; 659 660 WARN_ON(!cmd->se_lun); 661 662 if (!dev) 663 return NULL; 664 665 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) 666 return NULL; 667 668 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 669 670 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", 671 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); 672 return cmd->sense_buffer; 673 } 674 675 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 676 { 677 struct se_device *dev = cmd->se_dev; 678 int success = scsi_status == GOOD; 679 unsigned long flags; 680 681 cmd->scsi_status = scsi_status; 682 683 684 spin_lock_irqsave(&cmd->t_state_lock, flags); 685 cmd->transport_state &= ~CMD_T_BUSY; 686 687 if (dev && dev->transport->transport_complete) { 688 dev->transport->transport_complete(cmd, 689 cmd->t_data_sg, 690 transport_get_sense_buffer(cmd)); 691 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 692 success = 1; 693 } 694 695 /* 696 * See if we are waiting to complete for an exception condition. 697 */ 698 if (cmd->transport_state & CMD_T_REQUEST_STOP) { 699 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 700 complete(&cmd->task_stop_comp); 701 return; 702 } 703 704 /* 705 * Check for case where an explicit ABORT_TASK has been received 706 * and transport_wait_for_tasks() will be waiting for completion.. 
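	 * In that case the code below signals t_transport_stop_comp and returns
	 * without queueing the normal completion work.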
707 */ 708 if (cmd->transport_state & CMD_T_ABORTED || 709 cmd->transport_state & CMD_T_STOP) { 710 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 711 complete_all(&cmd->t_transport_stop_comp); 712 return; 713 } else if (!success) { 714 INIT_WORK(&cmd->work, target_complete_failure_work); 715 } else { 716 INIT_WORK(&cmd->work, target_complete_ok_work); 717 } 718 719 cmd->t_state = TRANSPORT_COMPLETE; 720 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 721 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 722 723 if (cmd->cpuid == -1) 724 queue_work(target_completion_wq, &cmd->work); 725 else 726 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); 727 } 728 EXPORT_SYMBOL(target_complete_cmd); 729 730 void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) 731 { 732 if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) { 733 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 734 cmd->residual_count += cmd->data_length - length; 735 } else { 736 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 737 cmd->residual_count = cmd->data_length - length; 738 } 739 740 cmd->data_length = length; 741 } 742 743 target_complete_cmd(cmd, scsi_status); 744 } 745 EXPORT_SYMBOL(target_complete_cmd_with_length); 746 747 static void target_add_to_state_list(struct se_cmd *cmd) 748 { 749 struct se_device *dev = cmd->se_dev; 750 unsigned long flags; 751 752 spin_lock_irqsave(&dev->execute_task_lock, flags); 753 if (!cmd->state_active) { 754 list_add_tail(&cmd->state_list, &dev->state_list); 755 cmd->state_active = true; 756 } 757 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 758 } 759 760 /* 761 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status 762 */ 763 static void transport_write_pending_qf(struct se_cmd *cmd); 764 static void transport_complete_qf(struct se_cmd *cmd); 765 766 void target_qf_do_work(struct work_struct *work) 767 { 768 struct se_device *dev = container_of(work, struct se_device, 769 qf_work_queue); 770 LIST_HEAD(qf_cmd_list); 771 struct se_cmd *cmd, *cmd_tmp; 772 773 spin_lock_irq(&dev->qf_cmd_lock); 774 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); 775 spin_unlock_irq(&dev->qf_cmd_lock); 776 777 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 778 list_del(&cmd->se_qf_node); 779 atomic_dec_mb(&dev->dev_qf_count); 780 781 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 782 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 783 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : 784 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? 
"WRITE_PENDING" 785 : "UNKNOWN"); 786 787 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 788 transport_write_pending_qf(cmd); 789 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) 790 transport_complete_qf(cmd); 791 } 792 } 793 794 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 795 { 796 switch (cmd->data_direction) { 797 case DMA_NONE: 798 return "NONE"; 799 case DMA_FROM_DEVICE: 800 return "READ"; 801 case DMA_TO_DEVICE: 802 return "WRITE"; 803 case DMA_BIDIRECTIONAL: 804 return "BIDI"; 805 default: 806 break; 807 } 808 809 return "UNKNOWN"; 810 } 811 812 void transport_dump_dev_state( 813 struct se_device *dev, 814 char *b, 815 int *bl) 816 { 817 *bl += sprintf(b + *bl, "Status: "); 818 if (dev->export_count) 819 *bl += sprintf(b + *bl, "ACTIVATED"); 820 else 821 *bl += sprintf(b + *bl, "DEACTIVATED"); 822 823 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); 824 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", 825 dev->dev_attrib.block_size, 826 dev->dev_attrib.hw_max_sectors); 827 *bl += sprintf(b + *bl, " "); 828 } 829 830 void transport_dump_vpd_proto_id( 831 struct t10_vpd *vpd, 832 unsigned char *p_buf, 833 int p_buf_len) 834 { 835 unsigned char buf[VPD_TMP_BUF_SIZE]; 836 int len; 837 838 memset(buf, 0, VPD_TMP_BUF_SIZE); 839 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 840 841 switch (vpd->protocol_identifier) { 842 case 0x00: 843 sprintf(buf+len, "Fibre Channel\n"); 844 break; 845 case 0x10: 846 sprintf(buf+len, "Parallel SCSI\n"); 847 break; 848 case 0x20: 849 sprintf(buf+len, "SSA\n"); 850 break; 851 case 0x30: 852 sprintf(buf+len, "IEEE 1394\n"); 853 break; 854 case 0x40: 855 sprintf(buf+len, "SCSI Remote Direct Memory Access" 856 " Protocol\n"); 857 break; 858 case 0x50: 859 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 860 break; 861 case 0x60: 862 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 863 break; 864 case 0x70: 865 sprintf(buf+len, "Automation/Drive Interface Transport" 866 " Protocol\n"); 867 break; 868 case 0x80: 869 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 870 break; 871 default: 872 sprintf(buf+len, "Unknown 0x%02x\n", 873 vpd->protocol_identifier); 874 break; 875 } 876 877 if (p_buf) 878 strncpy(p_buf, buf, p_buf_len); 879 else 880 pr_debug("%s", buf); 881 } 882 883 void 884 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 885 { 886 /* 887 * Check if the Protocol Identifier Valid (PIV) bit is set.. 
888 * 889 * from spc3r23.pdf section 7.5.1 890 */ 891 if (page_83[1] & 0x80) { 892 vpd->protocol_identifier = (page_83[0] & 0xf0); 893 vpd->protocol_identifier_set = 1; 894 transport_dump_vpd_proto_id(vpd, NULL, 0); 895 } 896 } 897 EXPORT_SYMBOL(transport_set_vpd_proto_id); 898 899 int transport_dump_vpd_assoc( 900 struct t10_vpd *vpd, 901 unsigned char *p_buf, 902 int p_buf_len) 903 { 904 unsigned char buf[VPD_TMP_BUF_SIZE]; 905 int ret = 0; 906 int len; 907 908 memset(buf, 0, VPD_TMP_BUF_SIZE); 909 len = sprintf(buf, "T10 VPD Identifier Association: "); 910 911 switch (vpd->association) { 912 case 0x00: 913 sprintf(buf+len, "addressed logical unit\n"); 914 break; 915 case 0x10: 916 sprintf(buf+len, "target port\n"); 917 break; 918 case 0x20: 919 sprintf(buf+len, "SCSI target device\n"); 920 break; 921 default: 922 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); 923 ret = -EINVAL; 924 break; 925 } 926 927 if (p_buf) 928 strncpy(p_buf, buf, p_buf_len); 929 else 930 pr_debug("%s", buf); 931 932 return ret; 933 } 934 935 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) 936 { 937 /* 938 * The VPD identification association.. 939 * 940 * from spc3r23.pdf Section 7.6.3.1 Table 297 941 */ 942 vpd->association = (page_83[1] & 0x30); 943 return transport_dump_vpd_assoc(vpd, NULL, 0); 944 } 945 EXPORT_SYMBOL(transport_set_vpd_assoc); 946 947 int transport_dump_vpd_ident_type( 948 struct t10_vpd *vpd, 949 unsigned char *p_buf, 950 int p_buf_len) 951 { 952 unsigned char buf[VPD_TMP_BUF_SIZE]; 953 int ret = 0; 954 int len; 955 956 memset(buf, 0, VPD_TMP_BUF_SIZE); 957 len = sprintf(buf, "T10 VPD Identifier Type: "); 958 959 switch (vpd->device_identifier_type) { 960 case 0x00: 961 sprintf(buf+len, "Vendor specific\n"); 962 break; 963 case 0x01: 964 sprintf(buf+len, "T10 Vendor ID based\n"); 965 break; 966 case 0x02: 967 sprintf(buf+len, "EUI-64 based\n"); 968 break; 969 case 0x03: 970 sprintf(buf+len, "NAA\n"); 971 break; 972 case 0x04: 973 sprintf(buf+len, "Relative target port identifier\n"); 974 break; 975 case 0x08: 976 sprintf(buf+len, "SCSI name string\n"); 977 break; 978 default: 979 sprintf(buf+len, "Unsupported: 0x%02x\n", 980 vpd->device_identifier_type); 981 ret = -EINVAL; 982 break; 983 } 984 985 if (p_buf) { 986 if (p_buf_len < strlen(buf)+1) 987 return -EINVAL; 988 strncpy(p_buf, buf, p_buf_len); 989 } else { 990 pr_debug("%s", buf); 991 } 992 993 return ret; 994 } 995 996 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) 997 { 998 /* 999 * The VPD identifier type.. 
1000 * 1001 * from spc3r23.pdf Section 7.6.3.1 Table 298 1002 */ 1003 vpd->device_identifier_type = (page_83[1] & 0x0f); 1004 return transport_dump_vpd_ident_type(vpd, NULL, 0); 1005 } 1006 EXPORT_SYMBOL(transport_set_vpd_ident_type); 1007 1008 int transport_dump_vpd_ident( 1009 struct t10_vpd *vpd, 1010 unsigned char *p_buf, 1011 int p_buf_len) 1012 { 1013 unsigned char buf[VPD_TMP_BUF_SIZE]; 1014 int ret = 0; 1015 1016 memset(buf, 0, VPD_TMP_BUF_SIZE); 1017 1018 switch (vpd->device_identifier_code_set) { 1019 case 0x01: /* Binary */ 1020 snprintf(buf, sizeof(buf), 1021 "T10 VPD Binary Device Identifier: %s\n", 1022 &vpd->device_identifier[0]); 1023 break; 1024 case 0x02: /* ASCII */ 1025 snprintf(buf, sizeof(buf), 1026 "T10 VPD ASCII Device Identifier: %s\n", 1027 &vpd->device_identifier[0]); 1028 break; 1029 case 0x03: /* UTF-8 */ 1030 snprintf(buf, sizeof(buf), 1031 "T10 VPD UTF-8 Device Identifier: %s\n", 1032 &vpd->device_identifier[0]); 1033 break; 1034 default: 1035 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" 1036 " 0x%02x", vpd->device_identifier_code_set); 1037 ret = -EINVAL; 1038 break; 1039 } 1040 1041 if (p_buf) 1042 strncpy(p_buf, buf, p_buf_len); 1043 else 1044 pr_debug("%s", buf); 1045 1046 return ret; 1047 } 1048 1049 int 1050 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) 1051 { 1052 static const char hex_str[] = "0123456789abcdef"; 1053 int j = 0, i = 4; /* offset to start of the identifier */ 1054 1055 /* 1056 * The VPD Code Set (encoding) 1057 * 1058 * from spc3r23.pdf Section 7.6.3.1 Table 296 1059 */ 1060 vpd->device_identifier_code_set = (page_83[0] & 0x0f); 1061 switch (vpd->device_identifier_code_set) { 1062 case 0x01: /* Binary */ 1063 vpd->device_identifier[j++] = 1064 hex_str[vpd->device_identifier_type]; 1065 while (i < (4 + page_83[3])) { 1066 vpd->device_identifier[j++] = 1067 hex_str[(page_83[i] & 0xf0) >> 4]; 1068 vpd->device_identifier[j++] = 1069 hex_str[page_83[i] & 0x0f]; 1070 i++; 1071 } 1072 break; 1073 case 0x02: /* ASCII */ 1074 case 0x03: /* UTF-8 */ 1075 while (i < (4 + page_83[3])) 1076 vpd->device_identifier[j++] = page_83[i++]; 1077 break; 1078 default: 1079 break; 1080 } 1081 1082 return transport_dump_vpd_ident(vpd, NULL, 0); 1083 } 1084 EXPORT_SYMBOL(transport_set_vpd_ident); 1085 1086 static sense_reason_t 1087 target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev, 1088 unsigned int size) 1089 { 1090 u32 mtl; 1091 1092 if (!cmd->se_tfo->max_data_sg_nents) 1093 return TCM_NO_SENSE; 1094 /* 1095 * Check if fabric enforced maximum SGL entries per I/O descriptor 1096 * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT + 1097 * residual_count and reduce original cmd->data_length to maximum 1098 * length based on single PAGE_SIZE entry scatter-lists. 1099 */ 1100 mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE); 1101 if (cmd->data_length > mtl) { 1102 /* 1103 * If an existing CDB overflow is present, calculate new residual 1104 * based on CDB size minus fabric maximum transfer length. 1105 * 1106 * If an existing CDB underflow is present, calculate new residual 1107 * based on original cmd->data_length minus fabric maximum transfer 1108 * length. 1109 * 1110 * Otherwise, set the underflow residual based on cmd->data_length 1111 * minus fabric maximum transfer length. 
1112 */ 1113 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1114 cmd->residual_count = (size - mtl); 1115 } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 1116 u32 orig_dl = size + cmd->residual_count; 1117 cmd->residual_count = (orig_dl - mtl); 1118 } else { 1119 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1120 cmd->residual_count = (cmd->data_length - mtl); 1121 } 1122 cmd->data_length = mtl; 1123 /* 1124 * Reset sbc_check_prot() calculated protection payload 1125 * length based upon the new smaller MTL. 1126 */ 1127 if (cmd->prot_length) { 1128 u32 sectors = (mtl / dev->dev_attrib.block_size); 1129 cmd->prot_length = dev->prot_length * sectors; 1130 } 1131 } 1132 return TCM_NO_SENSE; 1133 } 1134 1135 sense_reason_t 1136 target_cmd_size_check(struct se_cmd *cmd, unsigned int size) 1137 { 1138 struct se_device *dev = cmd->se_dev; 1139 1140 if (cmd->unknown_data_length) { 1141 cmd->data_length = size; 1142 } else if (size != cmd->data_length) { 1143 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" 1144 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 1145 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 1146 cmd->data_length, size, cmd->t_task_cdb[0]); 1147 1148 if (cmd->data_direction == DMA_TO_DEVICE && 1149 cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1150 pr_err("Rejecting underflow/overflow WRITE data\n"); 1151 return TCM_INVALID_CDB_FIELD; 1152 } 1153 /* 1154 * Reject READ_* or WRITE_* with overflow/underflow for 1155 * type SCF_SCSI_DATA_CDB. 1156 */ 1157 if (dev->dev_attrib.block_size != 512) { 1158 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 1159 " CDB on non 512-byte sector setup subsystem" 1160 " plugin: %s\n", dev->transport->name); 1161 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ 1162 return TCM_INVALID_CDB_FIELD; 1163 } 1164 /* 1165 * For the overflow case keep the existing fabric provided 1166 * ->data_length. Otherwise for the underflow case, reset 1167 * ->data_length to the smaller SCSI expected data transfer 1168 * length. 1169 */ 1170 if (size > cmd->data_length) { 1171 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; 1172 cmd->residual_count = (size - cmd->data_length); 1173 } else { 1174 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1175 cmd->residual_count = (cmd->data_length - size); 1176 cmd->data_length = size; 1177 } 1178 } 1179 1180 return target_check_max_data_sg_nents(cmd, dev, size); 1181 1182 } 1183 1184 /* 1185 * Used by fabric modules containing a local struct se_cmd within their 1186 * fabric dependent per I/O descriptor. 1187 * 1188 * Preserves the value of @cmd->tag. 
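 *
 * A typical call sequence (mirroring target_submit_cmd_map_sgls() later in
 * this file) is transport_init_se_cmd() -> target_get_sess_cmd() ->
 * transport_lookup_cmd_lun() -> target_setup_cmd_from_cdb().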
1189 */ 1190 void transport_init_se_cmd( 1191 struct se_cmd *cmd, 1192 const struct target_core_fabric_ops *tfo, 1193 struct se_session *se_sess, 1194 u32 data_length, 1195 int data_direction, 1196 int task_attr, 1197 unsigned char *sense_buffer) 1198 { 1199 INIT_LIST_HEAD(&cmd->se_delayed_node); 1200 INIT_LIST_HEAD(&cmd->se_qf_node); 1201 INIT_LIST_HEAD(&cmd->se_cmd_list); 1202 INIT_LIST_HEAD(&cmd->state_list); 1203 init_completion(&cmd->t_transport_stop_comp); 1204 init_completion(&cmd->cmd_wait_comp); 1205 init_completion(&cmd->task_stop_comp); 1206 spin_lock_init(&cmd->t_state_lock); 1207 kref_init(&cmd->cmd_kref); 1208 cmd->transport_state = CMD_T_DEV_ACTIVE; 1209 1210 cmd->se_tfo = tfo; 1211 cmd->se_sess = se_sess; 1212 cmd->data_length = data_length; 1213 cmd->data_direction = data_direction; 1214 cmd->sam_task_attr = task_attr; 1215 cmd->sense_buffer = sense_buffer; 1216 1217 cmd->state_active = false; 1218 } 1219 EXPORT_SYMBOL(transport_init_se_cmd); 1220 1221 static sense_reason_t 1222 transport_check_alloc_task_attr(struct se_cmd *cmd) 1223 { 1224 struct se_device *dev = cmd->se_dev; 1225 1226 /* 1227 * Check if SAM Task Attribute emulation is enabled for this 1228 * struct se_device storage object 1229 */ 1230 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1231 return 0; 1232 1233 if (cmd->sam_task_attr == TCM_ACA_TAG) { 1234 pr_debug("SAM Task Attribute ACA" 1235 " emulation is not supported\n"); 1236 return TCM_INVALID_CDB_FIELD; 1237 } 1238 1239 return 0; 1240 } 1241 1242 sense_reason_t 1243 target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) 1244 { 1245 struct se_device *dev = cmd->se_dev; 1246 sense_reason_t ret; 1247 1248 /* 1249 * Ensure that the received CDB is less than the max (252 + 8) bytes 1250 * for VARIABLE_LENGTH_CMD 1251 */ 1252 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { 1253 pr_err("Received SCSI CDB with command_size: %d that" 1254 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1255 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1256 return TCM_INVALID_CDB_FIELD; 1257 } 1258 /* 1259 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, 1260 * allocate the additional extended CDB buffer now.. Otherwise 1261 * setup the pointer from __t_task_cdb to t_task_cdb. 
1262 */ 1263 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { 1264 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), 1265 GFP_KERNEL); 1266 if (!cmd->t_task_cdb) { 1267 pr_err("Unable to allocate cmd->t_task_cdb" 1268 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1269 scsi_command_size(cdb), 1270 (unsigned long)sizeof(cmd->__t_task_cdb)); 1271 return TCM_OUT_OF_RESOURCES; 1272 } 1273 } else 1274 cmd->t_task_cdb = &cmd->__t_task_cdb[0]; 1275 /* 1276 * Copy the original CDB into cmd-> 1277 */ 1278 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); 1279 1280 trace_target_sequencer_start(cmd); 1281 1282 /* 1283 * Check for an existing UNIT ATTENTION condition 1284 */ 1285 ret = target_scsi3_ua_check(cmd); 1286 if (ret) 1287 return ret; 1288 1289 ret = target_alua_state_check(cmd); 1290 if (ret) 1291 return ret; 1292 1293 ret = target_check_reservation(cmd); 1294 if (ret) { 1295 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1296 return ret; 1297 } 1298 1299 ret = dev->transport->parse_cdb(cmd); 1300 if (ret == TCM_UNSUPPORTED_SCSI_OPCODE) 1301 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", 1302 cmd->se_tfo->get_fabric_name(), 1303 cmd->se_sess->se_node_acl->initiatorname, 1304 cmd->t_task_cdb[0]); 1305 if (ret) 1306 return ret; 1307 1308 ret = transport_check_alloc_task_attr(cmd); 1309 if (ret) 1310 return ret; 1311 1312 cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 1313 atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus); 1314 return 0; 1315 } 1316 EXPORT_SYMBOL(target_setup_cmd_from_cdb); 1317 1318 /* 1319 * Used by fabric module frontends to queue tasks directly. 1320 * May only be used from process context. 1321 */ 1322 int transport_handle_cdb_direct( 1323 struct se_cmd *cmd) 1324 { 1325 sense_reason_t ret; 1326 1327 if (!cmd->se_lun) { 1328 dump_stack(); 1329 pr_err("cmd->se_lun is NULL\n"); 1330 return -EINVAL; 1331 } 1332 if (in_interrupt()) { 1333 dump_stack(); 1334 pr_err("transport_generic_handle_cdb cannot be called" 1335 " from interrupt context\n"); 1336 return -EINVAL; 1337 } 1338 /* 1339 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that 1340 * outstanding descriptors are handled correctly during shutdown via 1341 * transport_wait_for_tasks() 1342 * 1343 * Also, we don't take cmd->t_state_lock here as we only expect 1344 * this to be called for initial descriptor submission. 1345 */ 1346 cmd->t_state = TRANSPORT_NEW_CMD; 1347 cmd->transport_state |= CMD_T_ACTIVE; 1348 1349 /* 1350 * transport_generic_new_cmd() is already handling QUEUE_FULL, 1351 * so follow TRANSPORT_NEW_CMD processing thread context usage 1352 * and call transport_generic_request_failure() if necessary.. 1353 */ 1354 ret = transport_generic_new_cmd(cmd); 1355 if (ret) 1356 transport_generic_request_failure(cmd, ret); 1357 return 0; 1358 } 1359 EXPORT_SYMBOL(transport_handle_cdb_direct); 1360 1361 sense_reason_t 1362 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 1363 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1364 { 1365 if (!sgl || !sgl_count) 1366 return 0; 1367 1368 /* 1369 * Reject SCSI data overflow with map_mem_to_cmd() as incoming 1370 * scatterlists already have been set to follow what the fabric 1371 * passes for the original expected data transfer length. 
 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using"
			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		return TCM_INVALID_CDB_FIELD;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;
	cmd->t_bidi_data_sg = sgl_bidi;
	cmd->t_bidi_data_nents = sgl_bidi_count;

	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}

/*
 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
 *			 se_cmd + use pre-allocated SGL memory.
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 * @sgl: struct scatterlist memory for unidirectional mapping
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
 * @sgl_prot: struct scatterlist memory protection information
 * @sgl_prot_count: scatterlist count for protection information
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 */
int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags,
		struct scatterlist *sgl, u32 sgl_count,
		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
		struct scatterlist *sgl_prot, u32 sgl_prot_count)
{
	struct se_portal_group *se_tpg;
	sense_reason_t rc;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation.  From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);
	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret)
		return ret;
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
		target_put_sess_cmd(se_cmd);
		return 0;
	}

	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd, rc);
		return 0;
	}

	/*
	 * Save pointers for SGLs containing protection information,
	 * if present.
	 */
	if (sgl_prot_count) {
		se_cmd->t_prot_sg = sgl_prot;
		se_cmd->t_prot_nents = sgl_prot_count;
		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
	}

	/*
	 * When a non zero sgl_count has been passed perform SGL passthrough
	 * mapping for pre-allocated fabric memory instead of having target
	 * core perform an internal SGL allocation.
	 */
	if (sgl_count != 0) {
		BUG_ON(!sgl);

		/*
		 * A work-around for tcm_loop as some userspace code via
		 * scsi-generic do not memset their associated read buffers,
		 * so go ahead and do that here for type non-data CDBs.  Also
		 * note that this is currently guaranteed to be a single SGL
		 * for this case by target core in target_setup_cmd_from_cdb()
		 * -> transport_generic_cmd_sequencer().
		 */
		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
		     se_cmd->data_direction == DMA_FROM_DEVICE) {
			unsigned char *buf = NULL;

			if (sgl)
				buf = kmap(sg_page(sgl)) + sgl->offset;

			if (buf) {
				memset(buf, 0, sgl->length);
				kunmap(sg_page(sgl));
			}
		}

		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
				sgl_bidi, sgl_bidi_count);
		if (rc != 0) {
			transport_generic_request_failure(se_cmd, rc);
			return 0;
		}
	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state.
	 */
	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);

/*
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 *
 * It also assumes internal target core SGL memory allocation.
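 *
 * Illustrative sketch only (hypothetical fabric code, not taken from an
 * in-tree fabric driver; the my_fabric_* names are made up):
 *
 *	se_cmd->tag = my_fabric_tag;
 *	if (target_submit_cmd(se_cmd, se_sess, cdb, sense_buf, unpacked_lun,
 *			      data_len, TCM_SIMPLE_TAG, DMA_FROM_DEVICE,
 *			      TARGET_SCF_ACK_KREF))
 *		my_fabric_shutdown_cleanup(se_cmd);
 *
 * where a non zero return indicates active I/O shutdown as described above.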
1552 */ 1553 int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, 1554 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, 1555 u32 data_length, int task_attr, int data_dir, int flags) 1556 { 1557 return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, 1558 unpacked_lun, data_length, task_attr, data_dir, 1559 flags, NULL, 0, NULL, 0, NULL, 0); 1560 } 1561 EXPORT_SYMBOL(target_submit_cmd); 1562 1563 static void target_complete_tmr_failure(struct work_struct *work) 1564 { 1565 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); 1566 1567 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; 1568 se_cmd->se_tfo->queue_tm_rsp(se_cmd); 1569 1570 transport_cmd_check_stop_to_fabric(se_cmd); 1571 } 1572 1573 /** 1574 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd 1575 * for TMR CDBs 1576 * 1577 * @se_cmd: command descriptor to submit 1578 * @se_sess: associated se_sess for endpoint 1579 * @sense: pointer to SCSI sense buffer 1580 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1581 * @fabric_context: fabric context for TMR req 1582 * @tm_type: Type of TM request 1583 * @gfp: gfp type for caller 1584 * @tag: referenced task tag for TMR_ABORT_TASK 1585 * @flags: submit cmd flags 1586 * 1587 * Callable from all contexts. 1588 **/ 1589 1590 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 1591 unsigned char *sense, u64 unpacked_lun, 1592 void *fabric_tmr_ptr, unsigned char tm_type, 1593 gfp_t gfp, u64 tag, int flags) 1594 { 1595 struct se_portal_group *se_tpg; 1596 int ret; 1597 1598 se_tpg = se_sess->se_tpg; 1599 BUG_ON(!se_tpg); 1600 1601 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1602 0, DMA_NONE, TCM_SIMPLE_TAG, sense); 1603 /* 1604 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req 1605 * allocation failure. 1606 */ 1607 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); 1608 if (ret < 0) 1609 return -ENOMEM; 1610 1611 if (tm_type == TMR_ABORT_TASK) 1612 se_cmd->se_tmr_req->ref_task_tag = tag; 1613 1614 /* See target_submit_cmd for commentary */ 1615 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); 1616 if (ret) { 1617 core_tmr_release_req(se_cmd->se_tmr_req); 1618 return ret; 1619 } 1620 1621 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); 1622 if (ret) { 1623 /* 1624 * For callback during failure handling, push this work off 1625 * to process context with TMR_LUN_DOES_NOT_EXIST status. 1626 */ 1627 INIT_WORK(&se_cmd->work, target_complete_tmr_failure); 1628 schedule_work(&se_cmd->work); 1629 return 0; 1630 } 1631 transport_generic_handle_tmr(se_cmd); 1632 return 0; 1633 } 1634 EXPORT_SYMBOL(target_submit_tmr); 1635 1636 /* 1637 * If the cmd is active, request it to be stopped and sleep until it 1638 * has completed. 
1639 */ 1640 bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) 1641 __releases(&cmd->t_state_lock) 1642 __acquires(&cmd->t_state_lock) 1643 { 1644 bool was_active = false; 1645 1646 if (cmd->transport_state & CMD_T_BUSY) { 1647 cmd->transport_state |= CMD_T_REQUEST_STOP; 1648 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 1649 1650 pr_debug("cmd %p waiting to complete\n", cmd); 1651 wait_for_completion(&cmd->task_stop_comp); 1652 pr_debug("cmd %p stopped successfully\n", cmd); 1653 1654 spin_lock_irqsave(&cmd->t_state_lock, *flags); 1655 cmd->transport_state &= ~CMD_T_REQUEST_STOP; 1656 cmd->transport_state &= ~CMD_T_BUSY; 1657 was_active = true; 1658 } 1659 1660 return was_active; 1661 } 1662 1663 /* 1664 * Handle SAM-esque emulation for generic transport request failures. 1665 */ 1666 void transport_generic_request_failure(struct se_cmd *cmd, 1667 sense_reason_t sense_reason) 1668 { 1669 int ret = 0, post_ret = 0; 1670 1671 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx" 1672 " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]); 1673 pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n", 1674 cmd->se_tfo->get_cmd_state(cmd), 1675 cmd->t_state, sense_reason); 1676 pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", 1677 (cmd->transport_state & CMD_T_ACTIVE) != 0, 1678 (cmd->transport_state & CMD_T_STOP) != 0, 1679 (cmd->transport_state & CMD_T_SENT) != 0); 1680 1681 /* 1682 * For SAM Task Attribute emulation for failed struct se_cmd 1683 */ 1684 transport_complete_task_attr(cmd); 1685 /* 1686 * Handle special case for COMPARE_AND_WRITE failure, where the 1687 * callback is expected to drop the per device ->caw_sem. 1688 */ 1689 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 1690 cmd->transport_complete_callback) 1691 cmd->transport_complete_callback(cmd, false, &post_ret); 1692 1693 switch (sense_reason) { 1694 case TCM_NON_EXISTENT_LUN: 1695 case TCM_UNSUPPORTED_SCSI_OPCODE: 1696 case TCM_INVALID_CDB_FIELD: 1697 case TCM_INVALID_PARAMETER_LIST: 1698 case TCM_PARAMETER_LIST_LENGTH_ERROR: 1699 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 1700 case TCM_UNKNOWN_MODE_PAGE: 1701 case TCM_WRITE_PROTECTED: 1702 case TCM_ADDRESS_OUT_OF_RANGE: 1703 case TCM_CHECK_CONDITION_ABORT_CMD: 1704 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1705 case TCM_CHECK_CONDITION_NOT_READY: 1706 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: 1707 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: 1708 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: 1709 break; 1710 case TCM_OUT_OF_RESOURCES: 1711 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1712 break; 1713 case TCM_RESERVATION_CONFLICT: 1714 /* 1715 * No SENSE Data payload for this case, set SCSI Status 1716 * and queue the response to $FABRIC_MOD. 1717 * 1718 * Uses linux/include/scsi/scsi.h SAM status codes defs 1719 */ 1720 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1721 /* 1722 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 1723 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 1724 * CONFLICT STATUS. 
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
					cmd->orig_fe_lun, 0x2C,
					ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
		}
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		goto check_stop;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0], sense_reason);
		sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}

	ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

check_stop:
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
EXPORT_SYMBOL(transport_generic_request_failure);

void __target_execute_cmd(struct se_cmd *cmd)
{
	sense_reason_t ret;

	if (cmd->execute_cmd) {
		ret = cmd->execute_cmd(cmd);
		if (ret) {
			spin_lock_irq(&cmd->t_state_lock);
			cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
			spin_unlock_irq(&cmd->t_state_lock);

			transport_generic_request_failure(cmd, ret);
		}
	}
}

static int target_write_prot_action(struct se_cmd *cmd)
{
	u32 sectors;
	/*
	 * Perform WRITE_INSERT of PI using software emulation when backend
	 * device has PI enabled, if the transport has not already generated
	 * PI using hardware WRITE_INSERT offload.
	 */
	switch (cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
			sbc_dif_generate(cmd);
		break;
	case TARGET_PROT_DOUT_STRIP:
		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
			break;

		sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
		cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
					     sectors, 0, cmd->t_prot_sg, 0);
		if (unlikely(cmd->pi_err)) {
			spin_lock_irq(&cmd->t_state_lock);
			cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
			spin_unlock_irq(&cmd->t_state_lock);
			transport_generic_request_failure(cmd, cmd->pi_err);
			return -1;
		}
		break;
	default:
		break;
	}

	return 0;
}

static bool target_handle_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return false;

	/*
	 * HEAD_OF_QUEUE commands are never delayed here; they are passed
	 * straight through for immediate execution ahead of any queued work.
	 */
	switch (cmd->sam_task_attr) {
	case TCM_HEAD_TAG:
		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
			 cmd->t_task_cdb[0]);
		return false;
	case TCM_ORDERED_TAG:
		atomic_inc_mb(&dev->dev_ordered_sync);

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
			 cmd->t_task_cdb[0]);

		/*
		 * Execute an ORDERED command if no other older commands
		 * exist that need to be completed first.
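		 * (If older commands are still outstanding, the command is
		 * parked on dev->delayed_cmd_list further below and re-issued
		 * later via target_restart_delayed_cmds().)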
		 */
		if (!atomic_read(&dev->simple_cmds))
			return false;
		break;
	default:
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc_mb(&dev->simple_cmds);
		break;
	}

	if (atomic_read(&dev->dev_ordered_sync) == 0)
		return false;

	spin_lock(&dev->delayed_cmd_lock);
	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
	spin_unlock(&dev->delayed_cmd_lock);

	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
		 cmd->t_task_cdb[0], cmd->sam_task_attr);
	return true;
}

void target_execute_cmd(struct se_cmd *cmd)
{
	/*
	 * If the received CDB has already been aborted stop processing it here.
	 */
	if (transport_check_aborted_status(cmd, 1))
		return;

	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			__func__, __LINE__, cmd->tag);

		spin_unlock_irq(&cmd->t_state_lock);
		complete_all(&cmd->t_transport_stop_comp);
		return;
	}

	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	if (target_write_prot_action(cmd))
		return;

	if (target_handle_task_attr(cmd)) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
		spin_unlock_irq(&cmd->t_state_lock);
		return;
	}

	__target_execute_cmd(cmd);
}
EXPORT_SYMBOL(target_execute_cmd);

/*
 * Process all commands up to the last received ORDERED task attribute which
 * requires another blocking boundary
 */
static void target_restart_delayed_cmds(struct se_device *dev)
{
	for (;;) {
		struct se_cmd *cmd;

		spin_lock(&dev->delayed_cmd_lock);
		if (list_empty(&dev->delayed_cmd_list)) {
			spin_unlock(&dev->delayed_cmd_lock);
			break;
		}

		cmd = list_entry(dev->delayed_cmd_list.next,
				 struct se_cmd, se_delayed_node);
		list_del(&cmd->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		__target_execute_cmd(cmd);

		if (cmd->sam_task_attr == TCM_ORDERED_TAG)
			break;
	}
}

/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
1931 */ 1932 static void transport_complete_task_attr(struct se_cmd *cmd) 1933 { 1934 struct se_device *dev = cmd->se_dev; 1935 1936 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1937 return; 1938 1939 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { 1940 atomic_dec_mb(&dev->simple_cmds); 1941 dev->dev_cur_ordered_id++; 1942 pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n", 1943 dev->dev_cur_ordered_id); 1944 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { 1945 dev->dev_cur_ordered_id++; 1946 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", 1947 dev->dev_cur_ordered_id); 1948 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { 1949 atomic_dec_mb(&dev->dev_ordered_sync); 1950 1951 dev->dev_cur_ordered_id++; 1952 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", 1953 dev->dev_cur_ordered_id); 1954 } 1955 1956 target_restart_delayed_cmds(dev); 1957 } 1958 1959 static void transport_complete_qf(struct se_cmd *cmd) 1960 { 1961 int ret = 0; 1962 1963 transport_complete_task_attr(cmd); 1964 1965 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 1966 trace_target_cmd_complete(cmd); 1967 ret = cmd->se_tfo->queue_status(cmd); 1968 goto out; 1969 } 1970 1971 switch (cmd->data_direction) { 1972 case DMA_FROM_DEVICE: 1973 trace_target_cmd_complete(cmd); 1974 ret = cmd->se_tfo->queue_data_in(cmd); 1975 break; 1976 case DMA_TO_DEVICE: 1977 if (cmd->se_cmd_flags & SCF_BIDI) { 1978 ret = cmd->se_tfo->queue_data_in(cmd); 1979 break; 1980 } 1981 /* Fall through for DMA_TO_DEVICE */ 1982 case DMA_NONE: 1983 trace_target_cmd_complete(cmd); 1984 ret = cmd->se_tfo->queue_status(cmd); 1985 break; 1986 default: 1987 break; 1988 } 1989 1990 out: 1991 if (ret < 0) { 1992 transport_handle_queue_full(cmd, cmd->se_dev); 1993 return; 1994 } 1995 transport_lun_remove_cmd(cmd); 1996 transport_cmd_check_stop_to_fabric(cmd); 1997 } 1998 1999 static void transport_handle_queue_full( 2000 struct se_cmd *cmd, 2001 struct se_device *dev) 2002 { 2003 spin_lock_irq(&dev->qf_cmd_lock); 2004 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 2005 atomic_inc_mb(&dev->dev_qf_count); 2006 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 2007 2008 schedule_work(&cmd->se_dev->qf_work_queue); 2009 } 2010 2011 static bool target_read_prot_action(struct se_cmd *cmd) 2012 { 2013 switch (cmd->prot_op) { 2014 case TARGET_PROT_DIN_STRIP: 2015 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { 2016 u32 sectors = cmd->data_length >> 2017 ilog2(cmd->se_dev->dev_attrib.block_size); 2018 2019 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 2020 sectors, 0, cmd->t_prot_sg, 2021 0); 2022 if (cmd->pi_err) 2023 return true; 2024 } 2025 break; 2026 case TARGET_PROT_DIN_INSERT: 2027 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) 2028 break; 2029 2030 sbc_dif_generate(cmd); 2031 break; 2032 default: 2033 break; 2034 } 2035 2036 return false; 2037 } 2038 2039 static void target_complete_ok_work(struct work_struct *work) 2040 { 2041 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2042 int ret; 2043 2044 /* 2045 * Check if we need to move delayed/dormant tasks from cmds on the 2046 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 2047 * Attribute. 
2048 */ 2049 transport_complete_task_attr(cmd); 2050 2051 /* 2052 * Check to schedule QUEUE_FULL work, or execute an existing 2053 * cmd->transport_qf_callback() 2054 */ 2055 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 2056 schedule_work(&cmd->se_dev->qf_work_queue); 2057 2058 /* 2059 * Check if we need to send a sense buffer from 2060 * the struct se_cmd in question. 2061 */ 2062 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 2063 WARN_ON(!cmd->scsi_status); 2064 ret = transport_send_check_condition_and_sense( 2065 cmd, 0, 1); 2066 if (ret == -EAGAIN || ret == -ENOMEM) 2067 goto queue_full; 2068 2069 transport_lun_remove_cmd(cmd); 2070 transport_cmd_check_stop_to_fabric(cmd); 2071 return; 2072 } 2073 /* 2074 * Check for a callback, used by amongst other things 2075 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. 2076 */ 2077 if (cmd->transport_complete_callback) { 2078 sense_reason_t rc; 2079 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); 2080 bool zero_dl = !(cmd->data_length); 2081 int post_ret = 0; 2082 2083 rc = cmd->transport_complete_callback(cmd, true, &post_ret); 2084 if (!rc && !post_ret) { 2085 if (caw && zero_dl) 2086 goto queue_rsp; 2087 2088 return; 2089 } else if (rc) { 2090 ret = transport_send_check_condition_and_sense(cmd, 2091 rc, 0); 2092 if (ret == -EAGAIN || ret == -ENOMEM) 2093 goto queue_full; 2094 2095 transport_lun_remove_cmd(cmd); 2096 transport_cmd_check_stop_to_fabric(cmd); 2097 return; 2098 } 2099 } 2100 2101 queue_rsp: 2102 switch (cmd->data_direction) { 2103 case DMA_FROM_DEVICE: 2104 atomic_long_add(cmd->data_length, 2105 &cmd->se_lun->lun_stats.tx_data_octets); 2106 /* 2107 * Perform READ_STRIP of PI using software emulation when 2108 * backend had PI enabled, if the transport will not be 2109 * performing hardware READ_STRIP offload. 
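 * target_read_prot_action() consults se_sess->sup_prot_ops and falls back
 * to sbc_dif_verify(); any failure is reported through cmd->pi_err below.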
2110 */ 2111 if (target_read_prot_action(cmd)) { 2112 ret = transport_send_check_condition_and_sense(cmd, 2113 cmd->pi_err, 0); 2114 if (ret == -EAGAIN || ret == -ENOMEM) 2115 goto queue_full; 2116 2117 transport_lun_remove_cmd(cmd); 2118 transport_cmd_check_stop_to_fabric(cmd); 2119 return; 2120 } 2121 2122 trace_target_cmd_complete(cmd); 2123 ret = cmd->se_tfo->queue_data_in(cmd); 2124 if (ret == -EAGAIN || ret == -ENOMEM) 2125 goto queue_full; 2126 break; 2127 case DMA_TO_DEVICE: 2128 atomic_long_add(cmd->data_length, 2129 &cmd->se_lun->lun_stats.rx_data_octets); 2130 /* 2131 * Check if we need to send READ payload for BIDI-COMMAND 2132 */ 2133 if (cmd->se_cmd_flags & SCF_BIDI) { 2134 atomic_long_add(cmd->data_length, 2135 &cmd->se_lun->lun_stats.tx_data_octets); 2136 ret = cmd->se_tfo->queue_data_in(cmd); 2137 if (ret == -EAGAIN || ret == -ENOMEM) 2138 goto queue_full; 2139 break; 2140 } 2141 /* Fall through for DMA_TO_DEVICE */ 2142 case DMA_NONE: 2143 trace_target_cmd_complete(cmd); 2144 ret = cmd->se_tfo->queue_status(cmd); 2145 if (ret == -EAGAIN || ret == -ENOMEM) 2146 goto queue_full; 2147 break; 2148 default: 2149 break; 2150 } 2151 2152 transport_lun_remove_cmd(cmd); 2153 transport_cmd_check_stop_to_fabric(cmd); 2154 return; 2155 2156 queue_full: 2157 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 2158 " data_direction: %d\n", cmd, cmd->data_direction); 2159 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 2160 transport_handle_queue_full(cmd, cmd->se_dev); 2161 } 2162 2163 static inline void transport_free_sgl(struct scatterlist *sgl, int nents) 2164 { 2165 struct scatterlist *sg; 2166 int count; 2167 2168 for_each_sg(sgl, sg, nents, count) 2169 __free_page(sg_page(sg)); 2170 2171 kfree(sgl); 2172 } 2173 2174 static inline void transport_reset_sgl_orig(struct se_cmd *cmd) 2175 { 2176 /* 2177 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE 2178 * emulation, and free + reset pointers if necessary.. 2179 */ 2180 if (!cmd->t_data_sg_orig) 2181 return; 2182 2183 kfree(cmd->t_data_sg); 2184 cmd->t_data_sg = cmd->t_data_sg_orig; 2185 cmd->t_data_sg_orig = NULL; 2186 cmd->t_data_nents = cmd->t_data_nents_orig; 2187 cmd->t_data_nents_orig = 0; 2188 } 2189 2190 static inline void transport_free_pages(struct se_cmd *cmd) 2191 { 2192 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2193 transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); 2194 cmd->t_prot_sg = NULL; 2195 cmd->t_prot_nents = 0; 2196 } 2197 2198 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { 2199 /* 2200 * Release special case READ buffer payload required for 2201 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE 2202 */ 2203 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { 2204 transport_free_sgl(cmd->t_bidi_data_sg, 2205 cmd->t_bidi_data_nents); 2206 cmd->t_bidi_data_sg = NULL; 2207 cmd->t_bidi_data_nents = 0; 2208 } 2209 transport_reset_sgl_orig(cmd); 2210 return; 2211 } 2212 transport_reset_sgl_orig(cmd); 2213 2214 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2215 cmd->t_data_sg = NULL; 2216 cmd->t_data_nents = 0; 2217 2218 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 2219 cmd->t_bidi_data_sg = NULL; 2220 cmd->t_bidi_data_nents = 0; 2221 } 2222 2223 /** 2224 * transport_put_cmd - release a reference to a command 2225 * @cmd: command to release 2226 * 2227 * This routine releases our reference to the command and frees it if possible. 
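 * The actual free happens in target_release_cmd_kref() once the final
 * target_put_sess_cmd() reference is dropped.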
2228 */ 2229 static int transport_put_cmd(struct se_cmd *cmd) 2230 { 2231 BUG_ON(!cmd->se_tfo); 2232 /* 2233 * If this cmd has been setup with target_get_sess_cmd(), drop 2234 * the kref and call ->release_cmd() in kref callback. 2235 */ 2236 return target_put_sess_cmd(cmd); 2237 } 2238 2239 void *transport_kmap_data_sg(struct se_cmd *cmd) 2240 { 2241 struct scatterlist *sg = cmd->t_data_sg; 2242 struct page **pages; 2243 int i; 2244 2245 /* 2246 * We need to take into account a possible offset here for fabrics like 2247 * tcm_loop that may be using a contig buffer from the SCSI midlayer for 2248 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 2249 */ 2250 if (!cmd->t_data_nents) 2251 return NULL; 2252 2253 BUG_ON(!sg); 2254 if (cmd->t_data_nents == 1) 2255 return kmap(sg_page(sg)) + sg->offset; 2256 2257 /* >1 page. use vmap */ 2258 pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL); 2259 if (!pages) 2260 return NULL; 2261 2262 /* convert sg[] to pages[] */ 2263 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 2264 pages[i] = sg_page(sg); 2265 } 2266 2267 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 2268 kfree(pages); 2269 if (!cmd->t_data_vmap) 2270 return NULL; 2271 2272 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 2273 } 2274 EXPORT_SYMBOL(transport_kmap_data_sg); 2275 2276 void transport_kunmap_data_sg(struct se_cmd *cmd) 2277 { 2278 if (!cmd->t_data_nents) { 2279 return; 2280 } else if (cmd->t_data_nents == 1) { 2281 kunmap(sg_page(cmd->t_data_sg)); 2282 return; 2283 } 2284 2285 vunmap(cmd->t_data_vmap); 2286 cmd->t_data_vmap = NULL; 2287 } 2288 EXPORT_SYMBOL(transport_kunmap_data_sg); 2289 2290 int 2291 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, 2292 bool zero_page) 2293 { 2294 struct scatterlist *sg; 2295 struct page *page; 2296 gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0; 2297 unsigned int nent; 2298 int i = 0; 2299 2300 nent = DIV_ROUND_UP(length, PAGE_SIZE); 2301 sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL); 2302 if (!sg) 2303 return -ENOMEM; 2304 2305 sg_init_table(sg, nent); 2306 2307 while (length) { 2308 u32 page_len = min_t(u32, length, PAGE_SIZE); 2309 page = alloc_page(GFP_KERNEL | zero_flag); 2310 if (!page) 2311 goto out; 2312 2313 sg_set_page(&sg[i], page, page_len, 0); 2314 length -= page_len; 2315 i++; 2316 } 2317 *sgl = sg; 2318 *nents = nent; 2319 return 0; 2320 2321 out: 2322 while (i > 0) { 2323 i--; 2324 __free_page(sg_page(&sg[i])); 2325 } 2326 kfree(sg); 2327 return -ENOMEM; 2328 } 2329 2330 /* 2331 * Allocate any required resources to execute the command. For writes we 2332 * might not have the payload yet, so notify the fabric via a call to 2333 * ->write_pending instead. Otherwise place it on the execution queue. 2334 */ 2335 sense_reason_t 2336 transport_generic_new_cmd(struct se_cmd *cmd) 2337 { 2338 int ret = 0; 2339 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); 2340 2341 if (cmd->prot_op != TARGET_PROT_NORMAL && 2342 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2343 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, 2344 cmd->prot_length, true); 2345 if (ret < 0) 2346 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2347 } 2348 2349 /* 2350 * Determine if the TCM fabric module has already allocated physical 2351 * memory, and is directly calling transport_generic_map_mem_to_cmd() 2352 * beforehand.
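 * Fabrics that set SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC provide their own
 * data SGLs, so only the COMPARE_AND_WRITE special case below allocates
 * a buffer in that path.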
2353 */ 2354 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 2355 cmd->data_length) { 2356 2357 if ((cmd->se_cmd_flags & SCF_BIDI) || 2358 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { 2359 u32 bidi_length; 2360 2361 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) 2362 bidi_length = cmd->t_task_nolb * 2363 cmd->se_dev->dev_attrib.block_size; 2364 else 2365 bidi_length = cmd->data_length; 2366 2367 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2368 &cmd->t_bidi_data_nents, 2369 bidi_length, zero_flag); 2370 if (ret < 0) 2371 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2372 } 2373 2374 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2375 cmd->data_length, zero_flag); 2376 if (ret < 0) 2377 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2378 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 2379 cmd->data_length) { 2380 /* 2381 * Special case for COMPARE_AND_WRITE with fabrics 2382 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. 2383 */ 2384 u32 caw_length = cmd->t_task_nolb * 2385 cmd->se_dev->dev_attrib.block_size; 2386 2387 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2388 &cmd->t_bidi_data_nents, 2389 caw_length, zero_flag); 2390 if (ret < 0) 2391 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2392 } 2393 /* 2394 * If this command is not a write we can execute it right here, 2395 * for write buffers we need to notify the fabric driver first 2396 * and let it call back once the write buffers are ready. 2397 */ 2398 target_add_to_state_list(cmd); 2399 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { 2400 target_execute_cmd(cmd); 2401 return 0; 2402 } 2403 transport_cmd_check_stop(cmd, false, true); 2404 2405 ret = cmd->se_tfo->write_pending(cmd); 2406 if (ret == -EAGAIN || ret == -ENOMEM) 2407 goto queue_full; 2408 2409 /* fabric drivers should only return -EAGAIN or -ENOMEM as error */ 2410 WARN_ON(ret); 2411 2412 return (!ret) ? 
0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2413 2414 queue_full: 2415 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd); 2416 cmd->t_state = TRANSPORT_COMPLETE_QF_WP; 2417 transport_handle_queue_full(cmd, cmd->se_dev); 2418 return 0; 2419 } 2420 EXPORT_SYMBOL(transport_generic_new_cmd); 2421 2422 static void transport_write_pending_qf(struct se_cmd *cmd) 2423 { 2424 int ret; 2425 2426 ret = cmd->se_tfo->write_pending(cmd); 2427 if (ret == -EAGAIN || ret == -ENOMEM) { 2428 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", 2429 cmd); 2430 transport_handle_queue_full(cmd, cmd->se_dev); 2431 } 2432 } 2433 2434 static bool 2435 __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, 2436 unsigned long *flags); 2437 2438 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) 2439 { 2440 unsigned long flags; 2441 2442 spin_lock_irqsave(&cmd->t_state_lock, flags); 2443 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); 2444 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2445 } 2446 2447 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2448 { 2449 int ret = 0; 2450 bool aborted = false, tas = false; 2451 2452 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { 2453 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2454 target_wait_free_cmd(cmd, &aborted, &tas); 2455 2456 if (!aborted || tas) 2457 ret = transport_put_cmd(cmd); 2458 } else { 2459 if (wait_for_tasks) 2460 target_wait_free_cmd(cmd, &aborted, &tas); 2461 /* 2462 * Handle WRITE failure case where transport_generic_new_cmd() 2463 * has already added se_cmd to state_list, but fabric has 2464 * failed command before I/O submission. 2465 */ 2466 if (cmd->state_active) 2467 target_remove_from_state_list(cmd); 2468 2469 if (cmd->se_lun) 2470 transport_lun_remove_cmd(cmd); 2471 2472 if (!aborted || tas) 2473 ret = transport_put_cmd(cmd); 2474 } 2475 /* 2476 * If the task has been internally aborted due to TMR ABORT_TASK 2477 * or LUN_RESET, target_core_tmr.c is responsible for performing 2478 * the remaining calls to target_put_sess_cmd(), and not the 2479 * callers of this function. 2480 */ 2481 if (aborted) { 2482 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); 2483 wait_for_completion(&cmd->cmd_wait_comp); 2484 cmd->se_tfo->release_cmd(cmd); 2485 ret = 1; 2486 } 2487 return ret; 2488 } 2489 EXPORT_SYMBOL(transport_generic_free_cmd); 2490 2491 /* target_get_sess_cmd - Add command to active ->sess_cmd_list 2492 * @se_cmd: command descriptor to add 2493 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() 2494 */ 2495 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) 2496 { 2497 struct se_session *se_sess = se_cmd->se_sess; 2498 unsigned long flags; 2499 int ret = 0; 2500 2501 /* 2502 * Add a second kref if the fabric caller is expecting to handle 2503 * fabric acknowledgement that requires two target_put_sess_cmd() 2504 * invocations before se_cmd descriptor release.
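 * A typical fabric caller (illustrative, not any specific driver) passes
 * ack_kref=true from its submission path and then balances it with one
 * target_put_sess_cmd() on I/O completion and one on the final fabric
 * acknowledgement.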
2505 */ 2506 if (ack_kref) 2507 kref_get(&se_cmd->cmd_kref); 2508 2509 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2510 if (se_sess->sess_tearing_down) { 2511 ret = -ESHUTDOWN; 2512 goto out; 2513 } 2514 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2515 out: 2516 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2517 2518 if (ret && ack_kref) 2519 target_put_sess_cmd(se_cmd); 2520 2521 return ret; 2522 } 2523 EXPORT_SYMBOL(target_get_sess_cmd); 2524 2525 static void target_free_cmd_mem(struct se_cmd *cmd) 2526 { 2527 transport_free_pages(cmd); 2528 2529 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 2530 core_tmr_release_req(cmd->se_tmr_req); 2531 if (cmd->t_task_cdb != cmd->__t_task_cdb) 2532 kfree(cmd->t_task_cdb); 2533 } 2534 2535 static void target_release_cmd_kref(struct kref *kref) 2536 { 2537 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2538 struct se_session *se_sess = se_cmd->se_sess; 2539 unsigned long flags; 2540 bool fabric_stop; 2541 2542 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2543 if (list_empty(&se_cmd->se_cmd_list)) { 2544 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2545 target_free_cmd_mem(se_cmd); 2546 se_cmd->se_tfo->release_cmd(se_cmd); 2547 return; 2548 } 2549 2550 spin_lock(&se_cmd->t_state_lock); 2551 fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP); 2552 spin_unlock(&se_cmd->t_state_lock); 2553 2554 if (se_cmd->cmd_wait_set || fabric_stop) { 2555 list_del_init(&se_cmd->se_cmd_list); 2556 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2557 target_free_cmd_mem(se_cmd); 2558 complete(&se_cmd->cmd_wait_comp); 2559 return; 2560 } 2561 list_del_init(&se_cmd->se_cmd_list); 2562 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2563 2564 target_free_cmd_mem(se_cmd); 2565 se_cmd->se_tfo->release_cmd(se_cmd); 2566 } 2567 2568 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put 2569 * @se_cmd: command descriptor to drop 2570 */ 2571 int target_put_sess_cmd(struct se_cmd *se_cmd) 2572 { 2573 struct se_session *se_sess = se_cmd->se_sess; 2574 2575 if (!se_sess) { 2576 target_free_cmd_mem(se_cmd); 2577 se_cmd->se_tfo->release_cmd(se_cmd); 2578 return 1; 2579 } 2580 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); 2581 } 2582 EXPORT_SYMBOL(target_put_sess_cmd); 2583 2584 /* target_sess_cmd_list_set_waiting - Flag all commands in 2585 * sess_cmd_list to complete cmd_wait_comp. Set 2586 * sess_tearing_down so no more commands are queued. 
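 * Commands already dispatched keep running; they are only marked with
 * CMD_T_FABRIC_STOP and cmd_wait_set so later teardown can wait on
 * cmd_wait_comp.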
2587 * @se_sess: session to flag 2588 */ 2589 void target_sess_cmd_list_set_waiting(struct se_session *se_sess) 2590 { 2591 struct se_cmd *se_cmd; 2592 unsigned long flags; 2593 int rc; 2594 2595 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2596 if (se_sess->sess_tearing_down) { 2597 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2598 return; 2599 } 2600 se_sess->sess_tearing_down = 1; 2601 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); 2602 2603 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) { 2604 rc = kref_get_unless_zero(&se_cmd->cmd_kref); 2605 if (rc) { 2606 se_cmd->cmd_wait_set = 1; 2607 spin_lock(&se_cmd->t_state_lock); 2608 se_cmd->transport_state |= CMD_T_FABRIC_STOP; 2609 spin_unlock(&se_cmd->t_state_lock); 2610 } 2611 } 2612 2613 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2614 } 2615 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); 2616 2617 /* target_wait_for_sess_cmds - Wait for outstanding descriptors 2618 * @se_sess: session to wait for active I/O 2619 */ 2620 void target_wait_for_sess_cmds(struct se_session *se_sess) 2621 { 2622 struct se_cmd *se_cmd, *tmp_cmd; 2623 unsigned long flags; 2624 bool tas; 2625 2626 list_for_each_entry_safe(se_cmd, tmp_cmd, 2627 &se_sess->sess_wait_list, se_cmd_list) { 2628 list_del_init(&se_cmd->se_cmd_list); 2629 2630 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" 2631 " %d\n", se_cmd, se_cmd->t_state, 2632 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2633 2634 spin_lock_irqsave(&se_cmd->t_state_lock, flags); 2635 tas = (se_cmd->transport_state & CMD_T_TAS); 2636 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 2637 2638 if (!target_put_sess_cmd(se_cmd)) { 2639 if (tas) 2640 target_put_sess_cmd(se_cmd); 2641 } 2642 2643 wait_for_completion(&se_cmd->cmd_wait_comp); 2644 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" 2645 " fabric state: %d\n", se_cmd, se_cmd->t_state, 2646 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2647 2648 se_cmd->se_tfo->release_cmd(se_cmd); 2649 } 2650 2651 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2652 WARN_ON(!list_empty(&se_sess->sess_cmd_list)); 2653 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2654 2655 } 2656 EXPORT_SYMBOL(target_wait_for_sess_cmds); 2657 2658 void transport_clear_lun_ref(struct se_lun *lun) 2659 { 2660 percpu_ref_kill(&lun->lun_ref); 2661 wait_for_completion(&lun->lun_ref_comp); 2662 } 2663 2664 static bool 2665 __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, 2666 bool *aborted, bool *tas, unsigned long *flags) 2667 __releases(&cmd->t_state_lock) 2668 __acquires(&cmd->t_state_lock) 2669 { 2670 2671 assert_spin_locked(&cmd->t_state_lock); 2672 WARN_ON_ONCE(!irqs_disabled()); 2673 2674 if (fabric_stop) 2675 cmd->transport_state |= CMD_T_FABRIC_STOP; 2676 2677 if (cmd->transport_state & CMD_T_ABORTED) 2678 *aborted = true; 2679 2680 if (cmd->transport_state & CMD_T_TAS) 2681 *tas = true; 2682 2683 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 2684 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2685 return false; 2686 2687 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 2688 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2689 return false; 2690 2691 if (!(cmd->transport_state & CMD_T_ACTIVE)) 2692 return false; 2693 2694 if (fabric_stop && *aborted) 2695 return false; 2696 2697 cmd->transport_state |= CMD_T_STOP; 2698 2699 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d," 2700 " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag, 2701 cmd->se_tfo->get_cmd_state(cmd), 
cmd->t_state); 2702 2703 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 2704 2705 wait_for_completion(&cmd->t_transport_stop_comp); 2706 2707 spin_lock_irqsave(&cmd->t_state_lock, *flags); 2708 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 2709 2710 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->" 2711 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); 2712 2713 return true; 2714 } 2715 2716 /** 2717 * transport_wait_for_tasks - wait for completion to occur 2718 * @cmd: command to wait 2719 * 2720 * Called from frontend fabric context to wait for storage engine 2721 * to pause and/or release frontend generated struct se_cmd. 2722 */ 2723 bool transport_wait_for_tasks(struct se_cmd *cmd) 2724 { 2725 unsigned long flags; 2726 bool ret, aborted = false, tas = false; 2727 2728 spin_lock_irqsave(&cmd->t_state_lock, flags); 2729 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); 2730 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2731 2732 return ret; 2733 } 2734 EXPORT_SYMBOL(transport_wait_for_tasks); 2735 2736 struct sense_info { 2737 u8 key; 2738 u8 asc; 2739 u8 ascq; 2740 bool add_sector_info; 2741 }; 2742 2743 static const struct sense_info sense_info_table[] = { 2744 [TCM_NO_SENSE] = { 2745 .key = NOT_READY 2746 }, 2747 [TCM_NON_EXISTENT_LUN] = { 2748 .key = ILLEGAL_REQUEST, 2749 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */ 2750 }, 2751 [TCM_UNSUPPORTED_SCSI_OPCODE] = { 2752 .key = ILLEGAL_REQUEST, 2753 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 2754 }, 2755 [TCM_SECTOR_COUNT_TOO_MANY] = { 2756 .key = ILLEGAL_REQUEST, 2757 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 2758 }, 2759 [TCM_UNKNOWN_MODE_PAGE] = { 2760 .key = ILLEGAL_REQUEST, 2761 .asc = 0x24, /* INVALID FIELD IN CDB */ 2762 }, 2763 [TCM_CHECK_CONDITION_ABORT_CMD] = { 2764 .key = ABORTED_COMMAND, 2765 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */ 2766 .ascq = 0x03, 2767 }, 2768 [TCM_INCORRECT_AMOUNT_OF_DATA] = { 2769 .key = ABORTED_COMMAND, 2770 .asc = 0x0c, /* WRITE ERROR */ 2771 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */ 2772 }, 2773 [TCM_INVALID_CDB_FIELD] = { 2774 .key = ILLEGAL_REQUEST, 2775 .asc = 0x24, /* INVALID FIELD IN CDB */ 2776 }, 2777 [TCM_INVALID_PARAMETER_LIST] = { 2778 .key = ILLEGAL_REQUEST, 2779 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ 2780 }, 2781 [TCM_PARAMETER_LIST_LENGTH_ERROR] = { 2782 .key = ILLEGAL_REQUEST, 2783 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ 2784 }, 2785 [TCM_UNEXPECTED_UNSOLICITED_DATA] = { 2786 .key = ILLEGAL_REQUEST, 2787 .asc = 0x0c, /* WRITE ERROR */ 2788 .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */ 2789 }, 2790 [TCM_SERVICE_CRC_ERROR] = { 2791 .key = ABORTED_COMMAND, 2792 .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */ 2793 .ascq = 0x05, /* N/A */ 2794 }, 2795 [TCM_SNACK_REJECTED] = { 2796 .key = ABORTED_COMMAND, 2797 .asc = 0x11, /* READ ERROR */ 2798 .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */ 2799 }, 2800 [TCM_WRITE_PROTECTED] = { 2801 .key = DATA_PROTECT, 2802 .asc = 0x27, /* WRITE PROTECTED */ 2803 }, 2804 [TCM_ADDRESS_OUT_OF_RANGE] = { 2805 .key = ILLEGAL_REQUEST, 2806 .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ 2807 }, 2808 [TCM_CHECK_CONDITION_UNIT_ATTENTION] = { 2809 .key = UNIT_ATTENTION, 2810 }, 2811 [TCM_CHECK_CONDITION_NOT_READY] = { 2812 .key = NOT_READY, 2813 }, 2814 [TCM_MISCOMPARE_VERIFY] = { 2815 .key = MISCOMPARE, 2816 .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */ 2817 .ascq = 0x00, 2818 }, 2819 [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = { 2820 .key = 
ABORTED_COMMAND, 2821 .asc = 0x10, 2822 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */ 2823 .add_sector_info = true, 2824 }, 2825 [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = { 2826 .key = ABORTED_COMMAND, 2827 .asc = 0x10, 2828 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ 2829 .add_sector_info = true, 2830 }, 2831 [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = { 2832 .key = ABORTED_COMMAND, 2833 .asc = 0x10, 2834 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ 2835 .add_sector_info = true, 2836 }, 2837 [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = { 2838 /* 2839 * Returning ILLEGAL REQUEST would cause immediate IO errors on 2840 * Solaris initiators. Returning NOT READY instead means the 2841 * operations will be retried a finite number of times and we 2842 * can survive intermittent errors. 2843 */ 2844 .key = NOT_READY, 2845 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */ 2846 }, 2847 }; 2848 2849 static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) 2850 { 2851 const struct sense_info *si; 2852 u8 *buffer = cmd->sense_buffer; 2853 int r = (__force int)reason; 2854 u8 asc, ascq; 2855 bool desc_format = target_sense_desc_format(cmd->se_dev); 2856 2857 if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key) 2858 si = &sense_info_table[r]; 2859 else 2860 si = &sense_info_table[(__force int) 2861 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE]; 2862 2863 if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) { 2864 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); 2865 WARN_ON_ONCE(asc == 0); 2866 } else if (si->asc == 0) { 2867 WARN_ON_ONCE(cmd->scsi_asc == 0); 2868 asc = cmd->scsi_asc; 2869 ascq = cmd->scsi_ascq; 2870 } else { 2871 asc = si->asc; 2872 ascq = si->ascq; 2873 } 2874 2875 scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq); 2876 if (si->add_sector_info) 2877 return scsi_set_sense_information(buffer, 2878 cmd->scsi_sense_length, 2879 cmd->bad_sector); 2880 2881 return 0; 2882 } 2883 2884 int 2885 transport_send_check_condition_and_sense(struct se_cmd *cmd, 2886 sense_reason_t reason, int from_transport) 2887 { 2888 unsigned long flags; 2889 2890 spin_lock_irqsave(&cmd->t_state_lock, flags); 2891 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 2892 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2893 return 0; 2894 } 2895 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 2896 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2897 2898 if (!from_transport) { 2899 int rc; 2900 2901 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 2902 cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 2903 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 2904 rc = translate_sense_reason(cmd, reason); 2905 if (rc) 2906 return rc; 2907 } 2908 2909 trace_target_cmd_complete(cmd); 2910 return cmd->se_tfo->queue_status(cmd); 2911 } 2912 EXPORT_SYMBOL(transport_send_check_condition_and_sense); 2913 2914 int transport_check_aborted_status(struct se_cmd *cmd, int send_status) 2915 { 2916 if (!(cmd->transport_state & CMD_T_ABORTED)) 2917 return 0; 2918 2919 /* 2920 * If cmd has been aborted but either no status is to be sent or it has 2921 * already been sent, just return 2922 */ 2923 if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) 2924 return 1; 2925 2926 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n", 2927 cmd->t_task_cdb[0], cmd->tag); 2928 2929 cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; 2930 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 2931 trace_target_cmd_complete(cmd); 2932 
cmd->se_tfo->queue_status(cmd); 2933 2934 return 1; 2935 } 2936 EXPORT_SYMBOL(transport_check_aborted_status); 2937 2938 void transport_send_task_abort(struct se_cmd *cmd) 2939 { 2940 unsigned long flags; 2941 2942 spin_lock_irqsave(&cmd->t_state_lock, flags); 2943 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) { 2944 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2945 return; 2946 } 2947 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2948 2949 /* 2950 * If there are still expected incoming fabric WRITEs, we wait 2951 * until they have completed before sending a TASK_ABORTED 2952 * response. This response with TASK_ABORTED status will be 2953 * queued back to fabric module by transport_check_aborted_status(). 2954 */ 2955 if (cmd->data_direction == DMA_TO_DEVICE) { 2956 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 2957 cmd->transport_state |= CMD_T_ABORTED; 2958 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; 2959 return; 2960 } 2961 } 2962 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 2963 2964 transport_lun_remove_cmd(cmd); 2965 2966 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n", 2967 cmd->t_task_cdb[0], cmd->tag); 2968 2969 trace_target_cmd_complete(cmd); 2970 cmd->se_tfo->queue_status(cmd); 2971 } 2972 2973 static void target_tmr_work(struct work_struct *work) 2974 { 2975 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2976 struct se_device *dev = cmd->se_dev; 2977 struct se_tmr_req *tmr = cmd->se_tmr_req; 2978 unsigned long flags; 2979 int ret; 2980 2981 spin_lock_irqsave(&cmd->t_state_lock, flags); 2982 if (cmd->transport_state & CMD_T_ABORTED) { 2983 tmr->response = TMR_FUNCTION_REJECTED; 2984 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2985 goto check_stop; 2986 } 2987 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2988 2989 switch (tmr->function) { 2990 case TMR_ABORT_TASK: 2991 core_tmr_abort_task(dev, tmr, cmd->se_sess); 2992 break; 2993 case TMR_ABORT_TASK_SET: 2994 case TMR_CLEAR_ACA: 2995 case TMR_CLEAR_TASK_SET: 2996 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; 2997 break; 2998 case TMR_LUN_RESET: 2999 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); 3000 tmr->response = (!ret) ?
TMR_FUNCTION_COMPLETE : 3001 TMR_FUNCTION_REJECTED; 3002 if (tmr->response == TMR_FUNCTION_COMPLETE) { 3003 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 3004 cmd->orig_fe_lun, 0x29, 3005 ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED); 3006 } 3007 break; 3008 case TMR_TARGET_WARM_RESET: 3009 tmr->response = TMR_FUNCTION_REJECTED; 3010 break; 3011 case TMR_TARGET_COLD_RESET: 3012 tmr->response = TMR_FUNCTION_REJECTED; 3013 break; 3014 default: 3015 pr_err("Unknown TMR function: 0x%02x.\n", 3016 tmr->function); 3017 tmr->response = TMR_FUNCTION_REJECTED; 3018 break; 3019 } 3020 3021 spin_lock_irqsave(&cmd->t_state_lock, flags); 3022 if (cmd->transport_state & CMD_T_ABORTED) { 3023 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3024 goto check_stop; 3025 } 3026 cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 3027 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3028 3029 cmd->se_tfo->queue_tm_rsp(cmd); 3030 3031 check_stop: 3032 transport_cmd_check_stop_to_fabric(cmd); 3033 } 3034 3035 int transport_generic_handle_tmr( 3036 struct se_cmd *cmd) 3037 { 3038 unsigned long flags; 3039 3040 spin_lock_irqsave(&cmd->t_state_lock, flags); 3041 cmd->transport_state |= CMD_T_ACTIVE; 3042 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3043 3044 INIT_WORK(&cmd->work, target_tmr_work); 3045 queue_work(cmd->se_dev->tmr_wq, &cmd->work); 3046 return 0; 3047 } 3048 EXPORT_SYMBOL(transport_generic_handle_tmr); 3049 3050 bool 3051 target_check_wce(struct se_device *dev) 3052 { 3053 bool wce = false; 3054 3055 if (dev->transport->get_write_cache) 3056 wce = dev->transport->get_write_cache(dev); 3057 else if (dev->dev_attrib.emulate_write_cache > 0) 3058 wce = true; 3059 3060 return wce; 3061 } 3062 3063 bool 3064 target_check_fua(struct se_device *dev) 3065 { 3066 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0; 3067 } 3068
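/*
 * Illustrative sketch only (not taken from any in-tree fabric driver):
 * the rough lifecycle a fabric module drives against the exports in
 * this file. my_fabric_queue_cmd() is a hypothetical entry point.
 *
 *	static void my_fabric_queue_cmd(struct se_cmd *se_cmd)
 *	{
 *		sense_reason_t rc;
 *
 *		if (target_get_sess_cmd(se_cmd, true) < 0)
 *			return;		// -ESHUTDOWN: session tearing down
 *
 *		// Allocates data buffers; for writes the engine calls
 *		// ->write_pending() and resumes via target_execute_cmd()
 *		// once the payload has arrived.
 *		rc = transport_generic_new_cmd(se_cmd);
 *		if (rc)
 *			transport_generic_request_failure(se_cmd, rc);
 *	}
 *
 *	// Once queue_data_in()/queue_status() has gone out on the wire,
 *	// the fabric drops its references:
 *	//	transport_generic_free_cmd(se_cmd, 0);
 */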