/*******************************************************************************
 * Filename: target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_"
				"cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
				"cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
			WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}

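/*
 * Best-effort, one-time loading of the default backend subsystem modules;
 * a failure to load any individual backend is logged but is not fatal.
 */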
void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}

struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	kref_init(&se_sess->sess_kref);
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);

int transport_alloc_session_tags(struct se_session *se_sess,
			unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
					GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!se_sess->sess_cmd_map) {
		se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
		if (!se_sess->sess_cmd_map) {
			pr_err("Unable to allocate se_sess->sess_cmd_map\n");
			return -ENOMEM;
		}
	}

	rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool,"
			" tag_num: %u\n", tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);

struct se_session *transport_init_session_tags(unsigned int tag_num,
			unsigned int tag_size,
			enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	se_sess = transport_init_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session_tags);

299 */ 300 void __transport_register_session( 301 struct se_portal_group *se_tpg, 302 struct se_node_acl *se_nacl, 303 struct se_session *se_sess, 304 void *fabric_sess_ptr) 305 { 306 const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo; 307 unsigned char buf[PR_REG_ISID_LEN]; 308 309 se_sess->se_tpg = se_tpg; 310 se_sess->fabric_sess_ptr = fabric_sess_ptr; 311 /* 312 * Used by struct se_node_acl's under ConfigFS to locate active se_session-t 313 * 314 * Only set for struct se_session's that will actually be moving I/O. 315 * eg: *NOT* discovery sessions. 316 */ 317 if (se_nacl) { 318 /* 319 * 320 * Determine if fabric allows for T10-PI feature bits exposed to 321 * initiators for device backends with !dev->dev_attrib.pi_prot_type. 322 * 323 * If so, then always save prot_type on a per se_node_acl node 324 * basis and re-instate the previous sess_prot_type to avoid 325 * disabling PI from below any previously initiator side 326 * registered LUNs. 327 */ 328 if (se_nacl->saved_prot_type) 329 se_sess->sess_prot_type = se_nacl->saved_prot_type; 330 else if (tfo->tpg_check_prot_fabric_only) 331 se_sess->sess_prot_type = se_nacl->saved_prot_type = 332 tfo->tpg_check_prot_fabric_only(se_tpg); 333 /* 334 * If the fabric module supports an ISID based TransportID, 335 * save this value in binary from the fabric I_T Nexus now. 336 */ 337 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { 338 memset(&buf[0], 0, PR_REG_ISID_LEN); 339 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, 340 &buf[0], PR_REG_ISID_LEN); 341 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); 342 } 343 kref_get(&se_nacl->acl_kref); 344 345 spin_lock_irq(&se_nacl->nacl_sess_lock); 346 /* 347 * The se_nacl->nacl_sess pointer will be set to the 348 * last active I_T Nexus for each struct se_node_acl. 
349 */ 350 se_nacl->nacl_sess = se_sess; 351 352 list_add_tail(&se_sess->sess_acl_list, 353 &se_nacl->acl_sess_list); 354 spin_unlock_irq(&se_nacl->nacl_sess_lock); 355 } 356 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 357 358 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 359 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); 360 } 361 EXPORT_SYMBOL(__transport_register_session); 362 363 void transport_register_session( 364 struct se_portal_group *se_tpg, 365 struct se_node_acl *se_nacl, 366 struct se_session *se_sess, 367 void *fabric_sess_ptr) 368 { 369 unsigned long flags; 370 371 spin_lock_irqsave(&se_tpg->session_lock, flags); 372 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); 373 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 374 } 375 EXPORT_SYMBOL(transport_register_session); 376 377 static void target_release_session(struct kref *kref) 378 { 379 struct se_session *se_sess = container_of(kref, 380 struct se_session, sess_kref); 381 struct se_portal_group *se_tpg = se_sess->se_tpg; 382 383 se_tpg->se_tpg_tfo->close_session(se_sess); 384 } 385 386 void target_get_session(struct se_session *se_sess) 387 { 388 kref_get(&se_sess->sess_kref); 389 } 390 EXPORT_SYMBOL(target_get_session); 391 392 void target_put_session(struct se_session *se_sess) 393 { 394 kref_put(&se_sess->sess_kref, target_release_session); 395 } 396 EXPORT_SYMBOL(target_put_session); 397 398 ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) 399 { 400 struct se_session *se_sess; 401 ssize_t len = 0; 402 403 spin_lock_bh(&se_tpg->session_lock); 404 list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { 405 if (!se_sess->se_node_acl) 406 continue; 407 if (!se_sess->se_node_acl->dynamic_node_acl) 408 continue; 409 if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE) 410 break; 411 412 len += snprintf(page + len, PAGE_SIZE - len, "%s\n", 413 se_sess->se_node_acl->initiatorname); 414 len += 1; /* Include NULL terminator */ 415 } 416 spin_unlock_bh(&se_tpg->session_lock); 417 418 return len; 419 } 420 EXPORT_SYMBOL(target_show_dynamic_sessions); 421 422 static void target_complete_nacl(struct kref *kref) 423 { 424 struct se_node_acl *nacl = container_of(kref, 425 struct se_node_acl, acl_kref); 426 427 complete(&nacl->acl_free_comp); 428 } 429 430 void target_put_nacl(struct se_node_acl *nacl) 431 { 432 kref_put(&nacl->acl_kref, target_complete_nacl); 433 } 434 435 void transport_deregister_session_configfs(struct se_session *se_sess) 436 { 437 struct se_node_acl *se_nacl; 438 unsigned long flags; 439 /* 440 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 441 */ 442 se_nacl = se_sess->se_node_acl; 443 if (se_nacl) { 444 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 445 if (se_nacl->acl_stop == 0) 446 list_del(&se_sess->sess_acl_list); 447 /* 448 * If the session list is empty, then clear the pointer. 449 * Otherwise, set the struct se_session pointer from the tail 450 * element of the per struct se_node_acl active session list. 
451 */ 452 if (list_empty(&se_nacl->acl_sess_list)) 453 se_nacl->nacl_sess = NULL; 454 else { 455 se_nacl->nacl_sess = container_of( 456 se_nacl->acl_sess_list.prev, 457 struct se_session, sess_acl_list); 458 } 459 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 460 } 461 } 462 EXPORT_SYMBOL(transport_deregister_session_configfs); 463 464 void transport_free_session(struct se_session *se_sess) 465 { 466 if (se_sess->sess_cmd_map) { 467 percpu_ida_destroy(&se_sess->sess_tag_pool); 468 kvfree(se_sess->sess_cmd_map); 469 } 470 kmem_cache_free(se_sess_cache, se_sess); 471 } 472 EXPORT_SYMBOL(transport_free_session); 473 474 void transport_deregister_session(struct se_session *se_sess) 475 { 476 struct se_portal_group *se_tpg = se_sess->se_tpg; 477 const struct target_core_fabric_ops *se_tfo; 478 struct se_node_acl *se_nacl; 479 unsigned long flags; 480 bool comp_nacl = true, drop_nacl = false; 481 482 if (!se_tpg) { 483 transport_free_session(se_sess); 484 return; 485 } 486 se_tfo = se_tpg->se_tpg_tfo; 487 488 spin_lock_irqsave(&se_tpg->session_lock, flags); 489 list_del(&se_sess->sess_list); 490 se_sess->se_tpg = NULL; 491 se_sess->fabric_sess_ptr = NULL; 492 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 493 494 /* 495 * Determine if we need to do extra work for this initiator node's 496 * struct se_node_acl if it had been previously dynamically generated. 497 */ 498 se_nacl = se_sess->se_node_acl; 499 500 mutex_lock(&se_tpg->acl_node_mutex); 501 if (se_nacl && se_nacl->dynamic_node_acl) { 502 if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 503 list_del(&se_nacl->acl_list); 504 se_tpg->num_node_acls--; 505 drop_nacl = true; 506 } 507 } 508 mutex_unlock(&se_tpg->acl_node_mutex); 509 510 if (drop_nacl) { 511 core_tpg_wait_for_nacl_pr_ref(se_nacl); 512 core_free_device_list_for_node(se_nacl, se_tpg); 513 kfree(se_nacl); 514 comp_nacl = false; 515 } 516 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 517 se_tpg->se_tpg_tfo->get_fabric_name()); 518 /* 519 * If last kref is dropping now for an explicit NodeACL, awake sleeping 520 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 521 * removal context. 522 */ 523 if (se_nacl && comp_nacl) 524 target_put_nacl(se_nacl); 525 526 transport_free_session(se_sess); 527 } 528 EXPORT_SYMBOL(transport_deregister_session); 529 530 /* 531 * Called with cmd->t_state_lock held. 532 */ 533 static void target_remove_from_state_list(struct se_cmd *cmd) 534 { 535 struct se_device *dev = cmd->se_dev; 536 unsigned long flags; 537 538 if (!dev) 539 return; 540 541 if (cmd->transport_state & CMD_T_BUSY) 542 return; 543 544 spin_lock_irqsave(&dev->execute_task_lock, flags); 545 if (cmd->state_active) { 546 list_del(&cmd->state_list); 547 cmd->state_active = false; 548 } 549 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 550 } 551 552 static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, 553 bool write_pending) 554 { 555 unsigned long flags; 556 557 spin_lock_irqsave(&cmd->t_state_lock, flags); 558 if (write_pending) 559 cmd->t_state = TRANSPORT_WRITE_PENDING; 560 561 if (remove_from_lists) { 562 target_remove_from_state_list(cmd); 563 564 /* 565 * Clear struct se_cmd->se_lun before the handoff to FE. 566 */ 567 cmd->se_lun = NULL; 568 } 569 570 /* 571 * Determine if frontend context caller is requesting the stopping of 572 * this command for frontend exceptions. 
573 */ 574 if (cmd->transport_state & CMD_T_STOP) { 575 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 576 __func__, __LINE__, cmd->tag); 577 578 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 579 580 complete_all(&cmd->t_transport_stop_comp); 581 return 1; 582 } 583 584 cmd->transport_state &= ~CMD_T_ACTIVE; 585 if (remove_from_lists) { 586 /* 587 * Some fabric modules like tcm_loop can release 588 * their internally allocated I/O reference now and 589 * struct se_cmd now. 590 * 591 * Fabric modules are expected to return '1' here if the 592 * se_cmd being passed is released at this point, 593 * or zero if not being released. 594 */ 595 if (cmd->se_tfo->check_stop_free != NULL) { 596 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 597 return cmd->se_tfo->check_stop_free(cmd); 598 } 599 } 600 601 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 602 return 0; 603 } 604 605 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) 606 { 607 return transport_cmd_check_stop(cmd, true, false); 608 } 609 610 static void transport_lun_remove_cmd(struct se_cmd *cmd) 611 { 612 struct se_lun *lun = cmd->se_lun; 613 614 if (!lun) 615 return; 616 617 if (cmpxchg(&cmd->lun_ref_active, true, false)) 618 percpu_ref_put(&lun->lun_ref); 619 } 620 621 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 622 { 623 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) 624 transport_lun_remove_cmd(cmd); 625 /* 626 * Allow the fabric driver to unmap any resources before 627 * releasing the descriptor via TFO->release_cmd() 628 */ 629 if (remove) 630 cmd->se_tfo->aborted_task(cmd); 631 632 if (transport_cmd_check_stop_to_fabric(cmd)) 633 return; 634 if (remove) 635 transport_put_cmd(cmd); 636 } 637 638 static void target_complete_failure_work(struct work_struct *work) 639 { 640 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 641 642 transport_generic_request_failure(cmd, 643 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 644 } 645 646 /* 647 * Used when asking transport to copy Sense Data from the underlying 648 * Linux/SCSI struct scsi_cmnd 649 */ 650 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) 651 { 652 struct se_device *dev = cmd->se_dev; 653 654 WARN_ON(!cmd->se_lun); 655 656 if (!dev) 657 return NULL; 658 659 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) 660 return NULL; 661 662 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 663 664 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", 665 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); 666 return cmd->sense_buffer; 667 } 668 669 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 670 { 671 struct se_device *dev = cmd->se_dev; 672 int success = scsi_status == GOOD; 673 unsigned long flags; 674 675 cmd->scsi_status = scsi_status; 676 677 678 spin_lock_irqsave(&cmd->t_state_lock, flags); 679 cmd->transport_state &= ~CMD_T_BUSY; 680 681 if (dev && dev->transport->transport_complete) { 682 dev->transport->transport_complete(cmd, 683 cmd->t_data_sg, 684 transport_get_sense_buffer(cmd)); 685 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 686 success = 1; 687 } 688 689 /* 690 * See if we are waiting to complete for an exception condition. 691 */ 692 if (cmd->transport_state & CMD_T_REQUEST_STOP) { 693 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 694 complete(&cmd->task_stop_comp); 695 return; 696 } 697 698 /* 699 * Check for case where an explicit ABORT_TASK has been received 700 * and transport_wait_for_tasks() will be waiting for completion.. 
701 */ 702 if (cmd->transport_state & CMD_T_ABORTED && 703 cmd->transport_state & CMD_T_STOP) { 704 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 705 complete_all(&cmd->t_transport_stop_comp); 706 return; 707 } else if (!success) { 708 INIT_WORK(&cmd->work, target_complete_failure_work); 709 } else { 710 INIT_WORK(&cmd->work, target_complete_ok_work); 711 } 712 713 cmd->t_state = TRANSPORT_COMPLETE; 714 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 715 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 716 717 queue_work(target_completion_wq, &cmd->work); 718 } 719 EXPORT_SYMBOL(target_complete_cmd); 720 721 void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) 722 { 723 if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) { 724 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 725 cmd->residual_count += cmd->data_length - length; 726 } else { 727 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 728 cmd->residual_count = cmd->data_length - length; 729 } 730 731 cmd->data_length = length; 732 } 733 734 target_complete_cmd(cmd, scsi_status); 735 } 736 EXPORT_SYMBOL(target_complete_cmd_with_length); 737 738 static void target_add_to_state_list(struct se_cmd *cmd) 739 { 740 struct se_device *dev = cmd->se_dev; 741 unsigned long flags; 742 743 spin_lock_irqsave(&dev->execute_task_lock, flags); 744 if (!cmd->state_active) { 745 list_add_tail(&cmd->state_list, &dev->state_list); 746 cmd->state_active = true; 747 } 748 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 749 } 750 751 /* 752 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status 753 */ 754 static void transport_write_pending_qf(struct se_cmd *cmd); 755 static void transport_complete_qf(struct se_cmd *cmd); 756 757 void target_qf_do_work(struct work_struct *work) 758 { 759 struct se_device *dev = container_of(work, struct se_device, 760 qf_work_queue); 761 LIST_HEAD(qf_cmd_list); 762 struct se_cmd *cmd, *cmd_tmp; 763 764 spin_lock_irq(&dev->qf_cmd_lock); 765 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); 766 spin_unlock_irq(&dev->qf_cmd_lock); 767 768 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 769 list_del(&cmd->se_qf_node); 770 atomic_dec_mb(&dev->dev_qf_count); 771 772 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 773 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 774 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : 775 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? 
"WRITE_PENDING" 776 : "UNKNOWN"); 777 778 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 779 transport_write_pending_qf(cmd); 780 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) 781 transport_complete_qf(cmd); 782 } 783 } 784 785 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 786 { 787 switch (cmd->data_direction) { 788 case DMA_NONE: 789 return "NONE"; 790 case DMA_FROM_DEVICE: 791 return "READ"; 792 case DMA_TO_DEVICE: 793 return "WRITE"; 794 case DMA_BIDIRECTIONAL: 795 return "BIDI"; 796 default: 797 break; 798 } 799 800 return "UNKNOWN"; 801 } 802 803 void transport_dump_dev_state( 804 struct se_device *dev, 805 char *b, 806 int *bl) 807 { 808 *bl += sprintf(b + *bl, "Status: "); 809 if (dev->export_count) 810 *bl += sprintf(b + *bl, "ACTIVATED"); 811 else 812 *bl += sprintf(b + *bl, "DEACTIVATED"); 813 814 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); 815 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", 816 dev->dev_attrib.block_size, 817 dev->dev_attrib.hw_max_sectors); 818 *bl += sprintf(b + *bl, " "); 819 } 820 821 void transport_dump_vpd_proto_id( 822 struct t10_vpd *vpd, 823 unsigned char *p_buf, 824 int p_buf_len) 825 { 826 unsigned char buf[VPD_TMP_BUF_SIZE]; 827 int len; 828 829 memset(buf, 0, VPD_TMP_BUF_SIZE); 830 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 831 832 switch (vpd->protocol_identifier) { 833 case 0x00: 834 sprintf(buf+len, "Fibre Channel\n"); 835 break; 836 case 0x10: 837 sprintf(buf+len, "Parallel SCSI\n"); 838 break; 839 case 0x20: 840 sprintf(buf+len, "SSA\n"); 841 break; 842 case 0x30: 843 sprintf(buf+len, "IEEE 1394\n"); 844 break; 845 case 0x40: 846 sprintf(buf+len, "SCSI Remote Direct Memory Access" 847 " Protocol\n"); 848 break; 849 case 0x50: 850 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 851 break; 852 case 0x60: 853 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 854 break; 855 case 0x70: 856 sprintf(buf+len, "Automation/Drive Interface Transport" 857 " Protocol\n"); 858 break; 859 case 0x80: 860 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 861 break; 862 default: 863 sprintf(buf+len, "Unknown 0x%02x\n", 864 vpd->protocol_identifier); 865 break; 866 } 867 868 if (p_buf) 869 strncpy(p_buf, buf, p_buf_len); 870 else 871 pr_debug("%s", buf); 872 } 873 874 void 875 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 876 { 877 /* 878 * Check if the Protocol Identifier Valid (PIV) bit is set.. 
879 * 880 * from spc3r23.pdf section 7.5.1 881 */ 882 if (page_83[1] & 0x80) { 883 vpd->protocol_identifier = (page_83[0] & 0xf0); 884 vpd->protocol_identifier_set = 1; 885 transport_dump_vpd_proto_id(vpd, NULL, 0); 886 } 887 } 888 EXPORT_SYMBOL(transport_set_vpd_proto_id); 889 890 int transport_dump_vpd_assoc( 891 struct t10_vpd *vpd, 892 unsigned char *p_buf, 893 int p_buf_len) 894 { 895 unsigned char buf[VPD_TMP_BUF_SIZE]; 896 int ret = 0; 897 int len; 898 899 memset(buf, 0, VPD_TMP_BUF_SIZE); 900 len = sprintf(buf, "T10 VPD Identifier Association: "); 901 902 switch (vpd->association) { 903 case 0x00: 904 sprintf(buf+len, "addressed logical unit\n"); 905 break; 906 case 0x10: 907 sprintf(buf+len, "target port\n"); 908 break; 909 case 0x20: 910 sprintf(buf+len, "SCSI target device\n"); 911 break; 912 default: 913 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); 914 ret = -EINVAL; 915 break; 916 } 917 918 if (p_buf) 919 strncpy(p_buf, buf, p_buf_len); 920 else 921 pr_debug("%s", buf); 922 923 return ret; 924 } 925 926 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) 927 { 928 /* 929 * The VPD identification association.. 930 * 931 * from spc3r23.pdf Section 7.6.3.1 Table 297 932 */ 933 vpd->association = (page_83[1] & 0x30); 934 return transport_dump_vpd_assoc(vpd, NULL, 0); 935 } 936 EXPORT_SYMBOL(transport_set_vpd_assoc); 937 938 int transport_dump_vpd_ident_type( 939 struct t10_vpd *vpd, 940 unsigned char *p_buf, 941 int p_buf_len) 942 { 943 unsigned char buf[VPD_TMP_BUF_SIZE]; 944 int ret = 0; 945 int len; 946 947 memset(buf, 0, VPD_TMP_BUF_SIZE); 948 len = sprintf(buf, "T10 VPD Identifier Type: "); 949 950 switch (vpd->device_identifier_type) { 951 case 0x00: 952 sprintf(buf+len, "Vendor specific\n"); 953 break; 954 case 0x01: 955 sprintf(buf+len, "T10 Vendor ID based\n"); 956 break; 957 case 0x02: 958 sprintf(buf+len, "EUI-64 based\n"); 959 break; 960 case 0x03: 961 sprintf(buf+len, "NAA\n"); 962 break; 963 case 0x04: 964 sprintf(buf+len, "Relative target port identifier\n"); 965 break; 966 case 0x08: 967 sprintf(buf+len, "SCSI name string\n"); 968 break; 969 default: 970 sprintf(buf+len, "Unsupported: 0x%02x\n", 971 vpd->device_identifier_type); 972 ret = -EINVAL; 973 break; 974 } 975 976 if (p_buf) { 977 if (p_buf_len < strlen(buf)+1) 978 return -EINVAL; 979 strncpy(p_buf, buf, p_buf_len); 980 } else { 981 pr_debug("%s", buf); 982 } 983 984 return ret; 985 } 986 987 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) 988 { 989 /* 990 * The VPD identifier type.. 
991 * 992 * from spc3r23.pdf Section 7.6.3.1 Table 298 993 */ 994 vpd->device_identifier_type = (page_83[1] & 0x0f); 995 return transport_dump_vpd_ident_type(vpd, NULL, 0); 996 } 997 EXPORT_SYMBOL(transport_set_vpd_ident_type); 998 999 int transport_dump_vpd_ident( 1000 struct t10_vpd *vpd, 1001 unsigned char *p_buf, 1002 int p_buf_len) 1003 { 1004 unsigned char buf[VPD_TMP_BUF_SIZE]; 1005 int ret = 0; 1006 1007 memset(buf, 0, VPD_TMP_BUF_SIZE); 1008 1009 switch (vpd->device_identifier_code_set) { 1010 case 0x01: /* Binary */ 1011 snprintf(buf, sizeof(buf), 1012 "T10 VPD Binary Device Identifier: %s\n", 1013 &vpd->device_identifier[0]); 1014 break; 1015 case 0x02: /* ASCII */ 1016 snprintf(buf, sizeof(buf), 1017 "T10 VPD ASCII Device Identifier: %s\n", 1018 &vpd->device_identifier[0]); 1019 break; 1020 case 0x03: /* UTF-8 */ 1021 snprintf(buf, sizeof(buf), 1022 "T10 VPD UTF-8 Device Identifier: %s\n", 1023 &vpd->device_identifier[0]); 1024 break; 1025 default: 1026 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" 1027 " 0x%02x", vpd->device_identifier_code_set); 1028 ret = -EINVAL; 1029 break; 1030 } 1031 1032 if (p_buf) 1033 strncpy(p_buf, buf, p_buf_len); 1034 else 1035 pr_debug("%s", buf); 1036 1037 return ret; 1038 } 1039 1040 int 1041 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) 1042 { 1043 static const char hex_str[] = "0123456789abcdef"; 1044 int j = 0, i = 4; /* offset to start of the identifier */ 1045 1046 /* 1047 * The VPD Code Set (encoding) 1048 * 1049 * from spc3r23.pdf Section 7.6.3.1 Table 296 1050 */ 1051 vpd->device_identifier_code_set = (page_83[0] & 0x0f); 1052 switch (vpd->device_identifier_code_set) { 1053 case 0x01: /* Binary */ 1054 vpd->device_identifier[j++] = 1055 hex_str[vpd->device_identifier_type]; 1056 while (i < (4 + page_83[3])) { 1057 vpd->device_identifier[j++] = 1058 hex_str[(page_83[i] & 0xf0) >> 4]; 1059 vpd->device_identifier[j++] = 1060 hex_str[page_83[i] & 0x0f]; 1061 i++; 1062 } 1063 break; 1064 case 0x02: /* ASCII */ 1065 case 0x03: /* UTF-8 */ 1066 while (i < (4 + page_83[3])) 1067 vpd->device_identifier[j++] = page_83[i++]; 1068 break; 1069 default: 1070 break; 1071 } 1072 1073 return transport_dump_vpd_ident(vpd, NULL, 0); 1074 } 1075 EXPORT_SYMBOL(transport_set_vpd_ident); 1076 1077 sense_reason_t 1078 target_cmd_size_check(struct se_cmd *cmd, unsigned int size) 1079 { 1080 struct se_device *dev = cmd->se_dev; 1081 1082 if (cmd->unknown_data_length) { 1083 cmd->data_length = size; 1084 } else if (size != cmd->data_length) { 1085 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" 1086 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 1087 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 1088 cmd->data_length, size, cmd->t_task_cdb[0]); 1089 1090 if (cmd->data_direction == DMA_TO_DEVICE) { 1091 pr_err("Rejecting underflow/overflow" 1092 " WRITE data\n"); 1093 return TCM_INVALID_CDB_FIELD; 1094 } 1095 /* 1096 * Reject READ_* or WRITE_* with overflow/underflow for 1097 * type SCF_SCSI_DATA_CDB. 1098 */ 1099 if (dev->dev_attrib.block_size != 512) { 1100 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 1101 " CDB on non 512-byte sector setup subsystem" 1102 " plugin: %s\n", dev->transport->name); 1103 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ 1104 return TCM_INVALID_CDB_FIELD; 1105 } 1106 /* 1107 * For the overflow case keep the existing fabric provided 1108 * ->data_length. Otherwise for the underflow case, reset 1109 * ->data_length to the smaller SCSI expected data transfer 1110 * length. 
1111 */ 1112 if (size > cmd->data_length) { 1113 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; 1114 cmd->residual_count = (size - cmd->data_length); 1115 } else { 1116 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1117 cmd->residual_count = (cmd->data_length - size); 1118 cmd->data_length = size; 1119 } 1120 } 1121 1122 return 0; 1123 1124 } 1125 1126 /* 1127 * Used by fabric modules containing a local struct se_cmd within their 1128 * fabric dependent per I/O descriptor. 1129 * 1130 * Preserves the value of @cmd->tag. 1131 */ 1132 void transport_init_se_cmd( 1133 struct se_cmd *cmd, 1134 const struct target_core_fabric_ops *tfo, 1135 struct se_session *se_sess, 1136 u32 data_length, 1137 int data_direction, 1138 int task_attr, 1139 unsigned char *sense_buffer) 1140 { 1141 INIT_LIST_HEAD(&cmd->se_delayed_node); 1142 INIT_LIST_HEAD(&cmd->se_qf_node); 1143 INIT_LIST_HEAD(&cmd->se_cmd_list); 1144 INIT_LIST_HEAD(&cmd->state_list); 1145 init_completion(&cmd->t_transport_stop_comp); 1146 init_completion(&cmd->cmd_wait_comp); 1147 init_completion(&cmd->task_stop_comp); 1148 spin_lock_init(&cmd->t_state_lock); 1149 kref_init(&cmd->cmd_kref); 1150 cmd->transport_state = CMD_T_DEV_ACTIVE; 1151 1152 cmd->se_tfo = tfo; 1153 cmd->se_sess = se_sess; 1154 cmd->data_length = data_length; 1155 cmd->data_direction = data_direction; 1156 cmd->sam_task_attr = task_attr; 1157 cmd->sense_buffer = sense_buffer; 1158 1159 cmd->state_active = false; 1160 } 1161 EXPORT_SYMBOL(transport_init_se_cmd); 1162 1163 static sense_reason_t 1164 transport_check_alloc_task_attr(struct se_cmd *cmd) 1165 { 1166 struct se_device *dev = cmd->se_dev; 1167 1168 /* 1169 * Check if SAM Task Attribute emulation is enabled for this 1170 * struct se_device storage object 1171 */ 1172 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1173 return 0; 1174 1175 if (cmd->sam_task_attr == TCM_ACA_TAG) { 1176 pr_debug("SAM Task Attribute ACA" 1177 " emulation is not supported\n"); 1178 return TCM_INVALID_CDB_FIELD; 1179 } 1180 /* 1181 * Used to determine when ORDERED commands should go from 1182 * Dormant to Active status. 1183 */ 1184 cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id); 1185 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", 1186 cmd->se_ordered_id, cmd->sam_task_attr, 1187 dev->transport->name); 1188 return 0; 1189 } 1190 1191 sense_reason_t 1192 target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) 1193 { 1194 struct se_device *dev = cmd->se_dev; 1195 sense_reason_t ret; 1196 1197 /* 1198 * Ensure that the received CDB is less than the max (252 + 8) bytes 1199 * for VARIABLE_LENGTH_CMD 1200 */ 1201 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { 1202 pr_err("Received SCSI CDB with command_size: %d that" 1203 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1204 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1205 return TCM_INVALID_CDB_FIELD; 1206 } 1207 /* 1208 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, 1209 * allocate the additional extended CDB buffer now.. Otherwise 1210 * setup the pointer from __t_task_cdb to t_task_cdb. 
1211 */ 1212 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { 1213 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), 1214 GFP_KERNEL); 1215 if (!cmd->t_task_cdb) { 1216 pr_err("Unable to allocate cmd->t_task_cdb" 1217 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1218 scsi_command_size(cdb), 1219 (unsigned long)sizeof(cmd->__t_task_cdb)); 1220 return TCM_OUT_OF_RESOURCES; 1221 } 1222 } else 1223 cmd->t_task_cdb = &cmd->__t_task_cdb[0]; 1224 /* 1225 * Copy the original CDB into cmd-> 1226 */ 1227 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); 1228 1229 trace_target_sequencer_start(cmd); 1230 1231 /* 1232 * Check for an existing UNIT ATTENTION condition 1233 */ 1234 ret = target_scsi3_ua_check(cmd); 1235 if (ret) 1236 return ret; 1237 1238 ret = target_alua_state_check(cmd); 1239 if (ret) 1240 return ret; 1241 1242 ret = target_check_reservation(cmd); 1243 if (ret) { 1244 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1245 return ret; 1246 } 1247 1248 ret = dev->transport->parse_cdb(cmd); 1249 if (ret) 1250 return ret; 1251 1252 ret = transport_check_alloc_task_attr(cmd); 1253 if (ret) 1254 return ret; 1255 1256 cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 1257 atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus); 1258 return 0; 1259 } 1260 EXPORT_SYMBOL(target_setup_cmd_from_cdb); 1261 1262 /* 1263 * Used by fabric module frontends to queue tasks directly. 1264 * Many only be used from process context only 1265 */ 1266 int transport_handle_cdb_direct( 1267 struct se_cmd *cmd) 1268 { 1269 sense_reason_t ret; 1270 1271 if (!cmd->se_lun) { 1272 dump_stack(); 1273 pr_err("cmd->se_lun is NULL\n"); 1274 return -EINVAL; 1275 } 1276 if (in_interrupt()) { 1277 dump_stack(); 1278 pr_err("transport_generic_handle_cdb cannot be called" 1279 " from interrupt context\n"); 1280 return -EINVAL; 1281 } 1282 /* 1283 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that 1284 * outstanding descriptors are handled correctly during shutdown via 1285 * transport_wait_for_tasks() 1286 * 1287 * Also, we don't take cmd->t_state_lock here as we only expect 1288 * this to be called for initial descriptor submission. 1289 */ 1290 cmd->t_state = TRANSPORT_NEW_CMD; 1291 cmd->transport_state |= CMD_T_ACTIVE; 1292 1293 /* 1294 * transport_generic_new_cmd() is already handling QUEUE_FULL, 1295 * so follow TRANSPORT_NEW_CMD processing thread context usage 1296 * and call transport_generic_request_failure() if necessary.. 1297 */ 1298 ret = transport_generic_new_cmd(cmd); 1299 if (ret) 1300 transport_generic_request_failure(cmd, ret); 1301 return 0; 1302 } 1303 EXPORT_SYMBOL(transport_handle_cdb_direct); 1304 1305 sense_reason_t 1306 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 1307 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1308 { 1309 if (!sgl || !sgl_count) 1310 return 0; 1311 1312 /* 1313 * Reject SCSI data overflow with map_mem_to_cmd() as incoming 1314 * scatterlists already have been set to follow what the fabric 1315 * passes for the original expected data transfer length. 
1316 */ 1317 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1318 pr_warn("Rejecting SCSI DATA overflow for fabric using" 1319 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); 1320 return TCM_INVALID_CDB_FIELD; 1321 } 1322 1323 cmd->t_data_sg = sgl; 1324 cmd->t_data_nents = sgl_count; 1325 cmd->t_bidi_data_sg = sgl_bidi; 1326 cmd->t_bidi_data_nents = sgl_bidi_count; 1327 1328 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1329 return 0; 1330 } 1331 1332 /* 1333 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized 1334 * se_cmd + use pre-allocated SGL memory. 1335 * 1336 * @se_cmd: command descriptor to submit 1337 * @se_sess: associated se_sess for endpoint 1338 * @cdb: pointer to SCSI CDB 1339 * @sense: pointer to SCSI sense buffer 1340 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1341 * @data_length: fabric expected data transfer length 1342 * @task_addr: SAM task attribute 1343 * @data_dir: DMA data direction 1344 * @flags: flags for command submission from target_sc_flags_tables 1345 * @sgl: struct scatterlist memory for unidirectional mapping 1346 * @sgl_count: scatterlist count for unidirectional mapping 1347 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping 1348 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping 1349 * @sgl_prot: struct scatterlist memory protection information 1350 * @sgl_prot_count: scatterlist count for protection information 1351 * 1352 * Task tags are supported if the caller has set @se_cmd->tag. 1353 * 1354 * Returns non zero to signal active I/O shutdown failure. All other 1355 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1356 * but still return zero here. 1357 * 1358 * This may only be called from process context, and also currently 1359 * assumes internal allocation of fabric payload buffer by target-core. 1360 */ 1361 int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess, 1362 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, 1363 u32 data_length, int task_attr, int data_dir, int flags, 1364 struct scatterlist *sgl, u32 sgl_count, 1365 struct scatterlist *sgl_bidi, u32 sgl_bidi_count, 1366 struct scatterlist *sgl_prot, u32 sgl_prot_count) 1367 { 1368 struct se_portal_group *se_tpg; 1369 sense_reason_t rc; 1370 int ret; 1371 1372 se_tpg = se_sess->se_tpg; 1373 BUG_ON(!se_tpg); 1374 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); 1375 BUG_ON(in_interrupt()); 1376 /* 1377 * Initialize se_cmd for target operation. From this point 1378 * exceptions are handled by sending exception status via 1379 * target_core_fabric_ops->queue_status() callback 1380 */ 1381 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1382 data_length, data_dir, task_attr, sense); 1383 if (flags & TARGET_SCF_UNKNOWN_SIZE) 1384 se_cmd->unknown_data_length = 1; 1385 /* 1386 * Obtain struct se_cmd->cmd_kref reference and add new cmd to 1387 * se_sess->sess_cmd_list. A second kref_get here is necessary 1388 * for fabrics using TARGET_SCF_ACK_KREF that expect a second 1389 * kref_put() to happen during fabric packet acknowledgement. 
1390 */ 1391 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); 1392 if (ret) 1393 return ret; 1394 /* 1395 * Signal bidirectional data payloads to target-core 1396 */ 1397 if (flags & TARGET_SCF_BIDI_OP) 1398 se_cmd->se_cmd_flags |= SCF_BIDI; 1399 /* 1400 * Locate se_lun pointer and attach it to struct se_cmd 1401 */ 1402 rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun); 1403 if (rc) { 1404 transport_send_check_condition_and_sense(se_cmd, rc, 0); 1405 target_put_sess_cmd(se_cmd); 1406 return 0; 1407 } 1408 1409 rc = target_setup_cmd_from_cdb(se_cmd, cdb); 1410 if (rc != 0) { 1411 transport_generic_request_failure(se_cmd, rc); 1412 return 0; 1413 } 1414 1415 /* 1416 * Save pointers for SGLs containing protection information, 1417 * if present. 1418 */ 1419 if (sgl_prot_count) { 1420 se_cmd->t_prot_sg = sgl_prot; 1421 se_cmd->t_prot_nents = sgl_prot_count; 1422 se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC; 1423 } 1424 1425 /* 1426 * When a non zero sgl_count has been passed perform SGL passthrough 1427 * mapping for pre-allocated fabric memory instead of having target 1428 * core perform an internal SGL allocation.. 1429 */ 1430 if (sgl_count != 0) { 1431 BUG_ON(!sgl); 1432 1433 /* 1434 * A work-around for tcm_loop as some userspace code via 1435 * scsi-generic do not memset their associated read buffers, 1436 * so go ahead and do that here for type non-data CDBs. Also 1437 * note that this is currently guaranteed to be a single SGL 1438 * for this case by target core in target_setup_cmd_from_cdb() 1439 * -> transport_generic_cmd_sequencer(). 1440 */ 1441 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && 1442 se_cmd->data_direction == DMA_FROM_DEVICE) { 1443 unsigned char *buf = NULL; 1444 1445 if (sgl) 1446 buf = kmap(sg_page(sgl)) + sgl->offset; 1447 1448 if (buf) { 1449 memset(buf, 0, sgl->length); 1450 kunmap(sg_page(sgl)); 1451 } 1452 } 1453 1454 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count, 1455 sgl_bidi, sgl_bidi_count); 1456 if (rc != 0) { 1457 transport_generic_request_failure(se_cmd, rc); 1458 return 0; 1459 } 1460 } 1461 1462 /* 1463 * Check if we need to delay processing because of ALUA 1464 * Active/NonOptimized primary access state.. 1465 */ 1466 core_alua_check_nonop_delay(se_cmd); 1467 1468 transport_handle_cdb_direct(se_cmd); 1469 return 0; 1470 } 1471 EXPORT_SYMBOL(target_submit_cmd_map_sgls); 1472 1473 /* 1474 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd 1475 * 1476 * @se_cmd: command descriptor to submit 1477 * @se_sess: associated se_sess for endpoint 1478 * @cdb: pointer to SCSI CDB 1479 * @sense: pointer to SCSI sense buffer 1480 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1481 * @data_length: fabric expected data transfer length 1482 * @task_addr: SAM task attribute 1483 * @data_dir: DMA data direction 1484 * @flags: flags for command submission from target_sc_flags_tables 1485 * 1486 * Task tags are supported if the caller has set @se_cmd->tag. 1487 * 1488 * Returns non zero to signal active I/O shutdown failure. All other 1489 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1490 * but still return zero here. 1491 * 1492 * This may only be called from process context, and also currently 1493 * assumes internal allocation of fabric payload buffer by target-core. 1494 * 1495 * It also assumes interal target core SGL memory allocation. 
1496 */ 1497 int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, 1498 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, 1499 u32 data_length, int task_attr, int data_dir, int flags) 1500 { 1501 return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, 1502 unpacked_lun, data_length, task_attr, data_dir, 1503 flags, NULL, 0, NULL, 0, NULL, 0); 1504 } 1505 EXPORT_SYMBOL(target_submit_cmd); 1506 1507 static void target_complete_tmr_failure(struct work_struct *work) 1508 { 1509 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); 1510 1511 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; 1512 se_cmd->se_tfo->queue_tm_rsp(se_cmd); 1513 1514 transport_cmd_check_stop_to_fabric(se_cmd); 1515 } 1516 1517 /** 1518 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd 1519 * for TMR CDBs 1520 * 1521 * @se_cmd: command descriptor to submit 1522 * @se_sess: associated se_sess for endpoint 1523 * @sense: pointer to SCSI sense buffer 1524 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1525 * @fabric_context: fabric context for TMR req 1526 * @tm_type: Type of TM request 1527 * @gfp: gfp type for caller 1528 * @tag: referenced task tag for TMR_ABORT_TASK 1529 * @flags: submit cmd flags 1530 * 1531 * Callable from all contexts. 1532 **/ 1533 1534 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 1535 unsigned char *sense, u64 unpacked_lun, 1536 void *fabric_tmr_ptr, unsigned char tm_type, 1537 gfp_t gfp, unsigned int tag, int flags) 1538 { 1539 struct se_portal_group *se_tpg; 1540 int ret; 1541 1542 se_tpg = se_sess->se_tpg; 1543 BUG_ON(!se_tpg); 1544 1545 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1546 0, DMA_NONE, TCM_SIMPLE_TAG, sense); 1547 /* 1548 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req 1549 * allocation failure. 1550 */ 1551 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); 1552 if (ret < 0) 1553 return -ENOMEM; 1554 1555 if (tm_type == TMR_ABORT_TASK) 1556 se_cmd->se_tmr_req->ref_task_tag = tag; 1557 1558 /* See target_submit_cmd for commentary */ 1559 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); 1560 if (ret) { 1561 core_tmr_release_req(se_cmd->se_tmr_req); 1562 return ret; 1563 } 1564 1565 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); 1566 if (ret) { 1567 /* 1568 * For callback during failure handling, push this work off 1569 * to process context with TMR_LUN_DOES_NOT_EXIST status. 1570 */ 1571 INIT_WORK(&se_cmd->work, target_complete_tmr_failure); 1572 schedule_work(&se_cmd->work); 1573 return 0; 1574 } 1575 transport_generic_handle_tmr(se_cmd); 1576 return 0; 1577 } 1578 EXPORT_SYMBOL(target_submit_tmr); 1579 1580 /* 1581 * If the cmd is active, request it to be stopped and sleep until it 1582 * has completed. 
1583 */ 1584 bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) 1585 __releases(&cmd->t_state_lock) 1586 __acquires(&cmd->t_state_lock) 1587 { 1588 bool was_active = false; 1589 1590 if (cmd->transport_state & CMD_T_BUSY) { 1591 cmd->transport_state |= CMD_T_REQUEST_STOP; 1592 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 1593 1594 pr_debug("cmd %p waiting to complete\n", cmd); 1595 wait_for_completion(&cmd->task_stop_comp); 1596 pr_debug("cmd %p stopped successfully\n", cmd); 1597 1598 spin_lock_irqsave(&cmd->t_state_lock, *flags); 1599 cmd->transport_state &= ~CMD_T_REQUEST_STOP; 1600 cmd->transport_state &= ~CMD_T_BUSY; 1601 was_active = true; 1602 } 1603 1604 return was_active; 1605 } 1606 1607 /* 1608 * Handle SAM-esque emulation for generic transport request failures. 1609 */ 1610 void transport_generic_request_failure(struct se_cmd *cmd, 1611 sense_reason_t sense_reason) 1612 { 1613 int ret = 0; 1614 1615 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx" 1616 " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]); 1617 pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n", 1618 cmd->se_tfo->get_cmd_state(cmd), 1619 cmd->t_state, sense_reason); 1620 pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", 1621 (cmd->transport_state & CMD_T_ACTIVE) != 0, 1622 (cmd->transport_state & CMD_T_STOP) != 0, 1623 (cmd->transport_state & CMD_T_SENT) != 0); 1624 1625 /* 1626 * For SAM Task Attribute emulation for failed struct se_cmd 1627 */ 1628 transport_complete_task_attr(cmd); 1629 /* 1630 * Handle special case for COMPARE_AND_WRITE failure, where the 1631 * callback is expected to drop the per device ->caw_sem. 1632 */ 1633 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 1634 cmd->transport_complete_callback) 1635 cmd->transport_complete_callback(cmd, false); 1636 1637 switch (sense_reason) { 1638 case TCM_NON_EXISTENT_LUN: 1639 case TCM_UNSUPPORTED_SCSI_OPCODE: 1640 case TCM_INVALID_CDB_FIELD: 1641 case TCM_INVALID_PARAMETER_LIST: 1642 case TCM_PARAMETER_LIST_LENGTH_ERROR: 1643 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 1644 case TCM_UNKNOWN_MODE_PAGE: 1645 case TCM_WRITE_PROTECTED: 1646 case TCM_ADDRESS_OUT_OF_RANGE: 1647 case TCM_CHECK_CONDITION_ABORT_CMD: 1648 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1649 case TCM_CHECK_CONDITION_NOT_READY: 1650 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: 1651 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: 1652 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: 1653 break; 1654 case TCM_OUT_OF_RESOURCES: 1655 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1656 break; 1657 case TCM_RESERVATION_CONFLICT: 1658 /* 1659 * No SENSE Data payload for this case, set SCSI Status 1660 * and queue the response to $FABRIC_MOD. 1661 * 1662 * Uses linux/include/scsi/scsi.h SAM status codes defs 1663 */ 1664 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1665 /* 1666 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 1667 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 1668 * CONFLICT STATUS. 
1669 * 1670 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1671 */ 1672 if (cmd->se_sess && 1673 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) { 1674 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 1675 cmd->orig_fe_lun, 0x2C, 1676 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1677 } 1678 trace_target_cmd_complete(cmd); 1679 ret = cmd->se_tfo->queue_status(cmd); 1680 if (ret == -EAGAIN || ret == -ENOMEM) 1681 goto queue_full; 1682 goto check_stop; 1683 default: 1684 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 1685 cmd->t_task_cdb[0], sense_reason); 1686 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1687 break; 1688 } 1689 1690 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); 1691 if (ret == -EAGAIN || ret == -ENOMEM) 1692 goto queue_full; 1693 1694 check_stop: 1695 transport_lun_remove_cmd(cmd); 1696 if (!transport_cmd_check_stop_to_fabric(cmd)) 1697 ; 1698 return; 1699 1700 queue_full: 1701 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 1702 transport_handle_queue_full(cmd, cmd->se_dev); 1703 } 1704 EXPORT_SYMBOL(transport_generic_request_failure); 1705 1706 void __target_execute_cmd(struct se_cmd *cmd) 1707 { 1708 sense_reason_t ret; 1709 1710 if (cmd->execute_cmd) { 1711 ret = cmd->execute_cmd(cmd); 1712 if (ret) { 1713 spin_lock_irq(&cmd->t_state_lock); 1714 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); 1715 spin_unlock_irq(&cmd->t_state_lock); 1716 1717 transport_generic_request_failure(cmd, ret); 1718 } 1719 } 1720 } 1721 1722 static int target_write_prot_action(struct se_cmd *cmd) 1723 { 1724 u32 sectors; 1725 /* 1726 * Perform WRITE_INSERT of PI using software emulation when backend 1727 * device has PI enabled, if the transport has not already generated 1728 * PI using hardware WRITE_INSERT offload. 1729 */ 1730 switch (cmd->prot_op) { 1731 case TARGET_PROT_DOUT_INSERT: 1732 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) 1733 sbc_dif_generate(cmd); 1734 break; 1735 case TARGET_PROT_DOUT_STRIP: 1736 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP) 1737 break; 1738 1739 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); 1740 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 1741 sectors, 0, cmd->t_prot_sg, 0); 1742 if (unlikely(cmd->pi_err)) { 1743 spin_lock_irq(&cmd->t_state_lock); 1744 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); 1745 spin_unlock_irq(&cmd->t_state_lock); 1746 transport_generic_request_failure(cmd, cmd->pi_err); 1747 return -1; 1748 } 1749 break; 1750 default: 1751 break; 1752 } 1753 1754 return 0; 1755 } 1756 1757 static bool target_handle_task_attr(struct se_cmd *cmd) 1758 { 1759 struct se_device *dev = cmd->se_dev; 1760 1761 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1762 return false; 1763 1764 /* 1765 * Check for the existence of HEAD_OF_QUEUE, and if true return 1 1766 * to allow the passed struct se_cmd list of tasks to the front of the list. 1767 */ 1768 switch (cmd->sam_task_attr) { 1769 case TCM_HEAD_TAG: 1770 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, " 1771 "se_ordered_id: %u\n", 1772 cmd->t_task_cdb[0], cmd->se_ordered_id); 1773 return false; 1774 case TCM_ORDERED_TAG: 1775 atomic_inc_mb(&dev->dev_ordered_sync); 1776 1777 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " 1778 " se_ordered_id: %u\n", 1779 cmd->t_task_cdb[0], cmd->se_ordered_id); 1780 1781 /* 1782 * Execute an ORDERED command if no other older commands 1783 * exist that need to be completed first. 
1784 */ 1785 if (!atomic_read(&dev->simple_cmds)) 1786 return false; 1787 break; 1788 default: 1789 /* 1790 * For SIMPLE and UNTAGGED Task Attribute commands 1791 */ 1792 atomic_inc_mb(&dev->simple_cmds); 1793 break; 1794 } 1795 1796 if (atomic_read(&dev->dev_ordered_sync) == 0) 1797 return false; 1798 1799 spin_lock(&dev->delayed_cmd_lock); 1800 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); 1801 spin_unlock(&dev->delayed_cmd_lock); 1802 1803 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" 1804 " delayed CMD list, se_ordered_id: %u\n", 1805 cmd->t_task_cdb[0], cmd->sam_task_attr, 1806 cmd->se_ordered_id); 1807 return true; 1808 } 1809 1810 void target_execute_cmd(struct se_cmd *cmd) 1811 { 1812 /* 1813 * If the received CDB has aleady been aborted stop processing it here. 1814 */ 1815 if (transport_check_aborted_status(cmd, 1)) 1816 return; 1817 1818 /* 1819 * Determine if frontend context caller is requesting the stopping of 1820 * this command for frontend exceptions. 1821 */ 1822 spin_lock_irq(&cmd->t_state_lock); 1823 if (cmd->transport_state & CMD_T_STOP) { 1824 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 1825 __func__, __LINE__, cmd->tag); 1826 1827 spin_unlock_irq(&cmd->t_state_lock); 1828 complete_all(&cmd->t_transport_stop_comp); 1829 return; 1830 } 1831 1832 cmd->t_state = TRANSPORT_PROCESSING; 1833 cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; 1834 spin_unlock_irq(&cmd->t_state_lock); 1835 1836 if (target_write_prot_action(cmd)) 1837 return; 1838 1839 if (target_handle_task_attr(cmd)) { 1840 spin_lock_irq(&cmd->t_state_lock); 1841 cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT); 1842 spin_unlock_irq(&cmd->t_state_lock); 1843 return; 1844 } 1845 1846 __target_execute_cmd(cmd); 1847 } 1848 EXPORT_SYMBOL(target_execute_cmd); 1849 1850 /* 1851 * Process all commands up to the last received ORDERED task attribute which 1852 * requires another blocking boundary 1853 */ 1854 static void target_restart_delayed_cmds(struct se_device *dev) 1855 { 1856 for (;;) { 1857 struct se_cmd *cmd; 1858 1859 spin_lock(&dev->delayed_cmd_lock); 1860 if (list_empty(&dev->delayed_cmd_list)) { 1861 spin_unlock(&dev->delayed_cmd_lock); 1862 break; 1863 } 1864 1865 cmd = list_entry(dev->delayed_cmd_list.next, 1866 struct se_cmd, se_delayed_node); 1867 list_del(&cmd->se_delayed_node); 1868 spin_unlock(&dev->delayed_cmd_lock); 1869 1870 __target_execute_cmd(cmd); 1871 1872 if (cmd->sam_task_attr == TCM_ORDERED_TAG) 1873 break; 1874 } 1875 } 1876 1877 /* 1878 * Called from I/O completion to determine which dormant/delayed 1879 * and ordered cmds need to have their tasks added to the execution queue. 
1880 */ 1881 static void transport_complete_task_attr(struct se_cmd *cmd) 1882 { 1883 struct se_device *dev = cmd->se_dev; 1884 1885 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1886 return; 1887 1888 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { 1889 atomic_dec_mb(&dev->simple_cmds); 1890 dev->dev_cur_ordered_id++; 1891 pr_debug("Incremented dev->dev_cur_ordered_id: %u for" 1892 " SIMPLE: %u\n", dev->dev_cur_ordered_id, 1893 cmd->se_ordered_id); 1894 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { 1895 dev->dev_cur_ordered_id++; 1896 pr_debug("Incremented dev_cur_ordered_id: %u for" 1897 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, 1898 cmd->se_ordered_id); 1899 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { 1900 atomic_dec_mb(&dev->dev_ordered_sync); 1901 1902 dev->dev_cur_ordered_id++; 1903 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" 1904 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); 1905 } 1906 1907 target_restart_delayed_cmds(dev); 1908 } 1909 1910 static void transport_complete_qf(struct se_cmd *cmd) 1911 { 1912 int ret = 0; 1913 1914 transport_complete_task_attr(cmd); 1915 1916 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 1917 trace_target_cmd_complete(cmd); 1918 ret = cmd->se_tfo->queue_status(cmd); 1919 goto out; 1920 } 1921 1922 switch (cmd->data_direction) { 1923 case DMA_FROM_DEVICE: 1924 trace_target_cmd_complete(cmd); 1925 ret = cmd->se_tfo->queue_data_in(cmd); 1926 break; 1927 case DMA_TO_DEVICE: 1928 if (cmd->se_cmd_flags & SCF_BIDI) { 1929 ret = cmd->se_tfo->queue_data_in(cmd); 1930 break; 1931 } 1932 /* Fall through for DMA_TO_DEVICE */ 1933 case DMA_NONE: 1934 trace_target_cmd_complete(cmd); 1935 ret = cmd->se_tfo->queue_status(cmd); 1936 break; 1937 default: 1938 break; 1939 } 1940 1941 out: 1942 if (ret < 0) { 1943 transport_handle_queue_full(cmd, cmd->se_dev); 1944 return; 1945 } 1946 transport_lun_remove_cmd(cmd); 1947 transport_cmd_check_stop_to_fabric(cmd); 1948 } 1949 1950 static void transport_handle_queue_full( 1951 struct se_cmd *cmd, 1952 struct se_device *dev) 1953 { 1954 spin_lock_irq(&dev->qf_cmd_lock); 1955 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 1956 atomic_inc_mb(&dev->dev_qf_count); 1957 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 1958 1959 schedule_work(&cmd->se_dev->qf_work_queue); 1960 } 1961 1962 static bool target_read_prot_action(struct se_cmd *cmd) 1963 { 1964 switch (cmd->prot_op) { 1965 case TARGET_PROT_DIN_STRIP: 1966 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { 1967 u32 sectors = cmd->data_length >> 1968 ilog2(cmd->se_dev->dev_attrib.block_size); 1969 1970 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 1971 sectors, 0, cmd->t_prot_sg, 1972 0); 1973 if (cmd->pi_err) 1974 return true; 1975 } 1976 break; 1977 case TARGET_PROT_DIN_INSERT: 1978 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) 1979 break; 1980 1981 sbc_dif_generate(cmd); 1982 break; 1983 default: 1984 break; 1985 } 1986 1987 return false; 1988 } 1989 1990 static void target_complete_ok_work(struct work_struct *work) 1991 { 1992 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 1993 int ret; 1994 1995 /* 1996 * Check if we need to move delayed/dormant tasks from cmds on the 1997 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 1998 * Attribute. 
1999 */ 2000 transport_complete_task_attr(cmd); 2001 2002 /* 2003 * Check to schedule QUEUE_FULL work, or execute an existing 2004 * cmd->transport_qf_callback() 2005 */ 2006 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 2007 schedule_work(&cmd->se_dev->qf_work_queue); 2008 2009 /* 2010 * Check if we need to send a sense buffer from 2011 * the struct se_cmd in question. 2012 */ 2013 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 2014 WARN_ON(!cmd->scsi_status); 2015 ret = transport_send_check_condition_and_sense( 2016 cmd, 0, 1); 2017 if (ret == -EAGAIN || ret == -ENOMEM) 2018 goto queue_full; 2019 2020 transport_lun_remove_cmd(cmd); 2021 transport_cmd_check_stop_to_fabric(cmd); 2022 return; 2023 } 2024 /* 2025 * Check for a callback, used by amongst other things 2026 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. 2027 */ 2028 if (cmd->transport_complete_callback) { 2029 sense_reason_t rc; 2030 2031 rc = cmd->transport_complete_callback(cmd, true); 2032 if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { 2033 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 2034 !cmd->data_length) 2035 goto queue_rsp; 2036 2037 return; 2038 } else if (rc) { 2039 ret = transport_send_check_condition_and_sense(cmd, 2040 rc, 0); 2041 if (ret == -EAGAIN || ret == -ENOMEM) 2042 goto queue_full; 2043 2044 transport_lun_remove_cmd(cmd); 2045 transport_cmd_check_stop_to_fabric(cmd); 2046 return; 2047 } 2048 } 2049 2050 queue_rsp: 2051 switch (cmd->data_direction) { 2052 case DMA_FROM_DEVICE: 2053 atomic_long_add(cmd->data_length, 2054 &cmd->se_lun->lun_stats.tx_data_octets); 2055 /* 2056 * Perform READ_STRIP of PI using software emulation when 2057 * backend had PI enabled, if the transport will not be 2058 * performing hardware READ_STRIP offload. 
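 *
 * As a hedged example of how the hardware path is selected: a fabric that
 * offloads DIF in hardware would typically advertise this on its session,
 * e.g. something along the lines of
 *
 *	se_sess->sup_prot_ops = TARGET_PROT_DIN_STRIP | TARGET_PROT_DOUT_INSERT;
 *
 * in which case target_read_prot_action() and target_write_prot_action()
 * skip the software sbc_dif_verify()/sbc_dif_generate() calls and leave the
 * protection information handling to the transport.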
2059 */ 2060 if (target_read_prot_action(cmd)) { 2061 ret = transport_send_check_condition_and_sense(cmd, 2062 cmd->pi_err, 0); 2063 if (ret == -EAGAIN || ret == -ENOMEM) 2064 goto queue_full; 2065 2066 transport_lun_remove_cmd(cmd); 2067 transport_cmd_check_stop_to_fabric(cmd); 2068 return; 2069 } 2070 2071 trace_target_cmd_complete(cmd); 2072 ret = cmd->se_tfo->queue_data_in(cmd); 2073 if (ret == -EAGAIN || ret == -ENOMEM) 2074 goto queue_full; 2075 break; 2076 case DMA_TO_DEVICE: 2077 atomic_long_add(cmd->data_length, 2078 &cmd->se_lun->lun_stats.rx_data_octets); 2079 /* 2080 * Check if we need to send READ payload for BIDI-COMMAND 2081 */ 2082 if (cmd->se_cmd_flags & SCF_BIDI) { 2083 atomic_long_add(cmd->data_length, 2084 &cmd->se_lun->lun_stats.tx_data_octets); 2085 ret = cmd->se_tfo->queue_data_in(cmd); 2086 if (ret == -EAGAIN || ret == -ENOMEM) 2087 goto queue_full; 2088 break; 2089 } 2090 /* Fall through for DMA_TO_DEVICE */ 2091 case DMA_NONE: 2092 trace_target_cmd_complete(cmd); 2093 ret = cmd->se_tfo->queue_status(cmd); 2094 if (ret == -EAGAIN || ret == -ENOMEM) 2095 goto queue_full; 2096 break; 2097 default: 2098 break; 2099 } 2100 2101 transport_lun_remove_cmd(cmd); 2102 transport_cmd_check_stop_to_fabric(cmd); 2103 return; 2104 2105 queue_full: 2106 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 2107 " data_direction: %d\n", cmd, cmd->data_direction); 2108 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 2109 transport_handle_queue_full(cmd, cmd->se_dev); 2110 } 2111 2112 static inline void transport_free_sgl(struct scatterlist *sgl, int nents) 2113 { 2114 struct scatterlist *sg; 2115 int count; 2116 2117 for_each_sg(sgl, sg, nents, count) 2118 __free_page(sg_page(sg)); 2119 2120 kfree(sgl); 2121 } 2122 2123 static inline void transport_reset_sgl_orig(struct se_cmd *cmd) 2124 { 2125 /* 2126 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE 2127 * emulation, and free + reset pointers if necessary.. 2128 */ 2129 if (!cmd->t_data_sg_orig) 2130 return; 2131 2132 kfree(cmd->t_data_sg); 2133 cmd->t_data_sg = cmd->t_data_sg_orig; 2134 cmd->t_data_sg_orig = NULL; 2135 cmd->t_data_nents = cmd->t_data_nents_orig; 2136 cmd->t_data_nents_orig = 0; 2137 } 2138 2139 static inline void transport_free_pages(struct se_cmd *cmd) 2140 { 2141 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2142 transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); 2143 cmd->t_prot_sg = NULL; 2144 cmd->t_prot_nents = 0; 2145 } 2146 2147 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { 2148 /* 2149 * Release special case READ buffer payload required for 2150 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE 2151 */ 2152 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { 2153 transport_free_sgl(cmd->t_bidi_data_sg, 2154 cmd->t_bidi_data_nents); 2155 cmd->t_bidi_data_sg = NULL; 2156 cmd->t_bidi_data_nents = 0; 2157 } 2158 transport_reset_sgl_orig(cmd); 2159 return; 2160 } 2161 transport_reset_sgl_orig(cmd); 2162 2163 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2164 cmd->t_data_sg = NULL; 2165 cmd->t_data_nents = 0; 2166 2167 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 2168 cmd->t_bidi_data_sg = NULL; 2169 cmd->t_bidi_data_nents = 0; 2170 } 2171 2172 /** 2173 * transport_release_cmd - free a command 2174 * @cmd: command to free 2175 * 2176 * This routine unconditionally frees a command, and reference counting 2177 * or list removal must be done in the caller. 
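 *
 * The final reference drop ends up in the fabric's ->release_cmd() callback
 * (see target_release_cmd_kref() below).  A minimal fabric implementation
 * might look like the following sketch, where example_fabric_cmd is a
 * hypothetical per-command wrapper embedding the struct se_cmd:
 *
 *	static void example_fabric_release_cmd(struct se_cmd *se_cmd)
 *	{
 *		struct example_fabric_cmd *fcmd =
 *			container_of(se_cmd, struct example_fabric_cmd, se_cmd);
 *
 *		kfree(fcmd);
 *	}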
2178 */ 2179 static int transport_release_cmd(struct se_cmd *cmd) 2180 { 2181 BUG_ON(!cmd->se_tfo); 2182 2183 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 2184 core_tmr_release_req(cmd->se_tmr_req); 2185 if (cmd->t_task_cdb != cmd->__t_task_cdb) 2186 kfree(cmd->t_task_cdb); 2187 /* 2188 * If this cmd has been setup with target_get_sess_cmd(), drop 2189 * the kref and call ->release_cmd() in kref callback. 2190 */ 2191 return target_put_sess_cmd(cmd); 2192 } 2193 2194 /** 2195 * transport_put_cmd - release a reference to a command 2196 * @cmd: command to release 2197 * 2198 * This routine releases our reference to the command and frees it if possible. 2199 */ 2200 static int transport_put_cmd(struct se_cmd *cmd) 2201 { 2202 transport_free_pages(cmd); 2203 return transport_release_cmd(cmd); 2204 } 2205 2206 void *transport_kmap_data_sg(struct se_cmd *cmd) 2207 { 2208 struct scatterlist *sg = cmd->t_data_sg; 2209 struct page **pages; 2210 int i; 2211 2212 /* 2213 * We need to take into account a possible offset here for fabrics like 2214 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 2215 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 2216 */ 2217 if (!cmd->t_data_nents) 2218 return NULL; 2219 2220 BUG_ON(!sg); 2221 if (cmd->t_data_nents == 1) 2222 return kmap(sg_page(sg)) + sg->offset; 2223 2224 /* >1 page. use vmap */ 2225 pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL); 2226 if (!pages) 2227 return NULL; 2228 2229 /* convert sg[] to pages[] */ 2230 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 2231 pages[i] = sg_page(sg); 2232 } 2233 2234 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 2235 kfree(pages); 2236 if (!cmd->t_data_vmap) 2237 return NULL; 2238 2239 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 2240 } 2241 EXPORT_SYMBOL(transport_kmap_data_sg); 2242 2243 void transport_kunmap_data_sg(struct se_cmd *cmd) 2244 { 2245 if (!cmd->t_data_nents) { 2246 return; 2247 } else if (cmd->t_data_nents == 1) { 2248 kunmap(sg_page(cmd->t_data_sg)); 2249 return; 2250 } 2251 2252 vunmap(cmd->t_data_vmap); 2253 cmd->t_data_vmap = NULL; 2254 } 2255 EXPORT_SYMBOL(transport_kunmap_data_sg); 2256 2257 int 2258 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, 2259 bool zero_page) 2260 { 2261 struct scatterlist *sg; 2262 struct page *page; 2263 gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0; 2264 unsigned int nent; 2265 int i = 0; 2266 2267 nent = DIV_ROUND_UP(length, PAGE_SIZE); 2268 sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL); 2269 if (!sg) 2270 return -ENOMEM; 2271 2272 sg_init_table(sg, nent); 2273 2274 while (length) { 2275 u32 page_len = min_t(u32, length, PAGE_SIZE); 2276 page = alloc_page(GFP_KERNEL | zero_flag); 2277 if (!page) 2278 goto out; 2279 2280 sg_set_page(&sg[i], page, page_len, 0); 2281 length -= page_len; 2282 i++; 2283 } 2284 *sgl = sg; 2285 *nents = nent; 2286 return 0; 2287 2288 out: 2289 while (i > 0) { 2290 i--; 2291 __free_page(sg_page(&sg[i])); 2292 } 2293 kfree(sg); 2294 return -ENOMEM; 2295 } 2296 2297 /* 2298 * Allocate any required resources to execute the command. For writes we 2299 * might not have the payload yet, so notify the fabric via a call to 2300 * ->write_pending instead. Otherwise place it on the execution queue. 
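 *
 * A sketch of the resulting fabric-side flow (illustrative only, error
 * handling and fabric specific glue omitted):
 *
 *	sense_reason_t rc;
 *
 *	rc = transport_generic_new_cmd(cmd);
 *	if (rc)
 *		transport_generic_request_failure(cmd, rc);
 *
 * For reads and zero-length commands the command is placed on the execution
 * queue from here; for writes the fabric's ->write_pending() callback fires,
 * and once the WRITE payload has been received the fabric calls
 * target_execute_cmd(cmd) itself.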
2301 */ 2302 sense_reason_t 2303 transport_generic_new_cmd(struct se_cmd *cmd) 2304 { 2305 int ret = 0; 2306 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); 2307 2308 if (cmd->prot_op != TARGET_PROT_NORMAL && 2309 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2310 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, 2311 cmd->prot_length, true); 2312 if (ret < 0) 2313 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2314 } 2315 2316 /* 2317 * Determine if the TCM fabric module has already allocated physical 2318 * memory, and is directly calling transport_generic_map_mem_to_cmd() 2319 * beforehand. 2320 */ 2321 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 2322 cmd->data_length) { 2323 2324 if ((cmd->se_cmd_flags & SCF_BIDI) || 2325 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { 2326 u32 bidi_length; 2327 2328 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) 2329 bidi_length = cmd->t_task_nolb * 2330 cmd->se_dev->dev_attrib.block_size; 2331 else 2332 bidi_length = cmd->data_length; 2333 2334 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2335 &cmd->t_bidi_data_nents, 2336 bidi_length, zero_flag); 2337 if (ret < 0) 2338 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2339 } 2340 2341 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2342 cmd->data_length, zero_flag); 2343 if (ret < 0) 2344 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2345 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 2346 cmd->data_length) { 2347 /* 2348 * Special case for COMPARE_AND_WRITE with fabrics 2349 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. 2350 */ 2351 u32 caw_length = cmd->t_task_nolb * 2352 cmd->se_dev->dev_attrib.block_size; 2353 2354 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2355 &cmd->t_bidi_data_nents, 2356 caw_length, zero_flag); 2357 if (ret < 0) 2358 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2359 } 2360 /* 2361 * If this command is not a write we can execute it right here; 2362 * for write buffers we need to notify the fabric driver first 2363 * and let it call back once the write buffers are ready. 2364 */ 2365 target_add_to_state_list(cmd); 2366 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { 2367 target_execute_cmd(cmd); 2368 return 0; 2369 } 2370 transport_cmd_check_stop(cmd, false, true); 2371 2372 ret = cmd->se_tfo->write_pending(cmd); 2373 if (ret == -EAGAIN || ret == -ENOMEM) 2374 goto queue_full; 2375 2376 /* fabric drivers should only return -EAGAIN or -ENOMEM as error */ 2377 WARN_ON(ret); 2378 2379 return (!ret) ?
0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2380 2381 queue_full: 2382 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd); 2383 cmd->t_state = TRANSPORT_COMPLETE_QF_WP; 2384 transport_handle_queue_full(cmd, cmd->se_dev); 2385 return 0; 2386 } 2387 EXPORT_SYMBOL(transport_generic_new_cmd); 2388 2389 static void transport_write_pending_qf(struct se_cmd *cmd) 2390 { 2391 int ret; 2392 2393 ret = cmd->se_tfo->write_pending(cmd); 2394 if (ret == -EAGAIN || ret == -ENOMEM) { 2395 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", 2396 cmd); 2397 transport_handle_queue_full(cmd, cmd->se_dev); 2398 } 2399 } 2400 2401 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2402 { 2403 unsigned long flags; 2404 int ret = 0; 2405 2406 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { 2407 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2408 transport_wait_for_tasks(cmd); 2409 2410 ret = transport_release_cmd(cmd); 2411 } else { 2412 if (wait_for_tasks) 2413 transport_wait_for_tasks(cmd); 2414 /* 2415 * Handle WRITE failure case where transport_generic_new_cmd() 2416 * has already added se_cmd to state_list, but fabric has 2417 * failed command before I/O submission. 2418 */ 2419 if (cmd->state_active) { 2420 spin_lock_irqsave(&cmd->t_state_lock, flags); 2421 target_remove_from_state_list(cmd); 2422 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2423 } 2424 2425 if (cmd->se_lun) 2426 transport_lun_remove_cmd(cmd); 2427 2428 ret = transport_put_cmd(cmd); 2429 } 2430 return ret; 2431 } 2432 EXPORT_SYMBOL(transport_generic_free_cmd); 2433 2434 /* target_get_sess_cmd - Add command to active ->sess_cmd_list 2435 * @se_cmd: command descriptor to add 2436 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() 2437 */ 2438 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) 2439 { 2440 struct se_session *se_sess = se_cmd->se_sess; 2441 unsigned long flags; 2442 int ret = 0; 2443 2444 /* 2445 * Add a second kref if the fabric caller is expecting to handle 2446 * fabric acknowledgement that requires two target_put_sess_cmd() 2447 * invocations before se_cmd descriptor release.
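 *
 * A sketch of the resulting command lifetime when ack_kref is true
 * (illustrative only):
 *
 *	target_get_sess_cmd(se_cmd, true);	kref: 1 -> 2
 *	... I/O completes and the response is queued to the fabric ...
 *	target_put_sess_cmd(se_cmd);		kref: 2 -> 1
 *	... initiator acknowledgement arrives ...
 *	target_put_sess_cmd(se_cmd);		kref: 1 -> 0, ->release_cmd()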
2448 */ 2449 if (ack_kref) 2450 kref_get(&se_cmd->cmd_kref); 2451 2452 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2453 if (se_sess->sess_tearing_down) { 2454 ret = -ESHUTDOWN; 2455 goto out; 2456 } 2457 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2458 out: 2459 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2460 2461 if (ret && ack_kref) 2462 target_put_sess_cmd(se_cmd); 2463 2464 return ret; 2465 } 2466 EXPORT_SYMBOL(target_get_sess_cmd); 2467 2468 static void target_release_cmd_kref(struct kref *kref) 2469 __releases(&se_cmd->se_sess->sess_cmd_lock) 2470 { 2471 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2472 struct se_session *se_sess = se_cmd->se_sess; 2473 2474 if (list_empty(&se_cmd->se_cmd_list)) { 2475 spin_unlock(&se_sess->sess_cmd_lock); 2476 se_cmd->se_tfo->release_cmd(se_cmd); 2477 return; 2478 } 2479 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { 2480 spin_unlock(&se_sess->sess_cmd_lock); 2481 complete(&se_cmd->cmd_wait_comp); 2482 return; 2483 } 2484 list_del(&se_cmd->se_cmd_list); 2485 spin_unlock(&se_sess->sess_cmd_lock); 2486 2487 se_cmd->se_tfo->release_cmd(se_cmd); 2488 } 2489 2490 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put 2491 * @se_cmd: command descriptor to drop 2492 */ 2493 int target_put_sess_cmd(struct se_cmd *se_cmd) 2494 { 2495 struct se_session *se_sess = se_cmd->se_sess; 2496 2497 if (!se_sess) { 2498 se_cmd->se_tfo->release_cmd(se_cmd); 2499 return 1; 2500 } 2501 return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref, 2502 &se_sess->sess_cmd_lock); 2503 } 2504 EXPORT_SYMBOL(target_put_sess_cmd); 2505 2506 /* target_sess_cmd_list_set_waiting - Flag all commands in 2507 * sess_cmd_list to complete cmd_wait_comp. Set 2508 * sess_tearing_down so no more commands are queued. 
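 *
 * A typical fabric session-shutdown sequence is roughly (sketch only,
 * fabric specific teardown omitted):
 *
 *	target_sess_cmd_list_set_waiting(se_sess);
 *	target_wait_for_sess_cmds(se_sess);
 *	... the fabric then deregisters and frees the session ...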
2509 * @se_sess: session to flag 2510 */ 2511 void target_sess_cmd_list_set_waiting(struct se_session *se_sess) 2512 { 2513 struct se_cmd *se_cmd; 2514 unsigned long flags; 2515 2516 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2517 if (se_sess->sess_tearing_down) { 2518 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2519 return; 2520 } 2521 se_sess->sess_tearing_down = 1; 2522 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); 2523 2524 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) 2525 se_cmd->cmd_wait_set = 1; 2526 2527 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2528 } 2529 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); 2530 2531 /* target_wait_for_sess_cmds - Wait for outstanding descriptors 2532 * @se_sess: session to wait for active I/O 2533 */ 2534 void target_wait_for_sess_cmds(struct se_session *se_sess) 2535 { 2536 struct se_cmd *se_cmd, *tmp_cmd; 2537 unsigned long flags; 2538 2539 list_for_each_entry_safe(se_cmd, tmp_cmd, 2540 &se_sess->sess_wait_list, se_cmd_list) { 2541 list_del(&se_cmd->se_cmd_list); 2542 2543 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" 2544 " %d\n", se_cmd, se_cmd->t_state, 2545 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2546 2547 wait_for_completion(&se_cmd->cmd_wait_comp); 2548 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" 2549 " fabric state: %d\n", se_cmd, se_cmd->t_state, 2550 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2551 2552 se_cmd->se_tfo->release_cmd(se_cmd); 2553 } 2554 2555 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2556 WARN_ON(!list_empty(&se_sess->sess_cmd_list)); 2557 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2558 2559 } 2560 EXPORT_SYMBOL(target_wait_for_sess_cmds); 2561 2562 void transport_clear_lun_ref(struct se_lun *lun) 2563 { 2564 percpu_ref_kill(&lun->lun_ref); 2565 wait_for_completion(&lun->lun_ref_comp); 2566 } 2567 2568 /** 2569 * transport_wait_for_tasks - wait for completion to occur 2570 * @cmd: command to wait 2571 * 2572 * Called from frontend fabric context to wait for storage engine 2573 * to pause and/or release frontend generated struct se_cmd. 
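 *
 * This is typically reached via transport_generic_free_cmd(cmd, 1), which
 * (as above) waits here before dropping the command, e.g. in a fabric
 * cleanup path (sketch only):
 *
 *	transport_generic_free_cmd(se_cmd, 1);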
2574 */ 2575 bool transport_wait_for_tasks(struct se_cmd *cmd) 2576 { 2577 unsigned long flags; 2578 2579 spin_lock_irqsave(&cmd->t_state_lock, flags); 2580 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 2581 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2582 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2583 return false; 2584 } 2585 2586 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 2587 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2588 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2589 return false; 2590 } 2591 2592 if (!(cmd->transport_state & CMD_T_ACTIVE)) { 2593 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2594 return false; 2595 } 2596 2597 cmd->transport_state |= CMD_T_STOP; 2598 2599 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n", 2600 cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); 2601 2602 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2603 2604 wait_for_completion(&cmd->t_transport_stop_comp); 2605 2606 spin_lock_irqsave(&cmd->t_state_lock, flags); 2607 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 2608 2609 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n", 2610 cmd->tag); 2611 2612 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2613 2614 return true; 2615 } 2616 EXPORT_SYMBOL(transport_wait_for_tasks); 2617 2618 static int transport_get_sense_codes( 2619 struct se_cmd *cmd, 2620 u8 *asc, 2621 u8 *ascq) 2622 { 2623 *asc = cmd->scsi_asc; 2624 *ascq = cmd->scsi_ascq; 2625 2626 return 0; 2627 } 2628 2629 static 2630 void transport_err_sector_info(unsigned char *buffer, sector_t bad_sector) 2631 { 2632 /* Place failed LBA in sense data information descriptor 0. */ 2633 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc; 2634 buffer[SPC_DESC_TYPE_OFFSET] = 0; /* Information */ 2635 buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa; 2636 buffer[SPC_VALIDITY_OFFSET] = 0x80; 2637 2638 /* Descriptor Information: failing sector */ 2639 put_unaligned_be64(bad_sector, &buffer[12]); 2640 } 2641 2642 int 2643 transport_send_check_condition_and_sense(struct se_cmd *cmd, 2644 sense_reason_t reason, int from_transport) 2645 { 2646 unsigned char *buffer = cmd->sense_buffer; 2647 unsigned long flags; 2648 u8 asc = 0, ascq = 0; 2649 2650 spin_lock_irqsave(&cmd->t_state_lock, flags); 2651 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 2652 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2653 return 0; 2654 } 2655 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 2656 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2657 2658 if (!reason && from_transport) 2659 goto after_reason; 2660 2661 if (!from_transport) 2662 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 2663 2664 /* 2665 * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses 2666 * SENSE KEY values from include/scsi/scsi.h 2667 */ 2668 switch (reason) { 2669 case TCM_NO_SENSE: 2670 /* CURRENT ERROR */ 2671 buffer[0] = 0x70; 2672 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2673 /* Not Ready */ 2674 buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY; 2675 /* NO ADDITIONAL SENSE INFORMATION */ 2676 buffer[SPC_ASC_KEY_OFFSET] = 0; 2677 buffer[SPC_ASCQ_KEY_OFFSET] = 0; 2678 break; 2679 case TCM_NON_EXISTENT_LUN: 2680 /* CURRENT ERROR */ 2681 buffer[0] = 0x70; 2682 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2683 /* ILLEGAL REQUEST */ 2684 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2685 /* LOGICAL UNIT NOT SUPPORTED */ 2686 buffer[SPC_ASC_KEY_OFFSET] = 0x25; 2687 break; 2688 case TCM_UNSUPPORTED_SCSI_OPCODE: 2689 
case TCM_SECTOR_COUNT_TOO_MANY: 2690 /* CURRENT ERROR */ 2691 buffer[0] = 0x70; 2692 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2693 /* ILLEGAL REQUEST */ 2694 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2695 /* INVALID COMMAND OPERATION CODE */ 2696 buffer[SPC_ASC_KEY_OFFSET] = 0x20; 2697 break; 2698 case TCM_UNKNOWN_MODE_PAGE: 2699 /* CURRENT ERROR */ 2700 buffer[0] = 0x70; 2701 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2702 /* ILLEGAL REQUEST */ 2703 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2704 /* INVALID FIELD IN CDB */ 2705 buffer[SPC_ASC_KEY_OFFSET] = 0x24; 2706 break; 2707 case TCM_CHECK_CONDITION_ABORT_CMD: 2708 /* CURRENT ERROR */ 2709 buffer[0] = 0x70; 2710 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2711 /* ABORTED COMMAND */ 2712 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2713 /* BUS DEVICE RESET FUNCTION OCCURRED */ 2714 buffer[SPC_ASC_KEY_OFFSET] = 0x29; 2715 buffer[SPC_ASCQ_KEY_OFFSET] = 0x03; 2716 break; 2717 case TCM_INCORRECT_AMOUNT_OF_DATA: 2718 /* CURRENT ERROR */ 2719 buffer[0] = 0x70; 2720 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2721 /* ABORTED COMMAND */ 2722 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2723 /* WRITE ERROR */ 2724 buffer[SPC_ASC_KEY_OFFSET] = 0x0c; 2725 /* NOT ENOUGH UNSOLICITED DATA */ 2726 buffer[SPC_ASCQ_KEY_OFFSET] = 0x0d; 2727 break; 2728 case TCM_INVALID_CDB_FIELD: 2729 /* CURRENT ERROR */ 2730 buffer[0] = 0x70; 2731 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2732 /* ILLEGAL REQUEST */ 2733 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2734 /* INVALID FIELD IN CDB */ 2735 buffer[SPC_ASC_KEY_OFFSET] = 0x24; 2736 break; 2737 case TCM_INVALID_PARAMETER_LIST: 2738 /* CURRENT ERROR */ 2739 buffer[0] = 0x70; 2740 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2741 /* ILLEGAL REQUEST */ 2742 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2743 /* INVALID FIELD IN PARAMETER LIST */ 2744 buffer[SPC_ASC_KEY_OFFSET] = 0x26; 2745 break; 2746 case TCM_PARAMETER_LIST_LENGTH_ERROR: 2747 /* CURRENT ERROR */ 2748 buffer[0] = 0x70; 2749 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2750 /* ILLEGAL REQUEST */ 2751 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2752 /* PARAMETER LIST LENGTH ERROR */ 2753 buffer[SPC_ASC_KEY_OFFSET] = 0x1a; 2754 break; 2755 case TCM_UNEXPECTED_UNSOLICITED_DATA: 2756 /* CURRENT ERROR */ 2757 buffer[0] = 0x70; 2758 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2759 /* ABORTED COMMAND */ 2760 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2761 /* WRITE ERROR */ 2762 buffer[SPC_ASC_KEY_OFFSET] = 0x0c; 2763 /* UNEXPECTED_UNSOLICITED_DATA */ 2764 buffer[SPC_ASCQ_KEY_OFFSET] = 0x0c; 2765 break; 2766 case TCM_SERVICE_CRC_ERROR: 2767 /* CURRENT ERROR */ 2768 buffer[0] = 0x70; 2769 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2770 /* ABORTED COMMAND */ 2771 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2772 /* PROTOCOL SERVICE CRC ERROR */ 2773 buffer[SPC_ASC_KEY_OFFSET] = 0x47; 2774 /* N/A */ 2775 buffer[SPC_ASCQ_KEY_OFFSET] = 0x05; 2776 break; 2777 case TCM_SNACK_REJECTED: 2778 /* CURRENT ERROR */ 2779 buffer[0] = 0x70; 2780 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2781 /* ABORTED COMMAND */ 2782 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2783 /* READ ERROR */ 2784 buffer[SPC_ASC_KEY_OFFSET] = 0x11; 2785 /* FAILED RETRANSMISSION REQUEST */ 2786 buffer[SPC_ASCQ_KEY_OFFSET] = 0x13; 2787 break; 2788 case TCM_WRITE_PROTECTED: 2789 /* CURRENT ERROR */ 2790 buffer[0] = 0x70; 2791 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2792 /* DATA PROTECT */ 2793 buffer[SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; 2794 /* WRITE PROTECTED */ 2795 buffer[SPC_ASC_KEY_OFFSET] = 0x27; 2796 
break; 2797 case TCM_ADDRESS_OUT_OF_RANGE: 2798 /* CURRENT ERROR */ 2799 buffer[0] = 0x70; 2800 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2801 /* ILLEGAL REQUEST */ 2802 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2803 /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ 2804 buffer[SPC_ASC_KEY_OFFSET] = 0x21; 2805 break; 2806 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 2807 /* CURRENT ERROR */ 2808 buffer[0] = 0x70; 2809 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2810 /* UNIT ATTENTION */ 2811 buffer[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; 2812 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); 2813 buffer[SPC_ASC_KEY_OFFSET] = asc; 2814 buffer[SPC_ASCQ_KEY_OFFSET] = ascq; 2815 break; 2816 case TCM_CHECK_CONDITION_NOT_READY: 2817 /* CURRENT ERROR */ 2818 buffer[0] = 0x70; 2819 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2820 /* Not Ready */ 2821 buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY; 2822 transport_get_sense_codes(cmd, &asc, &ascq); 2823 buffer[SPC_ASC_KEY_OFFSET] = asc; 2824 buffer[SPC_ASCQ_KEY_OFFSET] = ascq; 2825 break; 2826 case TCM_MISCOMPARE_VERIFY: 2827 /* CURRENT ERROR */ 2828 buffer[0] = 0x70; 2829 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2830 buffer[SPC_SENSE_KEY_OFFSET] = MISCOMPARE; 2831 /* MISCOMPARE DURING VERIFY OPERATION */ 2832 buffer[SPC_ASC_KEY_OFFSET] = 0x1d; 2833 buffer[SPC_ASCQ_KEY_OFFSET] = 0x00; 2834 break; 2835 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: 2836 /* CURRENT ERROR */ 2837 buffer[0] = 0x70; 2838 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2839 /* ILLEGAL REQUEST */ 2840 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2841 /* LOGICAL BLOCK GUARD CHECK FAILED */ 2842 buffer[SPC_ASC_KEY_OFFSET] = 0x10; 2843 buffer[SPC_ASCQ_KEY_OFFSET] = 0x01; 2844 transport_err_sector_info(buffer, cmd->bad_sector); 2845 break; 2846 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: 2847 /* CURRENT ERROR */ 2848 buffer[0] = 0x70; 2849 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2850 /* ILLEGAL REQUEST */ 2851 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2852 /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ 2853 buffer[SPC_ASC_KEY_OFFSET] = 0x10; 2854 buffer[SPC_ASCQ_KEY_OFFSET] = 0x02; 2855 transport_err_sector_info(buffer, cmd->bad_sector); 2856 break; 2857 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: 2858 /* CURRENT ERROR */ 2859 buffer[0] = 0x70; 2860 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2861 /* ILLEGAL REQUEST */ 2862 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2863 /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ 2864 buffer[SPC_ASC_KEY_OFFSET] = 0x10; 2865 buffer[SPC_ASCQ_KEY_OFFSET] = 0x03; 2866 transport_err_sector_info(buffer, cmd->bad_sector); 2867 break; 2868 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 2869 default: 2870 /* CURRENT ERROR */ 2871 buffer[0] = 0x70; 2872 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2873 /* 2874 * Returning ILLEGAL REQUEST would cause immediate IO errors on 2875 * Solaris initiators. Returning NOT READY instead means the 2876 * operations will be retried a finite number of times and we 2877 * can survive intermittent errors. 2878 */ 2879 buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY; 2880 /* LOGICAL UNIT COMMUNICATION FAILURE */ 2881 buffer[SPC_ASC_KEY_OFFSET] = 0x08; 2882 break; 2883 } 2884 /* 2885 * This code uses linux/include/scsi/scsi.h SAM status codes! 2886 */ 2887 cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 2888 /* 2889 * Automatically padded, this value is encoded in the fabric's 2890 * data_length response PDU containing the SCSI defined sense data. 
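 *
 * As a worked example of the buffer filled in above (assuming the standard
 * fixed format sense offsets, i.e. SPC_SENSE_KEY_OFFSET == 2,
 * SPC_ADD_SENSE_LEN_OFFSET == 7, SPC_ASC_KEY_OFFSET == 12 and
 * SPC_ASCQ_KEY_OFFSET == 13), TCM_INVALID_CDB_FIELD yields:
 *
 *	buffer[0]  = 0x70	current error, fixed format
 *	buffer[2]  = 0x05	ILLEGAL REQUEST
 *	buffer[7]  = 0x0a	additional sense length = 10
 *	buffer[12] = 0x24	INVALID FIELD IN CDB
 *	buffer[13] = 0x00	no additional sense code qualifier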
2891 */ 2892 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 2893 2894 after_reason: 2895 trace_target_cmd_complete(cmd); 2896 return cmd->se_tfo->queue_status(cmd); 2897 } 2898 EXPORT_SYMBOL(transport_send_check_condition_and_sense); 2899 2900 int transport_check_aborted_status(struct se_cmd *cmd, int send_status) 2901 { 2902 if (!(cmd->transport_state & CMD_T_ABORTED)) 2903 return 0; 2904 2905 /* 2906 * If cmd has been aborted but either no status is to be sent or it has 2907 * already been sent, just return 2908 */ 2909 if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) 2910 return 1; 2911 2912 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n", 2913 cmd->t_task_cdb[0], cmd->tag); 2914 2915 cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; 2916 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 2917 trace_target_cmd_complete(cmd); 2918 cmd->se_tfo->queue_status(cmd); 2919 2920 return 1; 2921 } 2922 EXPORT_SYMBOL(transport_check_aborted_status); 2923 2924 void transport_send_task_abort(struct se_cmd *cmd) 2925 { 2926 unsigned long flags; 2927 2928 spin_lock_irqsave(&cmd->t_state_lock, flags); 2929 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) { 2930 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2931 return; 2932 } 2933 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2934 2935 /* 2936 * If there are still expected incoming fabric WRITEs, we wait 2937 * until they have completed before sending a TASK_ABORTED 2938 * response. This response with TASK_ABORTED status will be 2939 * queued back to fabric module by transport_check_aborted_status(). 2940 */ 2941 if (cmd->data_direction == DMA_TO_DEVICE) { 2942 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 2943 cmd->transport_state |= CMD_T_ABORTED; 2944 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; 2945 return; 2946 } 2947 } 2948 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 2949 2950 transport_lun_remove_cmd(cmd); 2951 2952 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n", 2953 cmd->t_task_cdb[0], cmd->tag); 2954 2955 trace_target_cmd_complete(cmd); 2956 cmd->se_tfo->queue_status(cmd); 2957 } 2958 2959 static void target_tmr_work(struct work_struct *work) 2960 { 2961 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2962 struct se_device *dev = cmd->se_dev; 2963 struct se_tmr_req *tmr = cmd->se_tmr_req; 2964 int ret; 2965 2966 switch (tmr->function) { 2967 case TMR_ABORT_TASK: 2968 core_tmr_abort_task(dev, tmr, cmd->se_sess); 2969 break; 2970 case TMR_ABORT_TASK_SET: 2971 case TMR_CLEAR_ACA: 2972 case TMR_CLEAR_TASK_SET: 2973 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; 2974 break; 2975 case TMR_LUN_RESET: 2976 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); 2977 tmr->response = (!ret) ?
TMR_FUNCTION_COMPLETE : 2978 TMR_FUNCTION_REJECTED; 2979 if (tmr->response == TMR_FUNCTION_COMPLETE) { 2980 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 2981 cmd->orig_fe_lun, 0x29, 2982 ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED); 2983 } 2984 break; 2985 case TMR_TARGET_WARM_RESET: 2986 tmr->response = TMR_FUNCTION_REJECTED; 2987 break; 2988 case TMR_TARGET_COLD_RESET: 2989 tmr->response = TMR_FUNCTION_REJECTED; 2990 break; 2991 default: 2992 pr_err("Unknown TMR function: 0x%02x.\n", 2993 tmr->function); 2994 tmr->response = TMR_FUNCTION_REJECTED; 2995 break; 2996 } 2997 2998 cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 2999 cmd->se_tfo->queue_tm_rsp(cmd); 3000 3001 transport_cmd_check_stop_to_fabric(cmd); 3002 } 3003 3004 int transport_generic_handle_tmr( 3005 struct se_cmd *cmd) 3006 { 3007 unsigned long flags; 3008 3009 spin_lock_irqsave(&cmd->t_state_lock, flags); 3010 cmd->transport_state |= CMD_T_ACTIVE; 3011 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3012 3013 INIT_WORK(&cmd->work, target_tmr_work); 3014 queue_work(cmd->se_dev->tmr_wq, &cmd->work); 3015 return 0; 3016 } 3017 EXPORT_SYMBOL(transport_generic_handle_tmr); 3018 3019 bool 3020 target_check_wce(struct se_device *dev) 3021 { 3022 bool wce = false; 3023 3024 if (dev->transport->get_write_cache) 3025 wce = dev->transport->get_write_cache(dev); 3026 else if (dev->dev_attrib.emulate_write_cache > 0) 3027 wce = true; 3028 3029 return wce; 3030 } 3031 3032 bool 3033 target_check_fua(struct se_device *dev) 3034 { 3035 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0; 3036 } 3037
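/*
 * Illustrative sketch only (not compiled): how a backend or fabric caller
 * might consult the two helpers above.  The example_* name is hypothetical
 * and not part of this file.
 */
#if 0
static void example_report_cache_flags(struct se_device *dev)
{
	/* Log whether a volatile write cache and FUA write emulation apply. */
	pr_debug("WCE: %d FUA write: %d\n",
		 target_check_wce(dev), target_check_fua(dev));
}
#endif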