/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp),
			__alignof__(struct t10_alua_lu_gp), 0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}

void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}

struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	kref_init(&se_sess->sess_kref);
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);

int transport_alloc_session_tags(struct se_session *se_sess,
				 unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
					GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!se_sess->sess_cmd_map) {
		se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
		if (!se_sess->sess_cmd_map) {
			pr_err("Unable to allocate se_sess->sess_cmd_map\n");
			return -ENOMEM;
		}
	}

	rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool, tag_num: %u\n",
		       tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);

struct se_session *transport_init_session_tags(unsigned int tag_num,
					       unsigned int tag_size,
					       enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	se_sess = transport_init_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session_tags);
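
/*
 * Example usage (illustrative sketch only, not part of this file): a fabric
 * driver with a fixed queue depth would typically size the tag pool to its
 * queue depth and the tag size to its per-command descriptor.  The
 * "my_fabric_cmd" type and tag count below are hypothetical:
 *
 *	struct my_fabric_cmd {
 *		struct se_cmd se_cmd;
 *		unsigned char sense[TRANSPORT_SENSE_BUFFER];
 *	};
 *
 *	se_sess = transport_init_session_tags(128,
 *			sizeof(struct my_fabric_cmd), TARGET_PROT_NORMAL);
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *
 * Tags are then typically obtained with percpu_ida_alloc() from
 * se_sess->sess_tag_pool and used to index into se_sess->sess_cmd_map.
 */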
300 */ 301 void __transport_register_session( 302 struct se_portal_group *se_tpg, 303 struct se_node_acl *se_nacl, 304 struct se_session *se_sess, 305 void *fabric_sess_ptr) 306 { 307 const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo; 308 unsigned char buf[PR_REG_ISID_LEN]; 309 310 se_sess->se_tpg = se_tpg; 311 se_sess->fabric_sess_ptr = fabric_sess_ptr; 312 /* 313 * Used by struct se_node_acl's under ConfigFS to locate active se_session-t 314 * 315 * Only set for struct se_session's that will actually be moving I/O. 316 * eg: *NOT* discovery sessions. 317 */ 318 if (se_nacl) { 319 /* 320 * 321 * Determine if fabric allows for T10-PI feature bits exposed to 322 * initiators for device backends with !dev->dev_attrib.pi_prot_type. 323 * 324 * If so, then always save prot_type on a per se_node_acl node 325 * basis and re-instate the previous sess_prot_type to avoid 326 * disabling PI from below any previously initiator side 327 * registered LUNs. 328 */ 329 if (se_nacl->saved_prot_type) 330 se_sess->sess_prot_type = se_nacl->saved_prot_type; 331 else if (tfo->tpg_check_prot_fabric_only) 332 se_sess->sess_prot_type = se_nacl->saved_prot_type = 333 tfo->tpg_check_prot_fabric_only(se_tpg); 334 /* 335 * If the fabric module supports an ISID based TransportID, 336 * save this value in binary from the fabric I_T Nexus now. 337 */ 338 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { 339 memset(&buf[0], 0, PR_REG_ISID_LEN); 340 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, 341 &buf[0], PR_REG_ISID_LEN); 342 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); 343 } 344 kref_get(&se_nacl->acl_kref); 345 346 spin_lock_irq(&se_nacl->nacl_sess_lock); 347 /* 348 * The se_nacl->nacl_sess pointer will be set to the 349 * last active I_T Nexus for each struct se_node_acl. 
350 */ 351 se_nacl->nacl_sess = se_sess; 352 353 list_add_tail(&se_sess->sess_acl_list, 354 &se_nacl->acl_sess_list); 355 spin_unlock_irq(&se_nacl->nacl_sess_lock); 356 } 357 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 358 359 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 360 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); 361 } 362 EXPORT_SYMBOL(__transport_register_session); 363 364 void transport_register_session( 365 struct se_portal_group *se_tpg, 366 struct se_node_acl *se_nacl, 367 struct se_session *se_sess, 368 void *fabric_sess_ptr) 369 { 370 unsigned long flags; 371 372 spin_lock_irqsave(&se_tpg->session_lock, flags); 373 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); 374 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 375 } 376 EXPORT_SYMBOL(transport_register_session); 377 378 static void target_release_session(struct kref *kref) 379 { 380 struct se_session *se_sess = container_of(kref, 381 struct se_session, sess_kref); 382 struct se_portal_group *se_tpg = se_sess->se_tpg; 383 384 se_tpg->se_tpg_tfo->close_session(se_sess); 385 } 386 387 int target_get_session(struct se_session *se_sess) 388 { 389 return kref_get_unless_zero(&se_sess->sess_kref); 390 } 391 EXPORT_SYMBOL(target_get_session); 392 393 void target_put_session(struct se_session *se_sess) 394 { 395 kref_put(&se_sess->sess_kref, target_release_session); 396 } 397 EXPORT_SYMBOL(target_put_session); 398 399 ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) 400 { 401 struct se_session *se_sess; 402 ssize_t len = 0; 403 404 spin_lock_bh(&se_tpg->session_lock); 405 list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { 406 if (!se_sess->se_node_acl) 407 continue; 408 if (!se_sess->se_node_acl->dynamic_node_acl) 409 continue; 410 if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE) 411 break; 412 413 len += snprintf(page + len, PAGE_SIZE - len, "%s\n", 414 se_sess->se_node_acl->initiatorname); 415 len += 1; /* Include NULL terminator */ 416 } 417 spin_unlock_bh(&se_tpg->session_lock); 418 419 return len; 420 } 421 EXPORT_SYMBOL(target_show_dynamic_sessions); 422 423 static void target_complete_nacl(struct kref *kref) 424 { 425 struct se_node_acl *nacl = container_of(kref, 426 struct se_node_acl, acl_kref); 427 428 complete(&nacl->acl_free_comp); 429 } 430 431 void target_put_nacl(struct se_node_acl *nacl) 432 { 433 kref_put(&nacl->acl_kref, target_complete_nacl); 434 } 435 436 void transport_deregister_session_configfs(struct se_session *se_sess) 437 { 438 struct se_node_acl *se_nacl; 439 unsigned long flags; 440 /* 441 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 442 */ 443 se_nacl = se_sess->se_node_acl; 444 if (se_nacl) { 445 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 446 if (se_nacl->acl_stop == 0) 447 list_del(&se_sess->sess_acl_list); 448 /* 449 * If the session list is empty, then clear the pointer. 450 * Otherwise, set the struct se_session pointer from the tail 451 * element of the per struct se_node_acl active session list. 
452 */ 453 if (list_empty(&se_nacl->acl_sess_list)) 454 se_nacl->nacl_sess = NULL; 455 else { 456 se_nacl->nacl_sess = container_of( 457 se_nacl->acl_sess_list.prev, 458 struct se_session, sess_acl_list); 459 } 460 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 461 } 462 } 463 EXPORT_SYMBOL(transport_deregister_session_configfs); 464 465 void transport_free_session(struct se_session *se_sess) 466 { 467 if (se_sess->sess_cmd_map) { 468 percpu_ida_destroy(&se_sess->sess_tag_pool); 469 kvfree(se_sess->sess_cmd_map); 470 } 471 kmem_cache_free(se_sess_cache, se_sess); 472 } 473 EXPORT_SYMBOL(transport_free_session); 474 475 void transport_deregister_session(struct se_session *se_sess) 476 { 477 struct se_portal_group *se_tpg = se_sess->se_tpg; 478 const struct target_core_fabric_ops *se_tfo; 479 struct se_node_acl *se_nacl; 480 unsigned long flags; 481 bool comp_nacl = true, drop_nacl = false; 482 483 if (!se_tpg) { 484 transport_free_session(se_sess); 485 return; 486 } 487 se_tfo = se_tpg->se_tpg_tfo; 488 489 spin_lock_irqsave(&se_tpg->session_lock, flags); 490 list_del(&se_sess->sess_list); 491 se_sess->se_tpg = NULL; 492 se_sess->fabric_sess_ptr = NULL; 493 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 494 495 /* 496 * Determine if we need to do extra work for this initiator node's 497 * struct se_node_acl if it had been previously dynamically generated. 498 */ 499 se_nacl = se_sess->se_node_acl; 500 501 mutex_lock(&se_tpg->acl_node_mutex); 502 if (se_nacl && se_nacl->dynamic_node_acl) { 503 if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 504 list_del(&se_nacl->acl_list); 505 drop_nacl = true; 506 } 507 } 508 mutex_unlock(&se_tpg->acl_node_mutex); 509 510 if (drop_nacl) { 511 core_tpg_wait_for_nacl_pr_ref(se_nacl); 512 core_free_device_list_for_node(se_nacl, se_tpg); 513 kfree(se_nacl); 514 comp_nacl = false; 515 } 516 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 517 se_tpg->se_tpg_tfo->get_fabric_name()); 518 /* 519 * If last kref is dropping now for an explicit NodeACL, awake sleeping 520 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 521 * removal context. 522 */ 523 if (se_nacl && comp_nacl) 524 target_put_nacl(se_nacl); 525 526 transport_free_session(se_sess); 527 } 528 EXPORT_SYMBOL(transport_deregister_session); 529 530 /* 531 * Called with cmd->t_state_lock held. 532 */ 533 static void target_remove_from_state_list(struct se_cmd *cmd) 534 { 535 struct se_device *dev = cmd->se_dev; 536 unsigned long flags; 537 538 if (!dev) 539 return; 540 541 if (cmd->transport_state & CMD_T_BUSY) 542 return; 543 544 spin_lock_irqsave(&dev->execute_task_lock, flags); 545 if (cmd->state_active) { 546 list_del(&cmd->state_list); 547 cmd->state_active = false; 548 } 549 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 550 } 551 552 static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, 553 bool write_pending) 554 { 555 unsigned long flags; 556 557 spin_lock_irqsave(&cmd->t_state_lock, flags); 558 if (write_pending) 559 cmd->t_state = TRANSPORT_WRITE_PENDING; 560 561 if (remove_from_lists) { 562 target_remove_from_state_list(cmd); 563 564 /* 565 * Clear struct se_cmd->se_lun before the handoff to FE. 566 */ 567 cmd->se_lun = NULL; 568 } 569 570 /* 571 * Determine if frontend context caller is requesting the stopping of 572 * this command for frontend exceptions. 
573 */ 574 if (cmd->transport_state & CMD_T_STOP) { 575 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 576 __func__, __LINE__, cmd->tag); 577 578 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 579 580 complete_all(&cmd->t_transport_stop_comp); 581 return 1; 582 } 583 584 cmd->transport_state &= ~CMD_T_ACTIVE; 585 if (remove_from_lists) { 586 /* 587 * Some fabric modules like tcm_loop can release 588 * their internally allocated I/O reference now and 589 * struct se_cmd now. 590 * 591 * Fabric modules are expected to return '1' here if the 592 * se_cmd being passed is released at this point, 593 * or zero if not being released. 594 */ 595 if (cmd->se_tfo->check_stop_free != NULL) { 596 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 597 return cmd->se_tfo->check_stop_free(cmd); 598 } 599 } 600 601 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 602 return 0; 603 } 604 605 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) 606 { 607 return transport_cmd_check_stop(cmd, true, false); 608 } 609 610 static void transport_lun_remove_cmd(struct se_cmd *cmd) 611 { 612 struct se_lun *lun = cmd->se_lun; 613 614 if (!lun) 615 return; 616 617 if (cmpxchg(&cmd->lun_ref_active, true, false)) 618 percpu_ref_put(&lun->lun_ref); 619 } 620 621 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 622 { 623 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) 624 transport_lun_remove_cmd(cmd); 625 /* 626 * Allow the fabric driver to unmap any resources before 627 * releasing the descriptor via TFO->release_cmd() 628 */ 629 if (remove) 630 cmd->se_tfo->aborted_task(cmd); 631 632 if (transport_cmd_check_stop_to_fabric(cmd)) 633 return; 634 if (remove) 635 transport_put_cmd(cmd); 636 } 637 638 static void target_complete_failure_work(struct work_struct *work) 639 { 640 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 641 642 transport_generic_request_failure(cmd, 643 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 644 } 645 646 /* 647 * Used when asking transport to copy Sense Data from the underlying 648 * Linux/SCSI struct scsi_cmnd 649 */ 650 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) 651 { 652 struct se_device *dev = cmd->se_dev; 653 654 WARN_ON(!cmd->se_lun); 655 656 if (!dev) 657 return NULL; 658 659 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) 660 return NULL; 661 662 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 663 664 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", 665 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); 666 return cmd->sense_buffer; 667 } 668 669 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 670 { 671 struct se_device *dev = cmd->se_dev; 672 int success = scsi_status == GOOD; 673 unsigned long flags; 674 675 cmd->scsi_status = scsi_status; 676 677 678 spin_lock_irqsave(&cmd->t_state_lock, flags); 679 cmd->transport_state &= ~CMD_T_BUSY; 680 681 if (dev && dev->transport->transport_complete) { 682 dev->transport->transport_complete(cmd, 683 cmd->t_data_sg, 684 transport_get_sense_buffer(cmd)); 685 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 686 success = 1; 687 } 688 689 /* 690 * See if we are waiting to complete for an exception condition. 691 */ 692 if (cmd->transport_state & CMD_T_REQUEST_STOP) { 693 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 694 complete(&cmd->task_stop_comp); 695 return; 696 } 697 698 /* 699 * Check for case where an explicit ABORT_TASK has been received 700 * and transport_wait_for_tasks() will be waiting for completion.. 
701 */ 702 if (cmd->transport_state & CMD_T_ABORTED && 703 cmd->transport_state & CMD_T_STOP) { 704 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 705 complete_all(&cmd->t_transport_stop_comp); 706 return; 707 } else if (!success) { 708 INIT_WORK(&cmd->work, target_complete_failure_work); 709 } else { 710 INIT_WORK(&cmd->work, target_complete_ok_work); 711 } 712 713 cmd->t_state = TRANSPORT_COMPLETE; 714 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 715 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 716 717 if (cmd->cpuid == -1) 718 queue_work(target_completion_wq, &cmd->work); 719 else 720 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); 721 } 722 EXPORT_SYMBOL(target_complete_cmd); 723 724 void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) 725 { 726 if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) { 727 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 728 cmd->residual_count += cmd->data_length - length; 729 } else { 730 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 731 cmd->residual_count = cmd->data_length - length; 732 } 733 734 cmd->data_length = length; 735 } 736 737 target_complete_cmd(cmd, scsi_status); 738 } 739 EXPORT_SYMBOL(target_complete_cmd_with_length); 740 741 static void target_add_to_state_list(struct se_cmd *cmd) 742 { 743 struct se_device *dev = cmd->se_dev; 744 unsigned long flags; 745 746 spin_lock_irqsave(&dev->execute_task_lock, flags); 747 if (!cmd->state_active) { 748 list_add_tail(&cmd->state_list, &dev->state_list); 749 cmd->state_active = true; 750 } 751 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 752 } 753 754 /* 755 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status 756 */ 757 static void transport_write_pending_qf(struct se_cmd *cmd); 758 static void transport_complete_qf(struct se_cmd *cmd); 759 760 void target_qf_do_work(struct work_struct *work) 761 { 762 struct se_device *dev = container_of(work, struct se_device, 763 qf_work_queue); 764 LIST_HEAD(qf_cmd_list); 765 struct se_cmd *cmd, *cmd_tmp; 766 767 spin_lock_irq(&dev->qf_cmd_lock); 768 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); 769 spin_unlock_irq(&dev->qf_cmd_lock); 770 771 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 772 list_del(&cmd->se_qf_node); 773 atomic_dec_mb(&dev->dev_qf_count); 774 775 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 776 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 777 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : 778 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? 
"WRITE_PENDING" 779 : "UNKNOWN"); 780 781 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 782 transport_write_pending_qf(cmd); 783 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) 784 transport_complete_qf(cmd); 785 } 786 } 787 788 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 789 { 790 switch (cmd->data_direction) { 791 case DMA_NONE: 792 return "NONE"; 793 case DMA_FROM_DEVICE: 794 return "READ"; 795 case DMA_TO_DEVICE: 796 return "WRITE"; 797 case DMA_BIDIRECTIONAL: 798 return "BIDI"; 799 default: 800 break; 801 } 802 803 return "UNKNOWN"; 804 } 805 806 void transport_dump_dev_state( 807 struct se_device *dev, 808 char *b, 809 int *bl) 810 { 811 *bl += sprintf(b + *bl, "Status: "); 812 if (dev->export_count) 813 *bl += sprintf(b + *bl, "ACTIVATED"); 814 else 815 *bl += sprintf(b + *bl, "DEACTIVATED"); 816 817 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); 818 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", 819 dev->dev_attrib.block_size, 820 dev->dev_attrib.hw_max_sectors); 821 *bl += sprintf(b + *bl, " "); 822 } 823 824 void transport_dump_vpd_proto_id( 825 struct t10_vpd *vpd, 826 unsigned char *p_buf, 827 int p_buf_len) 828 { 829 unsigned char buf[VPD_TMP_BUF_SIZE]; 830 int len; 831 832 memset(buf, 0, VPD_TMP_BUF_SIZE); 833 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 834 835 switch (vpd->protocol_identifier) { 836 case 0x00: 837 sprintf(buf+len, "Fibre Channel\n"); 838 break; 839 case 0x10: 840 sprintf(buf+len, "Parallel SCSI\n"); 841 break; 842 case 0x20: 843 sprintf(buf+len, "SSA\n"); 844 break; 845 case 0x30: 846 sprintf(buf+len, "IEEE 1394\n"); 847 break; 848 case 0x40: 849 sprintf(buf+len, "SCSI Remote Direct Memory Access" 850 " Protocol\n"); 851 break; 852 case 0x50: 853 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 854 break; 855 case 0x60: 856 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 857 break; 858 case 0x70: 859 sprintf(buf+len, "Automation/Drive Interface Transport" 860 " Protocol\n"); 861 break; 862 case 0x80: 863 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 864 break; 865 default: 866 sprintf(buf+len, "Unknown 0x%02x\n", 867 vpd->protocol_identifier); 868 break; 869 } 870 871 if (p_buf) 872 strncpy(p_buf, buf, p_buf_len); 873 else 874 pr_debug("%s", buf); 875 } 876 877 void 878 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 879 { 880 /* 881 * Check if the Protocol Identifier Valid (PIV) bit is set.. 
882 * 883 * from spc3r23.pdf section 7.5.1 884 */ 885 if (page_83[1] & 0x80) { 886 vpd->protocol_identifier = (page_83[0] & 0xf0); 887 vpd->protocol_identifier_set = 1; 888 transport_dump_vpd_proto_id(vpd, NULL, 0); 889 } 890 } 891 EXPORT_SYMBOL(transport_set_vpd_proto_id); 892 893 int transport_dump_vpd_assoc( 894 struct t10_vpd *vpd, 895 unsigned char *p_buf, 896 int p_buf_len) 897 { 898 unsigned char buf[VPD_TMP_BUF_SIZE]; 899 int ret = 0; 900 int len; 901 902 memset(buf, 0, VPD_TMP_BUF_SIZE); 903 len = sprintf(buf, "T10 VPD Identifier Association: "); 904 905 switch (vpd->association) { 906 case 0x00: 907 sprintf(buf+len, "addressed logical unit\n"); 908 break; 909 case 0x10: 910 sprintf(buf+len, "target port\n"); 911 break; 912 case 0x20: 913 sprintf(buf+len, "SCSI target device\n"); 914 break; 915 default: 916 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); 917 ret = -EINVAL; 918 break; 919 } 920 921 if (p_buf) 922 strncpy(p_buf, buf, p_buf_len); 923 else 924 pr_debug("%s", buf); 925 926 return ret; 927 } 928 929 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) 930 { 931 /* 932 * The VPD identification association.. 933 * 934 * from spc3r23.pdf Section 7.6.3.1 Table 297 935 */ 936 vpd->association = (page_83[1] & 0x30); 937 return transport_dump_vpd_assoc(vpd, NULL, 0); 938 } 939 EXPORT_SYMBOL(transport_set_vpd_assoc); 940 941 int transport_dump_vpd_ident_type( 942 struct t10_vpd *vpd, 943 unsigned char *p_buf, 944 int p_buf_len) 945 { 946 unsigned char buf[VPD_TMP_BUF_SIZE]; 947 int ret = 0; 948 int len; 949 950 memset(buf, 0, VPD_TMP_BUF_SIZE); 951 len = sprintf(buf, "T10 VPD Identifier Type: "); 952 953 switch (vpd->device_identifier_type) { 954 case 0x00: 955 sprintf(buf+len, "Vendor specific\n"); 956 break; 957 case 0x01: 958 sprintf(buf+len, "T10 Vendor ID based\n"); 959 break; 960 case 0x02: 961 sprintf(buf+len, "EUI-64 based\n"); 962 break; 963 case 0x03: 964 sprintf(buf+len, "NAA\n"); 965 break; 966 case 0x04: 967 sprintf(buf+len, "Relative target port identifier\n"); 968 break; 969 case 0x08: 970 sprintf(buf+len, "SCSI name string\n"); 971 break; 972 default: 973 sprintf(buf+len, "Unsupported: 0x%02x\n", 974 vpd->device_identifier_type); 975 ret = -EINVAL; 976 break; 977 } 978 979 if (p_buf) { 980 if (p_buf_len < strlen(buf)+1) 981 return -EINVAL; 982 strncpy(p_buf, buf, p_buf_len); 983 } else { 984 pr_debug("%s", buf); 985 } 986 987 return ret; 988 } 989 990 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) 991 { 992 /* 993 * The VPD identifier type.. 
994 * 995 * from spc3r23.pdf Section 7.6.3.1 Table 298 996 */ 997 vpd->device_identifier_type = (page_83[1] & 0x0f); 998 return transport_dump_vpd_ident_type(vpd, NULL, 0); 999 } 1000 EXPORT_SYMBOL(transport_set_vpd_ident_type); 1001 1002 int transport_dump_vpd_ident( 1003 struct t10_vpd *vpd, 1004 unsigned char *p_buf, 1005 int p_buf_len) 1006 { 1007 unsigned char buf[VPD_TMP_BUF_SIZE]; 1008 int ret = 0; 1009 1010 memset(buf, 0, VPD_TMP_BUF_SIZE); 1011 1012 switch (vpd->device_identifier_code_set) { 1013 case 0x01: /* Binary */ 1014 snprintf(buf, sizeof(buf), 1015 "T10 VPD Binary Device Identifier: %s\n", 1016 &vpd->device_identifier[0]); 1017 break; 1018 case 0x02: /* ASCII */ 1019 snprintf(buf, sizeof(buf), 1020 "T10 VPD ASCII Device Identifier: %s\n", 1021 &vpd->device_identifier[0]); 1022 break; 1023 case 0x03: /* UTF-8 */ 1024 snprintf(buf, sizeof(buf), 1025 "T10 VPD UTF-8 Device Identifier: %s\n", 1026 &vpd->device_identifier[0]); 1027 break; 1028 default: 1029 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" 1030 " 0x%02x", vpd->device_identifier_code_set); 1031 ret = -EINVAL; 1032 break; 1033 } 1034 1035 if (p_buf) 1036 strncpy(p_buf, buf, p_buf_len); 1037 else 1038 pr_debug("%s", buf); 1039 1040 return ret; 1041 } 1042 1043 int 1044 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) 1045 { 1046 static const char hex_str[] = "0123456789abcdef"; 1047 int j = 0, i = 4; /* offset to start of the identifier */ 1048 1049 /* 1050 * The VPD Code Set (encoding) 1051 * 1052 * from spc3r23.pdf Section 7.6.3.1 Table 296 1053 */ 1054 vpd->device_identifier_code_set = (page_83[0] & 0x0f); 1055 switch (vpd->device_identifier_code_set) { 1056 case 0x01: /* Binary */ 1057 vpd->device_identifier[j++] = 1058 hex_str[vpd->device_identifier_type]; 1059 while (i < (4 + page_83[3])) { 1060 vpd->device_identifier[j++] = 1061 hex_str[(page_83[i] & 0xf0) >> 4]; 1062 vpd->device_identifier[j++] = 1063 hex_str[page_83[i] & 0x0f]; 1064 i++; 1065 } 1066 break; 1067 case 0x02: /* ASCII */ 1068 case 0x03: /* UTF-8 */ 1069 while (i < (4 + page_83[3])) 1070 vpd->device_identifier[j++] = page_83[i++]; 1071 break; 1072 default: 1073 break; 1074 } 1075 1076 return transport_dump_vpd_ident(vpd, NULL, 0); 1077 } 1078 EXPORT_SYMBOL(transport_set_vpd_ident); 1079 1080 static sense_reason_t 1081 target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev, 1082 unsigned int size) 1083 { 1084 u32 mtl; 1085 1086 if (!cmd->se_tfo->max_data_sg_nents) 1087 return TCM_NO_SENSE; 1088 /* 1089 * Check if fabric enforced maximum SGL entries per I/O descriptor 1090 * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT + 1091 * residual_count and reduce original cmd->data_length to maximum 1092 * length based on single PAGE_SIZE entry scatter-lists. 1093 */ 1094 mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE); 1095 if (cmd->data_length > mtl) { 1096 /* 1097 * If an existing CDB overflow is present, calculate new residual 1098 * based on CDB size minus fabric maximum transfer length. 1099 * 1100 * If an existing CDB underflow is present, calculate new residual 1101 * based on original cmd->data_length minus fabric maximum transfer 1102 * length. 1103 * 1104 * Otherwise, set the underflow residual based on cmd->data_length 1105 * minus fabric maximum transfer length. 
1106 */ 1107 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1108 cmd->residual_count = (size - mtl); 1109 } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 1110 u32 orig_dl = size + cmd->residual_count; 1111 cmd->residual_count = (orig_dl - mtl); 1112 } else { 1113 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1114 cmd->residual_count = (cmd->data_length - mtl); 1115 } 1116 cmd->data_length = mtl; 1117 /* 1118 * Reset sbc_check_prot() calculated protection payload 1119 * length based upon the new smaller MTL. 1120 */ 1121 if (cmd->prot_length) { 1122 u32 sectors = (mtl / dev->dev_attrib.block_size); 1123 cmd->prot_length = dev->prot_length * sectors; 1124 } 1125 } 1126 return TCM_NO_SENSE; 1127 } 1128 1129 sense_reason_t 1130 target_cmd_size_check(struct se_cmd *cmd, unsigned int size) 1131 { 1132 struct se_device *dev = cmd->se_dev; 1133 1134 if (cmd->unknown_data_length) { 1135 cmd->data_length = size; 1136 } else if (size != cmd->data_length) { 1137 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" 1138 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 1139 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 1140 cmd->data_length, size, cmd->t_task_cdb[0]); 1141 1142 if (cmd->data_direction == DMA_TO_DEVICE && 1143 cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1144 pr_err("Rejecting underflow/overflow WRITE data\n"); 1145 return TCM_INVALID_CDB_FIELD; 1146 } 1147 /* 1148 * Reject READ_* or WRITE_* with overflow/underflow for 1149 * type SCF_SCSI_DATA_CDB. 1150 */ 1151 if (dev->dev_attrib.block_size != 512) { 1152 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 1153 " CDB on non 512-byte sector setup subsystem" 1154 " plugin: %s\n", dev->transport->name); 1155 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ 1156 return TCM_INVALID_CDB_FIELD; 1157 } 1158 /* 1159 * For the overflow case keep the existing fabric provided 1160 * ->data_length. Otherwise for the underflow case, reset 1161 * ->data_length to the smaller SCSI expected data transfer 1162 * length. 1163 */ 1164 if (size > cmd->data_length) { 1165 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; 1166 cmd->residual_count = (size - cmd->data_length); 1167 } else { 1168 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1169 cmd->residual_count = (cmd->data_length - size); 1170 cmd->data_length = size; 1171 } 1172 } 1173 1174 return target_check_max_data_sg_nents(cmd, dev, size); 1175 1176 } 1177 1178 /* 1179 * Used by fabric modules containing a local struct se_cmd within their 1180 * fabric dependent per I/O descriptor. 1181 * 1182 * Preserves the value of @cmd->tag. 
1183 */ 1184 void transport_init_se_cmd( 1185 struct se_cmd *cmd, 1186 const struct target_core_fabric_ops *tfo, 1187 struct se_session *se_sess, 1188 u32 data_length, 1189 int data_direction, 1190 int task_attr, 1191 unsigned char *sense_buffer) 1192 { 1193 INIT_LIST_HEAD(&cmd->se_delayed_node); 1194 INIT_LIST_HEAD(&cmd->se_qf_node); 1195 INIT_LIST_HEAD(&cmd->se_cmd_list); 1196 INIT_LIST_HEAD(&cmd->state_list); 1197 init_completion(&cmd->t_transport_stop_comp); 1198 init_completion(&cmd->cmd_wait_comp); 1199 init_completion(&cmd->task_stop_comp); 1200 spin_lock_init(&cmd->t_state_lock); 1201 kref_init(&cmd->cmd_kref); 1202 cmd->transport_state = CMD_T_DEV_ACTIVE; 1203 1204 cmd->se_tfo = tfo; 1205 cmd->se_sess = se_sess; 1206 cmd->data_length = data_length; 1207 cmd->data_direction = data_direction; 1208 cmd->sam_task_attr = task_attr; 1209 cmd->sense_buffer = sense_buffer; 1210 1211 cmd->state_active = false; 1212 } 1213 EXPORT_SYMBOL(transport_init_se_cmd); 1214 1215 static sense_reason_t 1216 transport_check_alloc_task_attr(struct se_cmd *cmd) 1217 { 1218 struct se_device *dev = cmd->se_dev; 1219 1220 /* 1221 * Check if SAM Task Attribute emulation is enabled for this 1222 * struct se_device storage object 1223 */ 1224 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1225 return 0; 1226 1227 if (cmd->sam_task_attr == TCM_ACA_TAG) { 1228 pr_debug("SAM Task Attribute ACA" 1229 " emulation is not supported\n"); 1230 return TCM_INVALID_CDB_FIELD; 1231 } 1232 1233 return 0; 1234 } 1235 1236 sense_reason_t 1237 target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) 1238 { 1239 struct se_device *dev = cmd->se_dev; 1240 sense_reason_t ret; 1241 1242 /* 1243 * Ensure that the received CDB is less than the max (252 + 8) bytes 1244 * for VARIABLE_LENGTH_CMD 1245 */ 1246 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { 1247 pr_err("Received SCSI CDB with command_size: %d that" 1248 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1249 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1250 return TCM_INVALID_CDB_FIELD; 1251 } 1252 /* 1253 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, 1254 * allocate the additional extended CDB buffer now.. Otherwise 1255 * setup the pointer from __t_task_cdb to t_task_cdb. 
1256 */ 1257 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { 1258 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), 1259 GFP_KERNEL); 1260 if (!cmd->t_task_cdb) { 1261 pr_err("Unable to allocate cmd->t_task_cdb" 1262 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1263 scsi_command_size(cdb), 1264 (unsigned long)sizeof(cmd->__t_task_cdb)); 1265 return TCM_OUT_OF_RESOURCES; 1266 } 1267 } else 1268 cmd->t_task_cdb = &cmd->__t_task_cdb[0]; 1269 /* 1270 * Copy the original CDB into cmd-> 1271 */ 1272 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); 1273 1274 trace_target_sequencer_start(cmd); 1275 1276 /* 1277 * Check for an existing UNIT ATTENTION condition 1278 */ 1279 ret = target_scsi3_ua_check(cmd); 1280 if (ret) 1281 return ret; 1282 1283 ret = target_alua_state_check(cmd); 1284 if (ret) 1285 return ret; 1286 1287 ret = target_check_reservation(cmd); 1288 if (ret) { 1289 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1290 return ret; 1291 } 1292 1293 ret = dev->transport->parse_cdb(cmd); 1294 if (ret == TCM_UNSUPPORTED_SCSI_OPCODE) 1295 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", 1296 cmd->se_tfo->get_fabric_name(), 1297 cmd->se_sess->se_node_acl->initiatorname, 1298 cmd->t_task_cdb[0]); 1299 if (ret) 1300 return ret; 1301 1302 ret = transport_check_alloc_task_attr(cmd); 1303 if (ret) 1304 return ret; 1305 1306 cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 1307 atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus); 1308 return 0; 1309 } 1310 EXPORT_SYMBOL(target_setup_cmd_from_cdb); 1311 1312 /* 1313 * Used by fabric module frontends to queue tasks directly. 1314 * May only be used from process context. 1315 */ 1316 int transport_handle_cdb_direct( 1317 struct se_cmd *cmd) 1318 { 1319 sense_reason_t ret; 1320 1321 if (!cmd->se_lun) { 1322 dump_stack(); 1323 pr_err("cmd->se_lun is NULL\n"); 1324 return -EINVAL; 1325 } 1326 if (in_interrupt()) { 1327 dump_stack(); 1328 pr_err("transport_generic_handle_cdb cannot be called" 1329 " from interrupt context\n"); 1330 return -EINVAL; 1331 } 1332 /* 1333 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that 1334 * outstanding descriptors are handled correctly during shutdown via 1335 * transport_wait_for_tasks() 1336 * 1337 * Also, we don't take cmd->t_state_lock here as we only expect 1338 * this to be called for initial descriptor submission. 1339 */ 1340 cmd->t_state = TRANSPORT_NEW_CMD; 1341 cmd->transport_state |= CMD_T_ACTIVE; 1342 1343 /* 1344 * transport_generic_new_cmd() is already handling QUEUE_FULL, 1345 * so follow TRANSPORT_NEW_CMD processing thread context usage 1346 * and call transport_generic_request_failure() if necessary.. 1347 */ 1348 ret = transport_generic_new_cmd(cmd); 1349 if (ret) 1350 transport_generic_request_failure(cmd, ret); 1351 return 0; 1352 } 1353 EXPORT_SYMBOL(transport_handle_cdb_direct); 1354 1355 sense_reason_t 1356 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 1357 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1358 { 1359 if (!sgl || !sgl_count) 1360 return 0; 1361 1362 /* 1363 * Reject SCSI data overflow with map_mem_to_cmd() as incoming 1364 * scatterlists already have been set to follow what the fabric 1365 * passes for the original expected data transfer length. 
1366 */ 1367 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1368 pr_warn("Rejecting SCSI DATA overflow for fabric using" 1369 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); 1370 return TCM_INVALID_CDB_FIELD; 1371 } 1372 1373 cmd->t_data_sg = sgl; 1374 cmd->t_data_nents = sgl_count; 1375 cmd->t_bidi_data_sg = sgl_bidi; 1376 cmd->t_bidi_data_nents = sgl_bidi_count; 1377 1378 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1379 return 0; 1380 } 1381 1382 /* 1383 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized 1384 * se_cmd + use pre-allocated SGL memory. 1385 * 1386 * @se_cmd: command descriptor to submit 1387 * @se_sess: associated se_sess for endpoint 1388 * @cdb: pointer to SCSI CDB 1389 * @sense: pointer to SCSI sense buffer 1390 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1391 * @data_length: fabric expected data transfer length 1392 * @task_addr: SAM task attribute 1393 * @data_dir: DMA data direction 1394 * @flags: flags for command submission from target_sc_flags_tables 1395 * @sgl: struct scatterlist memory for unidirectional mapping 1396 * @sgl_count: scatterlist count for unidirectional mapping 1397 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping 1398 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping 1399 * @sgl_prot: struct scatterlist memory protection information 1400 * @sgl_prot_count: scatterlist count for protection information 1401 * 1402 * Task tags are supported if the caller has set @se_cmd->tag. 1403 * 1404 * Returns non zero to signal active I/O shutdown failure. All other 1405 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1406 * but still return zero here. 1407 * 1408 * This may only be called from process context, and also currently 1409 * assumes internal allocation of fabric payload buffer by target-core. 1410 */ 1411 int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess, 1412 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, 1413 u32 data_length, int task_attr, int data_dir, int flags, 1414 struct scatterlist *sgl, u32 sgl_count, 1415 struct scatterlist *sgl_bidi, u32 sgl_bidi_count, 1416 struct scatterlist *sgl_prot, u32 sgl_prot_count) 1417 { 1418 struct se_portal_group *se_tpg; 1419 sense_reason_t rc; 1420 int ret; 1421 1422 se_tpg = se_sess->se_tpg; 1423 BUG_ON(!se_tpg); 1424 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); 1425 BUG_ON(in_interrupt()); 1426 /* 1427 * Initialize se_cmd for target operation. From this point 1428 * exceptions are handled by sending exception status via 1429 * target_core_fabric_ops->queue_status() callback 1430 */ 1431 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1432 data_length, data_dir, task_attr, sense); 1433 if (flags & TARGET_SCF_UNKNOWN_SIZE) 1434 se_cmd->unknown_data_length = 1; 1435 /* 1436 * Obtain struct se_cmd->cmd_kref reference and add new cmd to 1437 * se_sess->sess_cmd_list. A second kref_get here is necessary 1438 * for fabrics using TARGET_SCF_ACK_KREF that expect a second 1439 * kref_put() to happen during fabric packet acknowledgement. 
1440 */ 1441 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); 1442 if (ret) 1443 return ret; 1444 /* 1445 * Signal bidirectional data payloads to target-core 1446 */ 1447 if (flags & TARGET_SCF_BIDI_OP) 1448 se_cmd->se_cmd_flags |= SCF_BIDI; 1449 /* 1450 * Locate se_lun pointer and attach it to struct se_cmd 1451 */ 1452 rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun); 1453 if (rc) { 1454 transport_send_check_condition_and_sense(se_cmd, rc, 0); 1455 target_put_sess_cmd(se_cmd); 1456 return 0; 1457 } 1458 1459 rc = target_setup_cmd_from_cdb(se_cmd, cdb); 1460 if (rc != 0) { 1461 transport_generic_request_failure(se_cmd, rc); 1462 return 0; 1463 } 1464 1465 /* 1466 * Save pointers for SGLs containing protection information, 1467 * if present. 1468 */ 1469 if (sgl_prot_count) { 1470 se_cmd->t_prot_sg = sgl_prot; 1471 se_cmd->t_prot_nents = sgl_prot_count; 1472 se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC; 1473 } 1474 1475 /* 1476 * When a non zero sgl_count has been passed perform SGL passthrough 1477 * mapping for pre-allocated fabric memory instead of having target 1478 * core perform an internal SGL allocation.. 1479 */ 1480 if (sgl_count != 0) { 1481 BUG_ON(!sgl); 1482 1483 /* 1484 * A work-around for tcm_loop as some userspace code via 1485 * scsi-generic do not memset their associated read buffers, 1486 * so go ahead and do that here for type non-data CDBs. Also 1487 * note that this is currently guaranteed to be a single SGL 1488 * for this case by target core in target_setup_cmd_from_cdb() 1489 * -> transport_generic_cmd_sequencer(). 1490 */ 1491 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && 1492 se_cmd->data_direction == DMA_FROM_DEVICE) { 1493 unsigned char *buf = NULL; 1494 1495 if (sgl) 1496 buf = kmap(sg_page(sgl)) + sgl->offset; 1497 1498 if (buf) { 1499 memset(buf, 0, sgl->length); 1500 kunmap(sg_page(sgl)); 1501 } 1502 } 1503 1504 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count, 1505 sgl_bidi, sgl_bidi_count); 1506 if (rc != 0) { 1507 transport_generic_request_failure(se_cmd, rc); 1508 return 0; 1509 } 1510 } 1511 1512 /* 1513 * Check if we need to delay processing because of ALUA 1514 * Active/NonOptimized primary access state.. 1515 */ 1516 core_alua_check_nonop_delay(se_cmd); 1517 1518 transport_handle_cdb_direct(se_cmd); 1519 return 0; 1520 } 1521 EXPORT_SYMBOL(target_submit_cmd_map_sgls); 1522 1523 /* 1524 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd 1525 * 1526 * @se_cmd: command descriptor to submit 1527 * @se_sess: associated se_sess for endpoint 1528 * @cdb: pointer to SCSI CDB 1529 * @sense: pointer to SCSI sense buffer 1530 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1531 * @data_length: fabric expected data transfer length 1532 * @task_addr: SAM task attribute 1533 * @data_dir: DMA data direction 1534 * @flags: flags for command submission from target_sc_flags_tables 1535 * 1536 * Task tags are supported if the caller has set @se_cmd->tag. 1537 * 1538 * Returns non zero to signal active I/O shutdown failure. All other 1539 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1540 * but still return zero here. 1541 * 1542 * This may only be called from process context, and also currently 1543 * assumes internal allocation of fabric payload buffer by target-core. 1544 * 1545 * It also assumes interal target core SGL memory allocation. 
1546 */ 1547 int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, 1548 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, 1549 u32 data_length, int task_attr, int data_dir, int flags) 1550 { 1551 return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, 1552 unpacked_lun, data_length, task_attr, data_dir, 1553 flags, NULL, 0, NULL, 0, NULL, 0); 1554 } 1555 EXPORT_SYMBOL(target_submit_cmd); 1556 1557 static void target_complete_tmr_failure(struct work_struct *work) 1558 { 1559 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); 1560 1561 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; 1562 se_cmd->se_tfo->queue_tm_rsp(se_cmd); 1563 1564 transport_cmd_check_stop_to_fabric(se_cmd); 1565 } 1566 1567 /** 1568 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd 1569 * for TMR CDBs 1570 * 1571 * @se_cmd: command descriptor to submit 1572 * @se_sess: associated se_sess for endpoint 1573 * @sense: pointer to SCSI sense buffer 1574 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1575 * @fabric_context: fabric context for TMR req 1576 * @tm_type: Type of TM request 1577 * @gfp: gfp type for caller 1578 * @tag: referenced task tag for TMR_ABORT_TASK 1579 * @flags: submit cmd flags 1580 * 1581 * Callable from all contexts. 1582 **/ 1583 1584 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 1585 unsigned char *sense, u64 unpacked_lun, 1586 void *fabric_tmr_ptr, unsigned char tm_type, 1587 gfp_t gfp, u64 tag, int flags) 1588 { 1589 struct se_portal_group *se_tpg; 1590 int ret; 1591 1592 se_tpg = se_sess->se_tpg; 1593 BUG_ON(!se_tpg); 1594 1595 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1596 0, DMA_NONE, TCM_SIMPLE_TAG, sense); 1597 /* 1598 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req 1599 * allocation failure. 1600 */ 1601 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); 1602 if (ret < 0) 1603 return -ENOMEM; 1604 1605 if (tm_type == TMR_ABORT_TASK) 1606 se_cmd->se_tmr_req->ref_task_tag = tag; 1607 1608 /* See target_submit_cmd for commentary */ 1609 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); 1610 if (ret) { 1611 core_tmr_release_req(se_cmd->se_tmr_req); 1612 return ret; 1613 } 1614 1615 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); 1616 if (ret) { 1617 /* 1618 * For callback during failure handling, push this work off 1619 * to process context with TMR_LUN_DOES_NOT_EXIST status. 1620 */ 1621 INIT_WORK(&se_cmd->work, target_complete_tmr_failure); 1622 schedule_work(&se_cmd->work); 1623 return 0; 1624 } 1625 transport_generic_handle_tmr(se_cmd); 1626 return 0; 1627 } 1628 EXPORT_SYMBOL(target_submit_tmr); 1629 1630 /* 1631 * If the cmd is active, request it to be stopped and sleep until it 1632 * has completed. 
1633 */ 1634 bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) 1635 __releases(&cmd->t_state_lock) 1636 __acquires(&cmd->t_state_lock) 1637 { 1638 bool was_active = false; 1639 1640 if (cmd->transport_state & CMD_T_BUSY) { 1641 cmd->transport_state |= CMD_T_REQUEST_STOP; 1642 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 1643 1644 pr_debug("cmd %p waiting to complete\n", cmd); 1645 wait_for_completion(&cmd->task_stop_comp); 1646 pr_debug("cmd %p stopped successfully\n", cmd); 1647 1648 spin_lock_irqsave(&cmd->t_state_lock, *flags); 1649 cmd->transport_state &= ~CMD_T_REQUEST_STOP; 1650 cmd->transport_state &= ~CMD_T_BUSY; 1651 was_active = true; 1652 } 1653 1654 return was_active; 1655 } 1656 1657 /* 1658 * Handle SAM-esque emulation for generic transport request failures. 1659 */ 1660 void transport_generic_request_failure(struct se_cmd *cmd, 1661 sense_reason_t sense_reason) 1662 { 1663 int ret = 0; 1664 1665 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx" 1666 " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]); 1667 pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n", 1668 cmd->se_tfo->get_cmd_state(cmd), 1669 cmd->t_state, sense_reason); 1670 pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", 1671 (cmd->transport_state & CMD_T_ACTIVE) != 0, 1672 (cmd->transport_state & CMD_T_STOP) != 0, 1673 (cmd->transport_state & CMD_T_SENT) != 0); 1674 1675 /* 1676 * For SAM Task Attribute emulation for failed struct se_cmd 1677 */ 1678 transport_complete_task_attr(cmd); 1679 /* 1680 * Handle special case for COMPARE_AND_WRITE failure, where the 1681 * callback is expected to drop the per device ->caw_sem. 1682 */ 1683 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 1684 cmd->transport_complete_callback) 1685 cmd->transport_complete_callback(cmd, false); 1686 1687 switch (sense_reason) { 1688 case TCM_NON_EXISTENT_LUN: 1689 case TCM_UNSUPPORTED_SCSI_OPCODE: 1690 case TCM_INVALID_CDB_FIELD: 1691 case TCM_INVALID_PARAMETER_LIST: 1692 case TCM_PARAMETER_LIST_LENGTH_ERROR: 1693 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 1694 case TCM_UNKNOWN_MODE_PAGE: 1695 case TCM_WRITE_PROTECTED: 1696 case TCM_ADDRESS_OUT_OF_RANGE: 1697 case TCM_CHECK_CONDITION_ABORT_CMD: 1698 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1699 case TCM_CHECK_CONDITION_NOT_READY: 1700 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: 1701 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: 1702 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: 1703 break; 1704 case TCM_OUT_OF_RESOURCES: 1705 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1706 break; 1707 case TCM_RESERVATION_CONFLICT: 1708 /* 1709 * No SENSE Data payload for this case, set SCSI Status 1710 * and queue the response to $FABRIC_MOD. 1711 * 1712 * Uses linux/include/scsi/scsi.h SAM status codes defs 1713 */ 1714 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1715 /* 1716 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 1717 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 1718 * CONFLICT STATUS. 
1719 * 1720 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1721 */ 1722 if (cmd->se_sess && 1723 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) { 1724 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 1725 cmd->orig_fe_lun, 0x2C, 1726 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1727 } 1728 trace_target_cmd_complete(cmd); 1729 ret = cmd->se_tfo->queue_status(cmd); 1730 if (ret == -EAGAIN || ret == -ENOMEM) 1731 goto queue_full; 1732 goto check_stop; 1733 default: 1734 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 1735 cmd->t_task_cdb[0], sense_reason); 1736 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1737 break; 1738 } 1739 1740 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); 1741 if (ret == -EAGAIN || ret == -ENOMEM) 1742 goto queue_full; 1743 1744 check_stop: 1745 transport_lun_remove_cmd(cmd); 1746 transport_cmd_check_stop_to_fabric(cmd); 1747 return; 1748 1749 queue_full: 1750 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 1751 transport_handle_queue_full(cmd, cmd->se_dev); 1752 } 1753 EXPORT_SYMBOL(transport_generic_request_failure); 1754 1755 void __target_execute_cmd(struct se_cmd *cmd) 1756 { 1757 sense_reason_t ret; 1758 1759 if (cmd->execute_cmd) { 1760 ret = cmd->execute_cmd(cmd); 1761 if (ret) { 1762 spin_lock_irq(&cmd->t_state_lock); 1763 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); 1764 spin_unlock_irq(&cmd->t_state_lock); 1765 1766 transport_generic_request_failure(cmd, ret); 1767 } 1768 } 1769 } 1770 1771 static int target_write_prot_action(struct se_cmd *cmd) 1772 { 1773 u32 sectors; 1774 /* 1775 * Perform WRITE_INSERT of PI using software emulation when backend 1776 * device has PI enabled, if the transport has not already generated 1777 * PI using hardware WRITE_INSERT offload. 1778 */ 1779 switch (cmd->prot_op) { 1780 case TARGET_PROT_DOUT_INSERT: 1781 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) 1782 sbc_dif_generate(cmd); 1783 break; 1784 case TARGET_PROT_DOUT_STRIP: 1785 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP) 1786 break; 1787 1788 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); 1789 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 1790 sectors, 0, cmd->t_prot_sg, 0); 1791 if (unlikely(cmd->pi_err)) { 1792 spin_lock_irq(&cmd->t_state_lock); 1793 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); 1794 spin_unlock_irq(&cmd->t_state_lock); 1795 transport_generic_request_failure(cmd, cmd->pi_err); 1796 return -1; 1797 } 1798 break; 1799 default: 1800 break; 1801 } 1802 1803 return 0; 1804 } 1805 1806 static bool target_handle_task_attr(struct se_cmd *cmd) 1807 { 1808 struct se_device *dev = cmd->se_dev; 1809 1810 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1811 return false; 1812 1813 /* 1814 * Check for the existence of HEAD_OF_QUEUE, and if true return 1 1815 * to allow the passed struct se_cmd list of tasks to the front of the list. 1816 */ 1817 switch (cmd->sam_task_attr) { 1818 case TCM_HEAD_TAG: 1819 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n", 1820 cmd->t_task_cdb[0]); 1821 return false; 1822 case TCM_ORDERED_TAG: 1823 atomic_inc_mb(&dev->dev_ordered_sync); 1824 1825 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n", 1826 cmd->t_task_cdb[0]); 1827 1828 /* 1829 * Execute an ORDERED command if no other older commands 1830 * exist that need to be completed first. 
1831 */ 1832 if (!atomic_read(&dev->simple_cmds)) 1833 return false; 1834 break; 1835 default: 1836 /* 1837 * For SIMPLE and UNTAGGED Task Attribute commands 1838 */ 1839 atomic_inc_mb(&dev->simple_cmds); 1840 break; 1841 } 1842 1843 if (atomic_read(&dev->dev_ordered_sync) == 0) 1844 return false; 1845 1846 spin_lock(&dev->delayed_cmd_lock); 1847 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); 1848 spin_unlock(&dev->delayed_cmd_lock); 1849 1850 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn", 1851 cmd->t_task_cdb[0], cmd->sam_task_attr); 1852 return true; 1853 } 1854 1855 void target_execute_cmd(struct se_cmd *cmd) 1856 { 1857 /* 1858 * If the received CDB has aleady been aborted stop processing it here. 1859 */ 1860 if (transport_check_aborted_status(cmd, 1)) 1861 return; 1862 1863 /* 1864 * Determine if frontend context caller is requesting the stopping of 1865 * this command for frontend exceptions. 1866 */ 1867 spin_lock_irq(&cmd->t_state_lock); 1868 if (cmd->transport_state & CMD_T_STOP) { 1869 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 1870 __func__, __LINE__, cmd->tag); 1871 1872 spin_unlock_irq(&cmd->t_state_lock); 1873 complete_all(&cmd->t_transport_stop_comp); 1874 return; 1875 } 1876 1877 cmd->t_state = TRANSPORT_PROCESSING; 1878 cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; 1879 spin_unlock_irq(&cmd->t_state_lock); 1880 1881 if (target_write_prot_action(cmd)) 1882 return; 1883 1884 if (target_handle_task_attr(cmd)) { 1885 spin_lock_irq(&cmd->t_state_lock); 1886 cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT); 1887 spin_unlock_irq(&cmd->t_state_lock); 1888 return; 1889 } 1890 1891 __target_execute_cmd(cmd); 1892 } 1893 EXPORT_SYMBOL(target_execute_cmd); 1894 1895 /* 1896 * Process all commands up to the last received ORDERED task attribute which 1897 * requires another blocking boundary 1898 */ 1899 static void target_restart_delayed_cmds(struct se_device *dev) 1900 { 1901 for (;;) { 1902 struct se_cmd *cmd; 1903 1904 spin_lock(&dev->delayed_cmd_lock); 1905 if (list_empty(&dev->delayed_cmd_list)) { 1906 spin_unlock(&dev->delayed_cmd_lock); 1907 break; 1908 } 1909 1910 cmd = list_entry(dev->delayed_cmd_list.next, 1911 struct se_cmd, se_delayed_node); 1912 list_del(&cmd->se_delayed_node); 1913 spin_unlock(&dev->delayed_cmd_lock); 1914 1915 __target_execute_cmd(cmd); 1916 1917 if (cmd->sam_task_attr == TCM_ORDERED_TAG) 1918 break; 1919 } 1920 } 1921 1922 /* 1923 * Called from I/O completion to determine which dormant/delayed 1924 * and ordered cmds need to have their tasks added to the execution queue. 
/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return;

	if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
		atomic_dec_mb(&dev->simple_cmds);
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n",
			 dev->dev_cur_ordered_id);
	} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
			 dev->dev_cur_ordered_id);
	} else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
		atomic_dec_mb(&dev->dev_ordered_sync);

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
			 dev->dev_cur_ordered_id);
	}

	target_restart_delayed_cmds(dev);
}

static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	transport_complete_task_attr(cmd);

	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		goto out;
	}

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->se_cmd_flags & SCF_BIDI) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

out:
	if (ret < 0) {
		transport_handle_queue_full(cmd, cmd->se_dev);
		return;
	}
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}

static void transport_handle_queue_full(
	struct se_cmd *cmd,
	struct se_device *dev)
{
	spin_lock_irq(&dev->qf_cmd_lock);
	list_add_tail(&cmd->se_qf_node, &dev->qf_cmd_list);
	atomic_inc_mb(&dev->dev_qf_count);
	spin_unlock_irq(&dev->qf_cmd_lock);

	schedule_work(&dev->qf_work_queue);
}

static bool target_read_prot_action(struct se_cmd *cmd)
{
	switch (cmd->prot_op) {
	case TARGET_PROT_DIN_STRIP:
		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
			u32 sectors = cmd->data_length >>
				      ilog2(cmd->se_dev->dev_attrib.block_size);

			cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
						     sectors, 0, cmd->t_prot_sg,
						     0);
			if (cmd->pi_err)
				return true;
		}
		break;
	case TARGET_PROT_DIN_INSERT:
		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
			break;

		sbc_dif_generate(cmd);
		break;
	default:
		break;
	}

	return false;
}
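/*
 * target_read_prot_action() is the READ-side mirror of
 * target_write_prot_action(): if the fabric cannot strip PI in hardware
 * (DIN_STRIP) the tuples are verified in software before the payload is
 * returned to the initiator, and if it cannot insert PI toward the
 * initiator (DIN_INSERT) the tuples are generated in software instead.
 */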
static void target_complete_ok_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	int ret;

	/*
	 * Check if we need to move delayed/dormant tasks from cmds on the
	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
	 * Attribute.
	 */
	transport_complete_task_attr(cmd);

	/*
	 * Check if we need to schedule QUEUE_FULL work for this device, so
	 * that any commands waiting on a previous queue-full condition are
	 * retried.
	 */
	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
		schedule_work(&cmd->se_dev->qf_work_queue);

	/*
	 * Check if we need to send a sense buffer from
	 * the struct se_cmd in question.
	 */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		WARN_ON(!cmd->scsi_status);
		ret = transport_send_check_condition_and_sense(
					cmd, 0, 1);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;

		transport_lun_remove_cmd(cmd);
		transport_cmd_check_stop_to_fabric(cmd);
		return;
	}
	/*
	 * Check for a callback, used amongst other things by
	 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
	 */
	if (cmd->transport_complete_callback) {
		sense_reason_t rc;

		rc = cmd->transport_complete_callback(cmd, true);
		if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
			if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
			    !cmd->data_length)
				goto queue_rsp;

			return;
		} else if (rc) {
			ret = transport_send_check_condition_and_sense(cmd,
						rc, 0);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}

queue_rsp:
	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		atomic_long_add(cmd->data_length,
				&cmd->se_lun->lun_stats.tx_data_octets);
		/*
		 * Perform READ_STRIP of PI using software emulation when
		 * backend had PI enabled, if the transport will not be
		 * performing hardware READ_STRIP offload.
		 */
		if (target_read_prot_action(cmd)) {
			ret = transport_send_check_condition_and_sense(cmd,
						cmd->pi_err, 0);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}

		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	case DMA_TO_DEVICE:
		atomic_long_add(cmd->data_length,
				&cmd->se_lun->lun_stats.rx_data_octets);
		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */
		if (cmd->se_cmd_flags & SCF_BIDI) {
			atomic_long_add(cmd->data_length,
					&cmd->se_lun->lun_stats.tx_data_octets);
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
		" data_direction: %d\n", cmd, cmd->data_direction);
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
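/*
 * Note on ->transport_complete_callback, as used above: the callback may
 * return 0 without SCF_COMPARE_AND_WRITE_POST set and take over response
 * delivery itself (the normal COMPARE_AND_WRITE read phase), return 0 with
 * that flag set to fall through to the usual queue_rsp path, or return a
 * sense_reason_t to fail the command with a CHECK CONDITION.
 */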
static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}

static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
{
	/*
	 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
	 * emulation, and free + reset pointers if necessary.
	 */
	if (!cmd->t_data_sg_orig)
		return;

	kfree(cmd->t_data_sg);
	cmd->t_data_sg = cmd->t_data_sg_orig;
	cmd->t_data_sg_orig = NULL;
	cmd->t_data_nents = cmd->t_data_nents_orig;
	cmd->t_data_nents_orig = 0;
}

static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
		transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
		cmd->t_prot_sg = NULL;
		cmd->t_prot_nents = 0;
	}

	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
		/*
		 * Release special case READ buffer payload required for
		 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
		 */
		if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
			transport_free_sgl(cmd->t_bidi_data_sg,
					   cmd->t_bidi_data_nents);
			cmd->t_bidi_data_sg = NULL;
			cmd->t_bidi_data_nents = 0;
		}
		transport_reset_sgl_orig(cmd);
		return;
	}
	transport_reset_sgl_orig(cmd);

	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}
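/*
 * Ownership rule implied by transport_free_pages(): when a fabric sets
 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC it supplied t_data_sg itself, so the
 * core only releases what it allocated on the fabric's behalf - the
 * COMPARE_AND_WRITE read buffer in t_bidi_data_sg - and never the fabric's
 * own data SGL. Without the flag, the data, bidi and (absent the
 * PROT_SG_TO_MEM_NOALLOC flag) protection SGLs are all owned and freed here.
 */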
/**
 * transport_release_cmd - free a command
 * @cmd: command to free
 *
 * This routine unconditionally frees a command, and reference counting
 * or list removal must be done in the caller.
 */
static int transport_release_cmd(struct se_cmd *cmd)
{
	BUG_ON(!cmd->se_tfo);

	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		core_tmr_release_req(cmd->se_tmr_req);
	if (cmd->t_task_cdb != cmd->__t_task_cdb)
		kfree(cmd->t_task_cdb);
	/*
	 * If this cmd has been setup with target_get_sess_cmd(), drop
	 * the kref and call ->release_cmd() in kref callback.
	 */
	return target_put_sess_cmd(cmd);
}

/**
 * transport_put_cmd - release a reference to a command
 * @cmd: command to release
 *
 * This routine releases our reference to the command and frees it if possible.
 */
static int transport_put_cmd(struct se_cmd *cmd)
{
	transport_free_pages(cmd);
	return transport_release_cmd(cmd);
}

void *transport_kmap_data_sg(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;
	struct page **pages;
	int i;

	/*
	 * We need to take into account a possible offset here for fabrics like
	 * tcm_loop that may be using a contiguous buffer from the SCSI midlayer
	 * for control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
	 */
	if (!cmd->t_data_nents)
		return NULL;

	BUG_ON(!sg);
	if (cmd->t_data_nents == 1)
		return kmap(sg_page(sg)) + sg->offset;

	/* >1 page. use vmap */
	pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* convert sg[] to pages[] */
	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
		pages[i] = sg_page(sg);
	}

	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!cmd->t_data_vmap)
		return NULL;

	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
EXPORT_SYMBOL(transport_kmap_data_sg);

void transport_kunmap_data_sg(struct se_cmd *cmd)
{
	if (!cmd->t_data_nents) {
		return;
	} else if (cmd->t_data_nents == 1) {
		kunmap(sg_page(cmd->t_data_sg));
		return;
	}

	vunmap(cmd->t_data_vmap);
	cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);

int
target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
		 bool zero_page)
{
	struct scatterlist *sg;
	struct page *page;
	gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
	unsigned int nent;
	int i = 0;

	nent = DIV_ROUND_UP(length, PAGE_SIZE);
	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, nent);

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);
		page = alloc_page(GFP_KERNEL | zero_flag);
		if (!page)
			goto out;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	*sgl = sg;
	*nents = nent;
	return 0;

out:
	while (i > 0) {
		i--;
		__free_page(sg_page(&sg[i]));
	}
	kfree(sg);
	return -ENOMEM;
}
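/*
 * For illustration, a minimal usage sketch for target_alloc_sgl(); the
 * caller and the 8k length here are hypothetical, and inside this file the
 * allocation is paired with transport_free_sgl() for release:
 *
 *	struct scatterlist *sgl;
 *	unsigned int nents;
 *
 *	if (target_alloc_sgl(&sgl, &nents, 8192, true) < 0)
 *		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 *	... map, fill and hand off to the backend ...
 *	transport_free_sgl(sgl, nents);
 */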
/*
 * Allocate any required resources to execute the command. For writes we
 * might not have the payload yet, so notify the fabric via a call to
 * ->write_pending instead. Otherwise place it on the execution queue.
 */
sense_reason_t
transport_generic_new_cmd(struct se_cmd *cmd)
{
	int ret = 0;
	bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);

	if (cmd->prot_op != TARGET_PROT_NORMAL &&
	    !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
		ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
				       cmd->prot_length, true);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {

		if ((cmd->se_cmd_flags & SCF_BIDI) ||
		    (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
			u32 bidi_length;

			if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
				bidi_length = cmd->t_task_nolb *
					      cmd->se_dev->dev_attrib.block_size;
			else
				bidi_length = cmd->data_length;

			ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
					       &cmd->t_bidi_data_nents,
					       bidi_length, zero_flag);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
				       cmd->data_length, zero_flag);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	} else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
		   cmd->data_length) {
		/*
		 * Special case for COMPARE_AND_WRITE with fabrics
		 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
		 */
		u32 caw_length = cmd->t_task_nolb *
				 cmd->se_dev->dev_attrib.block_size;

		ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
				       &cmd->t_bidi_data_nents,
				       caw_length, zero_flag);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * If this command is not a write we can execute it right here,
	 * for write buffers we need to notify the fabric driver first
	 * and let it call back once the write buffers are ready.
	 */
	target_add_to_state_list(cmd);
	if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
		target_execute_cmd(cmd);
		return 0;
	}
	transport_cmd_check_stop(cmd, false, true);

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

	/* fabric drivers should only return -EAGAIN or -ENOMEM as error */
	WARN_ON(ret);

	return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
	transport_handle_queue_full(cmd, cmd->se_dev);
	return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
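/*
 * The ->write_pending() contract implied above (a sketch; details vary per
 * fabric driver): return 0 after initiating the data-out transfer, or
 * -EAGAIN/-ENOMEM to have the core retry later via the queue-full path.
 * Once the WRITE payload has actually arrived, the fabric re-enters the
 * core, typically with:
 *
 *	target_execute_cmd(se_cmd);
 *
 * which picks up processing exactly where transport_generic_new_cmd()
 * left off.
 */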
static void transport_write_pending_qf(struct se_cmd *cmd)
{
	int ret;

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev);
	}
}

int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	unsigned long flags;
	int ret = 0;

	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
			transport_wait_for_tasks(cmd);

		ret = transport_release_cmd(cmd);
	} else {
		if (wait_for_tasks)
			transport_wait_for_tasks(cmd);
		/*
		 * Handle WRITE failure case where transport_generic_new_cmd()
		 * has already added se_cmd to state_list, but fabric has
		 * failed command before I/O submission.
		 */
		if (cmd->state_active) {
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			target_remove_from_state_list(cmd);
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		}

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

		ret = transport_put_cmd(cmd);
	}
	return ret;
}
EXPORT_SYMBOL(transport_generic_free_cmd);

/* target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_cmd:	command descriptor to add
 * @ack_kref:	Signal that fabric will perform an acknowledging
 *		target_put_sess_cmd()
 */
int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
{
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned long flags;
	int ret = 0;

	/*
	 * Add a second kref if the fabric caller is expecting to handle
	 * fabric acknowledgement that requires two target_put_sess_cmd()
	 * invocations before se_cmd descriptor release.
	 */
	if (ack_kref)
		kref_get(&se_cmd->cmd_kref);

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (se_sess->sess_tearing_down) {
		ret = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
out:
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	if (ret && ack_kref)
		target_put_sess_cmd(se_cmd);

	return ret;
}
EXPORT_SYMBOL(target_get_sess_cmd);
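/*
 * Reference counting sketch for the ack_kref case (descriptive; the initial
 * reference on cmd_kref comes from command setup): after target_get_sess_cmd()
 * the descriptor holds two references, so a fabric balances them with two
 * puts - one when the response has been acknowledged on the wire, and one
 * when the command is finally freed via transport_generic_free_cmd().
 */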
static void target_release_cmd_kref(struct kref *kref)
		__releases(&se_cmd->se_sess->sess_cmd_lock)
{
	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
	struct se_session *se_sess = se_cmd->se_sess;

	if (list_empty(&se_cmd->se_cmd_list)) {
		spin_unlock(&se_sess->sess_cmd_lock);
		se_cmd->se_tfo->release_cmd(se_cmd);
		return;
	}
	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
		spin_unlock(&se_sess->sess_cmd_lock);
		complete(&se_cmd->cmd_wait_comp);
		return;
	}
	list_del(&se_cmd->se_cmd_list);
	spin_unlock(&se_sess->sess_cmd_lock);

	se_cmd->se_tfo->release_cmd(se_cmd);
}

/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
 * @se_cmd:	command descriptor to drop
 */
int target_put_sess_cmd(struct se_cmd *se_cmd)
{
	struct se_session *se_sess = se_cmd->se_sess;

	if (!se_sess) {
		se_cmd->se_tfo->release_cmd(se_cmd);
		return 1;
	}
	return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
					 &se_sess->sess_cmd_lock);
}
EXPORT_SYMBOL(target_put_sess_cmd);
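/*
 * The final put above is done with kref_put_spinlock_irqsave() so that
 * sess_cmd_lock is already held when target_release_cmd_kref() runs: the
 * list_del() from sess_cmd_list and the decision whether to complete
 * cmd_wait_comp for a tearing-down session happen atomically with the
 * reference count hitting zero, closing the race against
 * target_sess_cmd_list_set_waiting().
 */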
/* target_sess_cmd_list_set_waiting - Flag all commands in
 *         sess_cmd_list to complete cmd_wait_comp.  Set
 *         sess_tearing_down so no more commands are queued.
 * @se_sess:	session to flag
 */
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
	struct se_cmd *se_cmd;
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (se_sess->sess_tearing_down) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		return;
	}
	se_sess->sess_tearing_down = 1;
	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);

	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
		se_cmd->cmd_wait_set = 1;

	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);

/* target_wait_for_sess_cmds - Wait for outstanding descriptors
 * @se_sess:	session to wait for active I/O
 */
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	unsigned long flags;

	list_for_each_entry_safe(se_cmd, tmp_cmd,
				 &se_sess->sess_wait_list, se_cmd_list) {
		list_del(&se_cmd->se_cmd_list);

		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
			 " %d\n", se_cmd, se_cmd->t_state,
			 se_cmd->se_tfo->get_cmd_state(se_cmd));

		wait_for_completion(&se_cmd->cmd_wait_comp);
		pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
			 " fabric state: %d\n", se_cmd, se_cmd->t_state,
			 se_cmd->se_tfo->get_cmd_state(se_cmd));

		se_cmd->se_tfo->release_cmd(se_cmd);
	}

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	WARN_ON(!list_empty(&se_sess->sess_cmd_list));
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);

void transport_clear_lun_ref(struct se_lun *lun)
{
	percpu_ref_kill(&lun->lun_ref);
	wait_for_completion(&lun->lun_ref_comp);
}
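/*
 * Taken together, the two helpers above form the usual fabric session
 * shutdown sequence (a sketch; exact ordering varies by fabric driver):
 *
 *	target_sess_cmd_list_set_waiting(se_sess);
 *	target_wait_for_sess_cmds(se_sess);
 *	transport_deregister_session(se_sess);
 *
 * set_waiting() stops new commands from joining the session and marks the
 * stragglers; wait_for_sess_cmds() then blocks until each one drops its
 * final reference.
 */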
/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd: command to wait on
 *
 * Called from frontend fabric context to wait for storage engine
 * to pause and/or release frontend generated struct se_cmd.
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	cmd->transport_state |= CMD_T_STOP;

	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
		 cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wait_for_completion(&cmd->t_transport_stop_comp);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
		 cmd->tag);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return true;
}
EXPORT_SYMBOL(transport_wait_for_tasks);
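/*
 * The table below maps each sense_reason_t to the fixed SPC sense key /
 * additional sense code (ASC) / qualifier (ASCQ) triple reported to the
 * initiator; entries with add_sector_info additionally carry the failing
 * LBA in the sense INFORMATION field for the DIF check failures.
 */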
struct sense_info {
	u8 key;
	u8 asc;
	u8 ascq;
	bool add_sector_info;
};

static const struct sense_info sense_info_table[] = {
	[TCM_NO_SENSE] = {
		.key = NOT_READY
	},
	[TCM_NON_EXISTENT_LUN] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
	},
	[TCM_UNSUPPORTED_SCSI_OPCODE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
	},
	[TCM_SECTOR_COUNT_TOO_MANY] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
	},
	[TCM_UNKNOWN_MODE_PAGE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x24, /* INVALID FIELD IN CDB */
	},
	[TCM_CHECK_CONDITION_ABORT_CMD] = {
		.key = ABORTED_COMMAND,
		.asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
		.ascq = 0x03,
	},
	[TCM_INCORRECT_AMOUNT_OF_DATA] = {
		.key = ABORTED_COMMAND,
		.asc = 0x0c, /* WRITE ERROR */
		.ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
	},
	[TCM_INVALID_CDB_FIELD] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x24, /* INVALID FIELD IN CDB */
	},
	[TCM_INVALID_PARAMETER_LIST] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
	},
	[TCM_PARAMETER_LIST_LENGTH_ERROR] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
	},
	[TCM_UNEXPECTED_UNSOLICITED_DATA] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x0c, /* WRITE ERROR */
		.ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
	},
	[TCM_SERVICE_CRC_ERROR] = {
		.key = ABORTED_COMMAND,
		.asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
		.ascq = 0x05, /* N/A */
	},
	[TCM_SNACK_REJECTED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x11, /* READ ERROR */
		.ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
	},
	[TCM_WRITE_PROTECTED] = {
		.key = DATA_PROTECT,
		.asc = 0x27, /* WRITE PROTECTED */
	},
	[TCM_ADDRESS_OUT_OF_RANGE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
	},
	[TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
		.key = UNIT_ATTENTION,
	},
	[TCM_CHECK_CONDITION_NOT_READY] = {
		.key = NOT_READY,
	},
	[TCM_MISCOMPARE_VERIFY] = {
		.key = MISCOMPARE,
		.asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
		.ascq = 0x00,
	},
	[TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
		.add_sector_info = true,
	},
	[TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
		.add_sector_info = true,
	},
	[TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
		.add_sector_info = true,
	},
	[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
		/*
		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
		 * Solaris initiators.  Returning NOT READY instead means the
		 * operations will be retried a finite number of times and we
		 * can survive intermittent errors.
		 */
		.key = NOT_READY,
		.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
	},
};

static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
	const struct sense_info *si;
	u8 *buffer = cmd->sense_buffer;
	int r = (__force int)reason;
	u8 asc, ascq;
	bool desc_format = target_sense_desc_format(cmd->se_dev);

	if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
		si = &sense_info_table[r];
	else
		si = &sense_info_table[(__force int)
				       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];

	if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		WARN_ON_ONCE(asc == 0);
	} else if (si->asc == 0) {
		WARN_ON_ONCE(cmd->scsi_asc == 0);
		asc = cmd->scsi_asc;
		ascq = cmd->scsi_ascq;
	} else {
		asc = si->asc;
		ascq = si->ascq;
	}

	scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq);
	if (si->add_sector_info)
		return scsi_set_sense_information(buffer,
						  cmd->scsi_sense_length,
						  cmd->bad_sector);

	return 0;
}

int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
		sense_reason_t reason, int from_transport)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!from_transport) {
		int rc;

		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
		cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
		cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
		rc = translate_sense_reason(cmd, reason);
		if (rc)
			return rc;
	}

	trace_target_cmd_complete(cmd);
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
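/*
 * For reference (illustrative byte layout, SPC fixed-format sense as built
 * by scsi_build_sense_buffer() when descriptor format is not in use): a
 * TCM_INVALID_CDB_FIELD translation produces a buffer beginning
 *
 *	0x70 0x00 0x05 0x00 0x00 0x00 0x00 0x0a   response code, ILLEGAL REQUEST
 *	0x00 0x00 0x00 0x00 0x24 0x00 ...         ASC 0x24, ASCQ 0x00
 *
 * i.e. sense key in byte 2, ASC/ASCQ in bytes 12 and 13; consult SPC for
 * the authoritative field definitions.
 */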
int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	if (!(cmd->transport_state & CMD_T_ABORTED))
		return 0;

	/*
	 * If cmd has been aborted but either no status is to be sent or it has
	 * already been sent, just return
	 */
	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
		return 1;

	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
		 cmd->t_task_cdb[0], cmd->tag);

	cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
	trace_target_cmd_complete(cmd);
	cmd->se_tfo->queue_status(cmd);

	return 1;
}
EXPORT_SYMBOL(transport_check_aborted_status);

void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			cmd->transport_state |= CMD_T_ABORTED;
			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
			return;
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;

	transport_lun_remove_cmd(cmd);

	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
		 cmd->t_task_cdb[0], cmd->tag);

	trace_target_cmd_complete(cmd);
	cmd->se_tfo->queue_status(cmd);
}
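/*
 * Delayed TASK_ABORTED round trip (descriptive): when a WRITE still has
 * data-out in flight, transport_send_task_abort() only marks the command
 * with CMD_T_ABORTED | SCF_SEND_DELAYED_TAS; the actual
 * SAM_STAT_TASK_ABORTED status is emitted later by
 * transport_check_aborted_status() once the fabric re-enters the core for
 * that command.
 */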
static void target_tmr_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		core_tmr_abort_task(dev, tmr, cmd->se_sess);
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		if (tmr->response == TMR_FUNCTION_COMPLETE) {
			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
					       cmd->orig_fe_lun, 0x29,
					       ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
		}
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
		       tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
}

int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state |= CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	INIT_WORK(&cmd->work, target_tmr_work);
	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

bool
target_check_wce(struct se_device *dev)
{
	bool wce = false;

	if (dev->transport->get_write_cache)
		wce = dev->transport->get_write_cache(dev);
	else if (dev->dev_attrib.emulate_write_cache > 0)
		wce = true;

	return wce;
}

bool
target_check_fua(struct se_device *dev)
{
	return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
}
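/*
 * For illustration (a sketch with hypothetical variable names, not taken
 * from this file): SBC CDB parsing and the Caching mode page emulation
 * consult these helpers, and a write path honoring the FUA bit might do:
 *
 *	if ((cmd->se_cmd_flags & SCF_FUA) && target_check_fua(dev))
 *		op_flags |= REQ_FUA;
 *
 * so that forced-unit-access writes reach stable media even while a
 * volatile write cache is enabled.
 */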