/*******************************************************************************
 * Filename: target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp),
			__alignof__(struct t10_alua_lu_gp), 0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_"
				"cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
				"cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
			WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}
/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}

void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}

struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	kref_init(&se_sess->sess_kref);
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);

int transport_alloc_session_tags(struct se_session *se_sess,
		unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
			GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!se_sess->sess_cmd_map) {
		se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
		if (!se_sess->sess_cmd_map) {
			pr_err("Unable to allocate se_sess->sess_cmd_map\n");
			return -ENOMEM;
		}
	}

	rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool,"
				" tag_num: %u\n", tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);

struct se_session *transport_init_session_tags(unsigned int tag_num,
		unsigned int tag_size,
		enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	if (tag_num != 0 && !tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_num:"
				" %u, but zero tag_size\n", tag_num);
		return ERR_PTR(-EINVAL);
	}
	if (!tag_num && tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_size:"
				" %u, but zero tag_num\n", tag_size);
		return ERR_PTR(-EINVAL);
	}

	se_sess = transport_init_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session_tags);
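/*
 * Usage sketch (hypothetical fabric, not in-tree code): a fabric module
 * with a private per-command descriptor would typically size the
 * percpu-ida backed tag pool at session creation time roughly like this;
 * struct my_fabric_cmd and MY_FABRIC_TAG_COUNT are made-up names.
 *
 *	struct se_session *se_sess;
 *
 *	se_sess = transport_init_session_tags(MY_FABRIC_TAG_COUNT,
 *			sizeof(struct my_fabric_cmd), TARGET_PROT_NORMAL);
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *
 * Each tag handed out later by percpu_ida_alloc() then indexes one
 * tag_size slot inside se_sess->sess_cmd_map.
 */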
/*
 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active se_sessions
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * Determine if fabric allows for T10-PI feature bits exposed to
		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl node
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);
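/*
 * Sketch of how a fabric driver might bring up an I_T nexus with
 * target_alloc_session() below; the tag pool sizing, struct my_fabric_cmd,
 * my_sess_private and my_sess_callback are illustrative stand-ins, not
 * in-tree code.
 *
 *	struct se_session *se_sess;
 *
 *	se_sess = target_alloc_session(se_tpg, 128,
 *			sizeof(struct my_fabric_cmd), TARGET_PROT_NORMAL,
 *			initiator_name, my_sess_private, my_sess_callback);
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *
 * On success the session is already registered with the TPG via
 * transport_register_session(), so later teardown goes through
 * transport_deregister_session().
 */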
struct se_session *
target_alloc_session(struct se_portal_group *tpg,
		     unsigned int tag_num, unsigned int tag_size,
		     enum target_prot_op prot_op,
		     const char *initiatorname, void *private,
		     int (*callback)(struct se_portal_group *,
				     struct se_session *, void *))
{
	struct se_session *sess;

	/*
	 * If the fabric driver is using percpu-ida based pre allocation
	 * of I/O descriptor tags, go ahead and perform that setup now..
	 */
	if (tag_num != 0)
		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
	else
		sess = transport_init_session(prot_op);

	if (IS_ERR(sess))
		return sess;

	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
					(unsigned char *)initiatorname);
	if (!sess->se_node_acl) {
		transport_free_session(sess);
		return ERR_PTR(-EACCES);
	}
	/*
	 * Go ahead and perform any remaining fabric setup that is
	 * required before transport_register_session().
	 */
	if (callback != NULL) {
		int rc = callback(tpg, sess, private);
		if (rc) {
			transport_free_session(sess);
			return ERR_PTR(rc);
		}
	}

	transport_register_session(tpg, sess->se_node_acl, sess, private);
	return sess;
}
EXPORT_SYMBOL(target_alloc_session);

static void target_release_session(struct kref *kref)
{
	struct se_session *se_sess = container_of(kref,
			struct se_session, sess_kref);
	struct se_portal_group *se_tpg = se_sess->se_tpg;

	se_tpg->se_tpg_tfo->close_session(se_sess);
}

int target_get_session(struct se_session *se_sess)
{
	return kref_get_unless_zero(&se_sess->sess_kref);
}
EXPORT_SYMBOL(target_get_session);

void target_put_session(struct se_session *se_sess)
{
	kref_put(&se_sess->sess_kref, target_release_session);
}
EXPORT_SYMBOL(target_put_session);

ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
	struct se_session *se_sess;
	ssize_t len = 0;

	spin_lock_bh(&se_tpg->session_lock);
	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
		if (!se_sess->se_node_acl)
			continue;
		if (!se_sess->se_node_acl->dynamic_node_acl)
			continue;
		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
			break;

		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
				se_sess->se_node_acl->initiatorname);
		len += 1; /* Include NULL terminator */
	}
	spin_unlock_bh(&se_tpg->session_lock);

	return len;
}
EXPORT_SYMBOL(target_show_dynamic_sessions);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
			struct se_node_acl, acl_kref);

	complete(&nacl->acl_free_comp);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}
EXPORT_SYMBOL(target_put_nacl);
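/*
 * For reference, fabric drivers of this era commonly tear a session down
 * along roughly these lines (a sketch; exact ordering and the command
 * draining step vary between fabrics):
 *
 *	transport_deregister_session_configfs(se_sess);
 *	target_sess_cmd_list_set_waiting(se_sess);
 *	target_wait_for_sess_cmds(se_sess);
 *	transport_deregister_session(se_sess);
 *
 * transport_deregister_session() below unlinks the session from the TPG
 * and ends in transport_free_session(), so no separate free is required.
 */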
void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (se_nacl->acl_stop == 0)
			list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	/*
	 * Drop the se_node_acl->nacl_kref obtained from within
	 * core_tpg_get_initiator_node_acl().
	 */
	if (se_nacl) {
		se_sess->se_node_acl = NULL;
		target_put_nacl(se_nacl);
	}
	if (se_sess->sess_cmd_map) {
		percpu_ida_destroy(&se_sess->sess_tag_pool);
		kvfree(se_sess->sess_cmd_map);
	}
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	const struct target_core_fabric_ops *se_tfo;
	struct se_node_acl *se_nacl;
	unsigned long flags;
	bool drop_nacl = false;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}
	se_tfo = se_tpg->se_tpg_tfo;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;

	mutex_lock(&se_tpg->acl_node_mutex);
	if (se_nacl && se_nacl->dynamic_node_acl) {
		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			list_del(&se_nacl->acl_list);
			drop_nacl = true;
		}
	}
	mutex_unlock(&se_tpg->acl_node_mutex);

	if (drop_nacl) {
		core_tpg_wait_for_nacl_pr_ref(se_nacl);
		core_free_device_list_for_node(se_nacl, se_tpg);
		se_sess->se_node_acl = NULL;
		kfree(se_nacl);
	}
	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
	/*
	 * If last kref is dropping now for an explicit NodeACL, awake sleeping
	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
	 * removal context from within transport_free_session() code.
	 */

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	if (cmd->transport_state & CMD_T_BUSY)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
				    bool write_pending)
{
	unsigned long flags;

	if (remove_from_lists) {
		target_remove_from_state_list(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the handoff to FE.
		 */
		cmd->se_lun = NULL;
	}

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (write_pending)
		cmd->t_state = TRANSPORT_WRITE_PENDING;

	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			__func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 1;
	}

	cmd->transport_state &= ~CMD_T_ACTIVE;
	if (remove_from_lists) {
		/*
		 * Some fabric modules like tcm_loop can release their
		 * internally allocated I/O reference and struct se_cmd now.
		 *
		 * Fabric modules are expected to return '1' here if the
		 * se_cmd being passed is released at this point,
		 * or zero if not being released.
		 */
		if (cmd->se_tfo->check_stop_free != NULL) {
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
			return cmd->se_tfo->check_stop_free(cmd);
		}
	}

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, true, false);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;

	if (!lun)
		return;

	if (cmpxchg(&cmd->lun_ref_active, true, false))
		percpu_ref_put(&lun->lun_ref);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);

	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
		transport_lun_remove_cmd(cmd);
	/*
	 * Allow the fabric driver to unmap any resources before
	 * releasing the descriptor via TFO->release_cmd()
	 */
	if (remove)
		cmd->se_tfo->aborted_task(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove && ack_kref)
		transport_put_cmd(cmd);
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd,
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}

/*
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd
 */
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return NULL;

	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;

	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return cmd->sense_buffer;
}

void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	struct se_device *dev = cmd->se_dev;
	int success = scsi_status == GOOD;
	unsigned long flags;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~CMD_T_BUSY;

	if (dev && dev->transport->transport_complete) {
		dev->transport->transport_complete(cmd,
				cmd->t_data_sg,
				transport_get_sense_buffer(cmd));
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
			success = 1;
	}

	/*
	 * Check for case where an explicit ABORT_TASK has been received
	 * and transport_wait_for_tasks() will be waiting for completion..
	 */
	if (cmd->transport_state & CMD_T_ABORTED ||
	    cmd->transport_state & CMD_T_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete_all(&cmd->t_transport_stop_comp);
		return;
	} else if (!success) {
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (cmd->se_cmd_flags & SCF_USE_CPUID)
		queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
	else
		queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
	if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = cmd->data_length - length;
		}

		cmd->data_length = length;
	}

	target_complete_cmd(cmd, scsi_status);
}
EXPORT_SYMBOL(target_complete_cmd_with_length);
static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list, &dev->state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);

void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec_mb(&dev->dev_qf_count);

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
			transport_write_pending_qf(cmd);
		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
			transport_complete_qf(cmd);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	if (dev->export_count)
		*bl += sprintf(b + *bl, "ACTIVATED");
	else
		*bl += sprintf(b + *bl, "DEACTIVATED");

	*bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
	*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
		dev->dev_attrib.block_size,
		dev->dev_attrib.hw_max_sectors);
	*bl += sprintf(b + *bl, " ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}
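/*
 * Worked example (not from the SPC-3 text itself, just applying the masks
 * used by the transport_set_vpd_*() helpers around here): for an INQUIRY
 * EVPD 0x83 designation descriptor starting with the bytes
 *
 *	0x61 0x93 0x00 0x08 ...
 *
 * byte 0 gives protocol identifier 0x60 (SAS, upper nibble) and code set
 * 0x1 (binary, lower nibble); byte 1 has PIV set (0x80), association 0x10
 * (target port) and designator type 0x3 (NAA); byte 3 is the 8 byte
 * designator length that bounds the copy loop in transport_set_vpd_ident().
 */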
int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		snprintf(buf, sizeof(buf),
			"T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		snprintf(buf, sizeof(buf),
			"T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		snprintf(buf, sizeof(buf),
			"T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);

static sense_reason_t
target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
			       unsigned int size)
{
	u32 mtl;

	if (!cmd->se_tfo->max_data_sg_nents)
		return TCM_NO_SENSE;
	/*
	 * Check if fabric enforced maximum SGL entries per I/O descriptor
	 * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT +
	 * residual_count and reduce original cmd->data_length to maximum
	 * length based on single PAGE_SIZE entry scatter-lists.
	 */
	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
	if (cmd->data_length > mtl) {
		/*
		 * If an existing CDB overflow is present, calculate new residual
		 * based on CDB size minus fabric maximum transfer length.
		 *
		 * If an existing CDB underflow is present, calculate new residual
		 * based on original cmd->data_length minus fabric maximum transfer
		 * length.
		 *
		 * Otherwise, set the underflow residual based on cmd->data_length
		 * minus fabric maximum transfer length.
		 */
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			cmd->residual_count = (size - mtl);
		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			u32 orig_dl = size + cmd->residual_count;
			cmd->residual_count = (orig_dl - mtl);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - mtl);
		}
		cmd->data_length = mtl;
		/*
		 * Reset sbc_check_prot() calculated protection payload
		 * length based upon the new smaller MTL.
		 */
		if (cmd->prot_length) {
			u32 sectors = (mtl / dev->dev_attrib.block_size);
			cmd->prot_length = dev->prot_length * sectors;
		}
	}
	return TCM_NO_SENSE;
}

sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
				cmd->data_length, size, cmd->t_task_cdb[0]);

		if (cmd->data_direction == DMA_TO_DEVICE &&
		    cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
			pr_err("Rejecting underflow/overflow WRITE data\n");
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_CDB.
		 */
		if (dev->dev_attrib.block_size != 512) {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * For the overflow case keep the existing fabric provided
		 * ->data_length. Otherwise for the underflow case, reset
		 * ->data_length to the smaller SCSI expected data transfer
		 * length.
		 */
		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
			cmd->data_length = size;
		}
	}

	return target_check_max_data_sg_nents(cmd, dev, size);

}
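/*
 * Worked example for the residual bookkeeping above (illustrative numbers,
 * assuming 4K PAGE_SIZE): a READ whose CDB allocation length is 512 bytes
 * but whose fabric-provided data_length is 4096 takes the underflow branch
 * in target_cmd_size_check(): SCF_UNDERFLOW_BIT is set, residual_count
 * becomes 3584 and data_length is reduced to 512. Independently, a fabric
 * advertising max_data_sg_nents = 4 caps transfers at mtl = 16384 bytes in
 * target_check_max_data_sg_nents(), so a matching 64K request ends up with
 * data_length = 16384 and residual_count = 49152.
 */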
/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 *
 * Preserves the value of @cmd->tag.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	const struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	spin_lock_init(&cmd->t_state_lock);
	kref_init(&cmd->cmd_kref);
	cmd->transport_state = CMD_T_DEV_ACTIVE;

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	if (cmd->sam_task_attr == TCM_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	return 0;
}

sense_reason_t
target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;

	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now.. Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			return TCM_OUT_OF_RESOURCES;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	trace_target_sequencer_start(cmd);

	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	ret = target_scsi3_ua_check(cmd);
	if (ret)
		return ret;

	ret = target_alua_state_check(cmd);
	if (ret)
		return ret;

	ret = target_check_reservation(cmd);
	if (ret) {
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		return ret;
	}

	ret = dev->transport->parse_cdb(cmd);
	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
				    cmd->se_tfo->get_fabric_name(),
				    cmd->se_sess->se_node_acl->initiatorname,
				    cmd->t_task_cdb[0]);
	if (ret)
		return ret;

	ret = transport_check_alloc_task_attr(cmd);
	if (ret)
		return ret;

	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
	return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	sense_reason_t ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
			" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
	 * outstanding descriptors are handled correctly during shutdown via
	 * transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret)
		transport_generic_request_failure(cmd, ret);
	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
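/*
 * For orientation: fabrics normally reach transport_handle_cdb_direct()
 * indirectly through target_submit_cmd_map_sgls() further down, which
 * performs roughly the following sequence (sketch, error handling omitted):
 *
 *	transport_init_se_cmd(se_cmd, tfo, se_sess, length, dir, attr, sense);
 *	target_get_sess_cmd(se_cmd, ack_kref);
 *	transport_lookup_cmd_lun(se_cmd, unpacked_lun);
 *	target_setup_cmd_from_cdb(se_cmd, cdb);
 *	transport_handle_cdb_direct(se_cmd);
 *
 * Driving these steps by hand is only needed by fabrics with unusual
 * setup requirements.
 */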
sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using"
			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		return TCM_INVALID_CDB_FIELD;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;
	cmd->t_bidi_data_sg = sgl_bidi;
	cmd->t_bidi_data_nents = sgl_bidi_count;

	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}

/*
 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
 *			 se_cmd + use pre-allocated SGL memory.
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 * @sgl: struct scatterlist memory for unidirectional mapping
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
 * @sgl_prot: struct scatterlist memory protection information
 * @sgl_prot_count: scatterlist count for protection information
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 */
int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags,
		struct scatterlist *sgl, u32 sgl_count,
		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
		struct scatterlist *sgl_prot, u32 sgl_prot_count)
{
	struct se_portal_group *se_tpg;
	sense_reason_t rc;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation.  From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);

	if (flags & TARGET_SCF_USE_CPUID)
		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
	else
		se_cmd->cpuid = WORK_CPU_UNBOUND;

	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret)
		return ret;
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
		target_put_sess_cmd(se_cmd);
		return 0;
	}

	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd, rc);
		return 0;
	}

	/*
	 * Save pointers for SGLs containing protection information,
	 * if present.
	 */
	if (sgl_prot_count) {
		se_cmd->t_prot_sg = sgl_prot;
		se_cmd->t_prot_nents = sgl_prot_count;
		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
	}

	/*
	 * When a non zero sgl_count has been passed perform SGL passthrough
	 * mapping for pre-allocated fabric memory instead of having target
	 * core perform an internal SGL allocation..
	 */
	if (sgl_count != 0) {
		BUG_ON(!sgl);

		/*
		 * A work-around for tcm_loop as some userspace code via
		 * scsi-generic do not memset their associated read buffers,
		 * so go ahead and do that here for type non-data CDBs.  Also
		 * note that this is currently guaranteed to be a single SGL
		 * for this case by target core in target_setup_cmd_from_cdb()
		 * -> transport_generic_cmd_sequencer().
		 */
		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
		     se_cmd->data_direction == DMA_FROM_DEVICE) {
			unsigned char *buf = NULL;

			if (sgl)
				buf = kmap(sg_page(sgl)) + sgl->offset;

			if (buf) {
				memset(buf, 0, sgl->length);
				kunmap(sg_page(sgl));
			}
		}

		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
				sgl_bidi, sgl_bidi_count);
		if (rc != 0) {
			transport_generic_request_failure(se_cmd, rc);
			return 0;
		}
	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);
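/*
 * Typical fabric call into the plain (internally allocated SGL) variant
 * defined below; my_cmd and its fields are hypothetical stand-ins for a
 * fabric's per-I/O descriptor:
 *
 *	my_cmd->se_cmd.tag = my_cmd->wire_tag;
 *	if (target_submit_cmd(&my_cmd->se_cmd, se_sess, my_cmd->cdb,
 *			      my_cmd->sense_buf, unpacked_lun,
 *			      my_cmd->transfer_len, TCM_SIMPLE_TAG,
 *			      DMA_FROM_DEVICE, TARGET_SCF_ACK_KREF))
 *		goto out_fail;
 *
 * Setup failures such as an invalid CDB are still reported to the
 * initiator via ->queue_status(); only an active shutdown failure shows
 * up as a non zero return value here.
 */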
/*
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 *
 * It also assumes internal target core SGL memory allocation.
 */
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
			unpacked_lun, data_length, task_attr, data_dir,
			flags, NULL, 0, NULL, 0, NULL, 0);
}
EXPORT_SYMBOL(target_submit_cmd);

static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);

	transport_cmd_check_stop_to_fabric(se_cmd);
}

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *                     for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 **/

int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, u64 tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, TCM_SIMPLE_TAG, sense);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

	/* See target_submit_cmd for commentary */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret) {
		core_tmr_release_req(se_cmd->se_tmr_req);
		return ret;
	}

	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
	if (ret) {
		/*
		 * For callback during failure handling, push this work off
		 * to process context with TMR_LUN_DOES_NOT_EXIST status.
		 */
		INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
		schedule_work(&se_cmd->work);
		return 0;
	}
	transport_generic_handle_tmr(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_tmr);
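/*
 * Example TMR submission (sketch): aborting one outstanding command by its
 * wire-level tag from a fabric's task management handler; tmr_priv and
 * wire_tag are hypothetical fabric-side names.
 *
 *	rc = target_submit_tmr(&tmr_cmd->se_cmd, se_sess, NULL, unpacked_lun,
 *			       tmr_priv, TMR_ABORT_TASK, GFP_KERNEL,
 *			       wire_tag, TARGET_SCF_ACK_KREF);
 *	if (rc < 0)
 *		goto out_release;
 *
 * The TMR response is always delivered asynchronously through
 * ->queue_tm_rsp(), including the TMR_LUN_DOES_NOT_EXIST failure path in
 * target_submit_tmr() above.
 */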
/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
void transport_generic_request_failure(struct se_cmd *cmd,
		sense_reason_t sense_reason)
{
	int ret = 0, post_ret = 0;

	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
		" CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
	pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
		cmd->se_tfo->get_cmd_state(cmd),
		cmd->t_state, sense_reason);
	pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
		(cmd->transport_state & CMD_T_ACTIVE) != 0,
		(cmd->transport_state & CMD_T_STOP) != 0,
		(cmd->transport_state & CMD_T_SENT) != 0);

	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	transport_complete_task_attr(cmd);
	/*
	 * Handle special case for COMPARE_AND_WRITE failure, where the
	 * callback is expected to drop the per device ->caw_sem.
	 */
	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
	     cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd, false, &post_ret);

	switch (sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_PARAMETER_LIST_LENGTH_ERROR:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_ADDRESS_OUT_OF_RANGE:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
		break;
	case TCM_OUT_OF_RESOURCES:
		sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		break;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
					       cmd->orig_fe_lun, 0x2C,
					ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
		}
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		goto check_stop;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0], sense_reason);
		sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}

	ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

check_stop:
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
EXPORT_SYMBOL(transport_generic_request_failure);

void __target_execute_cmd(struct se_cmd *cmd)
{
	sense_reason_t ret;

	if (cmd->execute_cmd) {
		ret = cmd->execute_cmd(cmd);
		if (ret) {
			spin_lock_irq(&cmd->t_state_lock);
			cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
			spin_unlock_irq(&cmd->t_state_lock);

			transport_generic_request_failure(cmd, ret);
		}
	}
}

static int target_write_prot_action(struct se_cmd *cmd)
{
	u32 sectors;
	/*
	 * Perform WRITE_INSERT of PI using software emulation when backend
	 * device has PI enabled, if the transport has not already generated
	 * PI using hardware WRITE_INSERT offload.
	 */
	switch (cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
			sbc_dif_generate(cmd);
		break;
	case TARGET_PROT_DOUT_STRIP:
		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
			break;

		sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
		cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
					     sectors, 0, cmd->t_prot_sg, 0);
		if (unlikely(cmd->pi_err)) {
			spin_lock_irq(&cmd->t_state_lock);
			cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
			spin_unlock_irq(&cmd->t_state_lock);
			transport_generic_request_failure(cmd, cmd->pi_err);
			return -1;
		}
		break;
	default:
		break;
	}

	return 0;
}

static bool target_handle_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return false;

	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if present allow the
	 * passed struct se_cmd to be executed ahead of the queued tasks.
	 */
	switch (cmd->sam_task_attr) {
	case TCM_HEAD_TAG:
		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
			 cmd->t_task_cdb[0]);
		return false;
	case TCM_ORDERED_TAG:
		atomic_inc_mb(&dev->dev_ordered_sync);

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
			 cmd->t_task_cdb[0]);

		/*
		 * Execute an ORDERED command if no other older commands
		 * exist that need to be completed first.
1862	 */
1863		if (!atomic_read(&dev->simple_cmds))
1864			return false;
1865		break;
1866	default:
1867		/*
1868		 * For SIMPLE and UNTAGGED Task Attribute commands
1869		 */
1870		atomic_inc_mb(&dev->simple_cmds);
1871		break;
1872	}
1873
1874	if (atomic_read(&dev->dev_ordered_sync) == 0)
1875		return false;
1876
1877	spin_lock(&dev->delayed_cmd_lock);
1878	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
1879	spin_unlock(&dev->delayed_cmd_lock);
1880
1881	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
1882		cmd->t_task_cdb[0], cmd->sam_task_attr);
1883	return true;
1884 }
1885
1886 static int __transport_check_aborted_status(struct se_cmd *, int);
1887
1888 void target_execute_cmd(struct se_cmd *cmd)
1889 {
1890	/*
1891	 * Determine if frontend context caller is requesting the stopping of
1892	 * this command for frontend exceptions.
1893	 *
1894	 * If the received CDB has already been aborted, stop processing it here.
1895	 */
1896	spin_lock_irq(&cmd->t_state_lock);
1897	if (__transport_check_aborted_status(cmd, 1)) {
1898		spin_unlock_irq(&cmd->t_state_lock);
1899		return;
1900	}
1901	if (cmd->transport_state & CMD_T_STOP) {
1902		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
1903			__func__, __LINE__, cmd->tag);
1904
1905		spin_unlock_irq(&cmd->t_state_lock);
1906		complete_all(&cmd->t_transport_stop_comp);
1907		return;
1908	}
1909
1910	cmd->t_state = TRANSPORT_PROCESSING;
1911	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
1912	spin_unlock_irq(&cmd->t_state_lock);
1913
1914	if (target_write_prot_action(cmd))
1915		return;
1916
1917	if (target_handle_task_attr(cmd)) {
1918		spin_lock_irq(&cmd->t_state_lock);
1919		cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
1920		spin_unlock_irq(&cmd->t_state_lock);
1921		return;
1922	}
1923
1924	__target_execute_cmd(cmd);
1925 }
1926 EXPORT_SYMBOL(target_execute_cmd);
1927
1928 /*
1929  * Process all commands up to the last received ORDERED task attribute which
1930  * requires another blocking boundary
1931  */
1932 static void target_restart_delayed_cmds(struct se_device *dev)
1933 {
1934	for (;;) {
1935		struct se_cmd *cmd;
1936
1937		spin_lock(&dev->delayed_cmd_lock);
1938		if (list_empty(&dev->delayed_cmd_list)) {
1939			spin_unlock(&dev->delayed_cmd_lock);
1940			break;
1941		}
1942
1943		cmd = list_entry(dev->delayed_cmd_list.next,
1944			struct se_cmd, se_delayed_node);
1945		list_del(&cmd->se_delayed_node);
1946		spin_unlock(&dev->delayed_cmd_lock);
1947
1948		__target_execute_cmd(cmd);
1949
1950		if (cmd->sam_task_attr == TCM_ORDERED_TAG)
1951			break;
1952	}
1953 }
1954
1955 /*
1956  * Called from I/O completion to determine which dormant/delayed
1957  * and ordered cmds need to have their tasks added to the execution queue.
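 *
 * The accounting here mirrors target_handle_task_attr(): SIMPLE commands
 * drop dev->simple_cmds, ORDERED commands drop dev->dev_ordered_sync, and
 * in all cases dev_cur_ordered_id is bumped before any commands parked on
 * dev->delayed_cmd_list are restarted.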
1958 */ 1959 static void transport_complete_task_attr(struct se_cmd *cmd) 1960 { 1961 struct se_device *dev = cmd->se_dev; 1962 1963 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1964 return; 1965 1966 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { 1967 atomic_dec_mb(&dev->simple_cmds); 1968 dev->dev_cur_ordered_id++; 1969 pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n", 1970 dev->dev_cur_ordered_id); 1971 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { 1972 dev->dev_cur_ordered_id++; 1973 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", 1974 dev->dev_cur_ordered_id); 1975 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { 1976 atomic_dec_mb(&dev->dev_ordered_sync); 1977 1978 dev->dev_cur_ordered_id++; 1979 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", 1980 dev->dev_cur_ordered_id); 1981 } 1982 1983 target_restart_delayed_cmds(dev); 1984 } 1985 1986 static void transport_complete_qf(struct se_cmd *cmd) 1987 { 1988 int ret = 0; 1989 1990 transport_complete_task_attr(cmd); 1991 1992 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 1993 trace_target_cmd_complete(cmd); 1994 ret = cmd->se_tfo->queue_status(cmd); 1995 goto out; 1996 } 1997 1998 switch (cmd->data_direction) { 1999 case DMA_FROM_DEVICE: 2000 if (cmd->scsi_status) 2001 goto queue_status; 2002 2003 trace_target_cmd_complete(cmd); 2004 ret = cmd->se_tfo->queue_data_in(cmd); 2005 break; 2006 case DMA_TO_DEVICE: 2007 if (cmd->se_cmd_flags & SCF_BIDI) { 2008 ret = cmd->se_tfo->queue_data_in(cmd); 2009 break; 2010 } 2011 /* Fall through for DMA_TO_DEVICE */ 2012 case DMA_NONE: 2013 queue_status: 2014 trace_target_cmd_complete(cmd); 2015 ret = cmd->se_tfo->queue_status(cmd); 2016 break; 2017 default: 2018 break; 2019 } 2020 2021 out: 2022 if (ret < 0) { 2023 transport_handle_queue_full(cmd, cmd->se_dev); 2024 return; 2025 } 2026 transport_lun_remove_cmd(cmd); 2027 transport_cmd_check_stop_to_fabric(cmd); 2028 } 2029 2030 static void transport_handle_queue_full( 2031 struct se_cmd *cmd, 2032 struct se_device *dev) 2033 { 2034 spin_lock_irq(&dev->qf_cmd_lock); 2035 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 2036 atomic_inc_mb(&dev->dev_qf_count); 2037 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 2038 2039 schedule_work(&cmd->se_dev->qf_work_queue); 2040 } 2041 2042 static bool target_read_prot_action(struct se_cmd *cmd) 2043 { 2044 switch (cmd->prot_op) { 2045 case TARGET_PROT_DIN_STRIP: 2046 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { 2047 u32 sectors = cmd->data_length >> 2048 ilog2(cmd->se_dev->dev_attrib.block_size); 2049 2050 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 2051 sectors, 0, cmd->t_prot_sg, 2052 0); 2053 if (cmd->pi_err) 2054 return true; 2055 } 2056 break; 2057 case TARGET_PROT_DIN_INSERT: 2058 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) 2059 break; 2060 2061 sbc_dif_generate(cmd); 2062 break; 2063 default: 2064 break; 2065 } 2066 2067 return false; 2068 } 2069 2070 static void target_complete_ok_work(struct work_struct *work) 2071 { 2072 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2073 int ret; 2074 2075 /* 2076 * Check if we need to move delayed/dormant tasks from cmds on the 2077 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 2078 * Attribute. 
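	 * The same accounting is also performed on the failure path via
	 * transport_generic_request_failure() above.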
2079 */ 2080 transport_complete_task_attr(cmd); 2081 2082 /* 2083 * Check to schedule QUEUE_FULL work, or execute an existing 2084 * cmd->transport_qf_callback() 2085 */ 2086 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 2087 schedule_work(&cmd->se_dev->qf_work_queue); 2088 2089 /* 2090 * Check if we need to send a sense buffer from 2091 * the struct se_cmd in question. 2092 */ 2093 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 2094 WARN_ON(!cmd->scsi_status); 2095 ret = transport_send_check_condition_and_sense( 2096 cmd, 0, 1); 2097 if (ret == -EAGAIN || ret == -ENOMEM) 2098 goto queue_full; 2099 2100 transport_lun_remove_cmd(cmd); 2101 transport_cmd_check_stop_to_fabric(cmd); 2102 return; 2103 } 2104 /* 2105 * Check for a callback, used by amongst other things 2106 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. 2107 */ 2108 if (cmd->transport_complete_callback) { 2109 sense_reason_t rc; 2110 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); 2111 bool zero_dl = !(cmd->data_length); 2112 int post_ret = 0; 2113 2114 rc = cmd->transport_complete_callback(cmd, true, &post_ret); 2115 if (!rc && !post_ret) { 2116 if (caw && zero_dl) 2117 goto queue_rsp; 2118 2119 return; 2120 } else if (rc) { 2121 ret = transport_send_check_condition_and_sense(cmd, 2122 rc, 0); 2123 if (ret == -EAGAIN || ret == -ENOMEM) 2124 goto queue_full; 2125 2126 transport_lun_remove_cmd(cmd); 2127 transport_cmd_check_stop_to_fabric(cmd); 2128 return; 2129 } 2130 } 2131 2132 queue_rsp: 2133 switch (cmd->data_direction) { 2134 case DMA_FROM_DEVICE: 2135 if (cmd->scsi_status) 2136 goto queue_status; 2137 2138 atomic_long_add(cmd->data_length, 2139 &cmd->se_lun->lun_stats.tx_data_octets); 2140 /* 2141 * Perform READ_STRIP of PI using software emulation when 2142 * backend had PI enabled, if the transport will not be 2143 * performing hardware READ_STRIP offload. 
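	 * target_read_prot_action() only falls back to sbc_dif_verify() when
	 * the session did not advertise TARGET_PROT_DIN_STRIP support; a
	 * verification failure is surfaced through cmd->pi_err and turned
	 * into a CHECK CONDITION below.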
2144 */ 2145 if (target_read_prot_action(cmd)) { 2146 ret = transport_send_check_condition_and_sense(cmd, 2147 cmd->pi_err, 0); 2148 if (ret == -EAGAIN || ret == -ENOMEM) 2149 goto queue_full; 2150 2151 transport_lun_remove_cmd(cmd); 2152 transport_cmd_check_stop_to_fabric(cmd); 2153 return; 2154 } 2155 2156 trace_target_cmd_complete(cmd); 2157 ret = cmd->se_tfo->queue_data_in(cmd); 2158 if (ret == -EAGAIN || ret == -ENOMEM) 2159 goto queue_full; 2160 break; 2161 case DMA_TO_DEVICE: 2162 atomic_long_add(cmd->data_length, 2163 &cmd->se_lun->lun_stats.rx_data_octets); 2164 /* 2165 * Check if we need to send READ payload for BIDI-COMMAND 2166 */ 2167 if (cmd->se_cmd_flags & SCF_BIDI) { 2168 atomic_long_add(cmd->data_length, 2169 &cmd->se_lun->lun_stats.tx_data_octets); 2170 ret = cmd->se_tfo->queue_data_in(cmd); 2171 if (ret == -EAGAIN || ret == -ENOMEM) 2172 goto queue_full; 2173 break; 2174 } 2175 /* Fall through for DMA_TO_DEVICE */ 2176 case DMA_NONE: 2177 queue_status: 2178 trace_target_cmd_complete(cmd); 2179 ret = cmd->se_tfo->queue_status(cmd); 2180 if (ret == -EAGAIN || ret == -ENOMEM) 2181 goto queue_full; 2182 break; 2183 default: 2184 break; 2185 } 2186 2187 transport_lun_remove_cmd(cmd); 2188 transport_cmd_check_stop_to_fabric(cmd); 2189 return; 2190 2191 queue_full: 2192 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 2193 " data_direction: %d\n", cmd, cmd->data_direction); 2194 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 2195 transport_handle_queue_full(cmd, cmd->se_dev); 2196 } 2197 2198 static inline void transport_free_sgl(struct scatterlist *sgl, int nents) 2199 { 2200 struct scatterlist *sg; 2201 int count; 2202 2203 for_each_sg(sgl, sg, nents, count) 2204 __free_page(sg_page(sg)); 2205 2206 kfree(sgl); 2207 } 2208 2209 static inline void transport_reset_sgl_orig(struct se_cmd *cmd) 2210 { 2211 /* 2212 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE 2213 * emulation, and free + reset pointers if necessary.. 2214 */ 2215 if (!cmd->t_data_sg_orig) 2216 return; 2217 2218 kfree(cmd->t_data_sg); 2219 cmd->t_data_sg = cmd->t_data_sg_orig; 2220 cmd->t_data_sg_orig = NULL; 2221 cmd->t_data_nents = cmd->t_data_nents_orig; 2222 cmd->t_data_nents_orig = 0; 2223 } 2224 2225 static inline void transport_free_pages(struct se_cmd *cmd) 2226 { 2227 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2228 transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); 2229 cmd->t_prot_sg = NULL; 2230 cmd->t_prot_nents = 0; 2231 } 2232 2233 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { 2234 /* 2235 * Release special case READ buffer payload required for 2236 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE 2237 */ 2238 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { 2239 transport_free_sgl(cmd->t_bidi_data_sg, 2240 cmd->t_bidi_data_nents); 2241 cmd->t_bidi_data_sg = NULL; 2242 cmd->t_bidi_data_nents = 0; 2243 } 2244 transport_reset_sgl_orig(cmd); 2245 return; 2246 } 2247 transport_reset_sgl_orig(cmd); 2248 2249 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2250 cmd->t_data_sg = NULL; 2251 cmd->t_data_nents = 0; 2252 2253 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 2254 cmd->t_bidi_data_sg = NULL; 2255 cmd->t_bidi_data_nents = 0; 2256 } 2257 2258 /** 2259 * transport_put_cmd - release a reference to a command 2260 * @cmd: command to release 2261 * 2262 * This routine releases our reference to the command and frees it if possible. 
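 * The actual release happens in target_release_cmd_kref() once the final
 * target_put_sess_cmd() reference is dropped.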
2263 */ 2264 static int transport_put_cmd(struct se_cmd *cmd) 2265 { 2266 BUG_ON(!cmd->se_tfo); 2267 /* 2268 * If this cmd has been setup with target_get_sess_cmd(), drop 2269 * the kref and call ->release_cmd() in kref callback. 2270 */ 2271 return target_put_sess_cmd(cmd); 2272 } 2273 2274 void *transport_kmap_data_sg(struct se_cmd *cmd) 2275 { 2276 struct scatterlist *sg = cmd->t_data_sg; 2277 struct page **pages; 2278 int i; 2279 2280 /* 2281 * We need to take into account a possible offset here for fabrics like 2282 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 2283 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 2284 */ 2285 if (!cmd->t_data_nents) 2286 return NULL; 2287 2288 BUG_ON(!sg); 2289 if (cmd->t_data_nents == 1) 2290 return kmap(sg_page(sg)) + sg->offset; 2291 2292 /* >1 page. use vmap */ 2293 pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL); 2294 if (!pages) 2295 return NULL; 2296 2297 /* convert sg[] to pages[] */ 2298 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 2299 pages[i] = sg_page(sg); 2300 } 2301 2302 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 2303 kfree(pages); 2304 if (!cmd->t_data_vmap) 2305 return NULL; 2306 2307 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 2308 } 2309 EXPORT_SYMBOL(transport_kmap_data_sg); 2310 2311 void transport_kunmap_data_sg(struct se_cmd *cmd) 2312 { 2313 if (!cmd->t_data_nents) { 2314 return; 2315 } else if (cmd->t_data_nents == 1) { 2316 kunmap(sg_page(cmd->t_data_sg)); 2317 return; 2318 } 2319 2320 vunmap(cmd->t_data_vmap); 2321 cmd->t_data_vmap = NULL; 2322 } 2323 EXPORT_SYMBOL(transport_kunmap_data_sg); 2324 2325 int 2326 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, 2327 bool zero_page) 2328 { 2329 struct scatterlist *sg; 2330 struct page *page; 2331 gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0; 2332 unsigned int nent; 2333 int i = 0; 2334 2335 nent = DIV_ROUND_UP(length, PAGE_SIZE); 2336 sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL); 2337 if (!sg) 2338 return -ENOMEM; 2339 2340 sg_init_table(sg, nent); 2341 2342 while (length) { 2343 u32 page_len = min_t(u32, length, PAGE_SIZE); 2344 page = alloc_page(GFP_KERNEL | zero_flag); 2345 if (!page) 2346 goto out; 2347 2348 sg_set_page(&sg[i], page, page_len, 0); 2349 length -= page_len; 2350 i++; 2351 } 2352 *sgl = sg; 2353 *nents = nent; 2354 return 0; 2355 2356 out: 2357 while (i > 0) { 2358 i--; 2359 __free_page(sg_page(&sg[i])); 2360 } 2361 kfree(sg); 2362 return -ENOMEM; 2363 } 2364 2365 /* 2366 * Allocate any required resources to execute the command. For writes we 2367 * might not have the payload yet, so notify the fabric via a call to 2368 * ->write_pending instead. Otherwise place it on the execution queue. 2369 */ 2370 sense_reason_t 2371 transport_generic_new_cmd(struct se_cmd *cmd) 2372 { 2373 int ret = 0; 2374 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); 2375 2376 if (cmd->prot_op != TARGET_PROT_NORMAL && 2377 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2378 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, 2379 cmd->prot_length, true); 2380 if (ret < 0) 2381 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2382 } 2383 2384 /* 2385 * Determine is the TCM fabric module has already allocated physical 2386 * memory, and is directly calling transport_generic_map_mem_to_cmd() 2387 * beforehand. 
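	 * (signalled by SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC in se_cmd_flags).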
2388 */ 2389 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 2390 cmd->data_length) { 2391 2392 if ((cmd->se_cmd_flags & SCF_BIDI) || 2393 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { 2394 u32 bidi_length; 2395 2396 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) 2397 bidi_length = cmd->t_task_nolb * 2398 cmd->se_dev->dev_attrib.block_size; 2399 else 2400 bidi_length = cmd->data_length; 2401 2402 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2403 &cmd->t_bidi_data_nents, 2404 bidi_length, zero_flag); 2405 if (ret < 0) 2406 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2407 } 2408 2409 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2410 cmd->data_length, zero_flag); 2411 if (ret < 0) 2412 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2413 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 2414 cmd->data_length) { 2415 /* 2416 * Special case for COMPARE_AND_WRITE with fabrics 2417 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. 2418 */ 2419 u32 caw_length = cmd->t_task_nolb * 2420 cmd->se_dev->dev_attrib.block_size; 2421 2422 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2423 &cmd->t_bidi_data_nents, 2424 caw_length, zero_flag); 2425 if (ret < 0) 2426 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2427 } 2428 /* 2429 * If this command is not a write we can execute it right here, 2430 * for write buffers we need to notify the fabric driver first 2431 * and let it call back once the write buffers are ready. 2432 */ 2433 target_add_to_state_list(cmd); 2434 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { 2435 target_execute_cmd(cmd); 2436 return 0; 2437 } 2438 transport_cmd_check_stop(cmd, false, true); 2439 2440 ret = cmd->se_tfo->write_pending(cmd); 2441 if (ret == -EAGAIN || ret == -ENOMEM) 2442 goto queue_full; 2443 2444 /* fabric drivers should only return -EAGAIN or -ENOMEM as error */ 2445 WARN_ON(ret); 2446 2447 return (!ret) ? 
0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2448
2449 queue_full:
2450	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
2451	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
2452	transport_handle_queue_full(cmd, cmd->se_dev);
2453	return 0;
2454 }
2455 EXPORT_SYMBOL(transport_generic_new_cmd);
2456
2457 static void transport_write_pending_qf(struct se_cmd *cmd)
2458 {
2459	int ret;
2460
2461	ret = cmd->se_tfo->write_pending(cmd);
2462	if (ret == -EAGAIN || ret == -ENOMEM) {
2463		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
2464			cmd);
2465		transport_handle_queue_full(cmd, cmd->se_dev);
2466	}
2467 }
2468
2469 static bool
2470 __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
2471	unsigned long *flags);
2472
2473 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2474 {
2475	unsigned long flags;
2476
2477	spin_lock_irqsave(&cmd->t_state_lock, flags);
2478	__transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
2479	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2480 }
2481
2482 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2483 {
2484	int ret = 0;
2485	bool aborted = false, tas = false;
2486
2487	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
2488		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2489			target_wait_free_cmd(cmd, &aborted, &tas);
2490
2491		if (!aborted || tas)
2492			ret = transport_put_cmd(cmd);
2493	} else {
2494		if (wait_for_tasks)
2495			target_wait_free_cmd(cmd, &aborted, &tas);
2496		/*
2497		 * Handle WRITE failure case where transport_generic_new_cmd()
2498		 * has already added se_cmd to state_list, but fabric has
2499		 * failed command before I/O submission.
2500		 */
2501		if (cmd->state_active)
2502			target_remove_from_state_list(cmd);
2503
2504		if (cmd->se_lun)
2505			transport_lun_remove_cmd(cmd);
2506
2507		if (!aborted || tas)
2508			ret = transport_put_cmd(cmd);
2509	}
2510	/*
2511	 * If the task has been internally aborted due to TMR ABORT_TASK
2512	 * or LUN_RESET, target_core_tmr.c is responsible for performing
2513	 * the remaining calls to target_put_sess_cmd(), and not the
2514	 * callers of this function.
2515	 */
2516	if (aborted) {
2517		pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
2518		wait_for_completion(&cmd->cmd_wait_comp);
2519		cmd->se_tfo->release_cmd(cmd);
2520		ret = 1;
2521	}
2522	return ret;
2523 }
2524 EXPORT_SYMBOL(transport_generic_free_cmd);
2525
2526 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
2527  * @se_cmd: command descriptor to add
2528  * @ack_kref: Signal that fabric will perform an acknowledging target_put_sess_cmd()
2529  */
2530 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2531 {
2532	struct se_session *se_sess = se_cmd->se_sess;
2533	unsigned long flags;
2534	int ret = 0;
2535
2536	/*
2537	 * Add a second kref if the fabric caller is expecting to handle
2538	 * fabric acknowledgement that requires two target_put_sess_cmd()
2539	 * invocations before se_cmd descriptor release.
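	 *
	 * Usage sketch for a hypothetical fabric caller (illustrative only,
	 * not taken from any in-tree driver):
	 *
	 *	rc = target_get_sess_cmd(se_cmd, true);
	 *	if (rc)			(-ESHUTDOWN: session tearing down)
	 *		goto reject_cmd;
	 *	...
	 *	transport_generic_free_cmd(se_cmd, 0);	(drops the base kref)
	 *	target_put_sess_cmd(se_cmd);		(drops the ack kref)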
2540 */ 2541 if (ack_kref) 2542 kref_get(&se_cmd->cmd_kref); 2543 2544 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2545 if (se_sess->sess_tearing_down) { 2546 ret = -ESHUTDOWN; 2547 goto out; 2548 } 2549 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2550 out: 2551 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2552 2553 if (ret && ack_kref) 2554 target_put_sess_cmd(se_cmd); 2555 2556 return ret; 2557 } 2558 EXPORT_SYMBOL(target_get_sess_cmd); 2559 2560 static void target_free_cmd_mem(struct se_cmd *cmd) 2561 { 2562 transport_free_pages(cmd); 2563 2564 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 2565 core_tmr_release_req(cmd->se_tmr_req); 2566 if (cmd->t_task_cdb != cmd->__t_task_cdb) 2567 kfree(cmd->t_task_cdb); 2568 } 2569 2570 static void target_release_cmd_kref(struct kref *kref) 2571 { 2572 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2573 struct se_session *se_sess = se_cmd->se_sess; 2574 unsigned long flags; 2575 bool fabric_stop; 2576 2577 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2578 if (list_empty(&se_cmd->se_cmd_list)) { 2579 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2580 target_free_cmd_mem(se_cmd); 2581 se_cmd->se_tfo->release_cmd(se_cmd); 2582 return; 2583 } 2584 2585 spin_lock(&se_cmd->t_state_lock); 2586 fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP); 2587 spin_unlock(&se_cmd->t_state_lock); 2588 2589 if (se_cmd->cmd_wait_set || fabric_stop) { 2590 list_del_init(&se_cmd->se_cmd_list); 2591 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2592 target_free_cmd_mem(se_cmd); 2593 complete(&se_cmd->cmd_wait_comp); 2594 return; 2595 } 2596 list_del_init(&se_cmd->se_cmd_list); 2597 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2598 2599 target_free_cmd_mem(se_cmd); 2600 se_cmd->se_tfo->release_cmd(se_cmd); 2601 } 2602 2603 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put 2604 * @se_cmd: command descriptor to drop 2605 */ 2606 int target_put_sess_cmd(struct se_cmd *se_cmd) 2607 { 2608 struct se_session *se_sess = se_cmd->se_sess; 2609 2610 if (!se_sess) { 2611 target_free_cmd_mem(se_cmd); 2612 se_cmd->se_tfo->release_cmd(se_cmd); 2613 return 1; 2614 } 2615 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); 2616 } 2617 EXPORT_SYMBOL(target_put_sess_cmd); 2618 2619 /* target_sess_cmd_list_set_waiting - Flag all commands in 2620 * sess_cmd_list to complete cmd_wait_comp. Set 2621 * sess_tearing_down so no more commands are queued. 
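 * Callers normally follow this with target_wait_for_sess_cmds() below to
 * block until each flagged command has completed cmd_wait_comp.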
2622 * @se_sess: session to flag 2623 */ 2624 void target_sess_cmd_list_set_waiting(struct se_session *se_sess) 2625 { 2626 struct se_cmd *se_cmd; 2627 unsigned long flags; 2628 int rc; 2629 2630 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2631 if (se_sess->sess_tearing_down) { 2632 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2633 return; 2634 } 2635 se_sess->sess_tearing_down = 1; 2636 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); 2637 2638 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) { 2639 rc = kref_get_unless_zero(&se_cmd->cmd_kref); 2640 if (rc) { 2641 se_cmd->cmd_wait_set = 1; 2642 spin_lock(&se_cmd->t_state_lock); 2643 se_cmd->transport_state |= CMD_T_FABRIC_STOP; 2644 spin_unlock(&se_cmd->t_state_lock); 2645 } 2646 } 2647 2648 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2649 } 2650 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); 2651 2652 /* target_wait_for_sess_cmds - Wait for outstanding descriptors 2653 * @se_sess: session to wait for active I/O 2654 */ 2655 void target_wait_for_sess_cmds(struct se_session *se_sess) 2656 { 2657 struct se_cmd *se_cmd, *tmp_cmd; 2658 unsigned long flags; 2659 bool tas; 2660 2661 list_for_each_entry_safe(se_cmd, tmp_cmd, 2662 &se_sess->sess_wait_list, se_cmd_list) { 2663 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" 2664 " %d\n", se_cmd, se_cmd->t_state, 2665 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2666 2667 spin_lock_irqsave(&se_cmd->t_state_lock, flags); 2668 tas = (se_cmd->transport_state & CMD_T_TAS); 2669 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 2670 2671 if (!target_put_sess_cmd(se_cmd)) { 2672 if (tas) 2673 target_put_sess_cmd(se_cmd); 2674 } 2675 2676 wait_for_completion(&se_cmd->cmd_wait_comp); 2677 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" 2678 " fabric state: %d\n", se_cmd, se_cmd->t_state, 2679 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2680 2681 se_cmd->se_tfo->release_cmd(se_cmd); 2682 } 2683 2684 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2685 WARN_ON(!list_empty(&se_sess->sess_cmd_list)); 2686 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2687 2688 } 2689 EXPORT_SYMBOL(target_wait_for_sess_cmds); 2690 2691 void transport_clear_lun_ref(struct se_lun *lun) 2692 { 2693 percpu_ref_kill(&lun->lun_ref); 2694 wait_for_completion(&lun->lun_ref_comp); 2695 } 2696 2697 static bool 2698 __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, 2699 bool *aborted, bool *tas, unsigned long *flags) 2700 __releases(&cmd->t_state_lock) 2701 __acquires(&cmd->t_state_lock) 2702 { 2703 2704 assert_spin_locked(&cmd->t_state_lock); 2705 WARN_ON_ONCE(!irqs_disabled()); 2706 2707 if (fabric_stop) 2708 cmd->transport_state |= CMD_T_FABRIC_STOP; 2709 2710 if (cmd->transport_state & CMD_T_ABORTED) 2711 *aborted = true; 2712 2713 if (cmd->transport_state & CMD_T_TAS) 2714 *tas = true; 2715 2716 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 2717 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2718 return false; 2719 2720 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 2721 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2722 return false; 2723 2724 if (!(cmd->transport_state & CMD_T_ACTIVE)) 2725 return false; 2726 2727 if (fabric_stop && *aborted) 2728 return false; 2729 2730 cmd->transport_state |= CMD_T_STOP; 2731 2732 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d," 2733 " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag, 2734 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); 2735 2736 
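	/*
	 * We cannot sleep on t_transport_stop_comp while holding the
	 * spinlock, so drop t_state_lock across the wait and re-take it
	 * below; this matches the __releases()/__acquires() annotations
	 * on this function.
	 */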
spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 2737 2738 wait_for_completion(&cmd->t_transport_stop_comp); 2739 2740 spin_lock_irqsave(&cmd->t_state_lock, *flags); 2741 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 2742 2743 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->" 2744 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); 2745 2746 return true; 2747 } 2748 2749 /** 2750 * transport_wait_for_tasks - wait for completion to occur 2751 * @cmd: command to wait 2752 * 2753 * Called from frontend fabric context to wait for storage engine 2754 * to pause and/or release frontend generated struct se_cmd. 2755 */ 2756 bool transport_wait_for_tasks(struct se_cmd *cmd) 2757 { 2758 unsigned long flags; 2759 bool ret, aborted = false, tas = false; 2760 2761 spin_lock_irqsave(&cmd->t_state_lock, flags); 2762 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); 2763 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2764 2765 return ret; 2766 } 2767 EXPORT_SYMBOL(transport_wait_for_tasks); 2768 2769 struct sense_info { 2770 u8 key; 2771 u8 asc; 2772 u8 ascq; 2773 bool add_sector_info; 2774 }; 2775 2776 static const struct sense_info sense_info_table[] = { 2777 [TCM_NO_SENSE] = { 2778 .key = NOT_READY 2779 }, 2780 [TCM_NON_EXISTENT_LUN] = { 2781 .key = ILLEGAL_REQUEST, 2782 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */ 2783 }, 2784 [TCM_UNSUPPORTED_SCSI_OPCODE] = { 2785 .key = ILLEGAL_REQUEST, 2786 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 2787 }, 2788 [TCM_SECTOR_COUNT_TOO_MANY] = { 2789 .key = ILLEGAL_REQUEST, 2790 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 2791 }, 2792 [TCM_UNKNOWN_MODE_PAGE] = { 2793 .key = ILLEGAL_REQUEST, 2794 .asc = 0x24, /* INVALID FIELD IN CDB */ 2795 }, 2796 [TCM_CHECK_CONDITION_ABORT_CMD] = { 2797 .key = ABORTED_COMMAND, 2798 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */ 2799 .ascq = 0x03, 2800 }, 2801 [TCM_INCORRECT_AMOUNT_OF_DATA] = { 2802 .key = ABORTED_COMMAND, 2803 .asc = 0x0c, /* WRITE ERROR */ 2804 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */ 2805 }, 2806 [TCM_INVALID_CDB_FIELD] = { 2807 .key = ILLEGAL_REQUEST, 2808 .asc = 0x24, /* INVALID FIELD IN CDB */ 2809 }, 2810 [TCM_INVALID_PARAMETER_LIST] = { 2811 .key = ILLEGAL_REQUEST, 2812 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ 2813 }, 2814 [TCM_PARAMETER_LIST_LENGTH_ERROR] = { 2815 .key = ILLEGAL_REQUEST, 2816 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ 2817 }, 2818 [TCM_UNEXPECTED_UNSOLICITED_DATA] = { 2819 .key = ILLEGAL_REQUEST, 2820 .asc = 0x0c, /* WRITE ERROR */ 2821 .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */ 2822 }, 2823 [TCM_SERVICE_CRC_ERROR] = { 2824 .key = ABORTED_COMMAND, 2825 .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */ 2826 .ascq = 0x05, /* N/A */ 2827 }, 2828 [TCM_SNACK_REJECTED] = { 2829 .key = ABORTED_COMMAND, 2830 .asc = 0x11, /* READ ERROR */ 2831 .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */ 2832 }, 2833 [TCM_WRITE_PROTECTED] = { 2834 .key = DATA_PROTECT, 2835 .asc = 0x27, /* WRITE PROTECTED */ 2836 }, 2837 [TCM_ADDRESS_OUT_OF_RANGE] = { 2838 .key = ILLEGAL_REQUEST, 2839 .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ 2840 }, 2841 [TCM_CHECK_CONDITION_UNIT_ATTENTION] = { 2842 .key = UNIT_ATTENTION, 2843 }, 2844 [TCM_CHECK_CONDITION_NOT_READY] = { 2845 .key = NOT_READY, 2846 }, 2847 [TCM_MISCOMPARE_VERIFY] = { 2848 .key = MISCOMPARE, 2849 .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */ 2850 .ascq = 0x00, 2851 }, 2852 [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = { 2853 .key = ABORTED_COMMAND, 2854 
.asc = 0x10, 2855 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */ 2856 .add_sector_info = true, 2857 }, 2858 [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = { 2859 .key = ABORTED_COMMAND, 2860 .asc = 0x10, 2861 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ 2862 .add_sector_info = true, 2863 }, 2864 [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = { 2865 .key = ABORTED_COMMAND, 2866 .asc = 0x10, 2867 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ 2868 .add_sector_info = true, 2869 }, 2870 [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = { 2871 /* 2872 * Returning ILLEGAL REQUEST would cause immediate IO errors on 2873 * Solaris initiators. Returning NOT READY instead means the 2874 * operations will be retried a finite number of times and we 2875 * can survive intermittent errors. 2876 */ 2877 .key = NOT_READY, 2878 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */ 2879 }, 2880 }; 2881 2882 static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) 2883 { 2884 const struct sense_info *si; 2885 u8 *buffer = cmd->sense_buffer; 2886 int r = (__force int)reason; 2887 u8 asc, ascq; 2888 bool desc_format = target_sense_desc_format(cmd->se_dev); 2889 2890 if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key) 2891 si = &sense_info_table[r]; 2892 else 2893 si = &sense_info_table[(__force int) 2894 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE]; 2895 2896 if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) { 2897 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); 2898 WARN_ON_ONCE(asc == 0); 2899 } else if (si->asc == 0) { 2900 WARN_ON_ONCE(cmd->scsi_asc == 0); 2901 asc = cmd->scsi_asc; 2902 ascq = cmd->scsi_ascq; 2903 } else { 2904 asc = si->asc; 2905 ascq = si->ascq; 2906 } 2907 2908 scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq); 2909 if (si->add_sector_info) 2910 return scsi_set_sense_information(buffer, 2911 cmd->scsi_sense_length, 2912 cmd->bad_sector); 2913 2914 return 0; 2915 } 2916 2917 int 2918 transport_send_check_condition_and_sense(struct se_cmd *cmd, 2919 sense_reason_t reason, int from_transport) 2920 { 2921 unsigned long flags; 2922 2923 spin_lock_irqsave(&cmd->t_state_lock, flags); 2924 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 2925 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2926 return 0; 2927 } 2928 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 2929 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2930 2931 if (!from_transport) { 2932 int rc; 2933 2934 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 2935 cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 2936 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 2937 rc = translate_sense_reason(cmd, reason); 2938 if (rc) 2939 return rc; 2940 } 2941 2942 trace_target_cmd_complete(cmd); 2943 return cmd->se_tfo->queue_status(cmd); 2944 } 2945 EXPORT_SYMBOL(transport_send_check_condition_and_sense); 2946 2947 static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status) 2948 __releases(&cmd->t_state_lock) 2949 __acquires(&cmd->t_state_lock) 2950 { 2951 assert_spin_locked(&cmd->t_state_lock); 2952 WARN_ON_ONCE(!irqs_disabled()); 2953 2954 if (!(cmd->transport_state & CMD_T_ABORTED)) 2955 return 0; 2956 /* 2957 * If cmd has been aborted but either no status is to be sent or it has 2958 * already been sent, just return 2959 */ 2960 if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) { 2961 if (send_status) 2962 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; 2963 return 1; 2964 } 2965 2966 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED 
status for CDB:"
2967		" 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
2968
2969	cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
2970	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
2971	trace_target_cmd_complete(cmd);
2972
2973	spin_unlock_irq(&cmd->t_state_lock);
2974	cmd->se_tfo->queue_status(cmd);
2975	spin_lock_irq(&cmd->t_state_lock);
2976
2977	return 1;
2978 }
2979
2980 int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
2981 {
2982	int ret;
2983
2984	spin_lock_irq(&cmd->t_state_lock);
2985	ret = __transport_check_aborted_status(cmd, send_status);
2986	spin_unlock_irq(&cmd->t_state_lock);
2987
2988	return ret;
2989 }
2990 EXPORT_SYMBOL(transport_check_aborted_status);
2991
2992 void transport_send_task_abort(struct se_cmd *cmd)
2993 {
2994	unsigned long flags;
2995
2996	spin_lock_irqsave(&cmd->t_state_lock, flags);
2997	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
2998		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2999		return;
3000	}
3001	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3002
3003	/*
3004	 * If there are still expected incoming fabric WRITEs, we wait
3005	 * until they have completed before sending a TASK_ABORTED
3006	 * response. This response with TASK_ABORTED status will be
3007	 * queued back to fabric module by transport_check_aborted_status().
3008	 */
3009	if (cmd->data_direction == DMA_TO_DEVICE) {
3010		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
3011			spin_lock_irqsave(&cmd->t_state_lock, flags);
3012			if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
3013				spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3014				goto send_abort;
3015			}
3016			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
3017			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3018			return;
3019		}
3020	}
3021 send_abort:
3022	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
3023
3024	transport_lun_remove_cmd(cmd);
3025
3026	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
3027		cmd->t_task_cdb[0], cmd->tag);
3028
3029	trace_target_cmd_complete(cmd);
3030	cmd->se_tfo->queue_status(cmd);
3031 }
3032
3033 static void target_tmr_work(struct work_struct *work)
3034 {
3035	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3036	struct se_device *dev = cmd->se_dev;
3037	struct se_tmr_req *tmr = cmd->se_tmr_req;
3038	unsigned long flags;
3039	int ret;
3040
3041	spin_lock_irqsave(&cmd->t_state_lock, flags);
3042	if (cmd->transport_state & CMD_T_ABORTED) {
3043		tmr->response = TMR_FUNCTION_REJECTED;
3044		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3045		goto check_stop;
3046	}
3047	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3048
3049	switch (tmr->function) {
3050	case TMR_ABORT_TASK:
3051		core_tmr_abort_task(dev, tmr, cmd->se_sess);
3052		break;
3053	case TMR_ABORT_TASK_SET:
3054	case TMR_CLEAR_ACA:
3055	case TMR_CLEAR_TASK_SET:
3056		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
3057		break;
3058	case TMR_LUN_RESET:
3059		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
3060		tmr->response = (!ret) ?
TMR_FUNCTION_COMPLETE :
3061			TMR_FUNCTION_REJECTED;
3062		if (tmr->response == TMR_FUNCTION_COMPLETE) {
3063			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
3064				cmd->orig_fe_lun, 0x29,
3065				ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
3066		}
3067		break;
3068	case TMR_TARGET_WARM_RESET:
3069		tmr->response = TMR_FUNCTION_REJECTED;
3070		break;
3071	case TMR_TARGET_COLD_RESET:
3072		tmr->response = TMR_FUNCTION_REJECTED;
3073		break;
3074	default:
3075		pr_err("Unknown TMR function: 0x%02x.\n",
3076			tmr->function);
3077		tmr->response = TMR_FUNCTION_REJECTED;
3078		break;
3079	}
3080
3081	spin_lock_irqsave(&cmd->t_state_lock, flags);
3082	if (cmd->transport_state & CMD_T_ABORTED) {
3083		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3084		goto check_stop;
3085	}
3086	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
3087	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3088
3089	cmd->se_tfo->queue_tm_rsp(cmd);
3090
3091 check_stop:
3092	transport_cmd_check_stop_to_fabric(cmd);
3093 }
3094
3095 int transport_generic_handle_tmr(
3096	struct se_cmd *cmd)
3097 {
3098	unsigned long flags;
3099
3100	spin_lock_irqsave(&cmd->t_state_lock, flags);
3101	cmd->transport_state |= CMD_T_ACTIVE;
3102	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3103
3104	INIT_WORK(&cmd->work, target_tmr_work);
3105	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
3106	return 0;
3107 }
3108 EXPORT_SYMBOL(transport_generic_handle_tmr);
3109
3110 bool
3111 target_check_wce(struct se_device *dev)
3112 {
3113	bool wce = false;
3114
3115	if (dev->transport->get_write_cache)
3116		wce = dev->transport->get_write_cache(dev);
3117	else if (dev->dev_attrib.emulate_write_cache > 0)
3118		wce = true;
3119
3120	return wce;
3121 }
3122
3123 bool
3124 target_check_fua(struct se_device *dev)
3125 {
3126	return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
3127 }
3128
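
/*
 * Usage sketch for the two helpers above, as a backend might apply them.
 * The helper names below (example_queue_io, example_flush_cache) are
 * hypothetical and not part of this subsystem; SCF_FUA is assumed to have
 * been set by the SBC CDB parsing code when the WRITE carried the FUA bit.
 *
 *	static void example_submit_write(struct se_device *dev,
 *					 struct se_cmd *cmd)
 *	{
 *		bool need_flush = target_check_fua(dev) &&
 *				  (cmd->se_cmd_flags & SCF_FUA);
 *
 *		example_queue_io(cmd);			(hypothetical)
 *		if (need_flush)
 *			example_flush_cache(dev);	(hypothetical)
 *	}
 */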