1 /******************************************************************************* 2 * Filename: target_core_transport.c 3 * 4 * This file contains the Generic Target Engine Core. 5 * 6 * (c) Copyright 2002-2013 Datera, Inc. 7 * 8 * Nicholas A. Bellinger <nab@kernel.org> 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2 of the License, or 13 * (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; if not, write to the Free Software 22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 23 * 24 ******************************************************************************/ 25 26 #include <linux/net.h> 27 #include <linux/delay.h> 28 #include <linux/string.h> 29 #include <linux/timer.h> 30 #include <linux/slab.h> 31 #include <linux/spinlock.h> 32 #include <linux/kthread.h> 33 #include <linux/in.h> 34 #include <linux/cdrom.h> 35 #include <linux/module.h> 36 #include <linux/ratelimit.h> 37 #include <linux/vmalloc.h> 38 #include <asm/unaligned.h> 39 #include <net/sock.h> 40 #include <net/tcp.h> 41 #include <scsi/scsi_proto.h> 42 #include <scsi/scsi_common.h> 43 44 #include <target/target_core_base.h> 45 #include <target/target_core_backend.h> 46 #include <target/target_core_fabric.h> 47 48 #include "target_core_internal.h" 49 #include "target_core_alua.h" 50 #include "target_core_pr.h" 51 #include "target_core_ua.h" 52 53 #define CREATE_TRACE_POINTS 54 #include <trace/events/target.h> 55 56 static struct workqueue_struct *target_completion_wq; 57 static struct kmem_cache *se_sess_cache; 58 struct kmem_cache *se_ua_cache; 59 struct kmem_cache *t10_pr_reg_cache; 60 struct kmem_cache *t10_alua_lu_gp_cache; 61 struct kmem_cache *t10_alua_lu_gp_mem_cache; 62 struct kmem_cache *t10_alua_tg_pt_gp_cache; 63 struct kmem_cache *t10_alua_lba_map_cache; 64 struct kmem_cache *t10_alua_lba_map_mem_cache; 65 66 static void transport_complete_task_attr(struct se_cmd *cmd); 67 static void transport_handle_queue_full(struct se_cmd *cmd, 68 struct se_device *dev); 69 static int transport_put_cmd(struct se_cmd *cmd); 70 static void target_complete_ok_work(struct work_struct *work); 71 72 int init_se_kmem_caches(void) 73 { 74 se_sess_cache = kmem_cache_create("se_sess_cache", 75 sizeof(struct se_session), __alignof__(struct se_session), 76 0, NULL); 77 if (!se_sess_cache) { 78 pr_err("kmem_cache_create() for struct se_session" 79 " failed\n"); 80 goto out; 81 } 82 se_ua_cache = kmem_cache_create("se_ua_cache", 83 sizeof(struct se_ua), __alignof__(struct se_ua), 84 0, NULL); 85 if (!se_ua_cache) { 86 pr_err("kmem_cache_create() for struct se_ua failed\n"); 87 goto out_free_sess_cache; 88 } 89 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", 90 sizeof(struct t10_pr_registration), 91 __alignof__(struct t10_pr_registration), 0, NULL); 92 if (!t10_pr_reg_cache) { 93 pr_err("kmem_cache_create() for struct t10_pr_registration" 94 " failed\n"); 95 goto out_free_ua_cache; 96 } 97 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", 98 sizeof(struct t10_alua_lu_gp), __alignof__(struct 
t10_alua_lu_gp), 99 0, NULL); 100 if (!t10_alua_lu_gp_cache) { 101 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" 102 " failed\n"); 103 goto out_free_pr_reg_cache; 104 } 105 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", 106 sizeof(struct t10_alua_lu_gp_member), 107 __alignof__(struct t10_alua_lu_gp_member), 0, NULL); 108 if (!t10_alua_lu_gp_mem_cache) { 109 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" 110 "cache failed\n"); 111 goto out_free_lu_gp_cache; 112 } 113 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", 114 sizeof(struct t10_alua_tg_pt_gp), 115 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); 116 if (!t10_alua_tg_pt_gp_cache) { 117 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 118 "cache failed\n"); 119 goto out_free_lu_gp_mem_cache; 120 } 121 t10_alua_lba_map_cache = kmem_cache_create( 122 "t10_alua_lba_map_cache", 123 sizeof(struct t10_alua_lba_map), 124 __alignof__(struct t10_alua_lba_map), 0, NULL); 125 if (!t10_alua_lba_map_cache) { 126 pr_err("kmem_cache_create() for t10_alua_lba_map_" 127 "cache failed\n"); 128 goto out_free_tg_pt_gp_cache; 129 } 130 t10_alua_lba_map_mem_cache = kmem_cache_create( 131 "t10_alua_lba_map_mem_cache", 132 sizeof(struct t10_alua_lba_map_member), 133 __alignof__(struct t10_alua_lba_map_member), 0, NULL); 134 if (!t10_alua_lba_map_mem_cache) { 135 pr_err("kmem_cache_create() for t10_alua_lba_map_mem_" 136 "cache failed\n"); 137 goto out_free_lba_map_cache; 138 } 139 140 target_completion_wq = alloc_workqueue("target_completion", 141 WQ_MEM_RECLAIM, 0); 142 if (!target_completion_wq) 143 goto out_free_lba_map_mem_cache; 144 145 return 0; 146 147 out_free_lba_map_mem_cache: 148 kmem_cache_destroy(t10_alua_lba_map_mem_cache); 149 out_free_lba_map_cache: 150 kmem_cache_destroy(t10_alua_lba_map_cache); 151 out_free_tg_pt_gp_cache: 152 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 153 out_free_lu_gp_mem_cache: 154 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 155 out_free_lu_gp_cache: 156 kmem_cache_destroy(t10_alua_lu_gp_cache); 157 out_free_pr_reg_cache: 158 kmem_cache_destroy(t10_pr_reg_cache); 159 out_free_ua_cache: 160 kmem_cache_destroy(se_ua_cache); 161 out_free_sess_cache: 162 kmem_cache_destroy(se_sess_cache); 163 out: 164 return -ENOMEM; 165 } 166 167 void release_se_kmem_caches(void) 168 { 169 destroy_workqueue(target_completion_wq); 170 kmem_cache_destroy(se_sess_cache); 171 kmem_cache_destroy(se_ua_cache); 172 kmem_cache_destroy(t10_pr_reg_cache); 173 kmem_cache_destroy(t10_alua_lu_gp_cache); 174 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 175 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 176 kmem_cache_destroy(t10_alua_lba_map_cache); 177 kmem_cache_destroy(t10_alua_lba_map_mem_cache); 178 } 179 180 /* This code ensures unique mib indexes are handed out. 
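 *
 * Each scsi_index_t type gets its own counter in scsi_mib_index[] below,
 * protected by scsi_mib_index_lock, so scsi_get_new_index() hands out
 * 1, 2, 3, ... per type and values only repeat once the u32 counter wraps.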
*/ 181 static DEFINE_SPINLOCK(scsi_mib_index_lock); 182 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; 183 184 /* 185 * Allocate a new row index for the entry type specified 186 */ 187 u32 scsi_get_new_index(scsi_index_t type) 188 { 189 u32 new_index; 190 191 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); 192 193 spin_lock(&scsi_mib_index_lock); 194 new_index = ++scsi_mib_index[type]; 195 spin_unlock(&scsi_mib_index_lock); 196 197 return new_index; 198 } 199 200 void transport_subsystem_check_init(void) 201 { 202 int ret; 203 static int sub_api_initialized; 204 205 if (sub_api_initialized) 206 return; 207 208 ret = request_module("target_core_iblock"); 209 if (ret != 0) 210 pr_err("Unable to load target_core_iblock\n"); 211 212 ret = request_module("target_core_file"); 213 if (ret != 0) 214 pr_err("Unable to load target_core_file\n"); 215 216 ret = request_module("target_core_pscsi"); 217 if (ret != 0) 218 pr_err("Unable to load target_core_pscsi\n"); 219 220 ret = request_module("target_core_user"); 221 if (ret != 0) 222 pr_err("Unable to load target_core_user\n"); 223 224 sub_api_initialized = 1; 225 } 226 227 struct se_session *transport_init_session(enum target_prot_op sup_prot_ops) 228 { 229 struct se_session *se_sess; 230 231 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 232 if (!se_sess) { 233 pr_err("Unable to allocate struct se_session from" 234 " se_sess_cache\n"); 235 return ERR_PTR(-ENOMEM); 236 } 237 INIT_LIST_HEAD(&se_sess->sess_list); 238 INIT_LIST_HEAD(&se_sess->sess_acl_list); 239 INIT_LIST_HEAD(&se_sess->sess_cmd_list); 240 INIT_LIST_HEAD(&se_sess->sess_wait_list); 241 spin_lock_init(&se_sess->sess_cmd_lock); 242 se_sess->sup_prot_ops = sup_prot_ops; 243 244 return se_sess; 245 } 246 EXPORT_SYMBOL(transport_init_session); 247 248 int transport_alloc_session_tags(struct se_session *se_sess, 249 unsigned int tag_num, unsigned int tag_size) 250 { 251 int rc; 252 253 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, 254 GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 255 if (!se_sess->sess_cmd_map) { 256 se_sess->sess_cmd_map = vzalloc(tag_num * tag_size); 257 if (!se_sess->sess_cmd_map) { 258 pr_err("Unable to allocate se_sess->sess_cmd_map\n"); 259 return -ENOMEM; 260 } 261 } 262 263 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num); 264 if (rc < 0) { 265 pr_err("Unable to init se_sess->sess_tag_pool," 266 " tag_num: %u\n", tag_num); 267 kvfree(se_sess->sess_cmd_map); 268 se_sess->sess_cmd_map = NULL; 269 return -ENOMEM; 270 } 271 272 return 0; 273 } 274 EXPORT_SYMBOL(transport_alloc_session_tags); 275 276 struct se_session *transport_init_session_tags(unsigned int tag_num, 277 unsigned int tag_size, 278 enum target_prot_op sup_prot_ops) 279 { 280 struct se_session *se_sess; 281 int rc; 282 283 if (tag_num != 0 && !tag_size) { 284 pr_err("init_session_tags called with percpu-ida tag_num:" 285 " %u, but zero tag_size\n", tag_num); 286 return ERR_PTR(-EINVAL); 287 } 288 if (!tag_num && tag_size) { 289 pr_err("init_session_tags called with percpu-ida tag_size:" 290 " %u, but zero tag_num\n", tag_size); 291 return ERR_PTR(-EINVAL); 292 } 293 294 se_sess = transport_init_session(sup_prot_ops); 295 if (IS_ERR(se_sess)) 296 return se_sess; 297 298 rc = transport_alloc_session_tags(se_sess, tag_num, tag_size); 299 if (rc < 0) { 300 transport_free_session(se_sess); 301 return ERR_PTR(-ENOMEM); 302 } 303 304 return se_sess; 305 } 306 EXPORT_SYMBOL(transport_init_session_tags); 307 308 /* 309 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock 
called. 310 */ 311 void __transport_register_session( 312 struct se_portal_group *se_tpg, 313 struct se_node_acl *se_nacl, 314 struct se_session *se_sess, 315 void *fabric_sess_ptr) 316 { 317 const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo; 318 unsigned char buf[PR_REG_ISID_LEN]; 319 320 se_sess->se_tpg = se_tpg; 321 se_sess->fabric_sess_ptr = fabric_sess_ptr; 322 /* 323 * Used by struct se_node_acl's under ConfigFS to locate active se_session-t 324 * 325 * Only set for struct se_session's that will actually be moving I/O. 326 * eg: *NOT* discovery sessions. 327 */ 328 if (se_nacl) { 329 /* 330 * 331 * Determine if fabric allows for T10-PI feature bits exposed to 332 * initiators for device backends with !dev->dev_attrib.pi_prot_type. 333 * 334 * If so, then always save prot_type on a per se_node_acl node 335 * basis and re-instate the previous sess_prot_type to avoid 336 * disabling PI from below any previously initiator side 337 * registered LUNs. 338 */ 339 if (se_nacl->saved_prot_type) 340 se_sess->sess_prot_type = se_nacl->saved_prot_type; 341 else if (tfo->tpg_check_prot_fabric_only) 342 se_sess->sess_prot_type = se_nacl->saved_prot_type = 343 tfo->tpg_check_prot_fabric_only(se_tpg); 344 /* 345 * If the fabric module supports an ISID based TransportID, 346 * save this value in binary from the fabric I_T Nexus now. 347 */ 348 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { 349 memset(&buf[0], 0, PR_REG_ISID_LEN); 350 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, 351 &buf[0], PR_REG_ISID_LEN); 352 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); 353 } 354 355 spin_lock_irq(&se_nacl->nacl_sess_lock); 356 /* 357 * The se_nacl->nacl_sess pointer will be set to the 358 * last active I_T Nexus for each struct se_node_acl. 359 */ 360 se_nacl->nacl_sess = se_sess; 361 362 list_add_tail(&se_sess->sess_acl_list, 363 &se_nacl->acl_sess_list); 364 spin_unlock_irq(&se_nacl->nacl_sess_lock); 365 } 366 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 367 368 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 369 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); 370 } 371 EXPORT_SYMBOL(__transport_register_session); 372 373 void transport_register_session( 374 struct se_portal_group *se_tpg, 375 struct se_node_acl *se_nacl, 376 struct se_session *se_sess, 377 void *fabric_sess_ptr) 378 { 379 unsigned long flags; 380 381 spin_lock_irqsave(&se_tpg->session_lock, flags); 382 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); 383 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 384 } 385 EXPORT_SYMBOL(transport_register_session); 386 387 struct se_session * 388 target_alloc_session(struct se_portal_group *tpg, 389 unsigned int tag_num, unsigned int tag_size, 390 enum target_prot_op prot_op, 391 const char *initiatorname, void *private, 392 int (*callback)(struct se_portal_group *, 393 struct se_session *, void *)) 394 { 395 struct se_session *sess; 396 397 /* 398 * If the fabric driver is using percpu-ida based pre allocation 399 * of I/O descriptor tags, go ahead and perform that setup now.. 
400 */ 401 if (tag_num != 0) 402 sess = transport_init_session_tags(tag_num, tag_size, prot_op); 403 else 404 sess = transport_init_session(prot_op); 405 406 if (IS_ERR(sess)) 407 return sess; 408 409 sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg, 410 (unsigned char *)initiatorname); 411 if (!sess->se_node_acl) { 412 transport_free_session(sess); 413 return ERR_PTR(-EACCES); 414 } 415 /* 416 * Go ahead and perform any remaining fabric setup that is 417 * required before transport_register_session(). 418 */ 419 if (callback != NULL) { 420 int rc = callback(tpg, sess, private); 421 if (rc) { 422 transport_free_session(sess); 423 return ERR_PTR(rc); 424 } 425 } 426 427 transport_register_session(tpg, sess->se_node_acl, sess, private); 428 return sess; 429 } 430 EXPORT_SYMBOL(target_alloc_session); 431 432 ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) 433 { 434 struct se_session *se_sess; 435 ssize_t len = 0; 436 437 spin_lock_bh(&se_tpg->session_lock); 438 list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { 439 if (!se_sess->se_node_acl) 440 continue; 441 if (!se_sess->se_node_acl->dynamic_node_acl) 442 continue; 443 if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE) 444 break; 445 446 len += snprintf(page + len, PAGE_SIZE - len, "%s\n", 447 se_sess->se_node_acl->initiatorname); 448 len += 1; /* Include NULL terminator */ 449 } 450 spin_unlock_bh(&se_tpg->session_lock); 451 452 return len; 453 } 454 EXPORT_SYMBOL(target_show_dynamic_sessions); 455 456 static void target_complete_nacl(struct kref *kref) 457 { 458 struct se_node_acl *nacl = container_of(kref, 459 struct se_node_acl, acl_kref); 460 461 complete(&nacl->acl_free_comp); 462 } 463 464 void target_put_nacl(struct se_node_acl *nacl) 465 { 466 kref_put(&nacl->acl_kref, target_complete_nacl); 467 } 468 EXPORT_SYMBOL(target_put_nacl); 469 470 void transport_deregister_session_configfs(struct se_session *se_sess) 471 { 472 struct se_node_acl *se_nacl; 473 unsigned long flags; 474 /* 475 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 476 */ 477 se_nacl = se_sess->se_node_acl; 478 if (se_nacl) { 479 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 480 if (!list_empty(&se_sess->sess_acl_list)) 481 list_del_init(&se_sess->sess_acl_list); 482 /* 483 * If the session list is empty, then clear the pointer. 484 * Otherwise, set the struct se_session pointer from the tail 485 * element of the per struct se_node_acl active session list. 486 */ 487 if (list_empty(&se_nacl->acl_sess_list)) 488 se_nacl->nacl_sess = NULL; 489 else { 490 se_nacl->nacl_sess = container_of( 491 se_nacl->acl_sess_list.prev, 492 struct se_session, sess_acl_list); 493 } 494 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 495 } 496 } 497 EXPORT_SYMBOL(transport_deregister_session_configfs); 498 499 void transport_free_session(struct se_session *se_sess) 500 { 501 struct se_node_acl *se_nacl = se_sess->se_node_acl; 502 /* 503 * Drop the se_node_acl->nacl_kref obtained from within 504 * core_tpg_get_initiator_node_acl(). 
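 *
 * Teardown order below: drop the nacl reference first, then tear down the
 * percpu_ida tag pool and kvfree() the sess_cmd_map if this session was
 * created with pre-allocated tags, and finally return the se_session to
 * se_sess_cache.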
505 */ 506 if (se_nacl) { 507 se_sess->se_node_acl = NULL; 508 target_put_nacl(se_nacl); 509 } 510 if (se_sess->sess_cmd_map) { 511 percpu_ida_destroy(&se_sess->sess_tag_pool); 512 kvfree(se_sess->sess_cmd_map); 513 } 514 kmem_cache_free(se_sess_cache, se_sess); 515 } 516 EXPORT_SYMBOL(transport_free_session); 517 518 void transport_deregister_session(struct se_session *se_sess) 519 { 520 struct se_portal_group *se_tpg = se_sess->se_tpg; 521 const struct target_core_fabric_ops *se_tfo; 522 struct se_node_acl *se_nacl; 523 unsigned long flags; 524 bool drop_nacl = false; 525 526 if (!se_tpg) { 527 transport_free_session(se_sess); 528 return; 529 } 530 se_tfo = se_tpg->se_tpg_tfo; 531 532 spin_lock_irqsave(&se_tpg->session_lock, flags); 533 list_del(&se_sess->sess_list); 534 se_sess->se_tpg = NULL; 535 se_sess->fabric_sess_ptr = NULL; 536 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 537 538 /* 539 * Determine if we need to do extra work for this initiator node's 540 * struct se_node_acl if it had been previously dynamically generated. 541 */ 542 se_nacl = se_sess->se_node_acl; 543 544 mutex_lock(&se_tpg->acl_node_mutex); 545 if (se_nacl && se_nacl->dynamic_node_acl) { 546 if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 547 list_del(&se_nacl->acl_list); 548 drop_nacl = true; 549 } 550 } 551 mutex_unlock(&se_tpg->acl_node_mutex); 552 553 if (drop_nacl) { 554 core_tpg_wait_for_nacl_pr_ref(se_nacl); 555 core_free_device_list_for_node(se_nacl, se_tpg); 556 se_sess->se_node_acl = NULL; 557 kfree(se_nacl); 558 } 559 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 560 se_tpg->se_tpg_tfo->get_fabric_name()); 561 /* 562 * If last kref is dropping now for an explicit NodeACL, awake sleeping 563 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 564 * removal context from within transport_free_session() code. 565 */ 566 567 transport_free_session(se_sess); 568 } 569 EXPORT_SYMBOL(transport_deregister_session); 570 571 static void target_remove_from_state_list(struct se_cmd *cmd) 572 { 573 struct se_device *dev = cmd->se_dev; 574 unsigned long flags; 575 576 if (!dev) 577 return; 578 579 if (cmd->transport_state & CMD_T_BUSY) 580 return; 581 582 spin_lock_irqsave(&dev->execute_task_lock, flags); 583 if (cmd->state_active) { 584 list_del(&cmd->state_list); 585 cmd->state_active = false; 586 } 587 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 588 } 589 590 static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, 591 bool write_pending) 592 { 593 unsigned long flags; 594 595 if (remove_from_lists) { 596 target_remove_from_state_list(cmd); 597 598 /* 599 * Clear struct se_cmd->se_lun before the handoff to FE. 600 */ 601 cmd->se_lun = NULL; 602 } 603 604 spin_lock_irqsave(&cmd->t_state_lock, flags); 605 if (write_pending) 606 cmd->t_state = TRANSPORT_WRITE_PENDING; 607 608 /* 609 * Determine if frontend context caller is requesting the stopping of 610 * this command for frontend exceptions. 611 */ 612 if (cmd->transport_state & CMD_T_STOP) { 613 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 614 __func__, __LINE__, cmd->tag); 615 616 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 617 618 complete_all(&cmd->t_transport_stop_comp); 619 return 1; 620 } 621 622 cmd->transport_state &= ~CMD_T_ACTIVE; 623 if (remove_from_lists) { 624 /* 625 * Some fabric modules like tcm_loop can release 626 * their internally allocated I/O reference now and 627 * struct se_cmd now. 
628 * 629 * Fabric modules are expected to return '1' here if the 630 * se_cmd being passed is released at this point, 631 * or zero if not being released. 632 */ 633 if (cmd->se_tfo->check_stop_free != NULL) { 634 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 635 return cmd->se_tfo->check_stop_free(cmd); 636 } 637 } 638 639 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 640 return 0; 641 } 642 643 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) 644 { 645 return transport_cmd_check_stop(cmd, true, false); 646 } 647 648 static void transport_lun_remove_cmd(struct se_cmd *cmd) 649 { 650 struct se_lun *lun = cmd->se_lun; 651 652 if (!lun) 653 return; 654 655 if (cmpxchg(&cmd->lun_ref_active, true, false)) 656 percpu_ref_put(&lun->lun_ref); 657 } 658 659 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 660 { 661 bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); 662 663 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) 664 transport_lun_remove_cmd(cmd); 665 /* 666 * Allow the fabric driver to unmap any resources before 667 * releasing the descriptor via TFO->release_cmd() 668 */ 669 if (remove) 670 cmd->se_tfo->aborted_task(cmd); 671 672 if (transport_cmd_check_stop_to_fabric(cmd)) 673 return; 674 if (remove && ack_kref) 675 transport_put_cmd(cmd); 676 } 677 678 static void target_complete_failure_work(struct work_struct *work) 679 { 680 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 681 682 transport_generic_request_failure(cmd, 683 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 684 } 685 686 /* 687 * Used when asking transport to copy Sense Data from the underlying 688 * Linux/SCSI struct scsi_cmnd 689 */ 690 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) 691 { 692 struct se_device *dev = cmd->se_dev; 693 694 WARN_ON(!cmd->se_lun); 695 696 if (!dev) 697 return NULL; 698 699 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) 700 return NULL; 701 702 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 703 704 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", 705 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); 706 return cmd->sense_buffer; 707 } 708 709 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 710 { 711 struct se_device *dev = cmd->se_dev; 712 int success = scsi_status == GOOD; 713 unsigned long flags; 714 715 cmd->scsi_status = scsi_status; 716 717 718 spin_lock_irqsave(&cmd->t_state_lock, flags); 719 cmd->transport_state &= ~CMD_T_BUSY; 720 721 if (dev && dev->transport->transport_complete) { 722 dev->transport->transport_complete(cmd, 723 cmd->t_data_sg, 724 transport_get_sense_buffer(cmd)); 725 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 726 success = 1; 727 } 728 729 /* 730 * Check for case where an explicit ABORT_TASK has been received 731 * and transport_wait_for_tasks() will be waiting for completion.. 
732	 */
733	if (cmd->transport_state & CMD_T_ABORTED ||
734	    cmd->transport_state & CMD_T_STOP) {
735		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
736		complete_all(&cmd->t_transport_stop_comp);
737		return;
738	} else if (!success) {
739		INIT_WORK(&cmd->work, target_complete_failure_work);
740	} else {
741		INIT_WORK(&cmd->work, target_complete_ok_work);
742	}
743
744	cmd->t_state = TRANSPORT_COMPLETE;
745	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
746	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
747
748	if (cmd->se_cmd_flags & SCF_USE_CPUID)
749		queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
750	else
751		queue_work(target_completion_wq, &cmd->work);
752 }
753 EXPORT_SYMBOL(target_complete_cmd);
754
755 void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
756 {
757	if (scsi_status != SAM_STAT_GOOD) {
758		target_complete_cmd(cmd, scsi_status);
759		return;
760	}
761	/*
762	 * Calculate new residual count based upon length of SCSI data
763	 * transferred.
764	 */
765	if (length < cmd->data_length) {
766		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
767			cmd->residual_count += cmd->data_length - length;
768		} else {
769			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
770			cmd->residual_count = cmd->data_length - length;
771		}
772
773		cmd->data_length = length;
774	} else if (length > cmd->data_length) {
775		cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
776		cmd->residual_count = length - cmd->data_length;
777	} else {
778		cmd->se_cmd_flags &= ~(SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT);
779		cmd->residual_count = 0;
780	}
781
782	target_complete_cmd(cmd, scsi_status);
783 }
784 EXPORT_SYMBOL(target_complete_cmd_with_length);
785
786 static void target_add_to_state_list(struct se_cmd *cmd)
787 {
788	struct se_device *dev = cmd->se_dev;
789	unsigned long flags;
790
791	spin_lock_irqsave(&dev->execute_task_lock, flags);
792	if (!cmd->state_active) {
793		list_add_tail(&cmd->state_list, &dev->state_list);
794		cmd->state_active = true;
795	}
796	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
797 }
798
799 /*
800  * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
801  */
802 static void transport_write_pending_qf(struct se_cmd *cmd);
803 static void transport_complete_qf(struct se_cmd *cmd);
804
805 void target_qf_do_work(struct work_struct *work)
806 {
807	struct se_device *dev = container_of(work, struct se_device,
808			qf_work_queue);
809	LIST_HEAD(qf_cmd_list);
810	struct se_cmd *cmd, *cmd_tmp;
811
812	spin_lock_irq(&dev->qf_cmd_lock);
813	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
814	spin_unlock_irq(&dev->qf_cmd_lock);
815
816	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
817		list_del(&cmd->se_qf_node);
818		atomic_dec_mb(&dev->dev_qf_count);
819
820		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
821			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
822			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
823			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ?
"WRITE_PENDING" 824 : "UNKNOWN"); 825 826 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 827 transport_write_pending_qf(cmd); 828 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) 829 transport_complete_qf(cmd); 830 } 831 } 832 833 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 834 { 835 switch (cmd->data_direction) { 836 case DMA_NONE: 837 return "NONE"; 838 case DMA_FROM_DEVICE: 839 return "READ"; 840 case DMA_TO_DEVICE: 841 return "WRITE"; 842 case DMA_BIDIRECTIONAL: 843 return "BIDI"; 844 default: 845 break; 846 } 847 848 return "UNKNOWN"; 849 } 850 851 void transport_dump_dev_state( 852 struct se_device *dev, 853 char *b, 854 int *bl) 855 { 856 *bl += sprintf(b + *bl, "Status: "); 857 if (dev->export_count) 858 *bl += sprintf(b + *bl, "ACTIVATED"); 859 else 860 *bl += sprintf(b + *bl, "DEACTIVATED"); 861 862 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); 863 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", 864 dev->dev_attrib.block_size, 865 dev->dev_attrib.hw_max_sectors); 866 *bl += sprintf(b + *bl, " "); 867 } 868 869 void transport_dump_vpd_proto_id( 870 struct t10_vpd *vpd, 871 unsigned char *p_buf, 872 int p_buf_len) 873 { 874 unsigned char buf[VPD_TMP_BUF_SIZE]; 875 int len; 876 877 memset(buf, 0, VPD_TMP_BUF_SIZE); 878 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 879 880 switch (vpd->protocol_identifier) { 881 case 0x00: 882 sprintf(buf+len, "Fibre Channel\n"); 883 break; 884 case 0x10: 885 sprintf(buf+len, "Parallel SCSI\n"); 886 break; 887 case 0x20: 888 sprintf(buf+len, "SSA\n"); 889 break; 890 case 0x30: 891 sprintf(buf+len, "IEEE 1394\n"); 892 break; 893 case 0x40: 894 sprintf(buf+len, "SCSI Remote Direct Memory Access" 895 " Protocol\n"); 896 break; 897 case 0x50: 898 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 899 break; 900 case 0x60: 901 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 902 break; 903 case 0x70: 904 sprintf(buf+len, "Automation/Drive Interface Transport" 905 " Protocol\n"); 906 break; 907 case 0x80: 908 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 909 break; 910 default: 911 sprintf(buf+len, "Unknown 0x%02x\n", 912 vpd->protocol_identifier); 913 break; 914 } 915 916 if (p_buf) 917 strncpy(p_buf, buf, p_buf_len); 918 else 919 pr_debug("%s", buf); 920 } 921 922 void 923 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 924 { 925 /* 926 * Check if the Protocol Identifier Valid (PIV) bit is set.. 
927 * 928 * from spc3r23.pdf section 7.5.1 929 */ 930 if (page_83[1] & 0x80) { 931 vpd->protocol_identifier = (page_83[0] & 0xf0); 932 vpd->protocol_identifier_set = 1; 933 transport_dump_vpd_proto_id(vpd, NULL, 0); 934 } 935 } 936 EXPORT_SYMBOL(transport_set_vpd_proto_id); 937 938 int transport_dump_vpd_assoc( 939 struct t10_vpd *vpd, 940 unsigned char *p_buf, 941 int p_buf_len) 942 { 943 unsigned char buf[VPD_TMP_BUF_SIZE]; 944 int ret = 0; 945 int len; 946 947 memset(buf, 0, VPD_TMP_BUF_SIZE); 948 len = sprintf(buf, "T10 VPD Identifier Association: "); 949 950 switch (vpd->association) { 951 case 0x00: 952 sprintf(buf+len, "addressed logical unit\n"); 953 break; 954 case 0x10: 955 sprintf(buf+len, "target port\n"); 956 break; 957 case 0x20: 958 sprintf(buf+len, "SCSI target device\n"); 959 break; 960 default: 961 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); 962 ret = -EINVAL; 963 break; 964 } 965 966 if (p_buf) 967 strncpy(p_buf, buf, p_buf_len); 968 else 969 pr_debug("%s", buf); 970 971 return ret; 972 } 973 974 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) 975 { 976 /* 977 * The VPD identification association.. 978 * 979 * from spc3r23.pdf Section 7.6.3.1 Table 297 980 */ 981 vpd->association = (page_83[1] & 0x30); 982 return transport_dump_vpd_assoc(vpd, NULL, 0); 983 } 984 EXPORT_SYMBOL(transport_set_vpd_assoc); 985 986 int transport_dump_vpd_ident_type( 987 struct t10_vpd *vpd, 988 unsigned char *p_buf, 989 int p_buf_len) 990 { 991 unsigned char buf[VPD_TMP_BUF_SIZE]; 992 int ret = 0; 993 int len; 994 995 memset(buf, 0, VPD_TMP_BUF_SIZE); 996 len = sprintf(buf, "T10 VPD Identifier Type: "); 997 998 switch (vpd->device_identifier_type) { 999 case 0x00: 1000 sprintf(buf+len, "Vendor specific\n"); 1001 break; 1002 case 0x01: 1003 sprintf(buf+len, "T10 Vendor ID based\n"); 1004 break; 1005 case 0x02: 1006 sprintf(buf+len, "EUI-64 based\n"); 1007 break; 1008 case 0x03: 1009 sprintf(buf+len, "NAA\n"); 1010 break; 1011 case 0x04: 1012 sprintf(buf+len, "Relative target port identifier\n"); 1013 break; 1014 case 0x08: 1015 sprintf(buf+len, "SCSI name string\n"); 1016 break; 1017 default: 1018 sprintf(buf+len, "Unsupported: 0x%02x\n", 1019 vpd->device_identifier_type); 1020 ret = -EINVAL; 1021 break; 1022 } 1023 1024 if (p_buf) { 1025 if (p_buf_len < strlen(buf)+1) 1026 return -EINVAL; 1027 strncpy(p_buf, buf, p_buf_len); 1028 } else { 1029 pr_debug("%s", buf); 1030 } 1031 1032 return ret; 1033 } 1034 1035 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) 1036 { 1037 /* 1038 * The VPD identifier type.. 
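 *
 * The designator type is the low nibble of byte 1 of the descriptor; the
 * values stored here correspond to the strings printed by
 * transport_dump_vpd_ident_type() above (e.g. 0x02 EUI-64 based, 0x03 NAA,
 * 0x08 SCSI name string).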
1039 * 1040 * from spc3r23.pdf Section 7.6.3.1 Table 298 1041 */ 1042 vpd->device_identifier_type = (page_83[1] & 0x0f); 1043 return transport_dump_vpd_ident_type(vpd, NULL, 0); 1044 } 1045 EXPORT_SYMBOL(transport_set_vpd_ident_type); 1046 1047 int transport_dump_vpd_ident( 1048 struct t10_vpd *vpd, 1049 unsigned char *p_buf, 1050 int p_buf_len) 1051 { 1052 unsigned char buf[VPD_TMP_BUF_SIZE]; 1053 int ret = 0; 1054 1055 memset(buf, 0, VPD_TMP_BUF_SIZE); 1056 1057 switch (vpd->device_identifier_code_set) { 1058 case 0x01: /* Binary */ 1059 snprintf(buf, sizeof(buf), 1060 "T10 VPD Binary Device Identifier: %s\n", 1061 &vpd->device_identifier[0]); 1062 break; 1063 case 0x02: /* ASCII */ 1064 snprintf(buf, sizeof(buf), 1065 "T10 VPD ASCII Device Identifier: %s\n", 1066 &vpd->device_identifier[0]); 1067 break; 1068 case 0x03: /* UTF-8 */ 1069 snprintf(buf, sizeof(buf), 1070 "T10 VPD UTF-8 Device Identifier: %s\n", 1071 &vpd->device_identifier[0]); 1072 break; 1073 default: 1074 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" 1075 " 0x%02x", vpd->device_identifier_code_set); 1076 ret = -EINVAL; 1077 break; 1078 } 1079 1080 if (p_buf) 1081 strncpy(p_buf, buf, p_buf_len); 1082 else 1083 pr_debug("%s", buf); 1084 1085 return ret; 1086 } 1087 1088 int 1089 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) 1090 { 1091 static const char hex_str[] = "0123456789abcdef"; 1092 int j = 0, i = 4; /* offset to start of the identifier */ 1093 1094 /* 1095 * The VPD Code Set (encoding) 1096 * 1097 * from spc3r23.pdf Section 7.6.3.1 Table 296 1098 */ 1099 vpd->device_identifier_code_set = (page_83[0] & 0x0f); 1100 switch (vpd->device_identifier_code_set) { 1101 case 0x01: /* Binary */ 1102 vpd->device_identifier[j++] = 1103 hex_str[vpd->device_identifier_type]; 1104 while (i < (4 + page_83[3])) { 1105 vpd->device_identifier[j++] = 1106 hex_str[(page_83[i] & 0xf0) >> 4]; 1107 vpd->device_identifier[j++] = 1108 hex_str[page_83[i] & 0x0f]; 1109 i++; 1110 } 1111 break; 1112 case 0x02: /* ASCII */ 1113 case 0x03: /* UTF-8 */ 1114 while (i < (4 + page_83[3])) 1115 vpd->device_identifier[j++] = page_83[i++]; 1116 break; 1117 default: 1118 break; 1119 } 1120 1121 return transport_dump_vpd_ident(vpd, NULL, 0); 1122 } 1123 EXPORT_SYMBOL(transport_set_vpd_ident); 1124 1125 static sense_reason_t 1126 target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev, 1127 unsigned int size) 1128 { 1129 u32 mtl; 1130 1131 if (!cmd->se_tfo->max_data_sg_nents) 1132 return TCM_NO_SENSE; 1133 /* 1134 * Check if fabric enforced maximum SGL entries per I/O descriptor 1135 * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT + 1136 * residual_count and reduce original cmd->data_length to maximum 1137 * length based on single PAGE_SIZE entry scatter-lists. 1138 */ 1139 mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE); 1140 if (cmd->data_length > mtl) { 1141 /* 1142 * If an existing CDB overflow is present, calculate new residual 1143 * based on CDB size minus fabric maximum transfer length. 1144 * 1145 * If an existing CDB underflow is present, calculate new residual 1146 * based on original cmd->data_length minus fabric maximum transfer 1147 * length. 1148 * 1149 * Otherwise, set the underflow residual based on cmd->data_length 1150 * minus fabric maximum transfer length. 
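 *
 * Worked example with illustrative numbers only: with 4K pages and
 * ->max_data_sg_nents = 32, mtl is 128K. A 256K READ arriving with neither
 * OVERFLOW nor UNDERFLOW set leaves here with SCF_UNDERFLOW_BIT set,
 * residual_count = 128K and data_length truncated to 128K.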
1151 */ 1152 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1153 cmd->residual_count = (size - mtl); 1154 } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 1155 u32 orig_dl = size + cmd->residual_count; 1156 cmd->residual_count = (orig_dl - mtl); 1157 } else { 1158 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1159 cmd->residual_count = (cmd->data_length - mtl); 1160 } 1161 cmd->data_length = mtl; 1162 /* 1163 * Reset sbc_check_prot() calculated protection payload 1164 * length based upon the new smaller MTL. 1165 */ 1166 if (cmd->prot_length) { 1167 u32 sectors = (mtl / dev->dev_attrib.block_size); 1168 cmd->prot_length = dev->prot_length * sectors; 1169 } 1170 } 1171 return TCM_NO_SENSE; 1172 } 1173 1174 sense_reason_t 1175 target_cmd_size_check(struct se_cmd *cmd, unsigned int size) 1176 { 1177 struct se_device *dev = cmd->se_dev; 1178 1179 if (cmd->unknown_data_length) { 1180 cmd->data_length = size; 1181 } else if (size != cmd->data_length) { 1182 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" 1183 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 1184 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 1185 cmd->data_length, size, cmd->t_task_cdb[0]); 1186 1187 if (cmd->data_direction == DMA_TO_DEVICE && 1188 cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1189 pr_err("Rejecting underflow/overflow WRITE data\n"); 1190 return TCM_INVALID_CDB_FIELD; 1191 } 1192 /* 1193 * Reject READ_* or WRITE_* with overflow/underflow for 1194 * type SCF_SCSI_DATA_CDB. 1195 */ 1196 if (dev->dev_attrib.block_size != 512) { 1197 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 1198 " CDB on non 512-byte sector setup subsystem" 1199 " plugin: %s\n", dev->transport->name); 1200 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ 1201 return TCM_INVALID_CDB_FIELD; 1202 } 1203 /* 1204 * For the overflow case keep the existing fabric provided 1205 * ->data_length. Otherwise for the underflow case, reset 1206 * ->data_length to the smaller SCSI expected data transfer 1207 * length. 1208 */ 1209 if (size > cmd->data_length) { 1210 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; 1211 cmd->residual_count = (size - cmd->data_length); 1212 } else { 1213 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1214 cmd->residual_count = (cmd->data_length - size); 1215 cmd->data_length = size; 1216 } 1217 } 1218 1219 return target_check_max_data_sg_nents(cmd, dev, size); 1220 1221 } 1222 1223 /* 1224 * Used by fabric modules containing a local struct se_cmd within their 1225 * fabric dependent per I/O descriptor. 1226 * 1227 * Preserves the value of @cmd->tag. 
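 *
 * Note that this only sets up the generic se_cmd state (lists, completions,
 * t_state_lock and the initial cmd_kref reference) plus the caller-provided
 * fields; LUN lookup and CDB setup happen later via
 * transport_lookup_cmd_lun() and target_setup_cmd_from_cdb().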
1228 */ 1229 void transport_init_se_cmd( 1230 struct se_cmd *cmd, 1231 const struct target_core_fabric_ops *tfo, 1232 struct se_session *se_sess, 1233 u32 data_length, 1234 int data_direction, 1235 int task_attr, 1236 unsigned char *sense_buffer) 1237 { 1238 INIT_LIST_HEAD(&cmd->se_delayed_node); 1239 INIT_LIST_HEAD(&cmd->se_qf_node); 1240 INIT_LIST_HEAD(&cmd->se_cmd_list); 1241 INIT_LIST_HEAD(&cmd->state_list); 1242 init_completion(&cmd->t_transport_stop_comp); 1243 init_completion(&cmd->cmd_wait_comp); 1244 spin_lock_init(&cmd->t_state_lock); 1245 kref_init(&cmd->cmd_kref); 1246 cmd->transport_state = CMD_T_DEV_ACTIVE; 1247 1248 cmd->se_tfo = tfo; 1249 cmd->se_sess = se_sess; 1250 cmd->data_length = data_length; 1251 cmd->data_direction = data_direction; 1252 cmd->sam_task_attr = task_attr; 1253 cmd->sense_buffer = sense_buffer; 1254 1255 cmd->state_active = false; 1256 } 1257 EXPORT_SYMBOL(transport_init_se_cmd); 1258 1259 static sense_reason_t 1260 transport_check_alloc_task_attr(struct se_cmd *cmd) 1261 { 1262 struct se_device *dev = cmd->se_dev; 1263 1264 /* 1265 * Check if SAM Task Attribute emulation is enabled for this 1266 * struct se_device storage object 1267 */ 1268 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1269 return 0; 1270 1271 if (cmd->sam_task_attr == TCM_ACA_TAG) { 1272 pr_debug("SAM Task Attribute ACA" 1273 " emulation is not supported\n"); 1274 return TCM_INVALID_CDB_FIELD; 1275 } 1276 1277 return 0; 1278 } 1279 1280 sense_reason_t 1281 target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) 1282 { 1283 struct se_device *dev = cmd->se_dev; 1284 sense_reason_t ret; 1285 1286 /* 1287 * Ensure that the received CDB is less than the max (252 + 8) bytes 1288 * for VARIABLE_LENGTH_CMD 1289 */ 1290 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { 1291 pr_err("Received SCSI CDB with command_size: %d that" 1292 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1293 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1294 return TCM_INVALID_CDB_FIELD; 1295 } 1296 /* 1297 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, 1298 * allocate the additional extended CDB buffer now.. Otherwise 1299 * setup the pointer from __t_task_cdb to t_task_cdb. 1300 */ 1301 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { 1302 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), 1303 GFP_KERNEL); 1304 if (!cmd->t_task_cdb) { 1305 pr_err("Unable to allocate cmd->t_task_cdb" 1306 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1307 scsi_command_size(cdb), 1308 (unsigned long)sizeof(cmd->__t_task_cdb)); 1309 return TCM_OUT_OF_RESOURCES; 1310 } 1311 } else 1312 cmd->t_task_cdb = &cmd->__t_task_cdb[0]; 1313 /* 1314 * Copy the original CDB into cmd-> 1315 */ 1316 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); 1317 1318 trace_target_sequencer_start(cmd); 1319 1320 ret = dev->transport->parse_cdb(cmd); 1321 if (ret == TCM_UNSUPPORTED_SCSI_OPCODE) 1322 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", 1323 cmd->se_tfo->get_fabric_name(), 1324 cmd->se_sess->se_node_acl->initiatorname, 1325 cmd->t_task_cdb[0]); 1326 if (ret) 1327 return ret; 1328 1329 ret = transport_check_alloc_task_attr(cmd); 1330 if (ret) 1331 return ret; 1332 1333 cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 1334 atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus); 1335 return 0; 1336 } 1337 EXPORT_SYMBOL(target_setup_cmd_from_cdb); 1338 1339 /* 1340 * Used by fabric module frontends to queue tasks directly. 
1341 * May only be used from process context. 1342 */ 1343 int transport_handle_cdb_direct( 1344 struct se_cmd *cmd) 1345 { 1346 sense_reason_t ret; 1347 1348 if (!cmd->se_lun) { 1349 dump_stack(); 1350 pr_err("cmd->se_lun is NULL\n"); 1351 return -EINVAL; 1352 } 1353 if (in_interrupt()) { 1354 dump_stack(); 1355 pr_err("transport_generic_handle_cdb cannot be called" 1356 " from interrupt context\n"); 1357 return -EINVAL; 1358 } 1359 /* 1360 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that 1361 * outstanding descriptors are handled correctly during shutdown via 1362 * transport_wait_for_tasks() 1363 * 1364 * Also, we don't take cmd->t_state_lock here as we only expect 1365 * this to be called for initial descriptor submission. 1366 */ 1367 cmd->t_state = TRANSPORT_NEW_CMD; 1368 cmd->transport_state |= CMD_T_ACTIVE; 1369 1370 /* 1371 * transport_generic_new_cmd() is already handling QUEUE_FULL, 1372 * so follow TRANSPORT_NEW_CMD processing thread context usage 1373 * and call transport_generic_request_failure() if necessary.. 1374 */ 1375 ret = transport_generic_new_cmd(cmd); 1376 if (ret) 1377 transport_generic_request_failure(cmd, ret); 1378 return 0; 1379 } 1380 EXPORT_SYMBOL(transport_handle_cdb_direct); 1381 1382 sense_reason_t 1383 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 1384 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1385 { 1386 if (!sgl || !sgl_count) 1387 return 0; 1388 1389 /* 1390 * Reject SCSI data overflow with map_mem_to_cmd() as incoming 1391 * scatterlists already have been set to follow what the fabric 1392 * passes for the original expected data transfer length. 1393 */ 1394 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1395 pr_warn("Rejecting SCSI DATA overflow for fabric using" 1396 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); 1397 return TCM_INVALID_CDB_FIELD; 1398 } 1399 1400 cmd->t_data_sg = sgl; 1401 cmd->t_data_nents = sgl_count; 1402 cmd->t_bidi_data_sg = sgl_bidi; 1403 cmd->t_bidi_data_nents = sgl_bidi_count; 1404 1405 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1406 return 0; 1407 } 1408 1409 /* 1410 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized 1411 * se_cmd + use pre-allocated SGL memory. 1412 * 1413 * @se_cmd: command descriptor to submit 1414 * @se_sess: associated se_sess for endpoint 1415 * @cdb: pointer to SCSI CDB 1416 * @sense: pointer to SCSI sense buffer 1417 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1418 * @data_length: fabric expected data transfer length 1419 * @task_addr: SAM task attribute 1420 * @data_dir: DMA data direction 1421 * @flags: flags for command submission from target_sc_flags_tables 1422 * @sgl: struct scatterlist memory for unidirectional mapping 1423 * @sgl_count: scatterlist count for unidirectional mapping 1424 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping 1425 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping 1426 * @sgl_prot: struct scatterlist memory protection information 1427 * @sgl_prot_count: scatterlist count for protection information 1428 * 1429 * Task tags are supported if the caller has set @se_cmd->tag. 1430 * 1431 * Returns non zero to signal active I/O shutdown failure. All other 1432 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1433 * but still return zero here. 1434 * 1435 * This may only be called from process context, and also currently 1436 * assumes internal allocation of fabric payload buffer by target-core. 
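 *
 * Purely illustrative sketch (fabric-side names are hypothetical): a READ
 * backed by caller-provided SGLs might be submitted as
 *
 *	rc = target_submit_cmd_map_sgls(&io->se_cmd, sess, cdb, sense, lun,
 *					len, TCM_SIMPLE_TAG, DMA_FROM_DEVICE,
 *					TARGET_SCF_ACK_KREF, sgl, sgl_nents,
 *					NULL, 0, NULL, 0);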
1437 */ 1438 int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess, 1439 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, 1440 u32 data_length, int task_attr, int data_dir, int flags, 1441 struct scatterlist *sgl, u32 sgl_count, 1442 struct scatterlist *sgl_bidi, u32 sgl_bidi_count, 1443 struct scatterlist *sgl_prot, u32 sgl_prot_count) 1444 { 1445 struct se_portal_group *se_tpg; 1446 sense_reason_t rc; 1447 int ret; 1448 1449 se_tpg = se_sess->se_tpg; 1450 BUG_ON(!se_tpg); 1451 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); 1452 BUG_ON(in_interrupt()); 1453 /* 1454 * Initialize se_cmd for target operation. From this point 1455 * exceptions are handled by sending exception status via 1456 * target_core_fabric_ops->queue_status() callback 1457 */ 1458 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1459 data_length, data_dir, task_attr, sense); 1460 1461 if (flags & TARGET_SCF_USE_CPUID) 1462 se_cmd->se_cmd_flags |= SCF_USE_CPUID; 1463 else 1464 se_cmd->cpuid = WORK_CPU_UNBOUND; 1465 1466 if (flags & TARGET_SCF_UNKNOWN_SIZE) 1467 se_cmd->unknown_data_length = 1; 1468 /* 1469 * Obtain struct se_cmd->cmd_kref reference and add new cmd to 1470 * se_sess->sess_cmd_list. A second kref_get here is necessary 1471 * for fabrics using TARGET_SCF_ACK_KREF that expect a second 1472 * kref_put() to happen during fabric packet acknowledgement. 1473 */ 1474 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); 1475 if (ret) 1476 return ret; 1477 /* 1478 * Signal bidirectional data payloads to target-core 1479 */ 1480 if (flags & TARGET_SCF_BIDI_OP) 1481 se_cmd->se_cmd_flags |= SCF_BIDI; 1482 /* 1483 * Locate se_lun pointer and attach it to struct se_cmd 1484 */ 1485 rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun); 1486 if (rc) { 1487 transport_send_check_condition_and_sense(se_cmd, rc, 0); 1488 target_put_sess_cmd(se_cmd); 1489 return 0; 1490 } 1491 1492 rc = target_setup_cmd_from_cdb(se_cmd, cdb); 1493 if (rc != 0) { 1494 transport_generic_request_failure(se_cmd, rc); 1495 return 0; 1496 } 1497 1498 /* 1499 * Save pointers for SGLs containing protection information, 1500 * if present. 1501 */ 1502 if (sgl_prot_count) { 1503 se_cmd->t_prot_sg = sgl_prot; 1504 se_cmd->t_prot_nents = sgl_prot_count; 1505 se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC; 1506 } 1507 1508 /* 1509 * When a non zero sgl_count has been passed perform SGL passthrough 1510 * mapping for pre-allocated fabric memory instead of having target 1511 * core perform an internal SGL allocation.. 1512 */ 1513 if (sgl_count != 0) { 1514 BUG_ON(!sgl); 1515 1516 /* 1517 * A work-around for tcm_loop as some userspace code via 1518 * scsi-generic do not memset their associated read buffers, 1519 * so go ahead and do that here for type non-data CDBs. Also 1520 * note that this is currently guaranteed to be a single SGL 1521 * for this case by target core in target_setup_cmd_from_cdb() 1522 * -> transport_generic_cmd_sequencer(). 
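 *
 * Note that the memset below only covers the first SGL entry (the page
 * mapped via kmap() plus sgl->offset, for sgl->length bytes), which is why
 * the single-SGL guarantee mentioned above matters.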
1523 */ 1524 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && 1525 se_cmd->data_direction == DMA_FROM_DEVICE) { 1526 unsigned char *buf = NULL; 1527 1528 if (sgl) 1529 buf = kmap(sg_page(sgl)) + sgl->offset; 1530 1531 if (buf) { 1532 memset(buf, 0, sgl->length); 1533 kunmap(sg_page(sgl)); 1534 } 1535 } 1536 1537 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count, 1538 sgl_bidi, sgl_bidi_count); 1539 if (rc != 0) { 1540 transport_generic_request_failure(se_cmd, rc); 1541 return 0; 1542 } 1543 } 1544 1545 /* 1546 * Check if we need to delay processing because of ALUA 1547 * Active/NonOptimized primary access state.. 1548 */ 1549 core_alua_check_nonop_delay(se_cmd); 1550 1551 transport_handle_cdb_direct(se_cmd); 1552 return 0; 1553 } 1554 EXPORT_SYMBOL(target_submit_cmd_map_sgls); 1555 1556 /* 1557 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd 1558 * 1559 * @se_cmd: command descriptor to submit 1560 * @se_sess: associated se_sess for endpoint 1561 * @cdb: pointer to SCSI CDB 1562 * @sense: pointer to SCSI sense buffer 1563 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1564 * @data_length: fabric expected data transfer length 1565 * @task_addr: SAM task attribute 1566 * @data_dir: DMA data direction 1567 * @flags: flags for command submission from target_sc_flags_tables 1568 * 1569 * Task tags are supported if the caller has set @se_cmd->tag. 1570 * 1571 * Returns non zero to signal active I/O shutdown failure. All other 1572 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1573 * but still return zero here. 1574 * 1575 * This may only be called from process context, and also currently 1576 * assumes internal allocation of fabric payload buffer by target-core. 1577 * 1578 * It also assumes interal target core SGL memory allocation. 1579 */ 1580 int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, 1581 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, 1582 u32 data_length, int task_attr, int data_dir, int flags) 1583 { 1584 return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, 1585 unpacked_lun, data_length, task_attr, data_dir, 1586 flags, NULL, 0, NULL, 0, NULL, 0); 1587 } 1588 EXPORT_SYMBOL(target_submit_cmd); 1589 1590 static void target_complete_tmr_failure(struct work_struct *work) 1591 { 1592 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); 1593 1594 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; 1595 se_cmd->se_tfo->queue_tm_rsp(se_cmd); 1596 1597 transport_cmd_check_stop_to_fabric(se_cmd); 1598 } 1599 1600 /** 1601 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd 1602 * for TMR CDBs 1603 * 1604 * @se_cmd: command descriptor to submit 1605 * @se_sess: associated se_sess for endpoint 1606 * @sense: pointer to SCSI sense buffer 1607 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1608 * @fabric_context: fabric context for TMR req 1609 * @tm_type: Type of TM request 1610 * @gfp: gfp type for caller 1611 * @tag: referenced task tag for TMR_ABORT_TASK 1612 * @flags: submit cmd flags 1613 * 1614 * Callable from all contexts. 
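 *
 * Purely illustrative sketch (fabric-side names are hypothetical): aborting
 * a previously submitted command by its tag might look like
 *
 *	ret = target_submit_tmr(&tmr_cmd->se_cmd, sess, tmr_cmd->sense, lun,
 *				tmr_cmd, TMR_ABORT_TASK, GFP_KERNEL,
 *				ref_tag, TARGET_SCF_ACK_KREF);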
1615 **/ 1616 1617 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 1618 unsigned char *sense, u64 unpacked_lun, 1619 void *fabric_tmr_ptr, unsigned char tm_type, 1620 gfp_t gfp, u64 tag, int flags) 1621 { 1622 struct se_portal_group *se_tpg; 1623 int ret; 1624 1625 se_tpg = se_sess->se_tpg; 1626 BUG_ON(!se_tpg); 1627 1628 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1629 0, DMA_NONE, TCM_SIMPLE_TAG, sense); 1630 /* 1631 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req 1632 * allocation failure. 1633 */ 1634 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); 1635 if (ret < 0) 1636 return -ENOMEM; 1637 1638 if (tm_type == TMR_ABORT_TASK) 1639 se_cmd->se_tmr_req->ref_task_tag = tag; 1640 1641 /* See target_submit_cmd for commentary */ 1642 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); 1643 if (ret) { 1644 core_tmr_release_req(se_cmd->se_tmr_req); 1645 return ret; 1646 } 1647 1648 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); 1649 if (ret) { 1650 /* 1651 * For callback during failure handling, push this work off 1652 * to process context with TMR_LUN_DOES_NOT_EXIST status. 1653 */ 1654 INIT_WORK(&se_cmd->work, target_complete_tmr_failure); 1655 schedule_work(&se_cmd->work); 1656 return 0; 1657 } 1658 transport_generic_handle_tmr(se_cmd); 1659 return 0; 1660 } 1661 EXPORT_SYMBOL(target_submit_tmr); 1662 1663 /* 1664 * Handle SAM-esque emulation for generic transport request failures. 1665 */ 1666 void transport_generic_request_failure(struct se_cmd *cmd, 1667 sense_reason_t sense_reason) 1668 { 1669 int ret = 0, post_ret = 0; 1670 1671 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx" 1672 " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]); 1673 pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n", 1674 cmd->se_tfo->get_cmd_state(cmd), 1675 cmd->t_state, sense_reason); 1676 pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", 1677 (cmd->transport_state & CMD_T_ACTIVE) != 0, 1678 (cmd->transport_state & CMD_T_STOP) != 0, 1679 (cmd->transport_state & CMD_T_SENT) != 0); 1680 1681 /* 1682 * For SAM Task Attribute emulation for failed struct se_cmd 1683 */ 1684 transport_complete_task_attr(cmd); 1685 /* 1686 * Handle special case for COMPARE_AND_WRITE failure, where the 1687 * callback is expected to drop the per device ->caw_sem. 1688 */ 1689 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 1690 cmd->transport_complete_callback) 1691 cmd->transport_complete_callback(cmd, false, &post_ret); 1692 1693 switch (sense_reason) { 1694 case TCM_NON_EXISTENT_LUN: 1695 case TCM_UNSUPPORTED_SCSI_OPCODE: 1696 case TCM_INVALID_CDB_FIELD: 1697 case TCM_INVALID_PARAMETER_LIST: 1698 case TCM_PARAMETER_LIST_LENGTH_ERROR: 1699 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 1700 case TCM_UNKNOWN_MODE_PAGE: 1701 case TCM_WRITE_PROTECTED: 1702 case TCM_ADDRESS_OUT_OF_RANGE: 1703 case TCM_CHECK_CONDITION_ABORT_CMD: 1704 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1705 case TCM_CHECK_CONDITION_NOT_READY: 1706 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: 1707 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: 1708 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: 1709 break; 1710 case TCM_OUT_OF_RESOURCES: 1711 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1712 break; 1713 case TCM_RESERVATION_CONFLICT: 1714 /* 1715 * No SENSE Data payload for this case, set SCSI Status 1716 * and queue the response to $FABRIC_MOD. 
1717 * 1718 * Uses linux/include/scsi/scsi.h SAM status codes defs 1719 */ 1720 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1721 /* 1722 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 1723 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 1724 * CONFLICT STATUS. 1725 * 1726 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1727 */ 1728 if (cmd->se_sess && 1729 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) { 1730 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 1731 cmd->orig_fe_lun, 0x2C, 1732 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1733 } 1734 trace_target_cmd_complete(cmd); 1735 ret = cmd->se_tfo->queue_status(cmd); 1736 if (ret == -EAGAIN || ret == -ENOMEM) 1737 goto queue_full; 1738 goto check_stop; 1739 default: 1740 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 1741 cmd->t_task_cdb[0], sense_reason); 1742 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1743 break; 1744 } 1745 1746 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); 1747 if (ret == -EAGAIN || ret == -ENOMEM) 1748 goto queue_full; 1749 1750 check_stop: 1751 transport_lun_remove_cmd(cmd); 1752 transport_cmd_check_stop_to_fabric(cmd); 1753 return; 1754 1755 queue_full: 1756 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 1757 transport_handle_queue_full(cmd, cmd->se_dev); 1758 } 1759 EXPORT_SYMBOL(transport_generic_request_failure); 1760 1761 void __target_execute_cmd(struct se_cmd *cmd, bool do_checks) 1762 { 1763 sense_reason_t ret; 1764 1765 if (!cmd->execute_cmd) { 1766 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1767 goto err; 1768 } 1769 if (do_checks) { 1770 /* 1771 * Check for an existing UNIT ATTENTION condition after 1772 * target_handle_task_attr() has done SAM task attr 1773 * checking, and possibly have already defered execution 1774 * out to target_restart_delayed_cmds() context. 1775 */ 1776 ret = target_scsi3_ua_check(cmd); 1777 if (ret) 1778 goto err; 1779 1780 ret = target_alua_state_check(cmd); 1781 if (ret) 1782 goto err; 1783 1784 ret = target_check_reservation(cmd); 1785 if (ret) { 1786 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1787 goto err; 1788 } 1789 } 1790 1791 ret = cmd->execute_cmd(cmd); 1792 if (!ret) 1793 return; 1794 err: 1795 spin_lock_irq(&cmd->t_state_lock); 1796 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); 1797 spin_unlock_irq(&cmd->t_state_lock); 1798 1799 transport_generic_request_failure(cmd, ret); 1800 } 1801 1802 static int target_write_prot_action(struct se_cmd *cmd) 1803 { 1804 u32 sectors; 1805 /* 1806 * Perform WRITE_INSERT of PI using software emulation when backend 1807 * device has PI enabled, if the transport has not already generated 1808 * PI using hardware WRITE_INSERT offload. 
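 *
 * For TARGET_PROT_DOUT_STRIP the inverse applies: when the fabric cannot
 * strip the PI itself, sbc_dif_verify() is run over t_prot_sg (with the
 * sector count derived from data_length and the backend block size) and a
 * verification failure is reported via transport_generic_request_failure().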
1809	 */
1810	switch (cmd->prot_op) {
1811	case TARGET_PROT_DOUT_INSERT:
1812		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
1813			sbc_dif_generate(cmd);
1814		break;
1815	case TARGET_PROT_DOUT_STRIP:
1816		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
1817			break;
1818
1819		sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
1820		cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
1821					     sectors, 0, cmd->t_prot_sg, 0);
1822		if (unlikely(cmd->pi_err)) {
1823			spin_lock_irq(&cmd->t_state_lock);
1824			cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
1825			spin_unlock_irq(&cmd->t_state_lock);
1826			transport_generic_request_failure(cmd, cmd->pi_err);
1827			return -1;
1828		}
1829		break;
1830	default:
1831		break;
1832	}
1833
1834	return 0;
1835 }
1836
1837 static bool target_handle_task_attr(struct se_cmd *cmd)
1838 {
1839	struct se_device *dev = cmd->se_dev;
1840
1841	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1842		return false;
1843
1844	cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
1845
1846	/*
1847	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
1848	 * to allow the passed struct se_cmd list of tasks to the front of the list.
1849	 */
1850	switch (cmd->sam_task_attr) {
1851	case TCM_HEAD_TAG:
1852		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
1853			 cmd->t_task_cdb[0]);
1854		return false;
1855	case TCM_ORDERED_TAG:
1856		atomic_inc_mb(&dev->dev_ordered_sync);
1857
1858		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
1859			 cmd->t_task_cdb[0]);
1860
1861		/*
1862		 * Execute an ORDERED command if no other older commands
1863		 * exist that need to be completed first.
1864		 */
1865		if (!atomic_read(&dev->simple_cmds))
1866			return false;
1867		break;
1868	default:
1869		/*
1870		 * For SIMPLE and UNTAGGED Task Attribute commands
1871		 */
1872		atomic_inc_mb(&dev->simple_cmds);
1873		break;
1874	}
1875
1876	if (atomic_read(&dev->dev_ordered_sync) == 0)
1877		return false;
1878
1879	spin_lock(&dev->delayed_cmd_lock);
1880	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
1881	spin_unlock(&dev->delayed_cmd_lock);
1882
1883	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
1884		 cmd->t_task_cdb[0], cmd->sam_task_attr);
1885	return true;
1886 }
1887
1888 static int __transport_check_aborted_status(struct se_cmd *, int);
1889
1890 void target_execute_cmd(struct se_cmd *cmd)
1891 {
1892	/*
1893	 * Determine if frontend context caller is requesting the stopping of
1894	 * this command for frontend exceptions.
1895	 *
1896	 * If the received CDB has already been aborted stop processing it here.
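 *
 * Both the aborted check and the CMD_T_STOP check below run under
 * t_state_lock; a stopped command only completes t_transport_stop_comp and
 * returns without ever reaching __target_execute_cmd().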
1897 */ 1898 spin_lock_irq(&cmd->t_state_lock); 1899 if (__transport_check_aborted_status(cmd, 1)) { 1900 spin_unlock_irq(&cmd->t_state_lock); 1901 return; 1902 } 1903 if (cmd->transport_state & CMD_T_STOP) { 1904 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 1905 __func__, __LINE__, cmd->tag); 1906 1907 spin_unlock_irq(&cmd->t_state_lock); 1908 complete_all(&cmd->t_transport_stop_comp); 1909 return; 1910 } 1911 1912 cmd->t_state = TRANSPORT_PROCESSING; 1913 cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; 1914 spin_unlock_irq(&cmd->t_state_lock); 1915 1916 if (target_write_prot_action(cmd)) 1917 return; 1918 1919 if (target_handle_task_attr(cmd)) { 1920 spin_lock_irq(&cmd->t_state_lock); 1921 cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT); 1922 spin_unlock_irq(&cmd->t_state_lock); 1923 return; 1924 } 1925 1926 __target_execute_cmd(cmd, true); 1927 } 1928 EXPORT_SYMBOL(target_execute_cmd); 1929 1930 /* 1931 * Process all commands up to the last received ORDERED task attribute which 1932 * requires another blocking boundary 1933 */ 1934 static void target_restart_delayed_cmds(struct se_device *dev) 1935 { 1936 for (;;) { 1937 struct se_cmd *cmd; 1938 1939 spin_lock(&dev->delayed_cmd_lock); 1940 if (list_empty(&dev->delayed_cmd_list)) { 1941 spin_unlock(&dev->delayed_cmd_lock); 1942 break; 1943 } 1944 1945 cmd = list_entry(dev->delayed_cmd_list.next, 1946 struct se_cmd, se_delayed_node); 1947 list_del(&cmd->se_delayed_node); 1948 spin_unlock(&dev->delayed_cmd_lock); 1949 1950 __target_execute_cmd(cmd, true); 1951 1952 if (cmd->sam_task_attr == TCM_ORDERED_TAG) 1953 break; 1954 } 1955 } 1956 1957 /* 1958 * Called from I/O completion to determine which dormant/delayed 1959 * and ordered cmds need to have their tasks added to the execution queue. 
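 * This also drops the simple_cmds/dev_ordered_sync accounting taken in target_handle_task_attr() and bumps dev_cur_ordered_id before draining the delayed list via target_restart_delayed_cmds().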
1960 */ 1961 static void transport_complete_task_attr(struct se_cmd *cmd) 1962 { 1963 struct se_device *dev = cmd->se_dev; 1964 1965 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1966 return; 1967 1968 if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET)) 1969 goto restart; 1970 1971 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { 1972 atomic_dec_mb(&dev->simple_cmds); 1973 dev->dev_cur_ordered_id++; 1974 pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n", 1975 dev->dev_cur_ordered_id); 1976 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { 1977 dev->dev_cur_ordered_id++; 1978 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", 1979 dev->dev_cur_ordered_id); 1980 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { 1981 atomic_dec_mb(&dev->dev_ordered_sync); 1982 1983 dev->dev_cur_ordered_id++; 1984 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", 1985 dev->dev_cur_ordered_id); 1986 } 1987 restart: 1988 target_restart_delayed_cmds(dev); 1989 } 1990 1991 static void transport_complete_qf(struct se_cmd *cmd) 1992 { 1993 int ret = 0; 1994 1995 transport_complete_task_attr(cmd); 1996 1997 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 1998 trace_target_cmd_complete(cmd); 1999 ret = cmd->se_tfo->queue_status(cmd); 2000 goto out; 2001 } 2002 2003 switch (cmd->data_direction) { 2004 case DMA_FROM_DEVICE: 2005 if (cmd->scsi_status) 2006 goto queue_status; 2007 2008 trace_target_cmd_complete(cmd); 2009 ret = cmd->se_tfo->queue_data_in(cmd); 2010 break; 2011 case DMA_TO_DEVICE: 2012 if (cmd->se_cmd_flags & SCF_BIDI) { 2013 ret = cmd->se_tfo->queue_data_in(cmd); 2014 break; 2015 } 2016 /* Fall through for DMA_TO_DEVICE */ 2017 case DMA_NONE: 2018 queue_status: 2019 trace_target_cmd_complete(cmd); 2020 ret = cmd->se_tfo->queue_status(cmd); 2021 break; 2022 default: 2023 break; 2024 } 2025 2026 out: 2027 if (ret < 0) { 2028 transport_handle_queue_full(cmd, cmd->se_dev); 2029 return; 2030 } 2031 transport_lun_remove_cmd(cmd); 2032 transport_cmd_check_stop_to_fabric(cmd); 2033 } 2034 2035 static void transport_handle_queue_full( 2036 struct se_cmd *cmd, 2037 struct se_device *dev) 2038 { 2039 spin_lock_irq(&dev->qf_cmd_lock); 2040 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 2041 atomic_inc_mb(&dev->dev_qf_count); 2042 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 2043 2044 schedule_work(&cmd->se_dev->qf_work_queue); 2045 } 2046 2047 static bool target_read_prot_action(struct se_cmd *cmd) 2048 { 2049 switch (cmd->prot_op) { 2050 case TARGET_PROT_DIN_STRIP: 2051 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { 2052 u32 sectors = cmd->data_length >> 2053 ilog2(cmd->se_dev->dev_attrib.block_size); 2054 2055 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 2056 sectors, 0, cmd->t_prot_sg, 2057 0); 2058 if (cmd->pi_err) 2059 return true; 2060 } 2061 break; 2062 case TARGET_PROT_DIN_INSERT: 2063 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) 2064 break; 2065 2066 sbc_dif_generate(cmd); 2067 break; 2068 default: 2069 break; 2070 } 2071 2072 return false; 2073 } 2074 2075 static void target_complete_ok_work(struct work_struct *work) 2076 { 2077 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2078 int ret; 2079 2080 /* 2081 * Check if we need to move delayed/dormant tasks from cmds on the 2082 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 2083 * Attribute. 
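 * Doing this before queueing data or status back to the fabric lets commands held behind an ORDERED barrier restart as soon as the barrier command completes.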
2084 */ 2085 transport_complete_task_attr(cmd); 2086 2087 /* 2088 * Check to schedule QUEUE_FULL work, or execute an existing 2089 * cmd->transport_qf_callback() 2090 */ 2091 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 2092 schedule_work(&cmd->se_dev->qf_work_queue); 2093 2094 /* 2095 * Check if we need to send a sense buffer from 2096 * the struct se_cmd in question. 2097 */ 2098 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 2099 WARN_ON(!cmd->scsi_status); 2100 ret = transport_send_check_condition_and_sense( 2101 cmd, 0, 1); 2102 if (ret == -EAGAIN || ret == -ENOMEM) 2103 goto queue_full; 2104 2105 transport_lun_remove_cmd(cmd); 2106 transport_cmd_check_stop_to_fabric(cmd); 2107 return; 2108 } 2109 /* 2110 * Check for a callback, used by amongst other things 2111 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. 2112 */ 2113 if (cmd->transport_complete_callback) { 2114 sense_reason_t rc; 2115 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); 2116 bool zero_dl = !(cmd->data_length); 2117 int post_ret = 0; 2118 2119 rc = cmd->transport_complete_callback(cmd, true, &post_ret); 2120 if (!rc && !post_ret) { 2121 if (caw && zero_dl) 2122 goto queue_rsp; 2123 2124 return; 2125 } else if (rc) { 2126 ret = transport_send_check_condition_and_sense(cmd, 2127 rc, 0); 2128 if (ret == -EAGAIN || ret == -ENOMEM) 2129 goto queue_full; 2130 2131 transport_lun_remove_cmd(cmd); 2132 transport_cmd_check_stop_to_fabric(cmd); 2133 return; 2134 } 2135 } 2136 2137 queue_rsp: 2138 switch (cmd->data_direction) { 2139 case DMA_FROM_DEVICE: 2140 if (cmd->scsi_status) 2141 goto queue_status; 2142 2143 atomic_long_add(cmd->data_length, 2144 &cmd->se_lun->lun_stats.tx_data_octets); 2145 /* 2146 * Perform READ_STRIP of PI using software emulation when 2147 * backend had PI enabled, if the transport will not be 2148 * performing hardware READ_STRIP offload. 
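 * target_read_prot_action() returns true only when sbc_dif_verify() flagged an error, in which case cmd->pi_err is returned as sense data below instead of the READ payload.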
2149 */ 2150 if (target_read_prot_action(cmd)) { 2151 ret = transport_send_check_condition_and_sense(cmd, 2152 cmd->pi_err, 0); 2153 if (ret == -EAGAIN || ret == -ENOMEM) 2154 goto queue_full; 2155 2156 transport_lun_remove_cmd(cmd); 2157 transport_cmd_check_stop_to_fabric(cmd); 2158 return; 2159 } 2160 2161 trace_target_cmd_complete(cmd); 2162 ret = cmd->se_tfo->queue_data_in(cmd); 2163 if (ret == -EAGAIN || ret == -ENOMEM) 2164 goto queue_full; 2165 break; 2166 case DMA_TO_DEVICE: 2167 atomic_long_add(cmd->data_length, 2168 &cmd->se_lun->lun_stats.rx_data_octets); 2169 /* 2170 * Check if we need to send READ payload for BIDI-COMMAND 2171 */ 2172 if (cmd->se_cmd_flags & SCF_BIDI) { 2173 atomic_long_add(cmd->data_length, 2174 &cmd->se_lun->lun_stats.tx_data_octets); 2175 ret = cmd->se_tfo->queue_data_in(cmd); 2176 if (ret == -EAGAIN || ret == -ENOMEM) 2177 goto queue_full; 2178 break; 2179 } 2180 /* Fall through for DMA_TO_DEVICE */ 2181 case DMA_NONE: 2182 queue_status: 2183 trace_target_cmd_complete(cmd); 2184 ret = cmd->se_tfo->queue_status(cmd); 2185 if (ret == -EAGAIN || ret == -ENOMEM) 2186 goto queue_full; 2187 break; 2188 default: 2189 break; 2190 } 2191 2192 transport_lun_remove_cmd(cmd); 2193 transport_cmd_check_stop_to_fabric(cmd); 2194 return; 2195 2196 queue_full: 2197 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 2198 " data_direction: %d\n", cmd, cmd->data_direction); 2199 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 2200 transport_handle_queue_full(cmd, cmd->se_dev); 2201 } 2202 2203 void target_free_sgl(struct scatterlist *sgl, int nents) 2204 { 2205 struct scatterlist *sg; 2206 int count; 2207 2208 for_each_sg(sgl, sg, nents, count) 2209 __free_page(sg_page(sg)); 2210 2211 kfree(sgl); 2212 } 2213 EXPORT_SYMBOL(target_free_sgl); 2214 2215 static inline void transport_reset_sgl_orig(struct se_cmd *cmd) 2216 { 2217 /* 2218 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE 2219 * emulation, and free + reset pointers if necessary.. 2220 */ 2221 if (!cmd->t_data_sg_orig) 2222 return; 2223 2224 kfree(cmd->t_data_sg); 2225 cmd->t_data_sg = cmd->t_data_sg_orig; 2226 cmd->t_data_sg_orig = NULL; 2227 cmd->t_data_nents = cmd->t_data_nents_orig; 2228 cmd->t_data_nents_orig = 0; 2229 } 2230 2231 static inline void transport_free_pages(struct se_cmd *cmd) 2232 { 2233 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2234 target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); 2235 cmd->t_prot_sg = NULL; 2236 cmd->t_prot_nents = 0; 2237 } 2238 2239 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { 2240 /* 2241 * Release special case READ buffer payload required for 2242 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE 2243 */ 2244 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { 2245 target_free_sgl(cmd->t_bidi_data_sg, 2246 cmd->t_bidi_data_nents); 2247 cmd->t_bidi_data_sg = NULL; 2248 cmd->t_bidi_data_nents = 0; 2249 } 2250 transport_reset_sgl_orig(cmd); 2251 return; 2252 } 2253 transport_reset_sgl_orig(cmd); 2254 2255 target_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2256 cmd->t_data_sg = NULL; 2257 cmd->t_data_nents = 0; 2258 2259 target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 2260 cmd->t_bidi_data_sg = NULL; 2261 cmd->t_bidi_data_nents = 0; 2262 } 2263 2264 /** 2265 * transport_put_cmd - release a reference to a command 2266 * @cmd: command to release 2267 * 2268 * This routine releases our reference to the command and frees it if possible. 
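 * The final kref_put() lands in target_release_cmd_kref(), which unlinks the command from the session list, frees its memory and, unless a tear-down waiter is pending on cmd_wait_comp, returns it to the fabric via ->release_cmd().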
2269 */ 2270 static int transport_put_cmd(struct se_cmd *cmd) 2271 { 2272 BUG_ON(!cmd->se_tfo); 2273 /* 2274 * If this cmd has been setup with target_get_sess_cmd(), drop 2275 * the kref and call ->release_cmd() in kref callback. 2276 */ 2277 return target_put_sess_cmd(cmd); 2278 } 2279 2280 void *transport_kmap_data_sg(struct se_cmd *cmd) 2281 { 2282 struct scatterlist *sg = cmd->t_data_sg; 2283 struct page **pages; 2284 int i; 2285 2286 /* 2287 * We need to take into account a possible offset here for fabrics like 2288 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 2289 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 2290 */ 2291 if (!cmd->t_data_nents) 2292 return NULL; 2293 2294 BUG_ON(!sg); 2295 if (cmd->t_data_nents == 1) 2296 return kmap(sg_page(sg)) + sg->offset; 2297 2298 /* >1 page. use vmap */ 2299 pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL); 2300 if (!pages) 2301 return NULL; 2302 2303 /* convert sg[] to pages[] */ 2304 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 2305 pages[i] = sg_page(sg); 2306 } 2307 2308 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 2309 kfree(pages); 2310 if (!cmd->t_data_vmap) 2311 return NULL; 2312 2313 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 2314 } 2315 EXPORT_SYMBOL(transport_kmap_data_sg); 2316 2317 void transport_kunmap_data_sg(struct se_cmd *cmd) 2318 { 2319 if (!cmd->t_data_nents) { 2320 return; 2321 } else if (cmd->t_data_nents == 1) { 2322 kunmap(sg_page(cmd->t_data_sg)); 2323 return; 2324 } 2325 2326 vunmap(cmd->t_data_vmap); 2327 cmd->t_data_vmap = NULL; 2328 } 2329 EXPORT_SYMBOL(transport_kunmap_data_sg); 2330 2331 int 2332 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, 2333 bool zero_page, bool chainable) 2334 { 2335 struct scatterlist *sg; 2336 struct page *page; 2337 gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0; 2338 unsigned int nalloc, nent; 2339 int i = 0; 2340 2341 nalloc = nent = DIV_ROUND_UP(length, PAGE_SIZE); 2342 if (chainable) 2343 nalloc++; 2344 sg = kmalloc_array(nalloc, sizeof(struct scatterlist), GFP_KERNEL); 2345 if (!sg) 2346 return -ENOMEM; 2347 2348 sg_init_table(sg, nalloc); 2349 2350 while (length) { 2351 u32 page_len = min_t(u32, length, PAGE_SIZE); 2352 page = alloc_page(GFP_KERNEL | zero_flag); 2353 if (!page) 2354 goto out; 2355 2356 sg_set_page(&sg[i], page, page_len, 0); 2357 length -= page_len; 2358 i++; 2359 } 2360 *sgl = sg; 2361 *nents = nent; 2362 return 0; 2363 2364 out: 2365 while (i > 0) { 2366 i--; 2367 __free_page(sg_page(&sg[i])); 2368 } 2369 kfree(sg); 2370 return -ENOMEM; 2371 } 2372 EXPORT_SYMBOL(target_alloc_sgl); 2373 2374 /* 2375 * Allocate any required resources to execute the command. For writes we 2376 * might not have the payload yet, so notify the fabric via a call to 2377 * ->write_pending instead. Otherwise place it on the execution queue. 
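 * In outline: a protection SGL is allocated when PI is in use, a second (bidi) SGL for BIDI and COMPARE_AND_WRITE payloads, and the regular data SGL unless the fabric has already mapped memory via SG_TO_MEM_NOALLOC.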
2378 */ 2379 sense_reason_t 2380 transport_generic_new_cmd(struct se_cmd *cmd) 2381 { 2382 int ret = 0; 2383 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); 2384 2385 if (cmd->prot_op != TARGET_PROT_NORMAL && 2386 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2387 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, 2388 cmd->prot_length, true, false); 2389 if (ret < 0) 2390 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2391 } 2392 2393 /* 2394 * Determine if the TCM fabric module has already allocated physical 2395 * memory, and is directly calling transport_generic_map_mem_to_cmd() 2396 * beforehand. 2397 */ 2398 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 2399 cmd->data_length) { 2400 2401 if ((cmd->se_cmd_flags & SCF_BIDI) || 2402 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { 2403 u32 bidi_length; 2404 2405 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) 2406 bidi_length = cmd->t_task_nolb * 2407 cmd->se_dev->dev_attrib.block_size; 2408 else 2409 bidi_length = cmd->data_length; 2410 2411 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2412 &cmd->t_bidi_data_nents, 2413 bidi_length, zero_flag, false); 2414 if (ret < 0) 2415 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2416 } 2417 2418 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2419 cmd->data_length, zero_flag, false); 2420 if (ret < 0) 2421 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2422 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 2423 cmd->data_length) { 2424 /* 2425 * Special case for COMPARE_AND_WRITE with fabrics 2426 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. 2427 */ 2428 u32 caw_length = cmd->t_task_nolb * 2429 cmd->se_dev->dev_attrib.block_size; 2430 2431 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2432 &cmd->t_bidi_data_nents, 2433 caw_length, zero_flag, false); 2434 if (ret < 0) 2435 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2436 } 2437 /* 2438 * If this command is not a write we can execute it right here; 2439 * for write buffers we need to notify the fabric driver first 2440 * and let it call back once the write buffers are ready. 2441 */ 2442 target_add_to_state_list(cmd); 2443 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { 2444 target_execute_cmd(cmd); 2445 return 0; 2446 } 2447 transport_cmd_check_stop(cmd, false, true); 2448 2449 ret = cmd->se_tfo->write_pending(cmd); 2450 if (ret == -EAGAIN || ret == -ENOMEM) 2451 goto queue_full; 2452 2453 /* fabric drivers should only return -EAGAIN or -ENOMEM as error */ 2454 WARN_ON(ret); 2455 2456 return (!ret) ?
0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2457 2458 queue_full: 2459 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd); 2460 cmd->t_state = TRANSPORT_COMPLETE_QF_WP; 2461 transport_handle_queue_full(cmd, cmd->se_dev); 2462 return 0; 2463 } 2464 EXPORT_SYMBOL(transport_generic_new_cmd); 2465 2466 static void transport_write_pending_qf(struct se_cmd *cmd) 2467 { 2468 int ret; 2469 2470 ret = cmd->se_tfo->write_pending(cmd); 2471 if (ret == -EAGAIN || ret == -ENOMEM) { 2472 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", 2473 cmd); 2474 transport_handle_queue_full(cmd, cmd->se_dev); 2475 } 2476 } 2477 2478 static bool 2479 __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, 2480 unsigned long *flags); 2481 2482 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) 2483 { 2484 unsigned long flags; 2485 2486 spin_lock_irqsave(&cmd->t_state_lock, flags); 2487 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); 2488 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2489 } 2490 2491 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2492 { 2493 int ret = 0; 2494 bool aborted = false, tas = false; 2495 2496 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { 2497 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2498 target_wait_free_cmd(cmd, &aborted, &tas); 2499 2500 if (!aborted || tas) 2501 ret = transport_put_cmd(cmd); 2502 } else { 2503 if (wait_for_tasks) 2504 target_wait_free_cmd(cmd, &aborted, &tas); 2505 /* 2506 * Handle WRITE failure case where transport_generic_new_cmd() 2507 * has already added se_cmd to state_list, but fabric has 2508 * failed command before I/O submission. 2509 */ 2510 if (cmd->state_active) 2511 target_remove_from_state_list(cmd); 2512 2513 if (cmd->se_lun) 2514 transport_lun_remove_cmd(cmd); 2515 2516 if (!aborted || tas) 2517 ret = transport_put_cmd(cmd); 2518 } 2519 /* 2520 * If the task has been internally aborted due to TMR ABORT_TASK 2521 * or LUN_RESET, target_core_tmr.c is responsible for performing 2522 * the remaining calls to target_put_sess_cmd(), and not the 2523 * callers of this function. 2524 */ 2525 if (aborted) { 2526 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); 2527 wait_for_completion(&cmd->cmd_wait_comp); 2528 cmd->se_tfo->release_cmd(cmd); 2529 ret = 1; 2530 } 2531 return ret; 2532 } 2533 EXPORT_SYMBOL(transport_generic_free_cmd); 2534 2535 /* target_get_sess_cmd - Add command to active ->sess_cmd_list 2536 * @se_cmd: command descriptor to add 2537 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() 2538 */ 2539 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) 2540 { 2541 struct se_session *se_sess = se_cmd->se_sess; 2542 unsigned long flags; 2543 int ret = 0; 2544 2545 /* 2546 * Add a second kref if the fabric caller is expecting to handle 2547 * fabric acknowledgement that requires two target_put_sess_cmd() 2548 * invocations before se_cmd descriptor release.
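 * If the session is already being torn down, any extra reference taken here is dropped again and -ESHUTDOWN is returned, so the caller must not queue the command any further.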
2549 */ 2550 if (ack_kref) 2551 kref_get(&se_cmd->cmd_kref); 2552 2553 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2554 if (se_sess->sess_tearing_down) { 2555 ret = -ESHUTDOWN; 2556 goto out; 2557 } 2558 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2559 out: 2560 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2561 2562 if (ret && ack_kref) 2563 target_put_sess_cmd(se_cmd); 2564 2565 return ret; 2566 } 2567 EXPORT_SYMBOL(target_get_sess_cmd); 2568 2569 static void target_free_cmd_mem(struct se_cmd *cmd) 2570 { 2571 transport_free_pages(cmd); 2572 2573 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 2574 core_tmr_release_req(cmd->se_tmr_req); 2575 if (cmd->t_task_cdb != cmd->__t_task_cdb) 2576 kfree(cmd->t_task_cdb); 2577 } 2578 2579 static void target_release_cmd_kref(struct kref *kref) 2580 { 2581 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2582 struct se_session *se_sess = se_cmd->se_sess; 2583 unsigned long flags; 2584 bool fabric_stop; 2585 2586 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2587 2588 spin_lock(&se_cmd->t_state_lock); 2589 fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) && 2590 (se_cmd->transport_state & CMD_T_ABORTED); 2591 spin_unlock(&se_cmd->t_state_lock); 2592 2593 if (se_cmd->cmd_wait_set || fabric_stop) { 2594 list_del_init(&se_cmd->se_cmd_list); 2595 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2596 target_free_cmd_mem(se_cmd); 2597 complete(&se_cmd->cmd_wait_comp); 2598 return; 2599 } 2600 list_del_init(&se_cmd->se_cmd_list); 2601 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2602 2603 target_free_cmd_mem(se_cmd); 2604 se_cmd->se_tfo->release_cmd(se_cmd); 2605 } 2606 2607 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put 2608 * @se_cmd: command descriptor to drop 2609 */ 2610 int target_put_sess_cmd(struct se_cmd *se_cmd) 2611 { 2612 struct se_session *se_sess = se_cmd->se_sess; 2613 2614 if (!se_sess) { 2615 target_free_cmd_mem(se_cmd); 2616 se_cmd->se_tfo->release_cmd(se_cmd); 2617 return 1; 2618 } 2619 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); 2620 } 2621 EXPORT_SYMBOL(target_put_sess_cmd); 2622 2623 /* target_sess_cmd_list_set_waiting - Flag all commands in 2624 * sess_cmd_list to complete cmd_wait_comp. Set 2625 * sess_tearing_down so no more commands are queued. 
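 * Commands whose kref can still be taken get cmd_wait_set and CMD_T_FABRIC_STOP, so that their final put completes cmd_wait_comp for target_wait_for_sess_cmds().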
2626 * @se_sess: session to flag 2627 */ 2628 void target_sess_cmd_list_set_waiting(struct se_session *se_sess) 2629 { 2630 struct se_cmd *se_cmd; 2631 unsigned long flags; 2632 int rc; 2633 2634 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2635 if (se_sess->sess_tearing_down) { 2636 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2637 return; 2638 } 2639 se_sess->sess_tearing_down = 1; 2640 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); 2641 2642 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) { 2643 rc = kref_get_unless_zero(&se_cmd->cmd_kref); 2644 if (rc) { 2645 se_cmd->cmd_wait_set = 1; 2646 spin_lock(&se_cmd->t_state_lock); 2647 se_cmd->transport_state |= CMD_T_FABRIC_STOP; 2648 spin_unlock(&se_cmd->t_state_lock); 2649 } 2650 } 2651 2652 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2653 } 2654 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); 2655 2656 /* target_wait_for_sess_cmds - Wait for outstanding descriptors 2657 * @se_sess: session to wait for active I/O 2658 */ 2659 void target_wait_for_sess_cmds(struct se_session *se_sess) 2660 { 2661 struct se_cmd *se_cmd, *tmp_cmd; 2662 unsigned long flags; 2663 bool tas; 2664 2665 list_for_each_entry_safe(se_cmd, tmp_cmd, 2666 &se_sess->sess_wait_list, se_cmd_list) { 2667 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" 2668 " %d\n", se_cmd, se_cmd->t_state, 2669 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2670 2671 spin_lock_irqsave(&se_cmd->t_state_lock, flags); 2672 tas = (se_cmd->transport_state & CMD_T_TAS); 2673 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 2674 2675 if (!target_put_sess_cmd(se_cmd)) { 2676 if (tas) 2677 target_put_sess_cmd(se_cmd); 2678 } 2679 2680 wait_for_completion(&se_cmd->cmd_wait_comp); 2681 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" 2682 " fabric state: %d\n", se_cmd, se_cmd->t_state, 2683 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2684 2685 se_cmd->se_tfo->release_cmd(se_cmd); 2686 } 2687 2688 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2689 WARN_ON(!list_empty(&se_sess->sess_cmd_list)); 2690 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2691 2692 } 2693 EXPORT_SYMBOL(target_wait_for_sess_cmds); 2694 2695 void transport_clear_lun_ref(struct se_lun *lun) 2696 { 2697 percpu_ref_kill(&lun->lun_ref); 2698 wait_for_completion(&lun->lun_ref_comp); 2699 } 2700 2701 static bool 2702 __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, 2703 bool *aborted, bool *tas, unsigned long *flags) 2704 __releases(&cmd->t_state_lock) 2705 __acquires(&cmd->t_state_lock) 2706 { 2707 2708 assert_spin_locked(&cmd->t_state_lock); 2709 WARN_ON_ONCE(!irqs_disabled()); 2710 2711 if (fabric_stop) 2712 cmd->transport_state |= CMD_T_FABRIC_STOP; 2713 2714 if (cmd->transport_state & CMD_T_ABORTED) 2715 *aborted = true; 2716 2717 if (cmd->transport_state & CMD_T_TAS) 2718 *tas = true; 2719 2720 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 2721 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2722 return false; 2723 2724 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 2725 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2726 return false; 2727 2728 if (!(cmd->transport_state & CMD_T_ACTIVE)) 2729 return false; 2730 2731 if (fabric_stop && *aborted) 2732 return false; 2733 2734 cmd->transport_state |= CMD_T_STOP; 2735 2736 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d," 2737 " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag, 2738 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); 2739 2740 
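/* Drop t_state_lock with the caller's saved flags while waiting for t_transport_stop_comp, then retake it before clearing CMD_T_ACTIVE and CMD_T_STOP. */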
spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 2741 2742 wait_for_completion(&cmd->t_transport_stop_comp); 2743 2744 spin_lock_irqsave(&cmd->t_state_lock, *flags); 2745 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 2746 2747 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->" 2748 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); 2749 2750 return true; 2751 } 2752 2753 /** 2754 * transport_wait_for_tasks - wait for completion to occur 2755 * @cmd: command to wait 2756 * 2757 * Called from frontend fabric context to wait for storage engine 2758 * to pause and/or release frontend generated struct se_cmd. 2759 */ 2760 bool transport_wait_for_tasks(struct se_cmd *cmd) 2761 { 2762 unsigned long flags; 2763 bool ret, aborted = false, tas = false; 2764 2765 spin_lock_irqsave(&cmd->t_state_lock, flags); 2766 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); 2767 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2768 2769 return ret; 2770 } 2771 EXPORT_SYMBOL(transport_wait_for_tasks); 2772 2773 struct sense_info { 2774 u8 key; 2775 u8 asc; 2776 u8 ascq; 2777 bool add_sector_info; 2778 }; 2779 2780 static const struct sense_info sense_info_table[] = { 2781 [TCM_NO_SENSE] = { 2782 .key = NOT_READY 2783 }, 2784 [TCM_NON_EXISTENT_LUN] = { 2785 .key = ILLEGAL_REQUEST, 2786 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */ 2787 }, 2788 [TCM_UNSUPPORTED_SCSI_OPCODE] = { 2789 .key = ILLEGAL_REQUEST, 2790 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 2791 }, 2792 [TCM_SECTOR_COUNT_TOO_MANY] = { 2793 .key = ILLEGAL_REQUEST, 2794 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 2795 }, 2796 [TCM_UNKNOWN_MODE_PAGE] = { 2797 .key = ILLEGAL_REQUEST, 2798 .asc = 0x24, /* INVALID FIELD IN CDB */ 2799 }, 2800 [TCM_CHECK_CONDITION_ABORT_CMD] = { 2801 .key = ABORTED_COMMAND, 2802 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */ 2803 .ascq = 0x03, 2804 }, 2805 [TCM_INCORRECT_AMOUNT_OF_DATA] = { 2806 .key = ABORTED_COMMAND, 2807 .asc = 0x0c, /* WRITE ERROR */ 2808 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */ 2809 }, 2810 [TCM_INVALID_CDB_FIELD] = { 2811 .key = ILLEGAL_REQUEST, 2812 .asc = 0x24, /* INVALID FIELD IN CDB */ 2813 }, 2814 [TCM_INVALID_PARAMETER_LIST] = { 2815 .key = ILLEGAL_REQUEST, 2816 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ 2817 }, 2818 [TCM_PARAMETER_LIST_LENGTH_ERROR] = { 2819 .key = ILLEGAL_REQUEST, 2820 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ 2821 }, 2822 [TCM_UNEXPECTED_UNSOLICITED_DATA] = { 2823 .key = ILLEGAL_REQUEST, 2824 .asc = 0x0c, /* WRITE ERROR */ 2825 .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */ 2826 }, 2827 [TCM_SERVICE_CRC_ERROR] = { 2828 .key = ABORTED_COMMAND, 2829 .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */ 2830 .ascq = 0x05, /* N/A */ 2831 }, 2832 [TCM_SNACK_REJECTED] = { 2833 .key = ABORTED_COMMAND, 2834 .asc = 0x11, /* READ ERROR */ 2835 .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */ 2836 }, 2837 [TCM_WRITE_PROTECTED] = { 2838 .key = DATA_PROTECT, 2839 .asc = 0x27, /* WRITE PROTECTED */ 2840 }, 2841 [TCM_ADDRESS_OUT_OF_RANGE] = { 2842 .key = ILLEGAL_REQUEST, 2843 .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ 2844 }, 2845 [TCM_CHECK_CONDITION_UNIT_ATTENTION] = { 2846 .key = UNIT_ATTENTION, 2847 }, 2848 [TCM_CHECK_CONDITION_NOT_READY] = { 2849 .key = NOT_READY, 2850 }, 2851 [TCM_MISCOMPARE_VERIFY] = { 2852 .key = MISCOMPARE, 2853 .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */ 2854 .ascq = 0x00, 2855 }, 2856 [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = { 2857 .key = ABORTED_COMMAND, 2858 
.asc = 0x10, 2859 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */ 2860 .add_sector_info = true, 2861 }, 2862 [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = { 2863 .key = ABORTED_COMMAND, 2864 .asc = 0x10, 2865 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ 2866 .add_sector_info = true, 2867 }, 2868 [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = { 2869 .key = ABORTED_COMMAND, 2870 .asc = 0x10, 2871 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ 2872 .add_sector_info = true, 2873 }, 2874 [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = { 2875 /* 2876 * Returning ILLEGAL REQUEST would cause immediate IO errors on 2877 * Solaris initiators. Returning NOT READY instead means the 2878 * operations will be retried a finite number of times and we 2879 * can survive intermittent errors. 2880 */ 2881 .key = NOT_READY, 2882 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */ 2883 }, 2884 }; 2885 2886 static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) 2887 { 2888 const struct sense_info *si; 2889 u8 *buffer = cmd->sense_buffer; 2890 int r = (__force int)reason; 2891 u8 asc, ascq; 2892 bool desc_format = target_sense_desc_format(cmd->se_dev); 2893 2894 if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key) 2895 si = &sense_info_table[r]; 2896 else 2897 si = &sense_info_table[(__force int) 2898 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE]; 2899 2900 if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) { 2901 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); 2902 WARN_ON_ONCE(asc == 0); 2903 } else if (si->asc == 0) { 2904 WARN_ON_ONCE(cmd->scsi_asc == 0); 2905 asc = cmd->scsi_asc; 2906 ascq = cmd->scsi_ascq; 2907 } else { 2908 asc = si->asc; 2909 ascq = si->ascq; 2910 } 2911 2912 scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq); 2913 if (si->add_sector_info) 2914 return scsi_set_sense_information(buffer, 2915 cmd->scsi_sense_length, 2916 cmd->bad_sector); 2917 2918 return 0; 2919 } 2920 2921 int 2922 transport_send_check_condition_and_sense(struct se_cmd *cmd, 2923 sense_reason_t reason, int from_transport) 2924 { 2925 unsigned long flags; 2926 2927 spin_lock_irqsave(&cmd->t_state_lock, flags); 2928 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 2929 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2930 return 0; 2931 } 2932 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 2933 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2934 2935 if (!from_transport) { 2936 int rc; 2937 2938 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 2939 cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 2940 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 2941 rc = translate_sense_reason(cmd, reason); 2942 if (rc) 2943 return rc; 2944 } 2945 2946 trace_target_cmd_complete(cmd); 2947 return cmd->se_tfo->queue_status(cmd); 2948 } 2949 EXPORT_SYMBOL(transport_send_check_condition_and_sense); 2950 2951 static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status) 2952 __releases(&cmd->t_state_lock) 2953 __acquires(&cmd->t_state_lock) 2954 { 2955 assert_spin_locked(&cmd->t_state_lock); 2956 WARN_ON_ONCE(!irqs_disabled()); 2957 2958 if (!(cmd->transport_state & CMD_T_ABORTED)) 2959 return 0; 2960 /* 2961 * If cmd has been aborted but either no status is to be sent or it has 2962 * already been sent, just return 2963 */ 2964 if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) { 2965 if (send_status) 2966 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; 2967 return 1; 2968 } 2969 2970 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED 
status for CDB:" 2971 " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag); 2972 2973 cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; 2974 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 2975 trace_target_cmd_complete(cmd); 2976 2977 spin_unlock_irq(&cmd->t_state_lock); 2978 cmd->se_tfo->queue_status(cmd); 2979 spin_lock_irq(&cmd->t_state_lock); 2980 2981 return 1; 2982 } 2983 2984 int transport_check_aborted_status(struct se_cmd *cmd, int send_status) 2985 { 2986 int ret; 2987 2988 spin_lock_irq(&cmd->t_state_lock); 2989 ret = __transport_check_aborted_status(cmd, send_status); 2990 spin_unlock_irq(&cmd->t_state_lock); 2991 2992 return ret; 2993 } 2994 EXPORT_SYMBOL(transport_check_aborted_status); 2995 2996 void transport_send_task_abort(struct se_cmd *cmd) 2997 { 2998 unsigned long flags; 2999 3000 spin_lock_irqsave(&cmd->t_state_lock, flags); 3001 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) { 3002 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3003 return; 3004 } 3005 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3006 3007 /* 3008 * If there are still expected incoming fabric WRITEs, we wait 3009 * until they have completed before sending a TASK_ABORTED 3010 * response. This response with TASK_ABORTED status will be 3011 * queued back to fabric module by transport_check_aborted_status(). 3012 */ 3013 if (cmd->data_direction == DMA_TO_DEVICE) { 3014 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 3015 spin_lock_irqsave(&cmd->t_state_lock, flags); 3016 if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) { 3017 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3018 goto send_abort; 3019 } 3020 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; 3021 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3022 return; 3023 } 3024 } 3025 send_abort: 3026 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 3027 3028 transport_lun_remove_cmd(cmd); 3029 3030 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n", 3031 cmd->t_task_cdb[0], cmd->tag); 3032 3033 trace_target_cmd_complete(cmd); 3034 cmd->se_tfo->queue_status(cmd); 3035 } 3036 3037 static void target_tmr_work(struct work_struct *work) 3038 { 3039 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 3040 struct se_device *dev = cmd->se_dev; 3041 struct se_tmr_req *tmr = cmd->se_tmr_req; 3042 unsigned long flags; 3043 int ret; 3044 3045 spin_lock_irqsave(&cmd->t_state_lock, flags); 3046 if (cmd->transport_state & CMD_T_ABORTED) { 3047 tmr->response = TMR_FUNCTION_REJECTED; 3048 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3049 goto check_stop; 3050 } 3051 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3052 3053 switch (tmr->function) { 3054 case TMR_ABORT_TASK: 3055 core_tmr_abort_task(dev, tmr, cmd->se_sess); 3056 break; 3057 case TMR_ABORT_TASK_SET: 3058 case TMR_CLEAR_ACA: 3059 case TMR_CLEAR_TASK_SET: 3060 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; 3061 break; 3062 case TMR_LUN_RESET: 3063 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); 3064 tmr->response = (!ret) ?
TMR_FUNCTION_COMPLETE : 3065 TMR_FUNCTION_REJECTED; 3066 if (tmr->response == TMR_FUNCTION_COMPLETE) { 3067 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 3068 cmd->orig_fe_lun, 0x29, 3069 ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED); 3070 } 3071 break; 3072 case TMR_TARGET_WARM_RESET: 3073 tmr->response = TMR_FUNCTION_REJECTED; 3074 break; 3075 case TMR_TARGET_COLD_RESET: 3076 tmr->response = TMR_FUNCTION_REJECTED; 3077 break; 3078 default: 3079 pr_err("Unknown TMR function: 0x%02x.\n", 3080 tmr->function); 3081 tmr->response = TMR_FUNCTION_REJECTED; 3082 break; 3083 } 3084 3085 spin_lock_irqsave(&cmd->t_state_lock, flags); 3086 if (cmd->transport_state & CMD_T_ABORTED) { 3087 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3088 goto check_stop; 3089 } 3090 cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 3091 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3092 3093 cmd->se_tfo->queue_tm_rsp(cmd); 3094 3095 check_stop: 3096 transport_cmd_check_stop_to_fabric(cmd); 3097 } 3098 3099 int transport_generic_handle_tmr( 3100 struct se_cmd *cmd) 3101 { 3102 unsigned long flags; 3103 3104 spin_lock_irqsave(&cmd->t_state_lock, flags); 3105 cmd->transport_state |= CMD_T_ACTIVE; 3106 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3107 3108 INIT_WORK(&cmd->work, target_tmr_work); 3109 queue_work(cmd->se_dev->tmr_wq, &cmd->work); 3110 return 0; 3111 } 3112 EXPORT_SYMBOL(transport_generic_handle_tmr); 3113 3114 bool 3115 target_check_wce(struct se_device *dev) 3116 { 3117 bool wce = false; 3118 3119 if (dev->transport->get_write_cache) 3120 wce = dev->transport->get_write_cache(dev); 3121 else if (dev->dev_attrib.emulate_write_cache > 0) 3122 wce = true; 3123 3124 return wce; 3125 } 3126 3127 bool 3128 target_check_fua(struct se_device *dev) 3129 { 3130 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0; 3131 } 3132