// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename: target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct workqueue_struct *target_submission_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev, int err, bool write_pending);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_"
				"cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
				"cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	target_submission_wq = alloc_workqueue("target_submission",
					       WQ_MEM_RECLAIM, 0);
	if (!target_submission_wq)
		goto out_free_completion_wq;

	return 0;

out_free_completion_wq:
	destroy_workqueue(target_completion_wq);
out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_submission_wq);
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}

void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}

static void target_release_cmd_refcnt(struct percpu_ref *ref)
{
	struct target_cmd_counter *cmd_cnt = container_of(ref,
							  typeof(*cmd_cnt),
							  refcnt);
	wake_up(&cmd_cnt->refcnt_wq);
}

struct target_cmd_counter *target_alloc_cmd_counter(void)
{
	struct target_cmd_counter *cmd_cnt;
	int rc;

	cmd_cnt = kzalloc(sizeof(*cmd_cnt), GFP_KERNEL);
	if (!cmd_cnt)
		return NULL;

	init_completion(&cmd_cnt->stop_done);
	init_waitqueue_head(&cmd_cnt->refcnt_wq);
	atomic_set(&cmd_cnt->stopped, 0);

	rc = percpu_ref_init(&cmd_cnt->refcnt, target_release_cmd_refcnt, 0,
			     GFP_KERNEL);
	if (rc)
		goto free_cmd_cnt;

	return cmd_cnt;

free_cmd_cnt:
	kfree(cmd_cnt);
	return NULL;
}
EXPORT_SYMBOL_GPL(target_alloc_cmd_counter);

void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt)
{
	/*
	 * Drivers like loop do not call target_stop_session during session
	 * shutdown so we have to drop the ref taken at init time here.
	 */
	if (!atomic_read(&cmd_cnt->stopped))
		percpu_ref_put(&cmd_cnt->refcnt);

	percpu_ref_exit(&cmd_cnt->refcnt);
}
EXPORT_SYMBOL_GPL(target_free_cmd_counter);

/**
 * transport_init_session - initialize a session object
 * @se_sess: Session object pointer.
 *
 * The caller must have zero-initialized @se_sess before calling this function.
 */
void transport_init_session(struct se_session *se_sess)
{
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
}
EXPORT_SYMBOL(transport_init_session);

/**
 * transport_alloc_session - allocate a session object and initialize it
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	transport_init_session(se_sess);
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_alloc_session);

/**
 * transport_alloc_session_tags - allocate target driver private data
 * @se_sess:  Session pointer.
 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	      each command.
 */
int transport_alloc_session_tags(struct se_session *se_sess,
				 unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
					 GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!se_sess->sess_cmd_map) {
		pr_err("Unable to allocate se_sess->sess_cmd_map\n");
		return -ENOMEM;
	}

	rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
				     false, GFP_KERNEL, NUMA_NO_NODE);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool,"
			" tag_num: %u\n", tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);

/**
 * transport_init_session_tags - allocate a session and target driver private data
 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	      each command.
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
static struct se_session *
transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
			    enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	if (tag_num != 0 && !tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_num:"
		       " %u, but zero tag_size\n", tag_num);
		return ERR_PTR(-EINVAL);
	}
	if (!tag_num && tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_size:"
		       " %u, but zero tag_num\n", tag_size);
		return ERR_PTR(-EINVAL);
	}

	se_sess = transport_alloc_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}
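
/*
 * Illustration (not part of the upstream file): a fabric driver that uses
 * transport_alloc_session_tags() above typically sizes tag_num to its queue
 * depth and tag_size to its own per-command descriptor, then borrows a tag
 * from se_sess->sess_tag_pool for each incoming command. A minimal sketch,
 * where struct my_fabric_cmd stands in for a hypothetical fabric descriptor
 * and the exact bookkeeping is up to the driver:
 *
 *	struct my_fabric_cmd *cmd;
 *	unsigned int cpu;
 *	int tag;
 *
 *	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
 *	if (tag < 0)
 *		return NULL;	// pool exhausted, back off and retry later
 *	cmd = &((struct my_fabric_cmd *)se_sess->sess_cmd_map)[tag];
 *	cmd->se_cmd.map_tag = tag;
 *	cmd->se_cmd.map_cpu = cpu;
 *	...
 *	// when the command is released:
 *	sbitmap_queue_clear(&se_sess->sess_tag_pool, tag, cpu);
 */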

/*
 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];
	unsigned long flags;

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active se_session
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 *
		 * Determine if fabric allows for T10-PI feature bits exposed to
		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl node
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}

		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

struct se_session *
target_setup_session(struct se_portal_group *tpg,
		     unsigned int tag_num, unsigned int tag_size,
		     enum target_prot_op prot_op,
		     const char *initiatorname, void *private,
		     int (*callback)(struct se_portal_group *,
				     struct se_session *, void *))
{
	struct target_cmd_counter *cmd_cnt;
	struct se_session *sess;
	int rc;

	cmd_cnt = target_alloc_cmd_counter();
	if (!cmd_cnt)
		return ERR_PTR(-ENOMEM);
	/*
	 * If the fabric driver is using percpu-ida based pre allocation
	 * of I/O descriptor tags, go ahead and perform that setup now..
	 */
	if (tag_num != 0)
		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
	else
		sess = transport_alloc_session(prot_op);

	if (IS_ERR(sess)) {
		rc = PTR_ERR(sess);
		goto free_cnt;
	}
	sess->cmd_cnt = cmd_cnt;

	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
					(unsigned char *)initiatorname);
	if (!sess->se_node_acl) {
		rc = -EACCES;
		goto free_sess;
	}
	/*
	 * Go ahead and perform any remaining fabric setup that is
	 * required before transport_register_session().
	 */
	if (callback != NULL) {
		rc = callback(tpg, sess, private);
		if (rc)
			goto free_sess;
	}

	transport_register_session(tpg, sess->se_node_acl, sess, private);
	return sess;

free_sess:
	transport_free_session(sess);
free_cnt:
	target_free_cmd_counter(cmd_cnt);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL(target_setup_session);

ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
	struct se_session *se_sess;
	ssize_t len = 0;

	spin_lock_bh(&se_tpg->session_lock);
	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
		if (!se_sess->se_node_acl)
			continue;
		if (!se_sess->se_node_acl->dynamic_node_acl)
			continue;
		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
			break;

		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
				se_sess->se_node_acl->initiatorname);
		len += 1; /* Include NULL terminator */
	}
	spin_unlock_bh(&se_tpg->session_lock);

	return len;
}
EXPORT_SYMBOL(target_show_dynamic_sessions);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);
	struct se_portal_group *se_tpg = nacl->se_tpg;

	if (!nacl->dynamic_stop) {
		complete(&nacl->acl_free_comp);
		return;
	}

	mutex_lock(&se_tpg->acl_node_mutex);
	list_del_init(&nacl->acl_list);
	mutex_unlock(&se_tpg->acl_node_mutex);

	core_tpg_wait_for_nacl_pr_ref(nacl);
	core_free_device_list_for_node(nacl, se_tpg);
	kfree(nacl);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}
EXPORT_SYMBOL(target_put_nacl);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (!list_empty(&se_sess->sess_acl_list))
			list_del_init(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl = se_sess->se_node_acl;

	/*
	 * Drop the se_node_acl->nacl_kref obtained from within
	 * core_tpg_get_initiator_node_acl().
	 */
	if (se_nacl) {
		struct se_portal_group *se_tpg = se_nacl->se_tpg;
		const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
		unsigned long flags;

		se_sess->se_node_acl = NULL;

		/*
		 * Also determine if we need to drop the extra ->cmd_kref if
		 * it had been previously dynamically generated, and
		 * the endpoint is not caching dynamic ACLs.
		 */
		mutex_lock(&se_tpg->acl_node_mutex);
		if (se_nacl->dynamic_node_acl &&
		    !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
			if (list_empty(&se_nacl->acl_sess_list))
				se_nacl->dynamic_stop = true;
			spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);

			if (se_nacl->dynamic_stop)
				list_del_init(&se_nacl->acl_list);
		}
		mutex_unlock(&se_tpg->acl_node_mutex);

		if (se_nacl->dynamic_stop)
			target_put_nacl(se_nacl);

		target_put_nacl(se_nacl);
	}
	if (se_sess->sess_cmd_map) {
		sbitmap_queue_free(&se_sess->sess_tag_pool);
		kvfree(se_sess->sess_cmd_map);
	}
	if (se_sess->cmd_cnt)
		target_free_cmd_counter(se_sess->cmd_cnt);
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

static int target_release_res(struct se_device *dev, void *data)
{
	struct se_session *sess = data;

	if (dev->reservation_holder == sess)
		target_release_reservation(dev);
	return 0;
}

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	unsigned long flags;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Since the session is being removed, release SPC-2
	 * reservations held by the session that is disappearing.
	 */
	target_for_each_device(target_release_res, se_sess);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->fabric_name);
	/*
	 * If last kref is dropping now for an explicit NodeACL, awake sleeping
	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
	 * removal context from within transport_free_session() code.
	 *
	 * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
	 * to release all remaining generate_node_acl=1 created ACL resources.
	 */

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

void target_remove_session(struct se_session *se_sess)
{
	transport_deregister_session_configfs(se_sess);
	transport_deregister_session(se_sess);
}
EXPORT_SYMBOL(target_remove_session);
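
/*
 * Illustration (not part of the upstream file): the usual fabric driver
 * pairing for the session API above is target_setup_session() at I_T nexus
 * creation and target_remove_session() at logout or shutdown. A minimal
 * sketch, where struct my_fabric_cmd, initiator_name and my_private are
 * hypothetical caller-side names:
 *
 *	sess = target_setup_session(tpg, 128, sizeof(struct my_fabric_cmd),
 *				    TARGET_PROT_NORMAL, initiator_name,
 *				    my_private, NULL);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 *	...
 *	// on logout:
 *	target_remove_session(sess);
 */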

static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
}

static void target_remove_from_tmr_list(struct se_cmd *cmd)
{
	struct se_device *dev = NULL;
	unsigned long flags;

	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		dev = cmd->se_tmr_req->tmr_dev;

	if (dev) {
		spin_lock_irqsave(&dev->se_tmr_lock, flags);
		if (cmd->se_tmr_req->tmr_dev)
			list_del_init(&cmd->se_tmr_req->tmr_list);
		spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
	}
}
/*
 * This function is called by the target core after the target core has
 * finished processing a SCSI command or SCSI TMF. Both the regular command
 * processing code and the code for aborting commands can call this
 * function. CMD_T_STOP is set if and only if another thread is waiting
 * inside transport_wait_for_tasks() for t_transport_stop_comp.
 */
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			__func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 1;
	}
	cmd->transport_state &= ~CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Some fabric modules like tcm_loop can release their internally
	 * allocated I/O reference and struct se_cmd now.
	 *
	 * Fabric modules are expected to return '1' here if the se_cmd being
	 * passed is released at this point, or zero if not being released.
	 */
	return cmd->se_tfo->check_stop_free(cmd);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;

	if (!lun)
		return;

	target_remove_from_state_list(cmd);
	target_remove_from_tmr_list(cmd);

	if (cmpxchg(&cmd->lun_ref_active, true, false))
		percpu_ref_put(&lun->lun_ref);

	/*
	 * Clear struct se_cmd->se_lun before the handoff to FE.
	 */
	cmd->se_lun = NULL;
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd, cmd->sense_reason);
}

/*
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd
 */
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return NULL;

	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;

	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return cmd->sense_buffer;
}

void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
{
	unsigned char *cmd_sense_buf;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd_sense_buf = transport_get_sense_buffer(cmd);
	if (!cmd_sense_buf) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
	memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
EXPORT_SYMBOL(transport_copy_sense_to_cmd);

static void target_handle_abort(struct se_cmd *cmd)
{
	bool tas = cmd->transport_state & CMD_T_TAS;
	bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
	int ret;

	pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);

	if (tas) {
		if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
			pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
				 cmd->t_task_cdb[0], cmd->tag);
			trace_target_cmd_complete(cmd);
			ret = cmd->se_tfo->queue_status(cmd);
			if (ret) {
				transport_handle_queue_full(cmd, cmd->se_dev,
							    ret, false);
				return;
			}
		} else {
			cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
			cmd->se_tfo->queue_tm_rsp(cmd);
		}
	} else {
		/*
		 * Allow the fabric driver to unmap any resources before
		 * releasing the descriptor via TFO->release_cmd().
		 */
		cmd->se_tfo->aborted_task(cmd);
		if (ack_kref)
			WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
		/*
		 * To do: establish a unit attention condition on the I_T
		 * nexus associated with cmd. See also the paragraph "Aborting
		 * commands" in SAM.
		 */
	}

	WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);

	transport_lun_remove_cmd(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
}

static void target_abort_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	target_handle_abort(cmd);
}

static bool target_cmd_interrupted(struct se_cmd *cmd)
{
	int post_ret;

	if (cmd->transport_state & CMD_T_ABORTED) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		INIT_WORK(&cmd->work, target_abort_work);
		queue_work(target_completion_wq, &cmd->work);
		return true;
	} else if (cmd->transport_state & CMD_T_STOP) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		complete_all(&cmd->t_transport_stop_comp);
		return true;
	}

	return false;
}

/* May be called from interrupt context so must not sleep. */
void target_complete_cmd_with_sense(struct se_cmd *cmd, u8 scsi_status,
				    sense_reason_t sense_reason)
{
	struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn;
	int success, cpu;
	unsigned long flags;

	if (target_cmd_interrupted(cmd))
		return;

	cmd->scsi_status = scsi_status;
	cmd->sense_reason = sense_reason;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	switch (cmd->scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
			success = 1;
		else
			success = 0;
		break;
	default:
		success = 1;
		break;
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	INIT_WORK(&cmd->work, success ? target_complete_ok_work :
		  target_complete_failure_work);

	if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
		cpu = cmd->cpuid;
	else
		cpu = wwn->cmd_compl_affinity;

	queue_work_on(cpu, target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd_with_sense);

void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	target_complete_cmd_with_sense(cmd, scsi_status, scsi_status ?
			      TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE :
			      TCM_NO_SENSE);
}
EXPORT_SYMBOL(target_complete_cmd);

void target_set_cmd_data_length(struct se_cmd *cmd, int length)
{
	if (length < cmd->data_length) {
		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = cmd->data_length - length;
		}

		cmd->data_length = length;
	}
}
EXPORT_SYMBOL(target_set_cmd_data_length);

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
	if (scsi_status == SAM_STAT_GOOD ||
	    cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) {
		target_set_cmd_data_length(cmd, length);
	}

	target_complete_cmd(cmd, scsi_status);
}
EXPORT_SYMBOL(target_complete_cmd_with_length);

static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list,
			      &dev->queues[cmd->cpuid].state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);

void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec_mb(&dev->dev_qf_count);

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->fabric_name, cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
			transport_write_pending_qf(cmd);
		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
			 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
			transport_complete_qf(cmd);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	if (dev->export_count)
		*bl += sprintf(b + *bl, "ACTIVATED");
	else
		*bl += sprintf(b + *bl, "DEACTIVATED");

	*bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
	*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
		dev->dev_attrib.block_size,
		dev->dev_attrib.hw_max_sectors);
	*bl += sprintf(b + *bl, " ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);
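
/*
 * Illustration (not part of the upstream file): the transport_set_vpd_*()
 * helpers in this block decode the header bytes of an INQUIRY VPD page 0x83
 * designation descriptor. For example, a descriptor beginning with
 * page_83[0] = 0x61 and page_83[1] = 0x93 decodes as protocol identifier
 * 0x60 (SAS, upper nibble of byte 0, taken because the PIV bit 0x80 in
 * byte 1 is set), code set 0x01 (binary, lower nibble of byte 0),
 * association 0x10 (target port, bits 5:4 of byte 1) and designator type
 * 0x03 (NAA, lower nibble of byte 1).
 */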

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		snprintf(buf, sizeof(buf),
			"T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		snprintf(buf, sizeof(buf),
			"T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		snprintf(buf, sizeof(buf),
			"T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
					hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
					hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);

static sense_reason_t
target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
			       unsigned int size)
{
	u32 mtl;

	if (!cmd->se_tfo->max_data_sg_nents)
		return TCM_NO_SENSE;
	/*
	 * Check if fabric enforced maximum SGL entries per I/O descriptor
	 * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT +
	 * residual_count and reduce original cmd->data_length to maximum
	 * length based on single PAGE_SIZE entry scatter-lists.
	 */
	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
	if (cmd->data_length > mtl) {
		/*
		 * If an existing CDB overflow is present, calculate new residual
		 * based on CDB size minus fabric maximum transfer length.
		 *
		 * If an existing CDB underflow is present, calculate new residual
		 * based on original cmd->data_length minus fabric maximum transfer
		 * length.
		 *
		 * Otherwise, set the underflow residual based on cmd->data_length
		 * minus fabric maximum transfer length.
		 */
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			cmd->residual_count = (size - mtl);
		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			u32 orig_dl = size + cmd->residual_count;
			cmd->residual_count = (orig_dl - mtl);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - mtl);
		}
		cmd->data_length = mtl;
		/*
		 * Reset sbc_check_prot() calculated protection payload
		 * length based upon the new smaller MTL.
		 */
		if (cmd->prot_length) {
			u32 sectors = (mtl / dev->dev_attrib.block_size);
			cmd->prot_length = dev->prot_length * sectors;
		}
	}
	return TCM_NO_SENSE;
}

/**
 * target_cmd_size_check - Check whether there will be a residual.
 * @cmd: SCSI command.
 * @size: Data buffer size derived from CDB. The data buffer size provided by
 *   the SCSI transport driver is available in @cmd->data_length.
 *
 * Compare the data buffer size from the CDB with the data buffer limit from the transport
 * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
 *
 * Note: target drivers set @cmd->data_length by calling __target_init_cmd().
 *
 * Return: TCM_NO_SENSE
 */
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->fabric_name,
			cmd->data_length, size, cmd->t_task_cdb[0]);
		/*
		 * For READ command for the overflow case keep the existing
		 * fabric provided ->data_length. Otherwise for the underflow
		 * case, reset ->data_length to the smaller SCSI expected data
		 * transfer length.
		 */
		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
			/*
			 * Do not truncate ->data_length for WRITE command to
			 * dump all payload
			 */
			if (cmd->data_direction == DMA_FROM_DEVICE) {
				cmd->data_length = size;
			}
		}

		if (cmd->data_direction == DMA_TO_DEVICE) {
			if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
				pr_err_ratelimited("Rejecting underflow/overflow"
						   " for WRITE data CDB\n");
				return TCM_INVALID_FIELD_IN_COMMAND_IU;
			}
			/*
			 * Some fabric drivers like iscsi-target still expect to
			 * always reject overflow writes. Reject this case until
			 * full fabric driver level support for overflow writes
			 * is introduced tree-wide.
			 */
			if (size > cmd->data_length) {
				pr_err_ratelimited("Rejecting overflow for"
						   " WRITE control CDB\n");
				return TCM_INVALID_CDB_FIELD;
			}
		}
	}

	return target_check_max_data_sg_nents(cmd, dev, size);

}
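
/*
 * Illustration (not part of the upstream file): a concrete example of the
 * residual handling in target_cmd_size_check() above. If the CDB-derived
 * allocation length is 8192 bytes but the fabric only announced a 4096 byte
 * data buffer, SCF_OVERFLOW_BIT is set, residual_count becomes 4096 and
 * ->data_length stays at 4096. In the opposite case (CDB asks for 2048,
 * fabric provides 4096) SCF_UNDERFLOW_BIT is set, residual_count becomes
 * 2048 and, for a READ (DMA_FROM_DEVICE), ->data_length is trimmed down to
 * 2048.
 */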

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 *
 * Preserves the value of @cmd->tag.
 */
void __target_init_cmd(struct se_cmd *cmd,
		       const struct target_core_fabric_ops *tfo,
		       struct se_session *se_sess, u32 data_length,
		       int data_direction, int task_attr,
		       unsigned char *sense_buffer, u64 unpacked_lun,
		       struct target_cmd_counter *cmd_cnt)
{
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->t_transport_stop_comp);
	cmd->free_compl = NULL;
	cmd->abrt_compl = NULL;
	spin_lock_init(&cmd->t_state_lock);
	INIT_WORK(&cmd->work, NULL);
	kref_init(&cmd->cmd_kref);

	cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;
	cmd->orig_fe_lun = unpacked_lun;
	cmd->cmd_cnt = cmd_cnt;

	if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
		cmd->cpuid = raw_smp_processor_id();

	cmd->state_active = false;
}
EXPORT_SYMBOL(__target_init_cmd);

static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	if (cmd->sam_task_attr == TCM_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	return 0;
}

sense_reason_t
target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp)
{
	sense_reason_t ret;

	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		ret = TCM_INVALID_CDB_FIELD;
		goto err;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now.. Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			ret = TCM_OUT_OF_RESOURCES;
			goto err;
		}
	}
	/*
	 * Copy the original CDB into cmd->
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	trace_target_sequencer_start(cmd);
	return 0;

err:
	/*
	 * Copy the CDB here to allow trace_target_cmd_complete() to
	 * print the cdb to the trace buffers.
	 */
	memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb),
					 (unsigned int)TCM_MAX_COMMAND_SIZE));
	return ret;
}
EXPORT_SYMBOL(target_cmd_init_cdb);

sense_reason_t
target_cmd_parse_cdb(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;

	ret = dev->transport->parse_cdb(cmd);
	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
		pr_debug_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
				     cmd->se_tfo->fabric_name,
				     cmd->se_sess->se_node_acl->initiatorname,
				     cmd->t_task_cdb[0]);
	if (ret)
		return ret;

	ret = transport_check_alloc_task_attr(cmd);
	if (ret)
		return ret;

	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
	return 0;
}
EXPORT_SYMBOL(target_cmd_parse_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	sense_reason_t ret;

	might_sleep();

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}

	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
	 * outstanding descriptors are handled correctly during shutdown via
	 * transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret)
		transport_generic_request_failure(cmd, ret);
	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using"
			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		return TCM_INVALID_CDB_FIELD;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;
	cmd->t_bidi_data_sg = sgl_bidi;
	cmd->t_bidi_data_nents = sgl_bidi_count;

	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}
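
/*
 * Illustration (not part of the upstream file): the split submission
 * interface below is normally driven as a three step sequence from a fabric
 * driver's process-context receive path. A minimal sketch, error handling
 * condensed and with se_cmd, se_sess, cdb, sense, unpacked_lun and data_len
 * being caller-supplied placeholders:
 *
 *	if (target_init_cmd(se_cmd, se_sess, sense, unpacked_lun, data_len,
 *			    TCM_SIMPLE_TAG, DMA_FROM_DEVICE,
 *			    TARGET_SCF_ACK_KREF))
 *		return;		// session is being shut down
 *	if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
 *			       GFP_KERNEL))
 *		return;		// status has already been queued for us
 *	target_submit(se_cmd);	// or target_queue_submission(se_cmd)
 */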

/**
 * target_init_cmd - initialize se_cmd
 * @se_cmd: command descriptor to init
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns:
 *	- less than zero to signal active I/O shutdown failure.
 *	- zero on success.
 *
 * If the fabric driver calls target_stop_session, then it must check the
 * return code and handle failures. This will never fail for other drivers,
 * and the return code can be ignored.
 */
int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		    unsigned char *sense, u64 unpacked_lun,
		    u32 data_length, int task_attr, int data_dir, int flags)
{
	struct se_portal_group *se_tpg;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);

	if (flags & TARGET_SCF_USE_CPUID)
		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;

	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Initialize se_cmd for target operation. From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	__target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length,
			  data_dir, task_attr, sense, unpacked_lun,
			  se_sess->cmd_cnt);

	/*
	 * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
	 * necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	return target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
}
EXPORT_SYMBOL_GPL(target_init_cmd);

/**
 * target_submit_prep - prepare cmd for submission
 * @se_cmd: command descriptor to prep
 * @cdb: pointer to SCSI CDB
 * @sgl: struct scatterlist memory for unidirectional mapping
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
 * @sgl_prot: struct scatterlist memory protection information
 * @sgl_prot_count: scatterlist count for protection information
 * @gfp: gfp allocation type
 *
 * Returns:
 *	- less than zero to signal failure.
 *	- zero on success.
 *
 * If failure is returned, LIO will use the caller's queue_status to complete
 * the cmd.
 */
int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb,
		       struct scatterlist *sgl, u32 sgl_count,
		       struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
		       struct scatterlist *sgl_prot, u32 sgl_prot_count,
		       gfp_t gfp)
{
	sense_reason_t rc;

	rc = target_cmd_init_cdb(se_cmd, cdb, gfp);
	if (rc)
		goto send_cc_direct;

	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	rc = transport_lookup_cmd_lun(se_cmd);
	if (rc)
		goto send_cc_direct;

	rc = target_cmd_parse_cdb(se_cmd);
	if (rc != 0)
		goto generic_fail;

	/*
	 * Save pointers for SGLs containing protection information,
	 * if present.
	 */
	if (sgl_prot_count) {
		se_cmd->t_prot_sg = sgl_prot;
		se_cmd->t_prot_nents = sgl_prot_count;
		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
	}

	/*
	 * When a non zero sgl_count has been passed perform SGL passthrough
	 * mapping for pre-allocated fabric memory instead of having target
	 * core perform an internal SGL allocation..
	 */
	if (sgl_count != 0) {
		BUG_ON(!sgl);

		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
				sgl_bidi, sgl_bidi_count);
		if (rc != 0)
			goto generic_fail;
	}

	return 0;

send_cc_direct:
	transport_send_check_condition_and_sense(se_cmd, rc, 0);
	target_put_sess_cmd(se_cmd);
	return -EIO;

generic_fail:
	transport_generic_request_failure(se_cmd, rc);
	return -EIO;
}
EXPORT_SYMBOL_GPL(target_submit_prep);

/**
 * target_submit - perform final initialization and submit cmd to LIO core
 * @se_cmd: command descriptor to submit
 *
 * target_submit_prep must have been called on the cmd, and this must be
 * called from process context.
 */
void target_submit(struct se_cmd *se_cmd)
{
	struct scatterlist *sgl = se_cmd->t_data_sg;
	unsigned char *buf = NULL;

	might_sleep();

	if (se_cmd->t_data_nents != 0) {
		BUG_ON(!sgl);
		/*
		 * A work-around for tcm_loop as some userspace code via
		 * scsi-generic do not memset their associated read buffers,
		 * so go ahead and do that here for type non-data CDBs. Also
		 * note that this is currently guaranteed to be a single SGL
		 * for this case by target core in target_setup_cmd_from_cdb()
		 * -> transport_generic_cmd_sequencer().
		 */
		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
		     se_cmd->data_direction == DMA_FROM_DEVICE) {
			if (sgl)
				buf = kmap(sg_page(sgl)) + sgl->offset;

			if (buf) {
				memset(buf, 0, sgl->length);
				kunmap(sg_page(sgl));
			}
		}

	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
}
EXPORT_SYMBOL_GPL(target_submit);

/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 *
 * It also assumes internal target core SGL memory allocation.
 *
 * This function must only be used by drivers that do their own
 * sync during shutdown and does not use target_stop_session. If there
 * is a failure this function will call into the fabric driver's
 * queue_status with a CHECK_CONDITION.
 */
void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	int rc;

	rc = target_init_cmd(se_cmd, se_sess, sense, unpacked_lun, data_length,
			     task_attr, data_dir, flags);
	WARN(rc, "Invalid target_submit_cmd use. Driver must not use target_stop_session or call target_init_cmd directly.\n");
	if (rc)
		return;

	if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
			       GFP_KERNEL))
		return;

	target_submit(se_cmd);
}
EXPORT_SYMBOL(target_submit_cmd);


static struct se_dev_plug *target_plug_device(struct se_device *se_dev)
{
	struct se_dev_plug *se_plug;

	if (!se_dev->transport->plug_device)
		return NULL;

	se_plug = se_dev->transport->plug_device(se_dev);
	if (!se_plug)
		return NULL;

	se_plug->se_dev = se_dev;
	/*
	 * We have a ref to the lun at this point, but the cmds could
	 * complete before we unplug, so grab a ref to the se_device so we
	 * can call back into the backend.
	 */
	config_group_get(&se_dev->dev_group);
	return se_plug;
}

static void target_unplug_device(struct se_dev_plug *se_plug)
{
	struct se_device *se_dev = se_plug->se_dev;

	se_dev->transport->unplug_device(se_plug);
	config_group_put(&se_dev->dev_group);
}

void target_queued_submit_work(struct work_struct *work)
{
	struct se_cmd_queue *sq = container_of(work, struct se_cmd_queue, work);
	struct se_cmd *se_cmd, *next_cmd;
	struct se_dev_plug *se_plug = NULL;
	struct se_device *se_dev = NULL;
	struct llist_node *cmd_list;

	cmd_list = llist_del_all(&sq->cmd_list);
	if (!cmd_list)
		/* Previous call took what we were queued to submit */
		return;

	cmd_list = llist_reverse_order(cmd_list);
	llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list) {
		if (!se_dev) {
			se_dev = se_cmd->se_dev;
			se_plug = target_plug_device(se_dev);
		}

		target_submit(se_cmd);
	}

	if (se_plug)
		target_unplug_device(se_plug);
}

/**
 * target_queue_submission - queue the cmd to run on the LIO workqueue
 * @se_cmd: command descriptor to submit
 */
void target_queue_submission(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	int cpu = se_cmd->cpuid;
	struct se_cmd_queue *sq;

	sq = &se_dev->queues[cpu].sq;
	llist_add(&se_cmd->se_cmd_list, &sq->cmd_list);
	queue_work_on(cpu, target_submission_wq, &sq->work);
}
EXPORT_SYMBOL_GPL(target_queue_submission);
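
/*
 * Illustration (not part of the upstream file): target_queue_submission()
 * above is the deferred alternative to calling target_submit() directly.
 * The command is pushed onto the per-CPU se_device queue and
 * target_queued_submit_work() later runs it from the target_submission
 * workqueue, batching commands for one backend device between
 * plug_device()/unplug_device() when the backend provides those callbacks.
 * A fabric receive path that cannot sleep might therefore simply do:
 *
 *	target_queue_submission(se_cmd);
 *
 * instead of calling target_submit(se_cmd), which may sleep.
 */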
@tm_type: Type of TM request 1967 * @gfp: gfp type for caller 1968 * @tag: referenced task tag for TMR_ABORT_TASK 1969 * @flags: submit cmd flags 1970 * 1971 * Callable from all contexts. 1972 **/ 1973 1974 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 1975 unsigned char *sense, u64 unpacked_lun, 1976 void *fabric_tmr_ptr, unsigned char tm_type, 1977 gfp_t gfp, u64 tag, int flags) 1978 { 1979 struct se_portal_group *se_tpg; 1980 int ret; 1981 1982 se_tpg = se_sess->se_tpg; 1983 BUG_ON(!se_tpg); 1984 1985 __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1986 0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun, 1987 se_sess->cmd_cnt); 1988 /* 1989 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req 1990 * allocation failure. 1991 */ 1992 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); 1993 if (ret < 0) 1994 return -ENOMEM; 1995 1996 if (tm_type == TMR_ABORT_TASK) 1997 se_cmd->se_tmr_req->ref_task_tag = tag; 1998 1999 /* See target_submit_cmd for commentary */ 2000 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); 2001 if (ret) { 2002 core_tmr_release_req(se_cmd->se_tmr_req); 2003 return ret; 2004 } 2005 2006 ret = transport_lookup_tmr_lun(se_cmd); 2007 if (ret) 2008 goto failure; 2009 2010 transport_generic_handle_tmr(se_cmd); 2011 return 0; 2012 2013 /* 2014 * For callback during failure handling, push this work off 2015 * to process context with TMR_LUN_DOES_NOT_EXIST status. 2016 */ 2017 failure: 2018 INIT_WORK(&se_cmd->work, target_complete_tmr_failure); 2019 schedule_work(&se_cmd->work); 2020 return 0; 2021 } 2022 EXPORT_SYMBOL(target_submit_tmr); 2023 2024 /* 2025 * Handle SAM-esque emulation for generic transport request failures. 2026 */ 2027 void transport_generic_request_failure(struct se_cmd *cmd, 2028 sense_reason_t sense_reason) 2029 { 2030 int ret = 0, post_ret; 2031 2032 pr_debug("-----[ Storage Engine Exception; sense_reason %d\n", 2033 sense_reason); 2034 target_show_cmd("-----[ ", cmd); 2035 2036 /* 2037 * For SAM Task Attribute emulation for failed struct se_cmd 2038 */ 2039 transport_complete_task_attr(cmd); 2040 2041 if (cmd->transport_complete_callback) 2042 cmd->transport_complete_callback(cmd, false, &post_ret); 2043 2044 if (cmd->transport_state & CMD_T_ABORTED) { 2045 INIT_WORK(&cmd->work, target_abort_work); 2046 queue_work(target_completion_wq, &cmd->work); 2047 return; 2048 } 2049 2050 switch (sense_reason) { 2051 case TCM_NON_EXISTENT_LUN: 2052 case TCM_UNSUPPORTED_SCSI_OPCODE: 2053 case TCM_INVALID_CDB_FIELD: 2054 case TCM_INVALID_PARAMETER_LIST: 2055 case TCM_PARAMETER_LIST_LENGTH_ERROR: 2056 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 2057 case TCM_UNKNOWN_MODE_PAGE: 2058 case TCM_WRITE_PROTECTED: 2059 case TCM_ADDRESS_OUT_OF_RANGE: 2060 case TCM_CHECK_CONDITION_ABORT_CMD: 2061 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 2062 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: 2063 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: 2064 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: 2065 case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: 2066 case TCM_TOO_MANY_TARGET_DESCS: 2067 case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE: 2068 case TCM_TOO_MANY_SEGMENT_DESCS: 2069 case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE: 2070 case TCM_INVALID_FIELD_IN_COMMAND_IU: 2071 case TCM_ALUA_TG_PT_STANDBY: 2072 case TCM_ALUA_TG_PT_UNAVAILABLE: 2073 case TCM_ALUA_STATE_TRANSITION: 2074 case TCM_ALUA_OFFLINE: 2075 break; 2076 case TCM_OUT_OF_RESOURCES: 2077 cmd->scsi_status = SAM_STAT_TASK_SET_FULL; 2078 goto queue_status; 2079 case 
TCM_LUN_BUSY: 2080 cmd->scsi_status = SAM_STAT_BUSY; 2081 goto queue_status; 2082 case TCM_RESERVATION_CONFLICT: 2083 /* 2084 * No SENSE Data payload for this case, set SCSI Status 2085 * and queue the response to $FABRIC_MOD. 2086 * 2087 * Uses linux/include/scsi/scsi.h SAM status codes defs 2088 */ 2089 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 2090 /* 2091 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 2092 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 2093 * CONFLICT STATUS. 2094 * 2095 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 2096 */ 2097 if (cmd->se_sess && 2098 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl 2099 == TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) { 2100 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 2101 cmd->orig_fe_lun, 0x2C, 2102 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 2103 } 2104 2105 goto queue_status; 2106 default: 2107 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 2108 cmd->t_task_cdb[0], sense_reason); 2109 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 2110 break; 2111 } 2112 2113 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); 2114 if (ret) 2115 goto queue_full; 2116 2117 check_stop: 2118 transport_lun_remove_cmd(cmd); 2119 transport_cmd_check_stop_to_fabric(cmd); 2120 return; 2121 2122 queue_status: 2123 trace_target_cmd_complete(cmd); 2124 ret = cmd->se_tfo->queue_status(cmd); 2125 if (!ret) 2126 goto check_stop; 2127 queue_full: 2128 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 2129 } 2130 EXPORT_SYMBOL(transport_generic_request_failure); 2131 2132 void __target_execute_cmd(struct se_cmd *cmd, bool do_checks) 2133 { 2134 sense_reason_t ret; 2135 2136 if (!cmd->execute_cmd) { 2137 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2138 goto err; 2139 } 2140 if (do_checks) { 2141 /* 2142 * Check for an existing UNIT ATTENTION condition after 2143 * target_handle_task_attr() has done SAM task attr 2144 * checking, and possibly have already defered execution 2145 * out to target_restart_delayed_cmds() context. 2146 */ 2147 ret = target_scsi3_ua_check(cmd); 2148 if (ret) 2149 goto err; 2150 2151 ret = target_alua_state_check(cmd); 2152 if (ret) 2153 goto err; 2154 2155 ret = target_check_reservation(cmd); 2156 if (ret) { 2157 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 2158 goto err; 2159 } 2160 } 2161 2162 ret = cmd->execute_cmd(cmd); 2163 if (!ret) 2164 return; 2165 err: 2166 spin_lock_irq(&cmd->t_state_lock); 2167 cmd->transport_state &= ~CMD_T_SENT; 2168 spin_unlock_irq(&cmd->t_state_lock); 2169 2170 transport_generic_request_failure(cmd, ret); 2171 } 2172 2173 static int target_write_prot_action(struct se_cmd *cmd) 2174 { 2175 u32 sectors; 2176 /* 2177 * Perform WRITE_INSERT of PI using software emulation when backend 2178 * device has PI enabled, if the transport has not already generated 2179 * PI using hardware WRITE_INSERT offload. 
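 *
 * Conversely, for TARGET_PROT_DOUT_STRIP the fabric has passed PI down
 * along with the data; if the fabric cannot strip and check the PI itself,
 * verify it here via sbc_dif_verify() before the backend handles the write.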
2180 */ 2181 switch (cmd->prot_op) { 2182 case TARGET_PROT_DOUT_INSERT: 2183 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) 2184 sbc_dif_generate(cmd); 2185 break; 2186 case TARGET_PROT_DOUT_STRIP: 2187 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP) 2188 break; 2189 2190 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); 2191 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 2192 sectors, 0, cmd->t_prot_sg, 0); 2193 if (unlikely(cmd->pi_err)) { 2194 spin_lock_irq(&cmd->t_state_lock); 2195 cmd->transport_state &= ~CMD_T_SENT; 2196 spin_unlock_irq(&cmd->t_state_lock); 2197 transport_generic_request_failure(cmd, cmd->pi_err); 2198 return -1; 2199 } 2200 break; 2201 default: 2202 break; 2203 } 2204 2205 return 0; 2206 } 2207 2208 static bool target_handle_task_attr(struct se_cmd *cmd) 2209 { 2210 struct se_device *dev = cmd->se_dev; 2211 2212 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 2213 return false; 2214 2215 cmd->se_cmd_flags |= SCF_TASK_ATTR_SET; 2216 2217 /* 2218 * Check for the existence of HEAD_OF_QUEUE, and if true return 1 2219 * to allow the passed struct se_cmd list of tasks to the front of the list. 2220 */ 2221 switch (cmd->sam_task_attr) { 2222 case TCM_HEAD_TAG: 2223 atomic_inc_mb(&dev->non_ordered); 2224 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n", 2225 cmd->t_task_cdb[0]); 2226 return false; 2227 case TCM_ORDERED_TAG: 2228 atomic_inc_mb(&dev->delayed_cmd_count); 2229 2230 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n", 2231 cmd->t_task_cdb[0]); 2232 break; 2233 default: 2234 /* 2235 * For SIMPLE and UNTAGGED Task Attribute commands 2236 */ 2237 atomic_inc_mb(&dev->non_ordered); 2238 2239 if (atomic_read(&dev->delayed_cmd_count) == 0) 2240 return false; 2241 break; 2242 } 2243 2244 if (cmd->sam_task_attr != TCM_ORDERED_TAG) { 2245 atomic_inc_mb(&dev->delayed_cmd_count); 2246 /* 2247 * We will account for this when we dequeue from the delayed 2248 * list. 2249 */ 2250 atomic_dec_mb(&dev->non_ordered); 2251 } 2252 2253 spin_lock_irq(&cmd->t_state_lock); 2254 cmd->transport_state &= ~CMD_T_SENT; 2255 spin_unlock_irq(&cmd->t_state_lock); 2256 2257 spin_lock(&dev->delayed_cmd_lock); 2258 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); 2259 spin_unlock(&dev->delayed_cmd_lock); 2260 2261 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn", 2262 cmd->t_task_cdb[0], cmd->sam_task_attr); 2263 /* 2264 * We may have no non ordered cmds when this function started or we 2265 * could have raced with the last simple/head cmd completing, so kick 2266 * the delayed handler here. 2267 */ 2268 schedule_work(&dev->delayed_cmd_work); 2269 return true; 2270 } 2271 2272 void target_execute_cmd(struct se_cmd *cmd) 2273 { 2274 /* 2275 * Determine if frontend context caller is requesting the stopping of 2276 * this command for frontend exceptions. 2277 * 2278 * If the received CDB has already been aborted stop processing it here. 
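 *
 * target_cmd_interrupted() returns true when CMD_T_STOP or CMD_T_ABORTED is
 * already set; in that case it takes care of the stop/abort handling itself
 * and the command must not be touched any further here.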
2279 */ 2280 if (target_cmd_interrupted(cmd)) 2281 return; 2282 2283 spin_lock_irq(&cmd->t_state_lock); 2284 cmd->t_state = TRANSPORT_PROCESSING; 2285 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; 2286 spin_unlock_irq(&cmd->t_state_lock); 2287 2288 if (target_write_prot_action(cmd)) 2289 return; 2290 2291 if (target_handle_task_attr(cmd)) 2292 return; 2293 2294 __target_execute_cmd(cmd, true); 2295 } 2296 EXPORT_SYMBOL(target_execute_cmd); 2297 2298 /* 2299 * Process all commands up to the last received ORDERED task attribute which 2300 * requires another blocking boundary 2301 */ 2302 void target_do_delayed_work(struct work_struct *work) 2303 { 2304 struct se_device *dev = container_of(work, struct se_device, 2305 delayed_cmd_work); 2306 2307 spin_lock(&dev->delayed_cmd_lock); 2308 while (!dev->ordered_sync_in_progress) { 2309 struct se_cmd *cmd; 2310 2311 if (list_empty(&dev->delayed_cmd_list)) 2312 break; 2313 2314 cmd = list_entry(dev->delayed_cmd_list.next, 2315 struct se_cmd, se_delayed_node); 2316 2317 if (cmd->sam_task_attr == TCM_ORDERED_TAG) { 2318 /* 2319 * Check if we started with: 2320 * [ordered] [simple] [ordered] 2321 * and we are now at the last ordered so we have to wait 2322 * for the simple cmd. 2323 */ 2324 if (atomic_read(&dev->non_ordered) > 0) 2325 break; 2326 2327 dev->ordered_sync_in_progress = true; 2328 } 2329 2330 list_del(&cmd->se_delayed_node); 2331 atomic_dec_mb(&dev->delayed_cmd_count); 2332 spin_unlock(&dev->delayed_cmd_lock); 2333 2334 if (cmd->sam_task_attr != TCM_ORDERED_TAG) 2335 atomic_inc_mb(&dev->non_ordered); 2336 2337 cmd->transport_state |= CMD_T_SENT; 2338 2339 __target_execute_cmd(cmd, true); 2340 2341 spin_lock(&dev->delayed_cmd_lock); 2342 } 2343 spin_unlock(&dev->delayed_cmd_lock); 2344 } 2345 2346 /* 2347 * Called from I/O completion to determine which dormant/delayed 2348 * and ordered cmds need to have their tasks added to the execution queue. 2349 */ 2350 static void transport_complete_task_attr(struct se_cmd *cmd) 2351 { 2352 struct se_device *dev = cmd->se_dev; 2353 2354 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 2355 return; 2356 2357 if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET)) 2358 goto restart; 2359 2360 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { 2361 atomic_dec_mb(&dev->non_ordered); 2362 dev->dev_cur_ordered_id++; 2363 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { 2364 atomic_dec_mb(&dev->non_ordered); 2365 dev->dev_cur_ordered_id++; 2366 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", 2367 dev->dev_cur_ordered_id); 2368 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { 2369 spin_lock(&dev->delayed_cmd_lock); 2370 dev->ordered_sync_in_progress = false; 2371 spin_unlock(&dev->delayed_cmd_lock); 2372 2373 dev->dev_cur_ordered_id++; 2374 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", 2375 dev->dev_cur_ordered_id); 2376 } 2377 cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; 2378 2379 restart: 2380 if (atomic_read(&dev->delayed_cmd_count) > 0) 2381 schedule_work(&dev->delayed_cmd_work); 2382 } 2383 2384 static void transport_complete_qf(struct se_cmd *cmd) 2385 { 2386 int ret = 0; 2387 2388 transport_complete_task_attr(cmd); 2389 /* 2390 * If a fabric driver ->write_pending() or ->queue_data_in() callback 2391 * has returned neither -ENOMEM or -EAGAIN, assume it's fatal and 2392 * the same callbacks should not be retried. Return CHECK_CONDITION 2393 * if a scsi_status is not already set. 
2394 *
2395 * If a fabric driver ->queue_status() has returned non-zero, always
2396 * keep retrying no matter what.
2397 */
2398 if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
2399 if (cmd->scsi_status)
2400 goto queue_status;
2401
2402 translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
2403 goto queue_status;
2404 }
2405
2406 /*
2407 * Check if we need to send a sense buffer from
2408 * the struct se_cmd in question. We do NOT want
2409 * to take this path if the IO has been marked as
2410 * needing to be treated like a "normal read". This
2411 * is the case if it's a tape read, and either the
2412 * FM, EOM, or ILI bits are set, but there is no
2413 * sense data.
2414 */
2415 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2416 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
2417 goto queue_status;
2418
2419 switch (cmd->data_direction) {
2420 case DMA_FROM_DEVICE:
2421 /* queue status if not treating this as a normal read */
2422 if (cmd->scsi_status &&
2423 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
2424 goto queue_status;
2425
2426 trace_target_cmd_complete(cmd);
2427 ret = cmd->se_tfo->queue_data_in(cmd);
2428 break;
2429 case DMA_TO_DEVICE:
2430 if (cmd->se_cmd_flags & SCF_BIDI) {
2431 ret = cmd->se_tfo->queue_data_in(cmd);
2432 break;
2433 }
2434 fallthrough;
2435 case DMA_NONE:
2436 queue_status:
2437 trace_target_cmd_complete(cmd);
2438 ret = cmd->se_tfo->queue_status(cmd);
2439 break;
2440 default:
2441 break;
2442 }
2443
2444 if (ret < 0) {
2445 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2446 return;
2447 }
2448 transport_lun_remove_cmd(cmd);
2449 transport_cmd_check_stop_to_fabric(cmd);
2450 }
2451
2452 static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
2453 int err, bool write_pending)
2454 {
2455 /*
2456 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
2457 * ->queue_data_in() callbacks from new process context.
2458 *
2459 * Otherwise for other errors, transport_complete_qf() will send
2460 * CHECK_CONDITION via ->queue_status() instead of attempting to
2461 * retry associated fabric driver data-transfer callbacks.
2462 */
2463 if (err == -EAGAIN || err == -ENOMEM) {
2464 cmd->t_state = (write_pending) ?
TRANSPORT_COMPLETE_QF_WP : 2465 TRANSPORT_COMPLETE_QF_OK; 2466 } else { 2467 pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err); 2468 cmd->t_state = TRANSPORT_COMPLETE_QF_ERR; 2469 } 2470 2471 spin_lock_irq(&dev->qf_cmd_lock); 2472 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 2473 atomic_inc_mb(&dev->dev_qf_count); 2474 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 2475 2476 schedule_work(&cmd->se_dev->qf_work_queue); 2477 } 2478 2479 static bool target_read_prot_action(struct se_cmd *cmd) 2480 { 2481 switch (cmd->prot_op) { 2482 case TARGET_PROT_DIN_STRIP: 2483 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { 2484 u32 sectors = cmd->data_length >> 2485 ilog2(cmd->se_dev->dev_attrib.block_size); 2486 2487 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 2488 sectors, 0, cmd->t_prot_sg, 2489 0); 2490 if (cmd->pi_err) 2491 return true; 2492 } 2493 break; 2494 case TARGET_PROT_DIN_INSERT: 2495 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) 2496 break; 2497 2498 sbc_dif_generate(cmd); 2499 break; 2500 default: 2501 break; 2502 } 2503 2504 return false; 2505 } 2506 2507 static void target_complete_ok_work(struct work_struct *work) 2508 { 2509 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2510 int ret; 2511 2512 /* 2513 * Check if we need to move delayed/dormant tasks from cmds on the 2514 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 2515 * Attribute. 2516 */ 2517 transport_complete_task_attr(cmd); 2518 2519 /* 2520 * Check to schedule QUEUE_FULL work, or execute an existing 2521 * cmd->transport_qf_callback() 2522 */ 2523 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 2524 schedule_work(&cmd->se_dev->qf_work_queue); 2525 2526 /* 2527 * Check if we need to send a sense buffer from 2528 * the struct se_cmd in question. We do NOT want 2529 * to take this path of the IO has been marked as 2530 * needing to be treated like a "normal read". This 2531 * is the case if it's a tape read, and either the 2532 * FM, EOM, or ILI bits are set, but there is no 2533 * sense data. 2534 */ 2535 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && 2536 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 2537 WARN_ON(!cmd->scsi_status); 2538 ret = transport_send_check_condition_and_sense( 2539 cmd, 0, 1); 2540 if (ret) 2541 goto queue_full; 2542 2543 transport_lun_remove_cmd(cmd); 2544 transport_cmd_check_stop_to_fabric(cmd); 2545 return; 2546 } 2547 /* 2548 * Check for a callback, used by amongst other things 2549 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. 2550 */ 2551 if (cmd->transport_complete_callback) { 2552 sense_reason_t rc; 2553 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); 2554 bool zero_dl = !(cmd->data_length); 2555 int post_ret = 0; 2556 2557 rc = cmd->transport_complete_callback(cmd, true, &post_ret); 2558 if (!rc && !post_ret) { 2559 if (caw && zero_dl) 2560 goto queue_rsp; 2561 2562 return; 2563 } else if (rc) { 2564 ret = transport_send_check_condition_and_sense(cmd, 2565 rc, 0); 2566 if (ret) 2567 goto queue_full; 2568 2569 transport_lun_remove_cmd(cmd); 2570 transport_cmd_check_stop_to_fabric(cmd); 2571 return; 2572 } 2573 } 2574 2575 queue_rsp: 2576 switch (cmd->data_direction) { 2577 case DMA_FROM_DEVICE: 2578 /* 2579 * if this is a READ-type IO, but SCSI status 2580 * is set, then skip returning data and just 2581 * return the status -- unless this IO is marked 2582 * as needing to be treated as a normal read, 2583 * in which case we want to go ahead and return 2584 * the data. 
This happens, for example, for tape 2585 * reads with the FM, EOM, or ILI bits set, with 2586 * no sense data. 2587 */ 2588 if (cmd->scsi_status && 2589 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) 2590 goto queue_status; 2591 2592 atomic_long_add(cmd->data_length, 2593 &cmd->se_lun->lun_stats.tx_data_octets); 2594 /* 2595 * Perform READ_STRIP of PI using software emulation when 2596 * backend had PI enabled, if the transport will not be 2597 * performing hardware READ_STRIP offload. 2598 */ 2599 if (target_read_prot_action(cmd)) { 2600 ret = transport_send_check_condition_and_sense(cmd, 2601 cmd->pi_err, 0); 2602 if (ret) 2603 goto queue_full; 2604 2605 transport_lun_remove_cmd(cmd); 2606 transport_cmd_check_stop_to_fabric(cmd); 2607 return; 2608 } 2609 2610 trace_target_cmd_complete(cmd); 2611 ret = cmd->se_tfo->queue_data_in(cmd); 2612 if (ret) 2613 goto queue_full; 2614 break; 2615 case DMA_TO_DEVICE: 2616 atomic_long_add(cmd->data_length, 2617 &cmd->se_lun->lun_stats.rx_data_octets); 2618 /* 2619 * Check if we need to send READ payload for BIDI-COMMAND 2620 */ 2621 if (cmd->se_cmd_flags & SCF_BIDI) { 2622 atomic_long_add(cmd->data_length, 2623 &cmd->se_lun->lun_stats.tx_data_octets); 2624 ret = cmd->se_tfo->queue_data_in(cmd); 2625 if (ret) 2626 goto queue_full; 2627 break; 2628 } 2629 fallthrough; 2630 case DMA_NONE: 2631 queue_status: 2632 trace_target_cmd_complete(cmd); 2633 ret = cmd->se_tfo->queue_status(cmd); 2634 if (ret) 2635 goto queue_full; 2636 break; 2637 default: 2638 break; 2639 } 2640 2641 transport_lun_remove_cmd(cmd); 2642 transport_cmd_check_stop_to_fabric(cmd); 2643 return; 2644 2645 queue_full: 2646 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 2647 " data_direction: %d\n", cmd, cmd->data_direction); 2648 2649 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 2650 } 2651 2652 void target_free_sgl(struct scatterlist *sgl, int nents) 2653 { 2654 sgl_free_n_order(sgl, nents, 0); 2655 } 2656 EXPORT_SYMBOL(target_free_sgl); 2657 2658 static inline void transport_reset_sgl_orig(struct se_cmd *cmd) 2659 { 2660 /* 2661 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE 2662 * emulation, and free + reset pointers if necessary.. 
2663 */ 2664 if (!cmd->t_data_sg_orig) 2665 return; 2666 2667 kfree(cmd->t_data_sg); 2668 cmd->t_data_sg = cmd->t_data_sg_orig; 2669 cmd->t_data_sg_orig = NULL; 2670 cmd->t_data_nents = cmd->t_data_nents_orig; 2671 cmd->t_data_nents_orig = 0; 2672 } 2673 2674 static inline void transport_free_pages(struct se_cmd *cmd) 2675 { 2676 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2677 target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); 2678 cmd->t_prot_sg = NULL; 2679 cmd->t_prot_nents = 0; 2680 } 2681 2682 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { 2683 /* 2684 * Release special case READ buffer payload required for 2685 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE 2686 */ 2687 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { 2688 target_free_sgl(cmd->t_bidi_data_sg, 2689 cmd->t_bidi_data_nents); 2690 cmd->t_bidi_data_sg = NULL; 2691 cmd->t_bidi_data_nents = 0; 2692 } 2693 transport_reset_sgl_orig(cmd); 2694 return; 2695 } 2696 transport_reset_sgl_orig(cmd); 2697 2698 target_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2699 cmd->t_data_sg = NULL; 2700 cmd->t_data_nents = 0; 2701 2702 target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 2703 cmd->t_bidi_data_sg = NULL; 2704 cmd->t_bidi_data_nents = 0; 2705 } 2706 2707 void *transport_kmap_data_sg(struct se_cmd *cmd) 2708 { 2709 struct scatterlist *sg = cmd->t_data_sg; 2710 struct page **pages; 2711 int i; 2712 2713 /* 2714 * We need to take into account a possible offset here for fabrics like 2715 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 2716 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 2717 */ 2718 if (!cmd->t_data_nents) 2719 return NULL; 2720 2721 BUG_ON(!sg); 2722 if (cmd->t_data_nents == 1) 2723 return kmap(sg_page(sg)) + sg->offset; 2724 2725 /* >1 page. use vmap */ 2726 pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL); 2727 if (!pages) 2728 return NULL; 2729 2730 /* convert sg[] to pages[] */ 2731 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 2732 pages[i] = sg_page(sg); 2733 } 2734 2735 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 2736 kfree(pages); 2737 if (!cmd->t_data_vmap) 2738 return NULL; 2739 2740 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 2741 } 2742 EXPORT_SYMBOL(transport_kmap_data_sg); 2743 2744 void transport_kunmap_data_sg(struct se_cmd *cmd) 2745 { 2746 if (!cmd->t_data_nents) { 2747 return; 2748 } else if (cmd->t_data_nents == 1) { 2749 kunmap(sg_page(cmd->t_data_sg)); 2750 return; 2751 } 2752 2753 vunmap(cmd->t_data_vmap); 2754 cmd->t_data_vmap = NULL; 2755 } 2756 EXPORT_SYMBOL(transport_kunmap_data_sg); 2757 2758 int 2759 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, 2760 bool zero_page, bool chainable) 2761 { 2762 gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0); 2763 2764 *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents); 2765 return *sgl ? 0 : -ENOMEM; 2766 } 2767 EXPORT_SYMBOL(target_alloc_sgl); 2768 2769 /* 2770 * Allocate any required resources to execute the command. For writes we 2771 * might not have the payload yet, so notify the fabric via a call to 2772 * ->write_pending instead. Otherwise place it on the execution queue. 
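 *
 * A fabric driver's ->write_pending() callback is expected to fetch the
 * WRITE payload from the initiator and then hand the command back via
 * target_execute_cmd(). For a loopback-style fabric whose payload is
 * already present in the data SGL, a minimal sketch (with a hypothetical
 * fabric prefix) could simply be:
 *
 *	static int xxx_write_pending(struct se_cmd *se_cmd)
 *	{
 *		target_execute_cmd(se_cmd);
 *		return 0;
 *	}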
2773 */ 2774 sense_reason_t 2775 transport_generic_new_cmd(struct se_cmd *cmd) 2776 { 2777 unsigned long flags; 2778 int ret = 0; 2779 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); 2780 2781 if (cmd->prot_op != TARGET_PROT_NORMAL && 2782 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2783 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, 2784 cmd->prot_length, true, false); 2785 if (ret < 0) 2786 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2787 } 2788 2789 /* 2790 * Determine if the TCM fabric module has already allocated physical 2791 * memory, and is directly calling transport_generic_map_mem_to_cmd() 2792 * beforehand. 2793 */ 2794 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 2795 cmd->data_length) { 2796 2797 if ((cmd->se_cmd_flags & SCF_BIDI) || 2798 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { 2799 u32 bidi_length; 2800 2801 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) 2802 bidi_length = cmd->t_task_nolb * 2803 cmd->se_dev->dev_attrib.block_size; 2804 else 2805 bidi_length = cmd->data_length; 2806 2807 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2808 &cmd->t_bidi_data_nents, 2809 bidi_length, zero_flag, false); 2810 if (ret < 0) 2811 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2812 } 2813 2814 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2815 cmd->data_length, zero_flag, false); 2816 if (ret < 0) 2817 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2818 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 2819 cmd->data_length) { 2820 /* 2821 * Special case for COMPARE_AND_WRITE with fabrics 2822 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. 2823 */ 2824 u32 caw_length = cmd->t_task_nolb * 2825 cmd->se_dev->dev_attrib.block_size; 2826 2827 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2828 &cmd->t_bidi_data_nents, 2829 caw_length, zero_flag, false); 2830 if (ret < 0) 2831 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2832 } 2833 /* 2834 * If this command is not a write we can execute it right here, 2835 * for write buffers we need to notify the fabric driver first 2836 * and let it call back once the write buffers are ready. 2837 */ 2838 target_add_to_state_list(cmd); 2839 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { 2840 target_execute_cmd(cmd); 2841 return 0; 2842 } 2843 2844 spin_lock_irqsave(&cmd->t_state_lock, flags); 2845 cmd->t_state = TRANSPORT_WRITE_PENDING; 2846 /* 2847 * Determine if frontend context caller is requesting the stopping of 2848 * this command for frontend exceptions. 
2849 */ 2850 if (cmd->transport_state & CMD_T_STOP && 2851 !cmd->se_tfo->write_pending_must_be_called) { 2852 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 2853 __func__, __LINE__, cmd->tag); 2854 2855 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2856 2857 complete_all(&cmd->t_transport_stop_comp); 2858 return 0; 2859 } 2860 cmd->transport_state &= ~CMD_T_ACTIVE; 2861 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2862 2863 ret = cmd->se_tfo->write_pending(cmd); 2864 if (ret) 2865 goto queue_full; 2866 2867 return 0; 2868 2869 queue_full: 2870 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 2871 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); 2872 return 0; 2873 } 2874 EXPORT_SYMBOL(transport_generic_new_cmd); 2875 2876 static void transport_write_pending_qf(struct se_cmd *cmd) 2877 { 2878 unsigned long flags; 2879 int ret; 2880 bool stop; 2881 2882 spin_lock_irqsave(&cmd->t_state_lock, flags); 2883 stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED)); 2884 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2885 2886 if (stop) { 2887 pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n", 2888 __func__, __LINE__, cmd->tag); 2889 complete_all(&cmd->t_transport_stop_comp); 2890 return; 2891 } 2892 2893 ret = cmd->se_tfo->write_pending(cmd); 2894 if (ret) { 2895 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", 2896 cmd); 2897 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); 2898 } 2899 } 2900 2901 static bool 2902 __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, 2903 unsigned long *flags); 2904 2905 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) 2906 { 2907 unsigned long flags; 2908 2909 spin_lock_irqsave(&cmd->t_state_lock, flags); 2910 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); 2911 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2912 } 2913 2914 /* 2915 * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has 2916 * finished. 2917 */ 2918 void target_put_cmd_and_wait(struct se_cmd *cmd) 2919 { 2920 DECLARE_COMPLETION_ONSTACK(compl); 2921 2922 WARN_ON_ONCE(cmd->abrt_compl); 2923 cmd->abrt_compl = &compl; 2924 target_put_sess_cmd(cmd); 2925 wait_for_completion(&compl); 2926 } 2927 2928 /* 2929 * This function is called by frontend drivers after processing of a command 2930 * has finished. 2931 * 2932 * The protocol for ensuring that either the regular frontend command 2933 * processing flow or target_handle_abort() code drops one reference is as 2934 * follows: 2935 * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause 2936 * the frontend driver to call this function synchronously or asynchronously. 2937 * That will cause one reference to be dropped. 2938 * - During regular command processing the target core sets CMD_T_COMPLETE 2939 * before invoking one of the .queue_*() functions. 2940 * - The code that aborts commands skips commands and TMFs for which 2941 * CMD_T_COMPLETE has been set. 2942 * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for 2943 * commands that will be aborted. 2944 * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set 2945 * transport_generic_free_cmd() skips its call to target_put_sess_cmd(). 2946 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will 2947 * be called and will drop a reference. 2948 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task() 2949 * will be called. 
target_handle_abort() will drop the final reference. 2950 */ 2951 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2952 { 2953 DECLARE_COMPLETION_ONSTACK(compl); 2954 int ret = 0; 2955 bool aborted = false, tas = false; 2956 2957 if (wait_for_tasks) 2958 target_wait_free_cmd(cmd, &aborted, &tas); 2959 2960 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) { 2961 /* 2962 * Handle WRITE failure case where transport_generic_new_cmd() 2963 * has already added se_cmd to state_list, but fabric has 2964 * failed command before I/O submission. 2965 */ 2966 if (cmd->state_active) 2967 target_remove_from_state_list(cmd); 2968 2969 if (cmd->se_lun) 2970 transport_lun_remove_cmd(cmd); 2971 } 2972 if (aborted) 2973 cmd->free_compl = &compl; 2974 ret = target_put_sess_cmd(cmd); 2975 if (aborted) { 2976 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); 2977 wait_for_completion(&compl); 2978 ret = 1; 2979 } 2980 return ret; 2981 } 2982 EXPORT_SYMBOL(transport_generic_free_cmd); 2983 2984 /** 2985 * target_get_sess_cmd - Verify the session is accepting cmds and take ref 2986 * @se_cmd: command descriptor to add 2987 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() 2988 */ 2989 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) 2990 { 2991 int ret = 0; 2992 2993 /* 2994 * Add a second kref if the fabric caller is expecting to handle 2995 * fabric acknowledgement that requires two target_put_sess_cmd() 2996 * invocations before se_cmd descriptor release. 2997 */ 2998 if (ack_kref) { 2999 kref_get(&se_cmd->cmd_kref); 3000 se_cmd->se_cmd_flags |= SCF_ACK_KREF; 3001 } 3002 3003 /* 3004 * Users like xcopy do not use counters since they never do a stop 3005 * and wait. 3006 */ 3007 if (se_cmd->cmd_cnt) { 3008 if (!percpu_ref_tryget_live(&se_cmd->cmd_cnt->refcnt)) 3009 ret = -ESHUTDOWN; 3010 } 3011 if (ret && ack_kref) 3012 target_put_sess_cmd(se_cmd); 3013 3014 return ret; 3015 } 3016 EXPORT_SYMBOL(target_get_sess_cmd); 3017 3018 static void target_free_cmd_mem(struct se_cmd *cmd) 3019 { 3020 transport_free_pages(cmd); 3021 3022 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 3023 core_tmr_release_req(cmd->se_tmr_req); 3024 if (cmd->t_task_cdb != cmd->__t_task_cdb) 3025 kfree(cmd->t_task_cdb); 3026 } 3027 3028 static void target_release_cmd_kref(struct kref *kref) 3029 { 3030 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 3031 struct target_cmd_counter *cmd_cnt = se_cmd->cmd_cnt; 3032 struct completion *free_compl = se_cmd->free_compl; 3033 struct completion *abrt_compl = se_cmd->abrt_compl; 3034 3035 target_free_cmd_mem(se_cmd); 3036 se_cmd->se_tfo->release_cmd(se_cmd); 3037 if (free_compl) 3038 complete(free_compl); 3039 if (abrt_compl) 3040 complete(abrt_compl); 3041 3042 if (cmd_cnt) 3043 percpu_ref_put(&cmd_cnt->refcnt); 3044 } 3045 3046 /** 3047 * target_put_sess_cmd - decrease the command reference count 3048 * @se_cmd: command to drop a reference from 3049 * 3050 * Returns 1 if and only if this target_put_sess_cmd() call caused the 3051 * refcount to drop to zero. Returns zero otherwise. 
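 *
 * Note that a command set up with TARGET_SCF_ACK_KREF holds two references,
 * so its descriptor is only released after two puts, e.g. one via
 * transport_generic_free_cmd() and one from the fabric's response-ack path.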
3052 */ 3053 int target_put_sess_cmd(struct se_cmd *se_cmd) 3054 { 3055 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); 3056 } 3057 EXPORT_SYMBOL(target_put_sess_cmd); 3058 3059 static const char *data_dir_name(enum dma_data_direction d) 3060 { 3061 switch (d) { 3062 case DMA_BIDIRECTIONAL: return "BIDI"; 3063 case DMA_TO_DEVICE: return "WRITE"; 3064 case DMA_FROM_DEVICE: return "READ"; 3065 case DMA_NONE: return "NONE"; 3066 } 3067 3068 return "(?)"; 3069 } 3070 3071 static const char *cmd_state_name(enum transport_state_table t) 3072 { 3073 switch (t) { 3074 case TRANSPORT_NO_STATE: return "NO_STATE"; 3075 case TRANSPORT_NEW_CMD: return "NEW_CMD"; 3076 case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING"; 3077 case TRANSPORT_PROCESSING: return "PROCESSING"; 3078 case TRANSPORT_COMPLETE: return "COMPLETE"; 3079 case TRANSPORT_ISTATE_PROCESSING: 3080 return "ISTATE_PROCESSING"; 3081 case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP"; 3082 case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK"; 3083 case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR"; 3084 } 3085 3086 return "(?)"; 3087 } 3088 3089 static void target_append_str(char **str, const char *txt) 3090 { 3091 char *prev = *str; 3092 3093 *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) : 3094 kstrdup(txt, GFP_ATOMIC); 3095 kfree(prev); 3096 } 3097 3098 /* 3099 * Convert a transport state bitmask into a string. The caller is 3100 * responsible for freeing the returned pointer. 3101 */ 3102 static char *target_ts_to_str(u32 ts) 3103 { 3104 char *str = NULL; 3105 3106 if (ts & CMD_T_ABORTED) 3107 target_append_str(&str, "aborted"); 3108 if (ts & CMD_T_ACTIVE) 3109 target_append_str(&str, "active"); 3110 if (ts & CMD_T_COMPLETE) 3111 target_append_str(&str, "complete"); 3112 if (ts & CMD_T_SENT) 3113 target_append_str(&str, "sent"); 3114 if (ts & CMD_T_STOP) 3115 target_append_str(&str, "stop"); 3116 if (ts & CMD_T_FABRIC_STOP) 3117 target_append_str(&str, "fabric_stop"); 3118 3119 return str; 3120 } 3121 3122 static const char *target_tmf_name(enum tcm_tmreq_table tmf) 3123 { 3124 switch (tmf) { 3125 case TMR_ABORT_TASK: return "ABORT_TASK"; 3126 case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET"; 3127 case TMR_CLEAR_ACA: return "CLEAR_ACA"; 3128 case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET"; 3129 case TMR_LUN_RESET: return "LUN_RESET"; 3130 case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET"; 3131 case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET"; 3132 case TMR_LUN_RESET_PRO: return "LUN_RESET_PRO"; 3133 case TMR_UNKNOWN: break; 3134 } 3135 return "(?)"; 3136 } 3137 3138 void target_show_cmd(const char *pfx, struct se_cmd *cmd) 3139 { 3140 char *ts_str = target_ts_to_str(cmd->transport_state); 3141 const u8 *cdb = cmd->t_task_cdb; 3142 struct se_tmr_req *tmf = cmd->se_tmr_req; 3143 3144 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 3145 pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n", 3146 pfx, cdb[0], cdb[1], cmd->tag, 3147 data_dir_name(cmd->data_direction), 3148 cmd->se_tfo->get_cmd_state(cmd), 3149 cmd_state_name(cmd->t_state), cmd->data_length, 3150 kref_read(&cmd->cmd_kref), ts_str); 3151 } else { 3152 pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n", 3153 pfx, target_tmf_name(tmf->function), cmd->tag, 3154 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd), 3155 cmd_state_name(cmd->t_state), 3156 kref_read(&cmd->cmd_kref), ts_str); 3157 } 3158 kfree(ts_str); 3159 } 3160 
EXPORT_SYMBOL(target_show_cmd); 3161 3162 static void target_stop_cmd_counter_confirm(struct percpu_ref *ref) 3163 { 3164 struct target_cmd_counter *cmd_cnt = container_of(ref, 3165 struct target_cmd_counter, 3166 refcnt); 3167 complete_all(&cmd_cnt->stop_done); 3168 } 3169 3170 /** 3171 * target_stop_cmd_counter - Stop new IO from being added to the counter. 3172 * @cmd_cnt: counter to stop 3173 */ 3174 void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt) 3175 { 3176 pr_debug("Stopping command counter.\n"); 3177 if (!atomic_cmpxchg(&cmd_cnt->stopped, 0, 1)) 3178 percpu_ref_kill_and_confirm(&cmd_cnt->refcnt, 3179 target_stop_cmd_counter_confirm); 3180 } 3181 EXPORT_SYMBOL_GPL(target_stop_cmd_counter); 3182 3183 /** 3184 * target_stop_session - Stop new IO from being queued on the session. 3185 * @se_sess: session to stop 3186 */ 3187 void target_stop_session(struct se_session *se_sess) 3188 { 3189 target_stop_cmd_counter(se_sess->cmd_cnt); 3190 } 3191 EXPORT_SYMBOL(target_stop_session); 3192 3193 /** 3194 * target_wait_for_cmds - Wait for outstanding cmds. 3195 * @cmd_cnt: counter to wait for active I/O for. 3196 */ 3197 void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt) 3198 { 3199 int ret; 3200 3201 WARN_ON_ONCE(!atomic_read(&cmd_cnt->stopped)); 3202 3203 do { 3204 pr_debug("Waiting for running cmds to complete.\n"); 3205 ret = wait_event_timeout(cmd_cnt->refcnt_wq, 3206 percpu_ref_is_zero(&cmd_cnt->refcnt), 3207 180 * HZ); 3208 } while (ret <= 0); 3209 3210 wait_for_completion(&cmd_cnt->stop_done); 3211 pr_debug("Waiting for cmds done.\n"); 3212 } 3213 EXPORT_SYMBOL_GPL(target_wait_for_cmds); 3214 3215 /** 3216 * target_wait_for_sess_cmds - Wait for outstanding commands 3217 * @se_sess: session to wait for active I/O 3218 */ 3219 void target_wait_for_sess_cmds(struct se_session *se_sess) 3220 { 3221 target_wait_for_cmds(se_sess->cmd_cnt); 3222 } 3223 EXPORT_SYMBOL(target_wait_for_sess_cmds); 3224 3225 /* 3226 * Prevent that new percpu_ref_tryget_live() calls succeed and wait until 3227 * all references to the LUN have been released. Called during LUN shutdown. 
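 *
 * percpu_ref_kill() drops the initial reference and prevents further
 * percpu_ref_tryget_live() calls from succeeding; lun_shutdown_comp is
 * completed once the last lun_ref reference has been dropped.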
3228 */ 3229 void transport_clear_lun_ref(struct se_lun *lun) 3230 { 3231 percpu_ref_kill(&lun->lun_ref); 3232 wait_for_completion(&lun->lun_shutdown_comp); 3233 } 3234 3235 static bool 3236 __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, 3237 bool *aborted, bool *tas, unsigned long *flags) 3238 __releases(&cmd->t_state_lock) 3239 __acquires(&cmd->t_state_lock) 3240 { 3241 lockdep_assert_held(&cmd->t_state_lock); 3242 3243 if (fabric_stop) 3244 cmd->transport_state |= CMD_T_FABRIC_STOP; 3245 3246 if (cmd->transport_state & CMD_T_ABORTED) 3247 *aborted = true; 3248 3249 if (cmd->transport_state & CMD_T_TAS) 3250 *tas = true; 3251 3252 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 3253 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 3254 return false; 3255 3256 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 3257 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 3258 return false; 3259 3260 if (!(cmd->transport_state & CMD_T_ACTIVE)) 3261 return false; 3262 3263 if (fabric_stop && *aborted) 3264 return false; 3265 3266 cmd->transport_state |= CMD_T_STOP; 3267 3268 target_show_cmd("wait_for_tasks: Stopping ", cmd); 3269 3270 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 3271 3272 while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp, 3273 180 * HZ)) 3274 target_show_cmd("wait for tasks: ", cmd); 3275 3276 spin_lock_irqsave(&cmd->t_state_lock, *flags); 3277 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 3278 3279 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->" 3280 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); 3281 3282 return true; 3283 } 3284 3285 /** 3286 * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp 3287 * @cmd: command to wait on 3288 */ 3289 bool transport_wait_for_tasks(struct se_cmd *cmd) 3290 { 3291 unsigned long flags; 3292 bool ret, aborted = false, tas = false; 3293 3294 spin_lock_irqsave(&cmd->t_state_lock, flags); 3295 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); 3296 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3297 3298 return ret; 3299 } 3300 EXPORT_SYMBOL(transport_wait_for_tasks); 3301 3302 struct sense_detail { 3303 u8 key; 3304 u8 asc; 3305 u8 ascq; 3306 bool add_sense_info; 3307 }; 3308 3309 static const struct sense_detail sense_detail_table[] = { 3310 [TCM_NO_SENSE] = { 3311 .key = NOT_READY 3312 }, 3313 [TCM_NON_EXISTENT_LUN] = { 3314 .key = ILLEGAL_REQUEST, 3315 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */ 3316 }, 3317 [TCM_UNSUPPORTED_SCSI_OPCODE] = { 3318 .key = ILLEGAL_REQUEST, 3319 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 3320 }, 3321 [TCM_SECTOR_COUNT_TOO_MANY] = { 3322 .key = ILLEGAL_REQUEST, 3323 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 3324 }, 3325 [TCM_UNKNOWN_MODE_PAGE] = { 3326 .key = ILLEGAL_REQUEST, 3327 .asc = 0x24, /* INVALID FIELD IN CDB */ 3328 }, 3329 [TCM_CHECK_CONDITION_ABORT_CMD] = { 3330 .key = ABORTED_COMMAND, 3331 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */ 3332 .ascq = 0x03, 3333 }, 3334 [TCM_INCORRECT_AMOUNT_OF_DATA] = { 3335 .key = ABORTED_COMMAND, 3336 .asc = 0x0c, /* WRITE ERROR */ 3337 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */ 3338 }, 3339 [TCM_INVALID_CDB_FIELD] = { 3340 .key = ILLEGAL_REQUEST, 3341 .asc = 0x24, /* INVALID FIELD IN CDB */ 3342 }, 3343 [TCM_INVALID_PARAMETER_LIST] = { 3344 .key = ILLEGAL_REQUEST, 3345 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ 3346 }, 3347 [TCM_TOO_MANY_TARGET_DESCS] = { 3348 .key = ILLEGAL_REQUEST, 3349 .asc = 0x26, 3350 .ascq = 0x06, /* TOO MANY 
TARGET DESCRIPTORS */ 3351 }, 3352 [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = { 3353 .key = ILLEGAL_REQUEST, 3354 .asc = 0x26, 3355 .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */ 3356 }, 3357 [TCM_TOO_MANY_SEGMENT_DESCS] = { 3358 .key = ILLEGAL_REQUEST, 3359 .asc = 0x26, 3360 .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */ 3361 }, 3362 [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = { 3363 .key = ILLEGAL_REQUEST, 3364 .asc = 0x26, 3365 .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */ 3366 }, 3367 [TCM_PARAMETER_LIST_LENGTH_ERROR] = { 3368 .key = ILLEGAL_REQUEST, 3369 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ 3370 }, 3371 [TCM_UNEXPECTED_UNSOLICITED_DATA] = { 3372 .key = ILLEGAL_REQUEST, 3373 .asc = 0x0c, /* WRITE ERROR */ 3374 .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */ 3375 }, 3376 [TCM_SERVICE_CRC_ERROR] = { 3377 .key = ABORTED_COMMAND, 3378 .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */ 3379 .ascq = 0x05, /* N/A */ 3380 }, 3381 [TCM_SNACK_REJECTED] = { 3382 .key = ABORTED_COMMAND, 3383 .asc = 0x11, /* READ ERROR */ 3384 .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */ 3385 }, 3386 [TCM_WRITE_PROTECTED] = { 3387 .key = DATA_PROTECT, 3388 .asc = 0x27, /* WRITE PROTECTED */ 3389 }, 3390 [TCM_ADDRESS_OUT_OF_RANGE] = { 3391 .key = ILLEGAL_REQUEST, 3392 .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ 3393 }, 3394 [TCM_CHECK_CONDITION_UNIT_ATTENTION] = { 3395 .key = UNIT_ATTENTION, 3396 }, 3397 [TCM_MISCOMPARE_VERIFY] = { 3398 .key = MISCOMPARE, 3399 .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */ 3400 .ascq = 0x00, 3401 .add_sense_info = true, 3402 }, 3403 [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = { 3404 .key = ABORTED_COMMAND, 3405 .asc = 0x10, 3406 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */ 3407 .add_sense_info = true, 3408 }, 3409 [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = { 3410 .key = ABORTED_COMMAND, 3411 .asc = 0x10, 3412 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ 3413 .add_sense_info = true, 3414 }, 3415 [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = { 3416 .key = ABORTED_COMMAND, 3417 .asc = 0x10, 3418 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ 3419 .add_sense_info = true, 3420 }, 3421 [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = { 3422 .key = COPY_ABORTED, 3423 .asc = 0x0d, 3424 .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */ 3425 3426 }, 3427 [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = { 3428 /* 3429 * Returning ILLEGAL REQUEST would cause immediate IO errors on 3430 * Solaris initiators. Returning NOT READY instead means the 3431 * operations will be retried a finite number of times and we 3432 * can survive intermittent errors. 3433 */ 3434 .key = NOT_READY, 3435 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */ 3436 }, 3437 [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = { 3438 /* 3439 * From spc4r22 section5.7.7,5.7.8 3440 * If a PERSISTENT RESERVE OUT command with a REGISTER service action 3441 * or a REGISTER AND IGNORE EXISTING KEY service action or 3442 * REGISTER AND MOVE service actionis attempted, 3443 * but there are insufficient device server resources to complete the 3444 * operation, then the command shall be terminated with CHECK CONDITION 3445 * status, with the sense key set to ILLEGAL REQUEST,and the additonal 3446 * sense code set to INSUFFICIENT REGISTRATION RESOURCES. 
3447 */ 3448 .key = ILLEGAL_REQUEST, 3449 .asc = 0x55, 3450 .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */ 3451 }, 3452 [TCM_INVALID_FIELD_IN_COMMAND_IU] = { 3453 .key = ILLEGAL_REQUEST, 3454 .asc = 0x0e, 3455 .ascq = 0x03, /* INVALID FIELD IN COMMAND INFORMATION UNIT */ 3456 }, 3457 [TCM_ALUA_TG_PT_STANDBY] = { 3458 .key = NOT_READY, 3459 .asc = 0x04, 3460 .ascq = ASCQ_04H_ALUA_TG_PT_STANDBY, 3461 }, 3462 [TCM_ALUA_TG_PT_UNAVAILABLE] = { 3463 .key = NOT_READY, 3464 .asc = 0x04, 3465 .ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE, 3466 }, 3467 [TCM_ALUA_STATE_TRANSITION] = { 3468 .key = NOT_READY, 3469 .asc = 0x04, 3470 .ascq = ASCQ_04H_ALUA_STATE_TRANSITION, 3471 }, 3472 [TCM_ALUA_OFFLINE] = { 3473 .key = NOT_READY, 3474 .asc = 0x04, 3475 .ascq = ASCQ_04H_ALUA_OFFLINE, 3476 }, 3477 }; 3478 3479 /** 3480 * translate_sense_reason - translate a sense reason into T10 key, asc and ascq 3481 * @cmd: SCSI command in which the resulting sense buffer or SCSI status will 3482 * be stored. 3483 * @reason: LIO sense reason code. If this argument has the value 3484 * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If 3485 * dequeuing a unit attention fails due to multiple commands being processed 3486 * concurrently, set the command status to BUSY. 3487 * 3488 * Return: 0 upon success or -EINVAL if the sense buffer is too small. 3489 */ 3490 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) 3491 { 3492 const struct sense_detail *sd; 3493 u8 *buffer = cmd->sense_buffer; 3494 int r = (__force int)reason; 3495 u8 key, asc, ascq; 3496 bool desc_format = target_sense_desc_format(cmd->se_dev); 3497 3498 if (r < ARRAY_SIZE(sense_detail_table) && sense_detail_table[r].key) 3499 sd = &sense_detail_table[r]; 3500 else 3501 sd = &sense_detail_table[(__force int) 3502 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE]; 3503 3504 key = sd->key; 3505 if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) { 3506 if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc, 3507 &ascq)) { 3508 cmd->scsi_status = SAM_STAT_BUSY; 3509 return; 3510 } 3511 } else { 3512 WARN_ON_ONCE(sd->asc == 0); 3513 asc = sd->asc; 3514 ascq = sd->ascq; 3515 } 3516 3517 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 3518 cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 3519 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 3520 scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq); 3521 if (sd->add_sense_info) 3522 WARN_ON_ONCE(scsi_set_sense_information(buffer, 3523 cmd->scsi_sense_length, 3524 cmd->sense_info) < 0); 3525 } 3526 3527 int 3528 transport_send_check_condition_and_sense(struct se_cmd *cmd, 3529 sense_reason_t reason, int from_transport) 3530 { 3531 unsigned long flags; 3532 3533 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); 3534 3535 spin_lock_irqsave(&cmd->t_state_lock, flags); 3536 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 3537 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3538 return 0; 3539 } 3540 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 3541 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3542 3543 if (!from_transport) 3544 translate_sense_reason(cmd, reason); 3545 3546 trace_target_cmd_complete(cmd); 3547 return cmd->se_tfo->queue_status(cmd); 3548 } 3549 EXPORT_SYMBOL(transport_send_check_condition_and_sense); 3550 3551 /** 3552 * target_send_busy - Send SCSI BUSY status back to the initiator 3553 * @cmd: SCSI command for which to send a BUSY reply. 3554 * 3555 * Note: Only call this function if target_submit_cmd*() failed. 
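 *
 * This helper only sets SAM_STAT_BUSY and pushes that status to the fabric
 * via ->queue_status(); releasing the command descriptor afterwards remains
 * the caller's responsibility.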
3556 */ 3557 int target_send_busy(struct se_cmd *cmd) 3558 { 3559 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); 3560 3561 cmd->scsi_status = SAM_STAT_BUSY; 3562 trace_target_cmd_complete(cmd); 3563 return cmd->se_tfo->queue_status(cmd); 3564 } 3565 EXPORT_SYMBOL(target_send_busy); 3566 3567 static void target_tmr_work(struct work_struct *work) 3568 { 3569 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 3570 struct se_device *dev = cmd->se_dev; 3571 struct se_tmr_req *tmr = cmd->se_tmr_req; 3572 int ret; 3573 3574 if (cmd->transport_state & CMD_T_ABORTED) 3575 goto aborted; 3576 3577 switch (tmr->function) { 3578 case TMR_ABORT_TASK: 3579 core_tmr_abort_task(dev, tmr, cmd->se_sess); 3580 break; 3581 case TMR_ABORT_TASK_SET: 3582 case TMR_CLEAR_ACA: 3583 case TMR_CLEAR_TASK_SET: 3584 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; 3585 break; 3586 case TMR_LUN_RESET: 3587 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); 3588 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : 3589 TMR_FUNCTION_REJECTED; 3590 if (tmr->response == TMR_FUNCTION_COMPLETE) { 3591 target_dev_ua_allocate(dev, 0x29, 3592 ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED); 3593 } 3594 break; 3595 case TMR_TARGET_WARM_RESET: 3596 tmr->response = TMR_FUNCTION_REJECTED; 3597 break; 3598 case TMR_TARGET_COLD_RESET: 3599 tmr->response = TMR_FUNCTION_REJECTED; 3600 break; 3601 default: 3602 pr_err("Unknown TMR function: 0x%02x.\n", 3603 tmr->function); 3604 tmr->response = TMR_FUNCTION_REJECTED; 3605 break; 3606 } 3607 3608 if (cmd->transport_state & CMD_T_ABORTED) 3609 goto aborted; 3610 3611 cmd->se_tfo->queue_tm_rsp(cmd); 3612 3613 transport_lun_remove_cmd(cmd); 3614 transport_cmd_check_stop_to_fabric(cmd); 3615 return; 3616 3617 aborted: 3618 target_handle_abort(cmd); 3619 } 3620 3621 int transport_generic_handle_tmr( 3622 struct se_cmd *cmd) 3623 { 3624 unsigned long flags; 3625 bool aborted = false; 3626 3627 spin_lock_irqsave(&cmd->t_state_lock, flags); 3628 if (cmd->transport_state & CMD_T_ABORTED) { 3629 aborted = true; 3630 } else { 3631 cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 3632 cmd->transport_state |= CMD_T_ACTIVE; 3633 } 3634 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3635 3636 if (aborted) { 3637 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n", 3638 cmd->se_tmr_req->function, 3639 cmd->se_tmr_req->ref_task_tag, cmd->tag); 3640 target_handle_abort(cmd); 3641 return 0; 3642 } 3643 3644 INIT_WORK(&cmd->work, target_tmr_work); 3645 schedule_work(&cmd->work); 3646 return 0; 3647 } 3648 EXPORT_SYMBOL(transport_generic_handle_tmr); 3649 3650 bool 3651 target_check_wce(struct se_device *dev) 3652 { 3653 bool wce = false; 3654 3655 if (dev->transport->get_write_cache) 3656 wce = dev->transport->get_write_cache(dev); 3657 else if (dev->dev_attrib.emulate_write_cache > 0) 3658 wce = true; 3659 3660 return wce; 3661 } 3662 3663 bool 3664 target_check_fua(struct se_device *dev) 3665 { 3666 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0; 3667 } 3668
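/*
 * Example (illustrative only, not used by this file): CDB parsing code can
 * combine the two helpers above to validate the FUA bit of a WRITE command
 * before accepting it, along the lines of:
 *
 *	if (cdb[1] & 0x8) {
 *		if (!target_check_fua(dev))
 *			return TCM_INVALID_CDB_FIELD;
 *		cmd->se_cmd_flags |= SCF_FUA;
 *	}
 */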