1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /******************************************************************************* 3 * Filename: target_core_transport.c 4 * 5 * This file contains the Generic Target Engine Core. 6 * 7 * (c) Copyright 2002-2013 Datera, Inc. 8 * 9 * Nicholas A. Bellinger <nab@kernel.org> 10 * 11 ******************************************************************************/ 12 13 #include <linux/net.h> 14 #include <linux/delay.h> 15 #include <linux/string.h> 16 #include <linux/timer.h> 17 #include <linux/slab.h> 18 #include <linux/spinlock.h> 19 #include <linux/kthread.h> 20 #include <linux/in.h> 21 #include <linux/cdrom.h> 22 #include <linux/module.h> 23 #include <linux/ratelimit.h> 24 #include <linux/vmalloc.h> 25 #include <asm/unaligned.h> 26 #include <net/sock.h> 27 #include <net/tcp.h> 28 #include <scsi/scsi_proto.h> 29 #include <scsi/scsi_common.h> 30 31 #include <target/target_core_base.h> 32 #include <target/target_core_backend.h> 33 #include <target/target_core_fabric.h> 34 35 #include "target_core_internal.h" 36 #include "target_core_alua.h" 37 #include "target_core_pr.h" 38 #include "target_core_ua.h" 39 40 #define CREATE_TRACE_POINTS 41 #include <trace/events/target.h> 42 43 static struct workqueue_struct *target_completion_wq; 44 static struct workqueue_struct *target_submission_wq; 45 static struct kmem_cache *se_sess_cache; 46 struct kmem_cache *se_ua_cache; 47 struct kmem_cache *t10_pr_reg_cache; 48 struct kmem_cache *t10_alua_lu_gp_cache; 49 struct kmem_cache *t10_alua_lu_gp_mem_cache; 50 struct kmem_cache *t10_alua_tg_pt_gp_cache; 51 struct kmem_cache *t10_alua_lba_map_cache; 52 struct kmem_cache *t10_alua_lba_map_mem_cache; 53 54 static void transport_complete_task_attr(struct se_cmd *cmd); 55 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason); 56 static void transport_handle_queue_full(struct se_cmd *cmd, 57 struct se_device *dev, int err, bool write_pending); 58 static void target_complete_ok_work(struct work_struct *work); 59 60 int init_se_kmem_caches(void) 61 { 62 se_sess_cache = kmem_cache_create("se_sess_cache", 63 sizeof(struct se_session), __alignof__(struct se_session), 64 0, NULL); 65 if (!se_sess_cache) { 66 pr_err("kmem_cache_create() for struct se_session" 67 " failed\n"); 68 goto out; 69 } 70 se_ua_cache = kmem_cache_create("se_ua_cache", 71 sizeof(struct se_ua), __alignof__(struct se_ua), 72 0, NULL); 73 if (!se_ua_cache) { 74 pr_err("kmem_cache_create() for struct se_ua failed\n"); 75 goto out_free_sess_cache; 76 } 77 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", 78 sizeof(struct t10_pr_registration), 79 __alignof__(struct t10_pr_registration), 0, NULL); 80 if (!t10_pr_reg_cache) { 81 pr_err("kmem_cache_create() for struct t10_pr_registration" 82 " failed\n"); 83 goto out_free_ua_cache; 84 } 85 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", 86 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), 87 0, NULL); 88 if (!t10_alua_lu_gp_cache) { 89 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" 90 " failed\n"); 91 goto out_free_pr_reg_cache; 92 } 93 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", 94 sizeof(struct t10_alua_lu_gp_member), 95 __alignof__(struct t10_alua_lu_gp_member), 0, NULL); 96 if (!t10_alua_lu_gp_mem_cache) { 97 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" 98 "cache failed\n"); 99 goto out_free_lu_gp_cache; 100 } 101 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", 102 sizeof(struct 
t10_alua_tg_pt_gp), 103 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); 104 if (!t10_alua_tg_pt_gp_cache) { 105 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 106 "cache failed\n"); 107 goto out_free_lu_gp_mem_cache; 108 } 109 t10_alua_lba_map_cache = kmem_cache_create( 110 "t10_alua_lba_map_cache", 111 sizeof(struct t10_alua_lba_map), 112 __alignof__(struct t10_alua_lba_map), 0, NULL); 113 if (!t10_alua_lba_map_cache) { 114 pr_err("kmem_cache_create() for t10_alua_lba_map_" 115 "cache failed\n"); 116 goto out_free_tg_pt_gp_cache; 117 } 118 t10_alua_lba_map_mem_cache = kmem_cache_create( 119 "t10_alua_lba_map_mem_cache", 120 sizeof(struct t10_alua_lba_map_member), 121 __alignof__(struct t10_alua_lba_map_member), 0, NULL); 122 if (!t10_alua_lba_map_mem_cache) { 123 pr_err("kmem_cache_create() for t10_alua_lba_map_mem_" 124 "cache failed\n"); 125 goto out_free_lba_map_cache; 126 } 127 128 target_completion_wq = alloc_workqueue("target_completion", 129 WQ_MEM_RECLAIM, 0); 130 if (!target_completion_wq) 131 goto out_free_lba_map_mem_cache; 132 133 target_submission_wq = alloc_workqueue("target_submission", 134 WQ_MEM_RECLAIM, 0); 135 if (!target_submission_wq) 136 goto out_free_completion_wq; 137 138 return 0; 139 140 out_free_completion_wq: 141 destroy_workqueue(target_completion_wq); 142 out_free_lba_map_mem_cache: 143 kmem_cache_destroy(t10_alua_lba_map_mem_cache); 144 out_free_lba_map_cache: 145 kmem_cache_destroy(t10_alua_lba_map_cache); 146 out_free_tg_pt_gp_cache: 147 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 148 out_free_lu_gp_mem_cache: 149 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 150 out_free_lu_gp_cache: 151 kmem_cache_destroy(t10_alua_lu_gp_cache); 152 out_free_pr_reg_cache: 153 kmem_cache_destroy(t10_pr_reg_cache); 154 out_free_ua_cache: 155 kmem_cache_destroy(se_ua_cache); 156 out_free_sess_cache: 157 kmem_cache_destroy(se_sess_cache); 158 out: 159 return -ENOMEM; 160 } 161 162 void release_se_kmem_caches(void) 163 { 164 destroy_workqueue(target_submission_wq); 165 destroy_workqueue(target_completion_wq); 166 kmem_cache_destroy(se_sess_cache); 167 kmem_cache_destroy(se_ua_cache); 168 kmem_cache_destroy(t10_pr_reg_cache); 169 kmem_cache_destroy(t10_alua_lu_gp_cache); 170 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 171 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 172 kmem_cache_destroy(t10_alua_lba_map_cache); 173 kmem_cache_destroy(t10_alua_lba_map_mem_cache); 174 } 175 176 /* This code ensures unique mib indexes are handed out. 
*/ 177 static DEFINE_SPINLOCK(scsi_mib_index_lock); 178 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; 179 180 /* 181 * Allocate a new row index for the entry type specified 182 */ 183 u32 scsi_get_new_index(scsi_index_t type) 184 { 185 u32 new_index; 186 187 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); 188 189 spin_lock(&scsi_mib_index_lock); 190 new_index = ++scsi_mib_index[type]; 191 spin_unlock(&scsi_mib_index_lock); 192 193 return new_index; 194 } 195 196 void transport_subsystem_check_init(void) 197 { 198 int ret; 199 static int sub_api_initialized; 200 201 if (sub_api_initialized) 202 return; 203 204 ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock"); 205 if (ret != 0) 206 pr_err("Unable to load target_core_iblock\n"); 207 208 ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file"); 209 if (ret != 0) 210 pr_err("Unable to load target_core_file\n"); 211 212 ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi"); 213 if (ret != 0) 214 pr_err("Unable to load target_core_pscsi\n"); 215 216 ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user"); 217 if (ret != 0) 218 pr_err("Unable to load target_core_user\n"); 219 220 sub_api_initialized = 1; 221 } 222 223 static void target_release_sess_cmd_refcnt(struct percpu_ref *ref) 224 { 225 struct se_session *sess = container_of(ref, typeof(*sess), cmd_count); 226 227 wake_up(&sess->cmd_count_wq); 228 } 229 230 /** 231 * transport_init_session - initialize a session object 232 * @se_sess: Session object pointer. 233 * 234 * The caller must have zero-initialized @se_sess before calling this function. 235 */ 236 int transport_init_session(struct se_session *se_sess) 237 { 238 INIT_LIST_HEAD(&se_sess->sess_list); 239 INIT_LIST_HEAD(&se_sess->sess_acl_list); 240 spin_lock_init(&se_sess->sess_cmd_lock); 241 init_waitqueue_head(&se_sess->cmd_count_wq); 242 init_completion(&se_sess->stop_done); 243 atomic_set(&se_sess->stopped, 0); 244 return percpu_ref_init(&se_sess->cmd_count, 245 target_release_sess_cmd_refcnt, 0, GFP_KERNEL); 246 } 247 EXPORT_SYMBOL(transport_init_session); 248 249 void transport_uninit_session(struct se_session *se_sess) 250 { 251 /* 252 * Drivers like iscsi and loop do not call target_stop_session 253 * during session shutdown so we have to drop the ref taken at init 254 * time here. 255 */ 256 if (!atomic_read(&se_sess->stopped)) 257 percpu_ref_put(&se_sess->cmd_count); 258 259 percpu_ref_exit(&se_sess->cmd_count); 260 } 261 262 /** 263 * transport_alloc_session - allocate a session object and initialize it 264 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported. 265 */ 266 struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops) 267 { 268 struct se_session *se_sess; 269 int ret; 270 271 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 272 if (!se_sess) { 273 pr_err("Unable to allocate struct se_session from" 274 " se_sess_cache\n"); 275 return ERR_PTR(-ENOMEM); 276 } 277 ret = transport_init_session(se_sess); 278 if (ret < 0) { 279 kmem_cache_free(se_sess_cache, se_sess); 280 return ERR_PTR(ret); 281 } 282 se_sess->sup_prot_ops = sup_prot_ops; 283 284 return se_sess; 285 } 286 EXPORT_SYMBOL(transport_alloc_session); 287 288 /** 289 * transport_alloc_session_tags - allocate target driver private data 290 * @se_sess: Session pointer. 291 * @tag_num: Maximum number of in-flight commands between initiator and target. 
292 * @tag_size: Size in bytes of the private data a target driver associates with 293 * each command. 294 */ 295 int transport_alloc_session_tags(struct se_session *se_sess, 296 unsigned int tag_num, unsigned int tag_size) 297 { 298 int rc; 299 300 se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num, 301 GFP_KERNEL | __GFP_RETRY_MAYFAIL); 302 if (!se_sess->sess_cmd_map) { 303 pr_err("Unable to allocate se_sess->sess_cmd_map\n"); 304 return -ENOMEM; 305 } 306 307 rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1, 308 false, GFP_KERNEL, NUMA_NO_NODE); 309 if (rc < 0) { 310 pr_err("Unable to init se_sess->sess_tag_pool," 311 " tag_num: %u\n", tag_num); 312 kvfree(se_sess->sess_cmd_map); 313 se_sess->sess_cmd_map = NULL; 314 return -ENOMEM; 315 } 316 317 return 0; 318 } 319 EXPORT_SYMBOL(transport_alloc_session_tags); 320 321 /** 322 * transport_init_session_tags - allocate a session and target driver private data 323 * @tag_num: Maximum number of in-flight commands between initiator and target. 324 * @tag_size: Size in bytes of the private data a target driver associates with 325 * each command. 326 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported. 327 */ 328 static struct se_session * 329 transport_init_session_tags(unsigned int tag_num, unsigned int tag_size, 330 enum target_prot_op sup_prot_ops) 331 { 332 struct se_session *se_sess; 333 int rc; 334 335 if (tag_num != 0 && !tag_size) { 336 pr_err("init_session_tags called with percpu-ida tag_num:" 337 " %u, but zero tag_size\n", tag_num); 338 return ERR_PTR(-EINVAL); 339 } 340 if (!tag_num && tag_size) { 341 pr_err("init_session_tags called with percpu-ida tag_size:" 342 " %u, but zero tag_num\n", tag_size); 343 return ERR_PTR(-EINVAL); 344 } 345 346 se_sess = transport_alloc_session(sup_prot_ops); 347 if (IS_ERR(se_sess)) 348 return se_sess; 349 350 rc = transport_alloc_session_tags(se_sess, tag_num, tag_size); 351 if (rc < 0) { 352 transport_free_session(se_sess); 353 return ERR_PTR(-ENOMEM); 354 } 355 356 return se_sess; 357 } 358 359 /* 360 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called. 361 */ 362 void __transport_register_session( 363 struct se_portal_group *se_tpg, 364 struct se_node_acl *se_nacl, 365 struct se_session *se_sess, 366 void *fabric_sess_ptr) 367 { 368 const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo; 369 unsigned char buf[PR_REG_ISID_LEN]; 370 unsigned long flags; 371 372 se_sess->se_tpg = se_tpg; 373 se_sess->fabric_sess_ptr = fabric_sess_ptr; 374 /* 375 * Used by struct se_node_acl's under ConfigFS to locate active se_session-t 376 * 377 * Only set for struct se_session's that will actually be moving I/O. 378 * eg: *NOT* discovery sessions. 379 */ 380 if (se_nacl) { 381 /* 382 * 383 * Determine if fabric allows for T10-PI feature bits exposed to 384 * initiators for device backends with !dev->dev_attrib.pi_prot_type. 385 * 386 * If so, then always save prot_type on a per se_node_acl node 387 * basis and re-instate the previous sess_prot_type to avoid 388 * disabling PI from below any previously initiator side 389 * registered LUNs. 390 */ 391 if (se_nacl->saved_prot_type) 392 se_sess->sess_prot_type = se_nacl->saved_prot_type; 393 else if (tfo->tpg_check_prot_fabric_only) 394 se_sess->sess_prot_type = se_nacl->saved_prot_type = 395 tfo->tpg_check_prot_fabric_only(se_tpg); 396 /* 397 * If the fabric module supports an ISID based TransportID, 398 * save this value in binary from the fabric I_T Nexus now. 
399 */ 400 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { 401 memset(&buf[0], 0, PR_REG_ISID_LEN); 402 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, 403 &buf[0], PR_REG_ISID_LEN); 404 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); 405 } 406 407 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 408 /* 409 * The se_nacl->nacl_sess pointer will be set to the 410 * last active I_T Nexus for each struct se_node_acl. 411 */ 412 se_nacl->nacl_sess = se_sess; 413 414 list_add_tail(&se_sess->sess_acl_list, 415 &se_nacl->acl_sess_list); 416 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 417 } 418 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 419 420 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 421 se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr); 422 } 423 EXPORT_SYMBOL(__transport_register_session); 424 425 void transport_register_session( 426 struct se_portal_group *se_tpg, 427 struct se_node_acl *se_nacl, 428 struct se_session *se_sess, 429 void *fabric_sess_ptr) 430 { 431 unsigned long flags; 432 433 spin_lock_irqsave(&se_tpg->session_lock, flags); 434 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); 435 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 436 } 437 EXPORT_SYMBOL(transport_register_session); 438 439 struct se_session * 440 target_setup_session(struct se_portal_group *tpg, 441 unsigned int tag_num, unsigned int tag_size, 442 enum target_prot_op prot_op, 443 const char *initiatorname, void *private, 444 int (*callback)(struct se_portal_group *, 445 struct se_session *, void *)) 446 { 447 struct se_session *sess; 448 449 /* 450 * If the fabric driver is using percpu-ida based pre allocation 451 * of I/O descriptor tags, go ahead and perform that setup now.. 452 */ 453 if (tag_num != 0) 454 sess = transport_init_session_tags(tag_num, tag_size, prot_op); 455 else 456 sess = transport_alloc_session(prot_op); 457 458 if (IS_ERR(sess)) 459 return sess; 460 461 sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg, 462 (unsigned char *)initiatorname); 463 if (!sess->se_node_acl) { 464 transport_free_session(sess); 465 return ERR_PTR(-EACCES); 466 } 467 /* 468 * Go ahead and perform any remaining fabric setup that is 469 * required before transport_register_session(). 
470 */ 471 if (callback != NULL) { 472 int rc = callback(tpg, sess, private); 473 if (rc) { 474 transport_free_session(sess); 475 return ERR_PTR(rc); 476 } 477 } 478 479 transport_register_session(tpg, sess->se_node_acl, sess, private); 480 return sess; 481 } 482 EXPORT_SYMBOL(target_setup_session); 483 484 ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) 485 { 486 struct se_session *se_sess; 487 ssize_t len = 0; 488 489 spin_lock_bh(&se_tpg->session_lock); 490 list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { 491 if (!se_sess->se_node_acl) 492 continue; 493 if (!se_sess->se_node_acl->dynamic_node_acl) 494 continue; 495 if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE) 496 break; 497 498 len += snprintf(page + len, PAGE_SIZE - len, "%s\n", 499 se_sess->se_node_acl->initiatorname); 500 len += 1; /* Include NULL terminator */ 501 } 502 spin_unlock_bh(&se_tpg->session_lock); 503 504 return len; 505 } 506 EXPORT_SYMBOL(target_show_dynamic_sessions); 507 508 static void target_complete_nacl(struct kref *kref) 509 { 510 struct se_node_acl *nacl = container_of(kref, 511 struct se_node_acl, acl_kref); 512 struct se_portal_group *se_tpg = nacl->se_tpg; 513 514 if (!nacl->dynamic_stop) { 515 complete(&nacl->acl_free_comp); 516 return; 517 } 518 519 mutex_lock(&se_tpg->acl_node_mutex); 520 list_del_init(&nacl->acl_list); 521 mutex_unlock(&se_tpg->acl_node_mutex); 522 523 core_tpg_wait_for_nacl_pr_ref(nacl); 524 core_free_device_list_for_node(nacl, se_tpg); 525 kfree(nacl); 526 } 527 528 void target_put_nacl(struct se_node_acl *nacl) 529 { 530 kref_put(&nacl->acl_kref, target_complete_nacl); 531 } 532 EXPORT_SYMBOL(target_put_nacl); 533 534 void transport_deregister_session_configfs(struct se_session *se_sess) 535 { 536 struct se_node_acl *se_nacl; 537 unsigned long flags; 538 /* 539 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 540 */ 541 se_nacl = se_sess->se_node_acl; 542 if (se_nacl) { 543 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 544 if (!list_empty(&se_sess->sess_acl_list)) 545 list_del_init(&se_sess->sess_acl_list); 546 /* 547 * If the session list is empty, then clear the pointer. 548 * Otherwise, set the struct se_session pointer from the tail 549 * element of the per struct se_node_acl active session list. 550 */ 551 if (list_empty(&se_nacl->acl_sess_list)) 552 se_nacl->nacl_sess = NULL; 553 else { 554 se_nacl->nacl_sess = container_of( 555 se_nacl->acl_sess_list.prev, 556 struct se_session, sess_acl_list); 557 } 558 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 559 } 560 } 561 EXPORT_SYMBOL(transport_deregister_session_configfs); 562 563 void transport_free_session(struct se_session *se_sess) 564 { 565 struct se_node_acl *se_nacl = se_sess->se_node_acl; 566 567 /* 568 * Drop the se_node_acl->nacl_kref obtained from within 569 * core_tpg_get_initiator_node_acl(). 570 */ 571 if (se_nacl) { 572 struct se_portal_group *se_tpg = se_nacl->se_tpg; 573 const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo; 574 unsigned long flags; 575 576 se_sess->se_node_acl = NULL; 577 578 /* 579 * Also determine if we need to drop the extra ->cmd_kref if 580 * it had been previously dynamically generated, and 581 * the endpoint is not caching dynamic ACLs. 
582 */ 583 mutex_lock(&se_tpg->acl_node_mutex); 584 if (se_nacl->dynamic_node_acl && 585 !se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 586 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 587 if (list_empty(&se_nacl->acl_sess_list)) 588 se_nacl->dynamic_stop = true; 589 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 590 591 if (se_nacl->dynamic_stop) 592 list_del_init(&se_nacl->acl_list); 593 } 594 mutex_unlock(&se_tpg->acl_node_mutex); 595 596 if (se_nacl->dynamic_stop) 597 target_put_nacl(se_nacl); 598 599 target_put_nacl(se_nacl); 600 } 601 if (se_sess->sess_cmd_map) { 602 sbitmap_queue_free(&se_sess->sess_tag_pool); 603 kvfree(se_sess->sess_cmd_map); 604 } 605 transport_uninit_session(se_sess); 606 kmem_cache_free(se_sess_cache, se_sess); 607 } 608 EXPORT_SYMBOL(transport_free_session); 609 610 static int target_release_res(struct se_device *dev, void *data) 611 { 612 struct se_session *sess = data; 613 614 if (dev->reservation_holder == sess) 615 target_release_reservation(dev); 616 return 0; 617 } 618 619 void transport_deregister_session(struct se_session *se_sess) 620 { 621 struct se_portal_group *se_tpg = se_sess->se_tpg; 622 unsigned long flags; 623 624 if (!se_tpg) { 625 transport_free_session(se_sess); 626 return; 627 } 628 629 spin_lock_irqsave(&se_tpg->session_lock, flags); 630 list_del(&se_sess->sess_list); 631 se_sess->se_tpg = NULL; 632 se_sess->fabric_sess_ptr = NULL; 633 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 634 635 /* 636 * Since the session is being removed, release SPC-2 637 * reservations held by the session that is disappearing. 638 */ 639 target_for_each_device(target_release_res, se_sess); 640 641 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 642 se_tpg->se_tpg_tfo->fabric_name); 643 /* 644 * If last kref is dropping now for an explicit NodeACL, awake sleeping 645 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 646 * removal context from within transport_free_session() code. 647 * 648 * For dynamic ACL, target_put_nacl() uses target_complete_nacl() 649 * to release all remaining generate_node_acl=1 created ACL resources. 650 */ 651 652 transport_free_session(se_sess); 653 } 654 EXPORT_SYMBOL(transport_deregister_session); 655 656 void target_remove_session(struct se_session *se_sess) 657 { 658 transport_deregister_session_configfs(se_sess); 659 transport_deregister_session(se_sess); 660 } 661 EXPORT_SYMBOL(target_remove_session); 662 663 static void target_remove_from_state_list(struct se_cmd *cmd) 664 { 665 struct se_device *dev = cmd->se_dev; 666 unsigned long flags; 667 668 if (!dev) 669 return; 670 671 spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags); 672 if (cmd->state_active) { 673 list_del(&cmd->state_list); 674 cmd->state_active = false; 675 } 676 spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags); 677 } 678 679 /* 680 * This function is called by the target core after the target core has 681 * finished processing a SCSI command or SCSI TMF. Both the regular command 682 * processing code and the code for aborting commands can call this 683 * function. CMD_T_STOP is set if and only if another thread is waiting 684 * inside transport_wait_for_tasks() for t_transport_stop_comp. 685 */ 686 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) 687 { 688 unsigned long flags; 689 690 target_remove_from_state_list(cmd); 691 692 /* 693 * Clear struct se_cmd->se_lun before the handoff to FE. 
694 */ 695 cmd->se_lun = NULL; 696 697 spin_lock_irqsave(&cmd->t_state_lock, flags); 698 /* 699 * Determine if frontend context caller is requesting the stopping of 700 * this command for frontend exceptions. 701 */ 702 if (cmd->transport_state & CMD_T_STOP) { 703 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 704 __func__, __LINE__, cmd->tag); 705 706 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 707 708 complete_all(&cmd->t_transport_stop_comp); 709 return 1; 710 } 711 cmd->transport_state &= ~CMD_T_ACTIVE; 712 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 713 714 /* 715 * Some fabric modules like tcm_loop can release their internally 716 * allocated I/O reference and struct se_cmd now. 717 * 718 * Fabric modules are expected to return '1' here if the se_cmd being 719 * passed is released at this point, or zero if not being released. 720 */ 721 return cmd->se_tfo->check_stop_free(cmd); 722 } 723 724 static void transport_lun_remove_cmd(struct se_cmd *cmd) 725 { 726 struct se_lun *lun = cmd->se_lun; 727 728 if (!lun) 729 return; 730 731 if (cmpxchg(&cmd->lun_ref_active, true, false)) 732 percpu_ref_put(&lun->lun_ref); 733 } 734 735 static void target_complete_failure_work(struct work_struct *work) 736 { 737 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 738 739 transport_generic_request_failure(cmd, 740 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 741 } 742 743 /* 744 * Used when asking transport to copy Sense Data from the underlying 745 * Linux/SCSI struct scsi_cmnd 746 */ 747 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) 748 { 749 struct se_device *dev = cmd->se_dev; 750 751 WARN_ON(!cmd->se_lun); 752 753 if (!dev) 754 return NULL; 755 756 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) 757 return NULL; 758 759 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 760 761 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", 762 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); 763 return cmd->sense_buffer; 764 } 765 766 void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense) 767 { 768 unsigned char *cmd_sense_buf; 769 unsigned long flags; 770 771 spin_lock_irqsave(&cmd->t_state_lock, flags); 772 cmd_sense_buf = transport_get_sense_buffer(cmd); 773 if (!cmd_sense_buf) { 774 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 775 return; 776 } 777 778 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; 779 memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length); 780 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 781 } 782 EXPORT_SYMBOL(transport_copy_sense_to_cmd); 783 784 static void target_handle_abort(struct se_cmd *cmd) 785 { 786 bool tas = cmd->transport_state & CMD_T_TAS; 787 bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF; 788 int ret; 789 790 pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas); 791 792 if (tas) { 793 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 794 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 795 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n", 796 cmd->t_task_cdb[0], cmd->tag); 797 trace_target_cmd_complete(cmd); 798 ret = cmd->se_tfo->queue_status(cmd); 799 if (ret) { 800 transport_handle_queue_full(cmd, cmd->se_dev, 801 ret, false); 802 return; 803 } 804 } else { 805 cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED; 806 cmd->se_tfo->queue_tm_rsp(cmd); 807 } 808 } else { 809 /* 810 * Allow the fabric driver to unmap any resources before 811 * releasing the descriptor via TFO->release_cmd(). 
812 */ 813 cmd->se_tfo->aborted_task(cmd); 814 if (ack_kref) 815 WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0); 816 /* 817 * To do: establish a unit attention condition on the I_T 818 * nexus associated with cmd. See also the paragraph "Aborting 819 * commands" in SAM. 820 */ 821 } 822 823 WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0); 824 825 transport_lun_remove_cmd(cmd); 826 827 transport_cmd_check_stop_to_fabric(cmd); 828 } 829 830 static void target_abort_work(struct work_struct *work) 831 { 832 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 833 834 target_handle_abort(cmd); 835 } 836 837 static bool target_cmd_interrupted(struct se_cmd *cmd) 838 { 839 int post_ret; 840 841 if (cmd->transport_state & CMD_T_ABORTED) { 842 if (cmd->transport_complete_callback) 843 cmd->transport_complete_callback(cmd, false, &post_ret); 844 INIT_WORK(&cmd->work, target_abort_work); 845 queue_work(target_completion_wq, &cmd->work); 846 return true; 847 } else if (cmd->transport_state & CMD_T_STOP) { 848 if (cmd->transport_complete_callback) 849 cmd->transport_complete_callback(cmd, false, &post_ret); 850 complete_all(&cmd->t_transport_stop_comp); 851 return true; 852 } 853 854 return false; 855 } 856 857 /* May be called from interrupt context so must not sleep. */ 858 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 859 { 860 int success; 861 unsigned long flags; 862 863 if (target_cmd_interrupted(cmd)) 864 return; 865 866 cmd->scsi_status = scsi_status; 867 868 spin_lock_irqsave(&cmd->t_state_lock, flags); 869 switch (cmd->scsi_status) { 870 case SAM_STAT_CHECK_CONDITION: 871 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 872 success = 1; 873 else 874 success = 0; 875 break; 876 default: 877 success = 1; 878 break; 879 } 880 881 cmd->t_state = TRANSPORT_COMPLETE; 882 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 883 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 884 885 INIT_WORK(&cmd->work, success ? 
target_complete_ok_work : 886 target_complete_failure_work); 887 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); 888 } 889 EXPORT_SYMBOL(target_complete_cmd); 890 891 void target_set_cmd_data_length(struct se_cmd *cmd, int length) 892 { 893 if (length < cmd->data_length) { 894 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 895 cmd->residual_count += cmd->data_length - length; 896 } else { 897 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 898 cmd->residual_count = cmd->data_length - length; 899 } 900 901 cmd->data_length = length; 902 } 903 } 904 EXPORT_SYMBOL(target_set_cmd_data_length); 905 906 void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) 907 { 908 if (scsi_status == SAM_STAT_GOOD || 909 cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) { 910 target_set_cmd_data_length(cmd, length); 911 } 912 913 target_complete_cmd(cmd, scsi_status); 914 } 915 EXPORT_SYMBOL(target_complete_cmd_with_length); 916 917 static void target_add_to_state_list(struct se_cmd *cmd) 918 { 919 struct se_device *dev = cmd->se_dev; 920 unsigned long flags; 921 922 spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags); 923 if (!cmd->state_active) { 924 list_add_tail(&cmd->state_list, 925 &dev->queues[cmd->cpuid].state_list); 926 cmd->state_active = true; 927 } 928 spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags); 929 } 930 931 /* 932 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status 933 */ 934 static void transport_write_pending_qf(struct se_cmd *cmd); 935 static void transport_complete_qf(struct se_cmd *cmd); 936 937 void target_qf_do_work(struct work_struct *work) 938 { 939 struct se_device *dev = container_of(work, struct se_device, 940 qf_work_queue); 941 LIST_HEAD(qf_cmd_list); 942 struct se_cmd *cmd, *cmd_tmp; 943 944 spin_lock_irq(&dev->qf_cmd_lock); 945 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); 946 spin_unlock_irq(&dev->qf_cmd_lock); 947 948 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 949 list_del(&cmd->se_qf_node); 950 atomic_dec_mb(&dev->dev_qf_count); 951 952 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 953 " context: %s\n", cmd->se_tfo->fabric_name, cmd, 954 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : 955 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? 
"WRITE_PENDING" 956 : "UNKNOWN"); 957 958 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 959 transport_write_pending_qf(cmd); 960 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK || 961 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) 962 transport_complete_qf(cmd); 963 } 964 } 965 966 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 967 { 968 switch (cmd->data_direction) { 969 case DMA_NONE: 970 return "NONE"; 971 case DMA_FROM_DEVICE: 972 return "READ"; 973 case DMA_TO_DEVICE: 974 return "WRITE"; 975 case DMA_BIDIRECTIONAL: 976 return "BIDI"; 977 default: 978 break; 979 } 980 981 return "UNKNOWN"; 982 } 983 984 void transport_dump_dev_state( 985 struct se_device *dev, 986 char *b, 987 int *bl) 988 { 989 *bl += sprintf(b + *bl, "Status: "); 990 if (dev->export_count) 991 *bl += sprintf(b + *bl, "ACTIVATED"); 992 else 993 *bl += sprintf(b + *bl, "DEACTIVATED"); 994 995 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); 996 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", 997 dev->dev_attrib.block_size, 998 dev->dev_attrib.hw_max_sectors); 999 *bl += sprintf(b + *bl, " "); 1000 } 1001 1002 void transport_dump_vpd_proto_id( 1003 struct t10_vpd *vpd, 1004 unsigned char *p_buf, 1005 int p_buf_len) 1006 { 1007 unsigned char buf[VPD_TMP_BUF_SIZE]; 1008 int len; 1009 1010 memset(buf, 0, VPD_TMP_BUF_SIZE); 1011 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 1012 1013 switch (vpd->protocol_identifier) { 1014 case 0x00: 1015 sprintf(buf+len, "Fibre Channel\n"); 1016 break; 1017 case 0x10: 1018 sprintf(buf+len, "Parallel SCSI\n"); 1019 break; 1020 case 0x20: 1021 sprintf(buf+len, "SSA\n"); 1022 break; 1023 case 0x30: 1024 sprintf(buf+len, "IEEE 1394\n"); 1025 break; 1026 case 0x40: 1027 sprintf(buf+len, "SCSI Remote Direct Memory Access" 1028 " Protocol\n"); 1029 break; 1030 case 0x50: 1031 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 1032 break; 1033 case 0x60: 1034 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 1035 break; 1036 case 0x70: 1037 sprintf(buf+len, "Automation/Drive Interface Transport" 1038 " Protocol\n"); 1039 break; 1040 case 0x80: 1041 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 1042 break; 1043 default: 1044 sprintf(buf+len, "Unknown 0x%02x\n", 1045 vpd->protocol_identifier); 1046 break; 1047 } 1048 1049 if (p_buf) 1050 strncpy(p_buf, buf, p_buf_len); 1051 else 1052 pr_debug("%s", buf); 1053 } 1054 1055 void 1056 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 1057 { 1058 /* 1059 * Check if the Protocol Identifier Valid (PIV) bit is set.. 
1060 * 1061 * from spc3r23.pdf section 7.5.1 1062 */ 1063 if (page_83[1] & 0x80) { 1064 vpd->protocol_identifier = (page_83[0] & 0xf0); 1065 vpd->protocol_identifier_set = 1; 1066 transport_dump_vpd_proto_id(vpd, NULL, 0); 1067 } 1068 } 1069 EXPORT_SYMBOL(transport_set_vpd_proto_id); 1070 1071 int transport_dump_vpd_assoc( 1072 struct t10_vpd *vpd, 1073 unsigned char *p_buf, 1074 int p_buf_len) 1075 { 1076 unsigned char buf[VPD_TMP_BUF_SIZE]; 1077 int ret = 0; 1078 int len; 1079 1080 memset(buf, 0, VPD_TMP_BUF_SIZE); 1081 len = sprintf(buf, "T10 VPD Identifier Association: "); 1082 1083 switch (vpd->association) { 1084 case 0x00: 1085 sprintf(buf+len, "addressed logical unit\n"); 1086 break; 1087 case 0x10: 1088 sprintf(buf+len, "target port\n"); 1089 break; 1090 case 0x20: 1091 sprintf(buf+len, "SCSI target device\n"); 1092 break; 1093 default: 1094 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); 1095 ret = -EINVAL; 1096 break; 1097 } 1098 1099 if (p_buf) 1100 strncpy(p_buf, buf, p_buf_len); 1101 else 1102 pr_debug("%s", buf); 1103 1104 return ret; 1105 } 1106 1107 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) 1108 { 1109 /* 1110 * The VPD identification association.. 1111 * 1112 * from spc3r23.pdf Section 7.6.3.1 Table 297 1113 */ 1114 vpd->association = (page_83[1] & 0x30); 1115 return transport_dump_vpd_assoc(vpd, NULL, 0); 1116 } 1117 EXPORT_SYMBOL(transport_set_vpd_assoc); 1118 1119 int transport_dump_vpd_ident_type( 1120 struct t10_vpd *vpd, 1121 unsigned char *p_buf, 1122 int p_buf_len) 1123 { 1124 unsigned char buf[VPD_TMP_BUF_SIZE]; 1125 int ret = 0; 1126 int len; 1127 1128 memset(buf, 0, VPD_TMP_BUF_SIZE); 1129 len = sprintf(buf, "T10 VPD Identifier Type: "); 1130 1131 switch (vpd->device_identifier_type) { 1132 case 0x00: 1133 sprintf(buf+len, "Vendor specific\n"); 1134 break; 1135 case 0x01: 1136 sprintf(buf+len, "T10 Vendor ID based\n"); 1137 break; 1138 case 0x02: 1139 sprintf(buf+len, "EUI-64 based\n"); 1140 break; 1141 case 0x03: 1142 sprintf(buf+len, "NAA\n"); 1143 break; 1144 case 0x04: 1145 sprintf(buf+len, "Relative target port identifier\n"); 1146 break; 1147 case 0x08: 1148 sprintf(buf+len, "SCSI name string\n"); 1149 break; 1150 default: 1151 sprintf(buf+len, "Unsupported: 0x%02x\n", 1152 vpd->device_identifier_type); 1153 ret = -EINVAL; 1154 break; 1155 } 1156 1157 if (p_buf) { 1158 if (p_buf_len < strlen(buf)+1) 1159 return -EINVAL; 1160 strncpy(p_buf, buf, p_buf_len); 1161 } else { 1162 pr_debug("%s", buf); 1163 } 1164 1165 return ret; 1166 } 1167 1168 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) 1169 { 1170 /* 1171 * The VPD identifier type.. 
1172 * 1173 * from spc3r23.pdf Section 7.6.3.1 Table 298 1174 */ 1175 vpd->device_identifier_type = (page_83[1] & 0x0f); 1176 return transport_dump_vpd_ident_type(vpd, NULL, 0); 1177 } 1178 EXPORT_SYMBOL(transport_set_vpd_ident_type); 1179 1180 int transport_dump_vpd_ident( 1181 struct t10_vpd *vpd, 1182 unsigned char *p_buf, 1183 int p_buf_len) 1184 { 1185 unsigned char buf[VPD_TMP_BUF_SIZE]; 1186 int ret = 0; 1187 1188 memset(buf, 0, VPD_TMP_BUF_SIZE); 1189 1190 switch (vpd->device_identifier_code_set) { 1191 case 0x01: /* Binary */ 1192 snprintf(buf, sizeof(buf), 1193 "T10 VPD Binary Device Identifier: %s\n", 1194 &vpd->device_identifier[0]); 1195 break; 1196 case 0x02: /* ASCII */ 1197 snprintf(buf, sizeof(buf), 1198 "T10 VPD ASCII Device Identifier: %s\n", 1199 &vpd->device_identifier[0]); 1200 break; 1201 case 0x03: /* UTF-8 */ 1202 snprintf(buf, sizeof(buf), 1203 "T10 VPD UTF-8 Device Identifier: %s\n", 1204 &vpd->device_identifier[0]); 1205 break; 1206 default: 1207 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" 1208 " 0x%02x", vpd->device_identifier_code_set); 1209 ret = -EINVAL; 1210 break; 1211 } 1212 1213 if (p_buf) 1214 strncpy(p_buf, buf, p_buf_len); 1215 else 1216 pr_debug("%s", buf); 1217 1218 return ret; 1219 } 1220 1221 int 1222 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) 1223 { 1224 static const char hex_str[] = "0123456789abcdef"; 1225 int j = 0, i = 4; /* offset to start of the identifier */ 1226 1227 /* 1228 * The VPD Code Set (encoding) 1229 * 1230 * from spc3r23.pdf Section 7.6.3.1 Table 296 1231 */ 1232 vpd->device_identifier_code_set = (page_83[0] & 0x0f); 1233 switch (vpd->device_identifier_code_set) { 1234 case 0x01: /* Binary */ 1235 vpd->device_identifier[j++] = 1236 hex_str[vpd->device_identifier_type]; 1237 while (i < (4 + page_83[3])) { 1238 vpd->device_identifier[j++] = 1239 hex_str[(page_83[i] & 0xf0) >> 4]; 1240 vpd->device_identifier[j++] = 1241 hex_str[page_83[i] & 0x0f]; 1242 i++; 1243 } 1244 break; 1245 case 0x02: /* ASCII */ 1246 case 0x03: /* UTF-8 */ 1247 while (i < (4 + page_83[3])) 1248 vpd->device_identifier[j++] = page_83[i++]; 1249 break; 1250 default: 1251 break; 1252 } 1253 1254 return transport_dump_vpd_ident(vpd, NULL, 0); 1255 } 1256 EXPORT_SYMBOL(transport_set_vpd_ident); 1257 1258 static sense_reason_t 1259 target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev, 1260 unsigned int size) 1261 { 1262 u32 mtl; 1263 1264 if (!cmd->se_tfo->max_data_sg_nents) 1265 return TCM_NO_SENSE; 1266 /* 1267 * Check if fabric enforced maximum SGL entries per I/O descriptor 1268 * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT + 1269 * residual_count and reduce original cmd->data_length to maximum 1270 * length based on single PAGE_SIZE entry scatter-lists. 1271 */ 1272 mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE); 1273 if (cmd->data_length > mtl) { 1274 /* 1275 * If an existing CDB overflow is present, calculate new residual 1276 * based on CDB size minus fabric maximum transfer length. 1277 * 1278 * If an existing CDB underflow is present, calculate new residual 1279 * based on original cmd->data_length minus fabric maximum transfer 1280 * length. 1281 * 1282 * Otherwise, set the underflow residual based on cmd->data_length 1283 * minus fabric maximum transfer length. 
1284 */ 1285 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1286 cmd->residual_count = (size - mtl); 1287 } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 1288 u32 orig_dl = size + cmd->residual_count; 1289 cmd->residual_count = (orig_dl - mtl); 1290 } else { 1291 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1292 cmd->residual_count = (cmd->data_length - mtl); 1293 } 1294 cmd->data_length = mtl; 1295 /* 1296 * Reset sbc_check_prot() calculated protection payload 1297 * length based upon the new smaller MTL. 1298 */ 1299 if (cmd->prot_length) { 1300 u32 sectors = (mtl / dev->dev_attrib.block_size); 1301 cmd->prot_length = dev->prot_length * sectors; 1302 } 1303 } 1304 return TCM_NO_SENSE; 1305 } 1306 1307 /** 1308 * target_cmd_size_check - Check whether there will be a residual. 1309 * @cmd: SCSI command. 1310 * @size: Data buffer size derived from CDB. The data buffer size provided by 1311 * the SCSI transport driver is available in @cmd->data_length. 1312 * 1313 * Compare the data buffer size from the CDB with the data buffer limit from the transport 1314 * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary. 1315 * 1316 * Note: target drivers set @cmd->data_length by calling __target_init_cmd(). 1317 * 1318 * Return: TCM_NO_SENSE 1319 */ 1320 sense_reason_t 1321 target_cmd_size_check(struct se_cmd *cmd, unsigned int size) 1322 { 1323 struct se_device *dev = cmd->se_dev; 1324 1325 if (cmd->unknown_data_length) { 1326 cmd->data_length = size; 1327 } else if (size != cmd->data_length) { 1328 pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:" 1329 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 1330 " 0x%02x\n", cmd->se_tfo->fabric_name, 1331 cmd->data_length, size, cmd->t_task_cdb[0]); 1332 /* 1333 * For READ command for the overflow case keep the existing 1334 * fabric provided ->data_length. Otherwise for the underflow 1335 * case, reset ->data_length to the smaller SCSI expected data 1336 * transfer length. 1337 */ 1338 if (size > cmd->data_length) { 1339 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; 1340 cmd->residual_count = (size - cmd->data_length); 1341 } else { 1342 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1343 cmd->residual_count = (cmd->data_length - size); 1344 /* 1345 * Do not truncate ->data_length for WRITE command to 1346 * dump all payload 1347 */ 1348 if (cmd->data_direction == DMA_FROM_DEVICE) { 1349 cmd->data_length = size; 1350 } 1351 } 1352 1353 if (cmd->data_direction == DMA_TO_DEVICE) { 1354 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1355 pr_err_ratelimited("Rejecting underflow/overflow" 1356 " for WRITE data CDB\n"); 1357 return TCM_INVALID_FIELD_IN_COMMAND_IU; 1358 } 1359 /* 1360 * Some fabric drivers like iscsi-target still expect to 1361 * always reject overflow writes. Reject this case until 1362 * full fabric driver level support for overflow writes 1363 * is introduced tree-wide. 1364 */ 1365 if (size > cmd->data_length) { 1366 pr_err_ratelimited("Rejecting overflow for" 1367 " WRITE control CDB\n"); 1368 return TCM_INVALID_CDB_FIELD; 1369 } 1370 } 1371 } 1372 1373 return target_check_max_data_sg_nents(cmd, dev, size); 1374 1375 } 1376 1377 /* 1378 * Used by fabric modules containing a local struct se_cmd within their 1379 * fabric dependent per I/O descriptor. 1380 * 1381 * Preserves the value of @cmd->tag. 
1382 */ 1383 void __target_init_cmd( 1384 struct se_cmd *cmd, 1385 const struct target_core_fabric_ops *tfo, 1386 struct se_session *se_sess, 1387 u32 data_length, 1388 int data_direction, 1389 int task_attr, 1390 unsigned char *sense_buffer, u64 unpacked_lun) 1391 { 1392 INIT_LIST_HEAD(&cmd->se_delayed_node); 1393 INIT_LIST_HEAD(&cmd->se_qf_node); 1394 INIT_LIST_HEAD(&cmd->state_list); 1395 init_completion(&cmd->t_transport_stop_comp); 1396 cmd->free_compl = NULL; 1397 cmd->abrt_compl = NULL; 1398 spin_lock_init(&cmd->t_state_lock); 1399 INIT_WORK(&cmd->work, NULL); 1400 kref_init(&cmd->cmd_kref); 1401 1402 cmd->t_task_cdb = &cmd->__t_task_cdb[0]; 1403 cmd->se_tfo = tfo; 1404 cmd->se_sess = se_sess; 1405 cmd->data_length = data_length; 1406 cmd->data_direction = data_direction; 1407 cmd->sam_task_attr = task_attr; 1408 cmd->sense_buffer = sense_buffer; 1409 cmd->orig_fe_lun = unpacked_lun; 1410 1411 if (!(cmd->se_cmd_flags & SCF_USE_CPUID)) 1412 cmd->cpuid = smp_processor_id(); 1413 1414 cmd->state_active = false; 1415 } 1416 EXPORT_SYMBOL(__target_init_cmd); 1417 1418 static sense_reason_t 1419 transport_check_alloc_task_attr(struct se_cmd *cmd) 1420 { 1421 struct se_device *dev = cmd->se_dev; 1422 1423 /* 1424 * Check if SAM Task Attribute emulation is enabled for this 1425 * struct se_device storage object 1426 */ 1427 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1428 return 0; 1429 1430 if (cmd->sam_task_attr == TCM_ACA_TAG) { 1431 pr_debug("SAM Task Attribute ACA" 1432 " emulation is not supported\n"); 1433 return TCM_INVALID_CDB_FIELD; 1434 } 1435 1436 return 0; 1437 } 1438 1439 sense_reason_t 1440 target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp) 1441 { 1442 sense_reason_t ret; 1443 1444 /* 1445 * Ensure that the received CDB is less than the max (252 + 8) bytes 1446 * for VARIABLE_LENGTH_CMD 1447 */ 1448 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { 1449 pr_err("Received SCSI CDB with command_size: %d that" 1450 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1451 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1452 ret = TCM_INVALID_CDB_FIELD; 1453 goto err; 1454 } 1455 /* 1456 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, 1457 * allocate the additional extended CDB buffer now.. Otherwise 1458 * setup the pointer from __t_task_cdb to t_task_cdb. 1459 */ 1460 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { 1461 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp); 1462 if (!cmd->t_task_cdb) { 1463 pr_err("Unable to allocate cmd->t_task_cdb" 1464 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1465 scsi_command_size(cdb), 1466 (unsigned long)sizeof(cmd->__t_task_cdb)); 1467 ret = TCM_OUT_OF_RESOURCES; 1468 goto err; 1469 } 1470 } 1471 /* 1472 * Copy the original CDB into cmd-> 1473 */ 1474 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); 1475 1476 trace_target_sequencer_start(cmd); 1477 return 0; 1478 1479 err: 1480 /* 1481 * Copy the CDB here to allow trace_target_cmd_complete() to 1482 * print the cdb to the trace buffers. 
1483 */ 1484 memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb), 1485 (unsigned int)TCM_MAX_COMMAND_SIZE)); 1486 return ret; 1487 } 1488 EXPORT_SYMBOL(target_cmd_init_cdb); 1489 1490 sense_reason_t 1491 target_cmd_parse_cdb(struct se_cmd *cmd) 1492 { 1493 struct se_device *dev = cmd->se_dev; 1494 sense_reason_t ret; 1495 1496 ret = dev->transport->parse_cdb(cmd); 1497 if (ret == TCM_UNSUPPORTED_SCSI_OPCODE) 1498 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", 1499 cmd->se_tfo->fabric_name, 1500 cmd->se_sess->se_node_acl->initiatorname, 1501 cmd->t_task_cdb[0]); 1502 if (ret) 1503 return ret; 1504 1505 ret = transport_check_alloc_task_attr(cmd); 1506 if (ret) 1507 return ret; 1508 1509 cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 1510 atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus); 1511 return 0; 1512 } 1513 EXPORT_SYMBOL(target_cmd_parse_cdb); 1514 1515 /* 1516 * Used by fabric module frontends to queue tasks directly. 1517 * May only be used from process context. 1518 */ 1519 int transport_handle_cdb_direct( 1520 struct se_cmd *cmd) 1521 { 1522 sense_reason_t ret; 1523 1524 might_sleep(); 1525 1526 if (!cmd->se_lun) { 1527 dump_stack(); 1528 pr_err("cmd->se_lun is NULL\n"); 1529 return -EINVAL; 1530 } 1531 1532 /* 1533 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that 1534 * outstanding descriptors are handled correctly during shutdown via 1535 * transport_wait_for_tasks() 1536 * 1537 * Also, we don't take cmd->t_state_lock here as we only expect 1538 * this to be called for initial descriptor submission. 1539 */ 1540 cmd->t_state = TRANSPORT_NEW_CMD; 1541 cmd->transport_state |= CMD_T_ACTIVE; 1542 1543 /* 1544 * transport_generic_new_cmd() is already handling QUEUE_FULL, 1545 * so follow TRANSPORT_NEW_CMD processing thread context usage 1546 * and call transport_generic_request_failure() if necessary.. 1547 */ 1548 ret = transport_generic_new_cmd(cmd); 1549 if (ret) 1550 transport_generic_request_failure(cmd, ret); 1551 return 0; 1552 } 1553 EXPORT_SYMBOL(transport_handle_cdb_direct); 1554 1555 sense_reason_t 1556 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 1557 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1558 { 1559 if (!sgl || !sgl_count) 1560 return 0; 1561 1562 /* 1563 * Reject SCSI data overflow with map_mem_to_cmd() as incoming 1564 * scatterlists already have been set to follow what the fabric 1565 * passes for the original expected data transfer length. 1566 */ 1567 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1568 pr_warn("Rejecting SCSI DATA overflow for fabric using" 1569 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); 1570 return TCM_INVALID_CDB_FIELD; 1571 } 1572 1573 cmd->t_data_sg = sgl; 1574 cmd->t_data_nents = sgl_count; 1575 cmd->t_bidi_data_sg = sgl_bidi; 1576 cmd->t_bidi_data_nents = sgl_bidi_count; 1577 1578 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1579 return 0; 1580 } 1581 1582 /** 1583 * target_init_cmd - initialize se_cmd 1584 * @se_cmd: command descriptor to init 1585 * @se_sess: associated se_sess for endpoint 1586 * @sense: pointer to SCSI sense buffer 1587 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1588 * @data_length: fabric expected data transfer length 1589 * @task_attr: SAM task attribute 1590 * @data_dir: DMA data direction 1591 * @flags: flags for command submission from target_sc_flags_tables 1592 * 1593 * Task tags are supported if the caller has set @se_cmd->tag. 
1594 * 1595 * Returns: 1596 * - less than zero to signal active I/O shutdown failure. 1597 * - zero on success. 1598 * 1599 * If the fabric driver calls target_stop_session, then it must check the 1600 * return code and handle failures. This will never fail for other drivers, 1601 * and the return code can be ignored. 1602 */ 1603 int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, 1604 unsigned char *sense, u64 unpacked_lun, 1605 u32 data_length, int task_attr, int data_dir, int flags) 1606 { 1607 struct se_portal_group *se_tpg; 1608 1609 se_tpg = se_sess->se_tpg; 1610 BUG_ON(!se_tpg); 1611 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); 1612 1613 if (flags & TARGET_SCF_USE_CPUID) 1614 se_cmd->se_cmd_flags |= SCF_USE_CPUID; 1615 /* 1616 * Signal bidirectional data payloads to target-core 1617 */ 1618 if (flags & TARGET_SCF_BIDI_OP) 1619 se_cmd->se_cmd_flags |= SCF_BIDI; 1620 1621 if (flags & TARGET_SCF_UNKNOWN_SIZE) 1622 se_cmd->unknown_data_length = 1; 1623 /* 1624 * Initialize se_cmd for target operation. From this point 1625 * exceptions are handled by sending exception status via 1626 * target_core_fabric_ops->queue_status() callback 1627 */ 1628 __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length, 1629 data_dir, task_attr, sense, unpacked_lun); 1630 1631 /* 1632 * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is 1633 * necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second 1634 * kref_put() to happen during fabric packet acknowledgement. 1635 */ 1636 return target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); 1637 } 1638 EXPORT_SYMBOL_GPL(target_init_cmd); 1639 1640 /** 1641 * target_submit_prep - prepare cmd for submission 1642 * @se_cmd: command descriptor to prep 1643 * @cdb: pointer to SCSI CDB 1644 * @sgl: struct scatterlist memory for unidirectional mapping 1645 * @sgl_count: scatterlist count for unidirectional mapping 1646 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping 1647 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping 1648 * @sgl_prot: struct scatterlist memory protection information 1649 * @sgl_prot_count: scatterlist count for protection information 1650 * @gfp: gfp allocation type 1651 * 1652 * Returns: 1653 * - less than zero to signal failure. 1654 * - zero on success. 1655 * If failure is returned, lio will the callers queue_status to complete 1656 * the cmd. 1657 */ 1658 int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb, 1659 struct scatterlist *sgl, u32 sgl_count, 1660 struct scatterlist *sgl_bidi, u32 sgl_bidi_count, 1661 struct scatterlist *sgl_prot, u32 sgl_prot_count, 1662 gfp_t gfp) 1663 { 1664 sense_reason_t rc; 1665 1666 rc = target_cmd_init_cdb(se_cmd, cdb, gfp); 1667 if (rc) 1668 goto send_cc_direct; 1669 1670 /* 1671 * Locate se_lun pointer and attach it to struct se_cmd 1672 */ 1673 rc = transport_lookup_cmd_lun(se_cmd); 1674 if (rc) 1675 goto send_cc_direct; 1676 1677 rc = target_cmd_parse_cdb(se_cmd); 1678 if (rc != 0) 1679 goto generic_fail; 1680 1681 /* 1682 * Save pointers for SGLs containing protection information, 1683 * if present. 1684 */ 1685 if (sgl_prot_count) { 1686 se_cmd->t_prot_sg = sgl_prot; 1687 se_cmd->t_prot_nents = sgl_prot_count; 1688 se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC; 1689 } 1690 1691 /* 1692 * When a non zero sgl_count has been passed perform SGL passthrough 1693 * mapping for pre-allocated fabric memory instead of having target 1694 * core perform an internal SGL allocation.. 
1695 */ 1696 if (sgl_count != 0) { 1697 BUG_ON(!sgl); 1698 1699 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count, 1700 sgl_bidi, sgl_bidi_count); 1701 if (rc != 0) 1702 goto generic_fail; 1703 } 1704 1705 return 0; 1706 1707 send_cc_direct: 1708 transport_send_check_condition_and_sense(se_cmd, rc, 0); 1709 target_put_sess_cmd(se_cmd); 1710 return -EIO; 1711 1712 generic_fail: 1713 transport_generic_request_failure(se_cmd, rc); 1714 return -EIO; 1715 } 1716 EXPORT_SYMBOL_GPL(target_submit_prep); 1717 1718 /** 1719 * target_submit - perform final initialization and submit cmd to LIO core 1720 * @se_cmd: command descriptor to submit 1721 * 1722 * target_submit_prep must have been called on the cmd, and this must be 1723 * called from process context. 1724 */ 1725 void target_submit(struct se_cmd *se_cmd) 1726 { 1727 struct scatterlist *sgl = se_cmd->t_data_sg; 1728 unsigned char *buf = NULL; 1729 1730 might_sleep(); 1731 1732 if (se_cmd->t_data_nents != 0) { 1733 BUG_ON(!sgl); 1734 /* 1735 * A work-around for tcm_loop as some userspace code via 1736 * scsi-generic do not memset their associated read buffers, 1737 * so go ahead and do that here for type non-data CDBs. Also 1738 * note that this is currently guaranteed to be a single SGL 1739 * for this case by target core in target_setup_cmd_from_cdb() 1740 * -> transport_generic_cmd_sequencer(). 1741 */ 1742 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && 1743 se_cmd->data_direction == DMA_FROM_DEVICE) { 1744 if (sgl) 1745 buf = kmap(sg_page(sgl)) + sgl->offset; 1746 1747 if (buf) { 1748 memset(buf, 0, sgl->length); 1749 kunmap(sg_page(sgl)); 1750 } 1751 } 1752 1753 } 1754 1755 /* 1756 * Check if we need to delay processing because of ALUA 1757 * Active/NonOptimized primary access state.. 1758 */ 1759 core_alua_check_nonop_delay(se_cmd); 1760 1761 transport_handle_cdb_direct(se_cmd); 1762 } 1763 EXPORT_SYMBOL_GPL(target_submit); 1764 1765 /** 1766 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd 1767 * 1768 * @se_cmd: command descriptor to submit 1769 * @se_sess: associated se_sess for endpoint 1770 * @cdb: pointer to SCSI CDB 1771 * @sense: pointer to SCSI sense buffer 1772 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1773 * @data_length: fabric expected data transfer length 1774 * @task_attr: SAM task attribute 1775 * @data_dir: DMA data direction 1776 * @flags: flags for command submission from target_sc_flags_tables 1777 * 1778 * Task tags are supported if the caller has set @se_cmd->tag. 1779 * 1780 * This may only be called from process context, and also currently 1781 * assumes internal allocation of fabric payload buffer by target-core. 1782 * 1783 * It also assumes interal target core SGL memory allocation. 1784 * 1785 * This function must only be used by drivers that do their own 1786 * sync during shutdown and does not use target_stop_session. If there 1787 * is a failure this function will call into the fabric driver's 1788 * queue_status with a CHECK_CONDITION. 1789 */ 1790 void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, 1791 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, 1792 u32 data_length, int task_attr, int data_dir, int flags) 1793 { 1794 int rc; 1795 1796 rc = target_init_cmd(se_cmd, se_sess, sense, unpacked_lun, data_length, 1797 task_attr, data_dir, flags); 1798 WARN(rc, "Invalid target_submit_cmd use. 
Driver must not use target_stop_session or call target_init_cmd directly.\n"); 1799 if (rc) 1800 return; 1801 1802 if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0, 1803 GFP_KERNEL)) 1804 return; 1805 1806 target_submit(se_cmd); 1807 } 1808 EXPORT_SYMBOL(target_submit_cmd); 1809 1810 1811 static struct se_dev_plug *target_plug_device(struct se_device *se_dev) 1812 { 1813 struct se_dev_plug *se_plug; 1814 1815 if (!se_dev->transport->plug_device) 1816 return NULL; 1817 1818 se_plug = se_dev->transport->plug_device(se_dev); 1819 if (!se_plug) 1820 return NULL; 1821 1822 se_plug->se_dev = se_dev; 1823 /* 1824 * We have a ref to the lun at this point, but the cmds could 1825 * complete before we unplug, so grab a ref to the se_device so we 1826 * can call back into the backend. 1827 */ 1828 config_group_get(&se_dev->dev_group); 1829 return se_plug; 1830 } 1831 1832 static void target_unplug_device(struct se_dev_plug *se_plug) 1833 { 1834 struct se_device *se_dev = se_plug->se_dev; 1835 1836 se_dev->transport->unplug_device(se_plug); 1837 config_group_put(&se_dev->dev_group); 1838 } 1839 1840 void target_queued_submit_work(struct work_struct *work) 1841 { 1842 struct se_cmd_queue *sq = container_of(work, struct se_cmd_queue, work); 1843 struct se_cmd *se_cmd, *next_cmd; 1844 struct se_dev_plug *se_plug = NULL; 1845 struct se_device *se_dev = NULL; 1846 struct llist_node *cmd_list; 1847 1848 cmd_list = llist_del_all(&sq->cmd_list); 1849 if (!cmd_list) 1850 /* Previous call took what we were queued to submit */ 1851 return; 1852 1853 cmd_list = llist_reverse_order(cmd_list); 1854 llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list) { 1855 if (!se_dev) { 1856 se_dev = se_cmd->se_dev; 1857 se_plug = target_plug_device(se_dev); 1858 } 1859 1860 target_submit(se_cmd); 1861 } 1862 1863 if (se_plug) 1864 target_unplug_device(se_plug); 1865 } 1866 1867 /** 1868 * target_queue_submission - queue the cmd to run on the LIO workqueue 1869 * @se_cmd: command descriptor to submit 1870 */ 1871 void target_queue_submission(struct se_cmd *se_cmd) 1872 { 1873 struct se_device *se_dev = se_cmd->se_dev; 1874 int cpu = se_cmd->cpuid; 1875 struct se_cmd_queue *sq; 1876 1877 sq = &se_dev->queues[cpu].sq; 1878 llist_add(&se_cmd->se_cmd_list, &sq->cmd_list); 1879 queue_work_on(cpu, target_submission_wq, &sq->work); 1880 } 1881 EXPORT_SYMBOL_GPL(target_queue_submission); 1882 1883 static void target_complete_tmr_failure(struct work_struct *work) 1884 { 1885 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); 1886 1887 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; 1888 se_cmd->se_tfo->queue_tm_rsp(se_cmd); 1889 1890 transport_lun_remove_cmd(se_cmd); 1891 transport_cmd_check_stop_to_fabric(se_cmd); 1892 } 1893 1894 /** 1895 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd 1896 * for TMR CDBs 1897 * 1898 * @se_cmd: command descriptor to submit 1899 * @se_sess: associated se_sess for endpoint 1900 * @sense: pointer to SCSI sense buffer 1901 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1902 * @fabric_tmr_ptr: fabric context for TMR req 1903 * @tm_type: Type of TM request 1904 * @gfp: gfp type for caller 1905 * @tag: referenced task tag for TMR_ABORT_TASK 1906 * @flags: submit cmd flags 1907 * 1908 * Callable from all contexts. 
1909 **/ 1910 1911 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 1912 unsigned char *sense, u64 unpacked_lun, 1913 void *fabric_tmr_ptr, unsigned char tm_type, 1914 gfp_t gfp, u64 tag, int flags) 1915 { 1916 struct se_portal_group *se_tpg; 1917 int ret; 1918 1919 se_tpg = se_sess->se_tpg; 1920 BUG_ON(!se_tpg); 1921 1922 __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1923 0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun); 1924 /* 1925 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req 1926 * allocation failure. 1927 */ 1928 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); 1929 if (ret < 0) 1930 return -ENOMEM; 1931 1932 if (tm_type == TMR_ABORT_TASK) 1933 se_cmd->se_tmr_req->ref_task_tag = tag; 1934 1935 /* See target_submit_cmd for commentary */ 1936 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); 1937 if (ret) { 1938 core_tmr_release_req(se_cmd->se_tmr_req); 1939 return ret; 1940 } 1941 1942 ret = transport_lookup_tmr_lun(se_cmd); 1943 if (ret) 1944 goto failure; 1945 1946 transport_generic_handle_tmr(se_cmd); 1947 return 0; 1948 1949 /* 1950 * For callback during failure handling, push this work off 1951 * to process context with TMR_LUN_DOES_NOT_EXIST status. 1952 */ 1953 failure: 1954 INIT_WORK(&se_cmd->work, target_complete_tmr_failure); 1955 schedule_work(&se_cmd->work); 1956 return 0; 1957 } 1958 EXPORT_SYMBOL(target_submit_tmr); 1959 1960 /* 1961 * Handle SAM-esque emulation for generic transport request failures. 1962 */ 1963 void transport_generic_request_failure(struct se_cmd *cmd, 1964 sense_reason_t sense_reason) 1965 { 1966 int ret = 0, post_ret; 1967 1968 pr_debug("-----[ Storage Engine Exception; sense_reason %d\n", 1969 sense_reason); 1970 target_show_cmd("-----[ ", cmd); 1971 1972 /* 1973 * For SAM Task Attribute emulation for failed struct se_cmd 1974 */ 1975 transport_complete_task_attr(cmd); 1976 1977 if (cmd->transport_complete_callback) 1978 cmd->transport_complete_callback(cmd, false, &post_ret); 1979 1980 if (cmd->transport_state & CMD_T_ABORTED) { 1981 INIT_WORK(&cmd->work, target_abort_work); 1982 queue_work(target_completion_wq, &cmd->work); 1983 return; 1984 } 1985 1986 switch (sense_reason) { 1987 case TCM_NON_EXISTENT_LUN: 1988 case TCM_UNSUPPORTED_SCSI_OPCODE: 1989 case TCM_INVALID_CDB_FIELD: 1990 case TCM_INVALID_PARAMETER_LIST: 1991 case TCM_PARAMETER_LIST_LENGTH_ERROR: 1992 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 1993 case TCM_UNKNOWN_MODE_PAGE: 1994 case TCM_WRITE_PROTECTED: 1995 case TCM_ADDRESS_OUT_OF_RANGE: 1996 case TCM_CHECK_CONDITION_ABORT_CMD: 1997 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1998 case TCM_CHECK_CONDITION_NOT_READY: 1999 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: 2000 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: 2001 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: 2002 case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: 2003 case TCM_TOO_MANY_TARGET_DESCS: 2004 case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE: 2005 case TCM_TOO_MANY_SEGMENT_DESCS: 2006 case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE: 2007 case TCM_INVALID_FIELD_IN_COMMAND_IU: 2008 break; 2009 case TCM_OUT_OF_RESOURCES: 2010 cmd->scsi_status = SAM_STAT_TASK_SET_FULL; 2011 goto queue_status; 2012 case TCM_LUN_BUSY: 2013 cmd->scsi_status = SAM_STAT_BUSY; 2014 goto queue_status; 2015 case TCM_RESERVATION_CONFLICT: 2016 /* 2017 * No SENSE Data payload for this case, set SCSI Status 2018 * and queue the response to $FABRIC_MOD. 
2019 * 2020 * Uses linux/include/scsi/scsi.h SAM status codes defs 2021 */ 2022 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 2023 /* 2024 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 2025 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 2026 * CONFLICT STATUS. 2027 * 2028 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 2029 */ 2030 if (cmd->se_sess && 2031 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl 2032 == TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) { 2033 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 2034 cmd->orig_fe_lun, 0x2C, 2035 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 2036 } 2037 2038 goto queue_status; 2039 default: 2040 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 2041 cmd->t_task_cdb[0], sense_reason); 2042 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 2043 break; 2044 } 2045 2046 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); 2047 if (ret) 2048 goto queue_full; 2049 2050 check_stop: 2051 transport_lun_remove_cmd(cmd); 2052 transport_cmd_check_stop_to_fabric(cmd); 2053 return; 2054 2055 queue_status: 2056 trace_target_cmd_complete(cmd); 2057 ret = cmd->se_tfo->queue_status(cmd); 2058 if (!ret) 2059 goto check_stop; 2060 queue_full: 2061 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 2062 } 2063 EXPORT_SYMBOL(transport_generic_request_failure); 2064 2065 void __target_execute_cmd(struct se_cmd *cmd, bool do_checks) 2066 { 2067 sense_reason_t ret; 2068 2069 if (!cmd->execute_cmd) { 2070 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2071 goto err; 2072 } 2073 if (do_checks) { 2074 /* 2075 * Check for an existing UNIT ATTENTION condition after 2076 * target_handle_task_attr() has done SAM task attr 2077 * checking, and possibly have already defered execution 2078 * out to target_restart_delayed_cmds() context. 2079 */ 2080 ret = target_scsi3_ua_check(cmd); 2081 if (ret) 2082 goto err; 2083 2084 ret = target_alua_state_check(cmd); 2085 if (ret) 2086 goto err; 2087 2088 ret = target_check_reservation(cmd); 2089 if (ret) { 2090 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 2091 goto err; 2092 } 2093 } 2094 2095 ret = cmd->execute_cmd(cmd); 2096 if (!ret) 2097 return; 2098 err: 2099 spin_lock_irq(&cmd->t_state_lock); 2100 cmd->transport_state &= ~CMD_T_SENT; 2101 spin_unlock_irq(&cmd->t_state_lock); 2102 2103 transport_generic_request_failure(cmd, ret); 2104 } 2105 2106 static int target_write_prot_action(struct se_cmd *cmd) 2107 { 2108 u32 sectors; 2109 /* 2110 * Perform WRITE_INSERT of PI using software emulation when backend 2111 * device has PI enabled, if the transport has not already generated 2112 * PI using hardware WRITE_INSERT offload. 
2113 */ 2114 switch (cmd->prot_op) { 2115 case TARGET_PROT_DOUT_INSERT: 2116 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) 2117 sbc_dif_generate(cmd); 2118 break; 2119 case TARGET_PROT_DOUT_STRIP: 2120 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP) 2121 break; 2122 2123 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); 2124 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 2125 sectors, 0, cmd->t_prot_sg, 0); 2126 if (unlikely(cmd->pi_err)) { 2127 spin_lock_irq(&cmd->t_state_lock); 2128 cmd->transport_state &= ~CMD_T_SENT; 2129 spin_unlock_irq(&cmd->t_state_lock); 2130 transport_generic_request_failure(cmd, cmd->pi_err); 2131 return -1; 2132 } 2133 break; 2134 default: 2135 break; 2136 } 2137 2138 return 0; 2139 } 2140 2141 static bool target_handle_task_attr(struct se_cmd *cmd) 2142 { 2143 struct se_device *dev = cmd->se_dev; 2144 2145 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 2146 return false; 2147 2148 cmd->se_cmd_flags |= SCF_TASK_ATTR_SET; 2149 2150 /* 2151 * HEAD_OF_QUEUE commands are never delayed here; returning false lets 2152 * the caller execute the passed struct se_cmd immediately, ahead of any queued work. 2153 */ 2154 switch (cmd->sam_task_attr) { 2155 case TCM_HEAD_TAG: 2156 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n", 2157 cmd->t_task_cdb[0]); 2158 return false; 2159 case TCM_ORDERED_TAG: 2160 atomic_inc_mb(&dev->dev_ordered_sync); 2161 2162 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n", 2163 cmd->t_task_cdb[0]); 2164 2165 /* 2166 * Execute an ORDERED command if no other older commands 2167 * exist that need to be completed first. 2168 */ 2169 if (!atomic_read(&dev->simple_cmds)) 2170 return false; 2171 break; 2172 default: 2173 /* 2174 * For SIMPLE and UNTAGGED Task Attribute commands 2175 */ 2176 atomic_inc_mb(&dev->simple_cmds); 2177 break; 2178 } 2179 2180 if (atomic_read(&dev->dev_ordered_sync) == 0) 2181 return false; 2182 2183 spin_lock(&dev->delayed_cmd_lock); 2184 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); 2185 spin_unlock(&dev->delayed_cmd_lock); 2186 2187 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n", 2188 cmd->t_task_cdb[0], cmd->sam_task_attr); 2189 return true; 2190 } 2191 2192 void target_execute_cmd(struct se_cmd *cmd) 2193 { 2194 /* 2195 * Determine if frontend context caller is requesting the stopping of 2196 * this command for frontend exceptions. 2197 * 2198 * If the received CDB has already been aborted, stop processing it here. 
2199 */ 2200 if (target_cmd_interrupted(cmd)) 2201 return; 2202 2203 spin_lock_irq(&cmd->t_state_lock); 2204 cmd->t_state = TRANSPORT_PROCESSING; 2205 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; 2206 spin_unlock_irq(&cmd->t_state_lock); 2207 2208 if (target_write_prot_action(cmd)) 2209 return; 2210 2211 if (target_handle_task_attr(cmd)) { 2212 spin_lock_irq(&cmd->t_state_lock); 2213 cmd->transport_state &= ~CMD_T_SENT; 2214 spin_unlock_irq(&cmd->t_state_lock); 2215 return; 2216 } 2217 2218 __target_execute_cmd(cmd, true); 2219 } 2220 EXPORT_SYMBOL(target_execute_cmd); 2221 2222 /* 2223 * Process all commands up to the last received ORDERED task attribute which 2224 * requires another blocking boundary 2225 */ 2226 static void target_restart_delayed_cmds(struct se_device *dev) 2227 { 2228 for (;;) { 2229 struct se_cmd *cmd; 2230 2231 spin_lock(&dev->delayed_cmd_lock); 2232 if (list_empty(&dev->delayed_cmd_list)) { 2233 spin_unlock(&dev->delayed_cmd_lock); 2234 break; 2235 } 2236 2237 cmd = list_entry(dev->delayed_cmd_list.next, 2238 struct se_cmd, se_delayed_node); 2239 list_del(&cmd->se_delayed_node); 2240 spin_unlock(&dev->delayed_cmd_lock); 2241 2242 cmd->transport_state |= CMD_T_SENT; 2243 2244 __target_execute_cmd(cmd, true); 2245 2246 if (cmd->sam_task_attr == TCM_ORDERED_TAG) 2247 break; 2248 } 2249 } 2250 2251 /* 2252 * Called from I/O completion to determine which dormant/delayed 2253 * and ordered cmds need to have their tasks added to the execution queue. 2254 */ 2255 static void transport_complete_task_attr(struct se_cmd *cmd) 2256 { 2257 struct se_device *dev = cmd->se_dev; 2258 2259 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 2260 return; 2261 2262 if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET)) 2263 goto restart; 2264 2265 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { 2266 atomic_dec_mb(&dev->simple_cmds); 2267 dev->dev_cur_ordered_id++; 2268 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { 2269 dev->dev_cur_ordered_id++; 2270 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", 2271 dev->dev_cur_ordered_id); 2272 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { 2273 atomic_dec_mb(&dev->dev_ordered_sync); 2274 2275 dev->dev_cur_ordered_id++; 2276 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", 2277 dev->dev_cur_ordered_id); 2278 } 2279 cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; 2280 2281 restart: 2282 target_restart_delayed_cmds(dev); 2283 } 2284 2285 static void transport_complete_qf(struct se_cmd *cmd) 2286 { 2287 int ret = 0; 2288 2289 transport_complete_task_attr(cmd); 2290 /* 2291 * If a fabric driver ->write_pending() or ->queue_data_in() callback 2292 * has returned neither -ENOMEM or -EAGAIN, assume it's fatal and 2293 * the same callbacks should not be retried. Return CHECK_CONDITION 2294 * if a scsi_status is not already set. 2295 * 2296 * If a fabric driver ->queue_status() has returned non zero, always 2297 * keep retrying no matter what.. 2298 */ 2299 if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) { 2300 if (cmd->scsi_status) 2301 goto queue_status; 2302 2303 translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 2304 goto queue_status; 2305 } 2306 2307 /* 2308 * Check if we need to send a sense buffer from 2309 * the struct se_cmd in question. We do NOT want 2310 * to take this path of the IO has been marked as 2311 * needing to be treated like a "normal read". This 2312 * is the case if it's a tape read, and either the 2313 * FM, EOM, or ILI bits are set, but there is no 2314 * sense data. 
2315 */ 2316 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && 2317 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 2318 goto queue_status; 2319 2320 switch (cmd->data_direction) { 2321 case DMA_FROM_DEVICE: 2322 /* queue status if not treating this as a normal read */ 2323 if (cmd->scsi_status && 2324 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) 2325 goto queue_status; 2326 2327 trace_target_cmd_complete(cmd); 2328 ret = cmd->se_tfo->queue_data_in(cmd); 2329 break; 2330 case DMA_TO_DEVICE: 2331 if (cmd->se_cmd_flags & SCF_BIDI) { 2332 ret = cmd->se_tfo->queue_data_in(cmd); 2333 break; 2334 } 2335 fallthrough; 2336 case DMA_NONE: 2337 queue_status: 2338 trace_target_cmd_complete(cmd); 2339 ret = cmd->se_tfo->queue_status(cmd); 2340 break; 2341 default: 2342 break; 2343 } 2344 2345 if (ret < 0) { 2346 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 2347 return; 2348 } 2349 transport_lun_remove_cmd(cmd); 2350 transport_cmd_check_stop_to_fabric(cmd); 2351 } 2352 2353 static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev, 2354 int err, bool write_pending) 2355 { 2356 /* 2357 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or 2358 * ->queue_data_in() callbacks from new process context. 2359 * 2360 * Otherwise for other errors, transport_complete_qf() will send 2361 * CHECK_CONDITION via ->queue_status() instead of attempting to 2362 * retry associated fabric driver data-transfer callbacks. 2363 */ 2364 if (err == -EAGAIN || err == -ENOMEM) { 2365 cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP : 2366 TRANSPORT_COMPLETE_QF_OK; 2367 } else { 2368 pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err); 2369 cmd->t_state = TRANSPORT_COMPLETE_QF_ERR; 2370 } 2371 2372 spin_lock_irq(&dev->qf_cmd_lock); 2373 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 2374 atomic_inc_mb(&dev->dev_qf_count); 2375 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 2376 2377 schedule_work(&cmd->se_dev->qf_work_queue); 2378 } 2379 2380 static bool target_read_prot_action(struct se_cmd *cmd) 2381 { 2382 switch (cmd->prot_op) { 2383 case TARGET_PROT_DIN_STRIP: 2384 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { 2385 u32 sectors = cmd->data_length >> 2386 ilog2(cmd->se_dev->dev_attrib.block_size); 2387 2388 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 2389 sectors, 0, cmd->t_prot_sg, 2390 0); 2391 if (cmd->pi_err) 2392 return true; 2393 } 2394 break; 2395 case TARGET_PROT_DIN_INSERT: 2396 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) 2397 break; 2398 2399 sbc_dif_generate(cmd); 2400 break; 2401 default: 2402 break; 2403 } 2404 2405 return false; 2406 } 2407 2408 static void target_complete_ok_work(struct work_struct *work) 2409 { 2410 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2411 int ret; 2412 2413 /* 2414 * Check if we need to move delayed/dormant tasks from cmds on the 2415 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 2416 * Attribute. 2417 */ 2418 transport_complete_task_attr(cmd); 2419 2420 /* 2421 * Check to schedule QUEUE_FULL work, or execute an existing 2422 * cmd->transport_qf_callback() 2423 */ 2424 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 2425 schedule_work(&cmd->se_dev->qf_work_queue); 2426 2427 /* 2428 * Check if we need to send a sense buffer from 2429 * the struct se_cmd in question. We do NOT want 2430 * to take this path of the IO has been marked as 2431 * needing to be treated like a "normal read". 
This 2432 * is the case if it's a tape read, and either the 2433 * FM, EOM, or ILI bits are set, but there is no 2434 * sense data. 2435 */ 2436 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && 2437 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 2438 WARN_ON(!cmd->scsi_status); 2439 ret = transport_send_check_condition_and_sense( 2440 cmd, 0, 1); 2441 if (ret) 2442 goto queue_full; 2443 2444 transport_lun_remove_cmd(cmd); 2445 transport_cmd_check_stop_to_fabric(cmd); 2446 return; 2447 } 2448 /* 2449 * Check for a callback, used by amongst other things 2450 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. 2451 */ 2452 if (cmd->transport_complete_callback) { 2453 sense_reason_t rc; 2454 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); 2455 bool zero_dl = !(cmd->data_length); 2456 int post_ret = 0; 2457 2458 rc = cmd->transport_complete_callback(cmd, true, &post_ret); 2459 if (!rc && !post_ret) { 2460 if (caw && zero_dl) 2461 goto queue_rsp; 2462 2463 return; 2464 } else if (rc) { 2465 ret = transport_send_check_condition_and_sense(cmd, 2466 rc, 0); 2467 if (ret) 2468 goto queue_full; 2469 2470 transport_lun_remove_cmd(cmd); 2471 transport_cmd_check_stop_to_fabric(cmd); 2472 return; 2473 } 2474 } 2475 2476 queue_rsp: 2477 switch (cmd->data_direction) { 2478 case DMA_FROM_DEVICE: 2479 /* 2480 * if this is a READ-type IO, but SCSI status 2481 * is set, then skip returning data and just 2482 * return the status -- unless this IO is marked 2483 * as needing to be treated as a normal read, 2484 * in which case we want to go ahead and return 2485 * the data. This happens, for example, for tape 2486 * reads with the FM, EOM, or ILI bits set, with 2487 * no sense data. 2488 */ 2489 if (cmd->scsi_status && 2490 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) 2491 goto queue_status; 2492 2493 atomic_long_add(cmd->data_length, 2494 &cmd->se_lun->lun_stats.tx_data_octets); 2495 /* 2496 * Perform READ_STRIP of PI using software emulation when 2497 * backend had PI enabled, if the transport will not be 2498 * performing hardware READ_STRIP offload. 
2499 */ 2500 if (target_read_prot_action(cmd)) { 2501 ret = transport_send_check_condition_and_sense(cmd, 2502 cmd->pi_err, 0); 2503 if (ret) 2504 goto queue_full; 2505 2506 transport_lun_remove_cmd(cmd); 2507 transport_cmd_check_stop_to_fabric(cmd); 2508 return; 2509 } 2510 2511 trace_target_cmd_complete(cmd); 2512 ret = cmd->se_tfo->queue_data_in(cmd); 2513 if (ret) 2514 goto queue_full; 2515 break; 2516 case DMA_TO_DEVICE: 2517 atomic_long_add(cmd->data_length, 2518 &cmd->se_lun->lun_stats.rx_data_octets); 2519 /* 2520 * Check if we need to send READ payload for BIDI-COMMAND 2521 */ 2522 if (cmd->se_cmd_flags & SCF_BIDI) { 2523 atomic_long_add(cmd->data_length, 2524 &cmd->se_lun->lun_stats.tx_data_octets); 2525 ret = cmd->se_tfo->queue_data_in(cmd); 2526 if (ret) 2527 goto queue_full; 2528 break; 2529 } 2530 fallthrough; 2531 case DMA_NONE: 2532 queue_status: 2533 trace_target_cmd_complete(cmd); 2534 ret = cmd->se_tfo->queue_status(cmd); 2535 if (ret) 2536 goto queue_full; 2537 break; 2538 default: 2539 break; 2540 } 2541 2542 transport_lun_remove_cmd(cmd); 2543 transport_cmd_check_stop_to_fabric(cmd); 2544 return; 2545 2546 queue_full: 2547 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 2548 " data_direction: %d\n", cmd, cmd->data_direction); 2549 2550 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 2551 } 2552 2553 void target_free_sgl(struct scatterlist *sgl, int nents) 2554 { 2555 sgl_free_n_order(sgl, nents, 0); 2556 } 2557 EXPORT_SYMBOL(target_free_sgl); 2558 2559 static inline void transport_reset_sgl_orig(struct se_cmd *cmd) 2560 { 2561 /* 2562 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE 2563 * emulation, and free + reset pointers if necessary.. 2564 */ 2565 if (!cmd->t_data_sg_orig) 2566 return; 2567 2568 kfree(cmd->t_data_sg); 2569 cmd->t_data_sg = cmd->t_data_sg_orig; 2570 cmd->t_data_sg_orig = NULL; 2571 cmd->t_data_nents = cmd->t_data_nents_orig; 2572 cmd->t_data_nents_orig = 0; 2573 } 2574 2575 static inline void transport_free_pages(struct se_cmd *cmd) 2576 { 2577 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2578 target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); 2579 cmd->t_prot_sg = NULL; 2580 cmd->t_prot_nents = 0; 2581 } 2582 2583 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { 2584 /* 2585 * Release special case READ buffer payload required for 2586 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE 2587 */ 2588 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { 2589 target_free_sgl(cmd->t_bidi_data_sg, 2590 cmd->t_bidi_data_nents); 2591 cmd->t_bidi_data_sg = NULL; 2592 cmd->t_bidi_data_nents = 0; 2593 } 2594 transport_reset_sgl_orig(cmd); 2595 return; 2596 } 2597 transport_reset_sgl_orig(cmd); 2598 2599 target_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2600 cmd->t_data_sg = NULL; 2601 cmd->t_data_nents = 0; 2602 2603 target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 2604 cmd->t_bidi_data_sg = NULL; 2605 cmd->t_bidi_data_nents = 0; 2606 } 2607 2608 void *transport_kmap_data_sg(struct se_cmd *cmd) 2609 { 2610 struct scatterlist *sg = cmd->t_data_sg; 2611 struct page **pages; 2612 int i; 2613 2614 /* 2615 * We need to take into account a possible offset here for fabrics like 2616 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 2617 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 2618 */ 2619 if (!cmd->t_data_nents) 2620 return NULL; 2621 2622 BUG_ON(!sg); 2623 if (cmd->t_data_nents == 1) 2624 return kmap(sg_page(sg)) + sg->offset; 
2625 2626 /* >1 page. use vmap */ 2627 pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL); 2628 if (!pages) 2629 return NULL; 2630 2631 /* convert sg[] to pages[] */ 2632 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 2633 pages[i] = sg_page(sg); 2634 } 2635 2636 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 2637 kfree(pages); 2638 if (!cmd->t_data_vmap) 2639 return NULL; 2640 2641 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 2642 } 2643 EXPORT_SYMBOL(transport_kmap_data_sg); 2644 2645 void transport_kunmap_data_sg(struct se_cmd *cmd) 2646 { 2647 if (!cmd->t_data_nents) { 2648 return; 2649 } else if (cmd->t_data_nents == 1) { 2650 kunmap(sg_page(cmd->t_data_sg)); 2651 return; 2652 } 2653 2654 vunmap(cmd->t_data_vmap); 2655 cmd->t_data_vmap = NULL; 2656 } 2657 EXPORT_SYMBOL(transport_kunmap_data_sg); 2658 2659 int 2660 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, 2661 bool zero_page, bool chainable) 2662 { 2663 gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0); 2664 2665 *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents); 2666 return *sgl ? 0 : -ENOMEM; 2667 } 2668 EXPORT_SYMBOL(target_alloc_sgl); 2669 2670 /* 2671 * Allocate any required resources to execute the command. For writes we 2672 * might not have the payload yet, so notify the fabric via a call to 2673 * ->write_pending instead. Otherwise place it on the execution queue. 2674 */ 2675 sense_reason_t 2676 transport_generic_new_cmd(struct se_cmd *cmd) 2677 { 2678 unsigned long flags; 2679 int ret = 0; 2680 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); 2681 2682 if (cmd->prot_op != TARGET_PROT_NORMAL && 2683 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2684 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, 2685 cmd->prot_length, true, false); 2686 if (ret < 0) 2687 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2688 } 2689 2690 /* 2691 * Determine if the TCM fabric module has already allocated physical 2692 * memory, and is directly calling transport_generic_map_mem_to_cmd() 2693 * beforehand. 2694 */ 2695 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 2696 cmd->data_length) { 2697 2698 if ((cmd->se_cmd_flags & SCF_BIDI) || 2699 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { 2700 u32 bidi_length; 2701 2702 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) 2703 bidi_length = cmd->t_task_nolb * 2704 cmd->se_dev->dev_attrib.block_size; 2705 else 2706 bidi_length = cmd->data_length; 2707 2708 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2709 &cmd->t_bidi_data_nents, 2710 bidi_length, zero_flag, false); 2711 if (ret < 0) 2712 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2713 } 2714 2715 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2716 cmd->data_length, zero_flag, false); 2717 if (ret < 0) 2718 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2719 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 2720 cmd->data_length) { 2721 /* 2722 * Special case for COMPARE_AND_WRITE with fabrics 2723 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. 
2724 */ 2725 u32 caw_length = cmd->t_task_nolb * 2726 cmd->se_dev->dev_attrib.block_size; 2727 2728 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2729 &cmd->t_bidi_data_nents, 2730 caw_length, zero_flag, false); 2731 if (ret < 0) 2732 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2733 } 2734 /* 2735 * If this command is not a write we can execute it right here, 2736 * for write buffers we need to notify the fabric driver first 2737 * and let it call back once the write buffers are ready. 2738 */ 2739 target_add_to_state_list(cmd); 2740 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { 2741 target_execute_cmd(cmd); 2742 return 0; 2743 } 2744 2745 spin_lock_irqsave(&cmd->t_state_lock, flags); 2746 cmd->t_state = TRANSPORT_WRITE_PENDING; 2747 /* 2748 * Determine if frontend context caller is requesting the stopping of 2749 * this command for frontend exceptions. 2750 */ 2751 if (cmd->transport_state & CMD_T_STOP && 2752 !cmd->se_tfo->write_pending_must_be_called) { 2753 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 2754 __func__, __LINE__, cmd->tag); 2755 2756 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2757 2758 complete_all(&cmd->t_transport_stop_comp); 2759 return 0; 2760 } 2761 cmd->transport_state &= ~CMD_T_ACTIVE; 2762 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2763 2764 ret = cmd->se_tfo->write_pending(cmd); 2765 if (ret) 2766 goto queue_full; 2767 2768 return 0; 2769 2770 queue_full: 2771 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 2772 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); 2773 return 0; 2774 } 2775 EXPORT_SYMBOL(transport_generic_new_cmd); 2776 2777 static void transport_write_pending_qf(struct se_cmd *cmd) 2778 { 2779 unsigned long flags; 2780 int ret; 2781 bool stop; 2782 2783 spin_lock_irqsave(&cmd->t_state_lock, flags); 2784 stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED)); 2785 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2786 2787 if (stop) { 2788 pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n", 2789 __func__, __LINE__, cmd->tag); 2790 complete_all(&cmd->t_transport_stop_comp); 2791 return; 2792 } 2793 2794 ret = cmd->se_tfo->write_pending(cmd); 2795 if (ret) { 2796 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", 2797 cmd); 2798 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); 2799 } 2800 } 2801 2802 static bool 2803 __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, 2804 unsigned long *flags); 2805 2806 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) 2807 { 2808 unsigned long flags; 2809 2810 spin_lock_irqsave(&cmd->t_state_lock, flags); 2811 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); 2812 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2813 } 2814 2815 /* 2816 * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has 2817 * finished. 2818 */ 2819 void target_put_cmd_and_wait(struct se_cmd *cmd) 2820 { 2821 DECLARE_COMPLETION_ONSTACK(compl); 2822 2823 WARN_ON_ONCE(cmd->abrt_compl); 2824 cmd->abrt_compl = &compl; 2825 target_put_sess_cmd(cmd); 2826 wait_for_completion(&compl); 2827 } 2828 2829 /* 2830 * This function is called by frontend drivers after processing of a command 2831 * has finished. 
2832 * 2833 * The protocol for ensuring that either the regular frontend command 2834 * processing flow or target_handle_abort() code drops one reference is as 2835 * follows: 2836 * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause 2837 * the frontend driver to call this function synchronously or asynchronously. 2838 * That will cause one reference to be dropped. 2839 * - During regular command processing the target core sets CMD_T_COMPLETE 2840 * before invoking one of the .queue_*() functions. 2841 * - The code that aborts commands skips commands and TMFs for which 2842 * CMD_T_COMPLETE has been set. 2843 * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for 2844 * commands that will be aborted. 2845 * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set 2846 * transport_generic_free_cmd() skips its call to target_put_sess_cmd(). 2847 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will 2848 * be called and will drop a reference. 2849 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task() 2850 * will be called. target_handle_abort() will drop the final reference. 2851 */ 2852 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2853 { 2854 DECLARE_COMPLETION_ONSTACK(compl); 2855 int ret = 0; 2856 bool aborted = false, tas = false; 2857 2858 if (wait_for_tasks) 2859 target_wait_free_cmd(cmd, &aborted, &tas); 2860 2861 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) { 2862 /* 2863 * Handle WRITE failure case where transport_generic_new_cmd() 2864 * has already added se_cmd to state_list, but fabric has 2865 * failed command before I/O submission. 2866 */ 2867 if (cmd->state_active) 2868 target_remove_from_state_list(cmd); 2869 2870 if (cmd->se_lun) 2871 transport_lun_remove_cmd(cmd); 2872 } 2873 if (aborted) 2874 cmd->free_compl = &compl; 2875 ret = target_put_sess_cmd(cmd); 2876 if (aborted) { 2877 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); 2878 wait_for_completion(&compl); 2879 ret = 1; 2880 } 2881 return ret; 2882 } 2883 EXPORT_SYMBOL(transport_generic_free_cmd); 2884 2885 /** 2886 * target_get_sess_cmd - Verify the session is accepting cmds and take ref 2887 * @se_cmd: command descriptor to add 2888 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() 2889 */ 2890 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) 2891 { 2892 struct se_session *se_sess = se_cmd->se_sess; 2893 int ret = 0; 2894 2895 /* 2896 * Add a second kref if the fabric caller is expecting to handle 2897 * fabric acknowledgement that requires two target_put_sess_cmd() 2898 * invocations before se_cmd descriptor release. 
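 *
 * A sketch of the expected pairing when ack_kref is true (the surrounding
 * fabric code is hypothetical; only the target_* calls below are real):
 *
 *	rc = target_get_sess_cmd(se_cmd, true);
 *	if (rc)
 *		return rc;	(session is being shut down)
 *	...
 *	target_put_sess_cmd(se_cmd);	(normal completion path)
 *	target_put_sess_cmd(se_cmd);	(fabric acknowledgement path)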
2899 */ 2900 if (ack_kref) { 2901 kref_get(&se_cmd->cmd_kref); 2902 se_cmd->se_cmd_flags |= SCF_ACK_KREF; 2903 } 2904 2905 if (!percpu_ref_tryget_live(&se_sess->cmd_count)) 2906 ret = -ESHUTDOWN; 2907 2908 if (ret && ack_kref) 2909 target_put_sess_cmd(se_cmd); 2910 2911 return ret; 2912 } 2913 EXPORT_SYMBOL(target_get_sess_cmd); 2914 2915 static void target_free_cmd_mem(struct se_cmd *cmd) 2916 { 2917 transport_free_pages(cmd); 2918 2919 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 2920 core_tmr_release_req(cmd->se_tmr_req); 2921 if (cmd->t_task_cdb != cmd->__t_task_cdb) 2922 kfree(cmd->t_task_cdb); 2923 } 2924 2925 static void target_release_cmd_kref(struct kref *kref) 2926 { 2927 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2928 struct se_session *se_sess = se_cmd->se_sess; 2929 struct completion *free_compl = se_cmd->free_compl; 2930 struct completion *abrt_compl = se_cmd->abrt_compl; 2931 2932 target_free_cmd_mem(se_cmd); 2933 se_cmd->se_tfo->release_cmd(se_cmd); 2934 if (free_compl) 2935 complete(free_compl); 2936 if (abrt_compl) 2937 complete(abrt_compl); 2938 2939 percpu_ref_put(&se_sess->cmd_count); 2940 } 2941 2942 /** 2943 * target_put_sess_cmd - decrease the command reference count 2944 * @se_cmd: command to drop a reference from 2945 * 2946 * Returns 1 if and only if this target_put_sess_cmd() call caused the 2947 * refcount to drop to zero. Returns zero otherwise. 2948 */ 2949 int target_put_sess_cmd(struct se_cmd *se_cmd) 2950 { 2951 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); 2952 } 2953 EXPORT_SYMBOL(target_put_sess_cmd); 2954 2955 static const char *data_dir_name(enum dma_data_direction d) 2956 { 2957 switch (d) { 2958 case DMA_BIDIRECTIONAL: return "BIDI"; 2959 case DMA_TO_DEVICE: return "WRITE"; 2960 case DMA_FROM_DEVICE: return "READ"; 2961 case DMA_NONE: return "NONE"; 2962 } 2963 2964 return "(?)"; 2965 } 2966 2967 static const char *cmd_state_name(enum transport_state_table t) 2968 { 2969 switch (t) { 2970 case TRANSPORT_NO_STATE: return "NO_STATE"; 2971 case TRANSPORT_NEW_CMD: return "NEW_CMD"; 2972 case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING"; 2973 case TRANSPORT_PROCESSING: return "PROCESSING"; 2974 case TRANSPORT_COMPLETE: return "COMPLETE"; 2975 case TRANSPORT_ISTATE_PROCESSING: 2976 return "ISTATE_PROCESSING"; 2977 case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP"; 2978 case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK"; 2979 case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR"; 2980 } 2981 2982 return "(?)"; 2983 } 2984 2985 static void target_append_str(char **str, const char *txt) 2986 { 2987 char *prev = *str; 2988 2989 *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) : 2990 kstrdup(txt, GFP_ATOMIC); 2991 kfree(prev); 2992 } 2993 2994 /* 2995 * Convert a transport state bitmask into a string. The caller is 2996 * responsible for freeing the returned pointer. 
2997 */ 2998 static char *target_ts_to_str(u32 ts) 2999 { 3000 char *str = NULL; 3001 3002 if (ts & CMD_T_ABORTED) 3003 target_append_str(&str, "aborted"); 3004 if (ts & CMD_T_ACTIVE) 3005 target_append_str(&str, "active"); 3006 if (ts & CMD_T_COMPLETE) 3007 target_append_str(&str, "complete"); 3008 if (ts & CMD_T_SENT) 3009 target_append_str(&str, "sent"); 3010 if (ts & CMD_T_STOP) 3011 target_append_str(&str, "stop"); 3012 if (ts & CMD_T_FABRIC_STOP) 3013 target_append_str(&str, "fabric_stop"); 3014 3015 return str; 3016 } 3017 3018 static const char *target_tmf_name(enum tcm_tmreq_table tmf) 3019 { 3020 switch (tmf) { 3021 case TMR_ABORT_TASK: return "ABORT_TASK"; 3022 case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET"; 3023 case TMR_CLEAR_ACA: return "CLEAR_ACA"; 3024 case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET"; 3025 case TMR_LUN_RESET: return "LUN_RESET"; 3026 case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET"; 3027 case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET"; 3028 case TMR_LUN_RESET_PRO: return "LUN_RESET_PRO"; 3029 case TMR_UNKNOWN: break; 3030 } 3031 return "(?)"; 3032 } 3033 3034 void target_show_cmd(const char *pfx, struct se_cmd *cmd) 3035 { 3036 char *ts_str = target_ts_to_str(cmd->transport_state); 3037 const u8 *cdb = cmd->t_task_cdb; 3038 struct se_tmr_req *tmf = cmd->se_tmr_req; 3039 3040 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 3041 pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n", 3042 pfx, cdb[0], cdb[1], cmd->tag, 3043 data_dir_name(cmd->data_direction), 3044 cmd->se_tfo->get_cmd_state(cmd), 3045 cmd_state_name(cmd->t_state), cmd->data_length, 3046 kref_read(&cmd->cmd_kref), ts_str); 3047 } else { 3048 pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n", 3049 pfx, target_tmf_name(tmf->function), cmd->tag, 3050 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd), 3051 cmd_state_name(cmd->t_state), 3052 kref_read(&cmd->cmd_kref), ts_str); 3053 } 3054 kfree(ts_str); 3055 } 3056 EXPORT_SYMBOL(target_show_cmd); 3057 3058 static void target_stop_session_confirm(struct percpu_ref *ref) 3059 { 3060 struct se_session *se_sess = container_of(ref, struct se_session, 3061 cmd_count); 3062 complete_all(&se_sess->stop_done); 3063 } 3064 3065 /** 3066 * target_stop_session - Stop new IO from being queued on the session. 3067 * @se_sess: session to stop 3068 */ 3069 void target_stop_session(struct se_session *se_sess) 3070 { 3071 pr_debug("Stopping session queue.\n"); 3072 if (atomic_cmpxchg(&se_sess->stopped, 0, 1) == 0) 3073 percpu_ref_kill_and_confirm(&se_sess->cmd_count, 3074 target_stop_session_confirm); 3075 } 3076 EXPORT_SYMBOL(target_stop_session); 3077 3078 /** 3079 * target_wait_for_sess_cmds - Wait for outstanding commands 3080 * @se_sess: session to wait for active I/O 3081 */ 3082 void target_wait_for_sess_cmds(struct se_session *se_sess) 3083 { 3084 int ret; 3085 3086 WARN_ON_ONCE(!atomic_read(&se_sess->stopped)); 3087 3088 do { 3089 pr_debug("Waiting for running cmds to complete.\n"); 3090 ret = wait_event_timeout(se_sess->cmd_count_wq, 3091 percpu_ref_is_zero(&se_sess->cmd_count), 3092 180 * HZ); 3093 } while (ret <= 0); 3094 3095 wait_for_completion(&se_sess->stop_done); 3096 pr_debug("Waiting for cmds done.\n"); 3097 } 3098 EXPORT_SYMBOL(target_wait_for_sess_cmds); 3099 3100 /* 3101 * Prevent that new percpu_ref_tryget_live() calls succeed and wait until 3102 * all references to the LUN have been released. 
Called during LUN shutdown. 3103 */ 3104 void transport_clear_lun_ref(struct se_lun *lun) 3105 { 3106 percpu_ref_kill(&lun->lun_ref); 3107 wait_for_completion(&lun->lun_shutdown_comp); 3108 } 3109 3110 static bool 3111 __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, 3112 bool *aborted, bool *tas, unsigned long *flags) 3113 __releases(&cmd->t_state_lock) 3114 __acquires(&cmd->t_state_lock) 3115 { 3116 3117 assert_spin_locked(&cmd->t_state_lock); 3118 WARN_ON_ONCE(!irqs_disabled()); 3119 3120 if (fabric_stop) 3121 cmd->transport_state |= CMD_T_FABRIC_STOP; 3122 3123 if (cmd->transport_state & CMD_T_ABORTED) 3124 *aborted = true; 3125 3126 if (cmd->transport_state & CMD_T_TAS) 3127 *tas = true; 3128 3129 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 3130 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 3131 return false; 3132 3133 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 3134 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 3135 return false; 3136 3137 if (!(cmd->transport_state & CMD_T_ACTIVE)) 3138 return false; 3139 3140 if (fabric_stop && *aborted) 3141 return false; 3142 3143 cmd->transport_state |= CMD_T_STOP; 3144 3145 target_show_cmd("wait_for_tasks: Stopping ", cmd); 3146 3147 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 3148 3149 while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp, 3150 180 * HZ)) 3151 target_show_cmd("wait for tasks: ", cmd); 3152 3153 spin_lock_irqsave(&cmd->t_state_lock, *flags); 3154 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 3155 3156 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->" 3157 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); 3158 3159 return true; 3160 } 3161 3162 /** 3163 * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp 3164 * @cmd: command to wait on 3165 */ 3166 bool transport_wait_for_tasks(struct se_cmd *cmd) 3167 { 3168 unsigned long flags; 3169 bool ret, aborted = false, tas = false; 3170 3171 spin_lock_irqsave(&cmd->t_state_lock, flags); 3172 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); 3173 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3174 3175 return ret; 3176 } 3177 EXPORT_SYMBOL(transport_wait_for_tasks); 3178 3179 struct sense_detail { 3180 u8 key; 3181 u8 asc; 3182 u8 ascq; 3183 bool add_sense_info; 3184 }; 3185 3186 static const struct sense_detail sense_detail_table[] = { 3187 [TCM_NO_SENSE] = { 3188 .key = NOT_READY 3189 }, 3190 [TCM_NON_EXISTENT_LUN] = { 3191 .key = ILLEGAL_REQUEST, 3192 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */ 3193 }, 3194 [TCM_UNSUPPORTED_SCSI_OPCODE] = { 3195 .key = ILLEGAL_REQUEST, 3196 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 3197 }, 3198 [TCM_SECTOR_COUNT_TOO_MANY] = { 3199 .key = ILLEGAL_REQUEST, 3200 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 3201 }, 3202 [TCM_UNKNOWN_MODE_PAGE] = { 3203 .key = ILLEGAL_REQUEST, 3204 .asc = 0x24, /* INVALID FIELD IN CDB */ 3205 }, 3206 [TCM_CHECK_CONDITION_ABORT_CMD] = { 3207 .key = ABORTED_COMMAND, 3208 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */ 3209 .ascq = 0x03, 3210 }, 3211 [TCM_INCORRECT_AMOUNT_OF_DATA] = { 3212 .key = ABORTED_COMMAND, 3213 .asc = 0x0c, /* WRITE ERROR */ 3214 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */ 3215 }, 3216 [TCM_INVALID_CDB_FIELD] = { 3217 .key = ILLEGAL_REQUEST, 3218 .asc = 0x24, /* INVALID FIELD IN CDB */ 3219 }, 3220 [TCM_INVALID_PARAMETER_LIST] = { 3221 .key = ILLEGAL_REQUEST, 3222 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ 3223 }, 3224 [TCM_TOO_MANY_TARGET_DESCS] = { 3225 
.key = ILLEGAL_REQUEST, 3226 .asc = 0x26, 3227 .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */ 3228 }, 3229 [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = { 3230 .key = ILLEGAL_REQUEST, 3231 .asc = 0x26, 3232 .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */ 3233 }, 3234 [TCM_TOO_MANY_SEGMENT_DESCS] = { 3235 .key = ILLEGAL_REQUEST, 3236 .asc = 0x26, 3237 .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */ 3238 }, 3239 [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = { 3240 .key = ILLEGAL_REQUEST, 3241 .asc = 0x26, 3242 .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */ 3243 }, 3244 [TCM_PARAMETER_LIST_LENGTH_ERROR] = { 3245 .key = ILLEGAL_REQUEST, 3246 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ 3247 }, 3248 [TCM_UNEXPECTED_UNSOLICITED_DATA] = { 3249 .key = ILLEGAL_REQUEST, 3250 .asc = 0x0c, /* WRITE ERROR */ 3251 .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */ 3252 }, 3253 [TCM_SERVICE_CRC_ERROR] = { 3254 .key = ABORTED_COMMAND, 3255 .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */ 3256 .ascq = 0x05, /* N/A */ 3257 }, 3258 [TCM_SNACK_REJECTED] = { 3259 .key = ABORTED_COMMAND, 3260 .asc = 0x11, /* READ ERROR */ 3261 .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */ 3262 }, 3263 [TCM_WRITE_PROTECTED] = { 3264 .key = DATA_PROTECT, 3265 .asc = 0x27, /* WRITE PROTECTED */ 3266 }, 3267 [TCM_ADDRESS_OUT_OF_RANGE] = { 3268 .key = ILLEGAL_REQUEST, 3269 .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ 3270 }, 3271 [TCM_CHECK_CONDITION_UNIT_ATTENTION] = { 3272 .key = UNIT_ATTENTION, 3273 }, 3274 [TCM_CHECK_CONDITION_NOT_READY] = { 3275 .key = NOT_READY, 3276 }, 3277 [TCM_MISCOMPARE_VERIFY] = { 3278 .key = MISCOMPARE, 3279 .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */ 3280 .ascq = 0x00, 3281 .add_sense_info = true, 3282 }, 3283 [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = { 3284 .key = ABORTED_COMMAND, 3285 .asc = 0x10, 3286 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */ 3287 .add_sense_info = true, 3288 }, 3289 [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = { 3290 .key = ABORTED_COMMAND, 3291 .asc = 0x10, 3292 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ 3293 .add_sense_info = true, 3294 }, 3295 [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = { 3296 .key = ABORTED_COMMAND, 3297 .asc = 0x10, 3298 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ 3299 .add_sense_info = true, 3300 }, 3301 [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = { 3302 .key = COPY_ABORTED, 3303 .asc = 0x0d, 3304 .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */ 3305 3306 }, 3307 [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = { 3308 /* 3309 * Returning ILLEGAL REQUEST would cause immediate IO errors on 3310 * Solaris initiators. Returning NOT READY instead means the 3311 * operations will be retried a finite number of times and we 3312 * can survive intermittent errors. 3313 */ 3314 .key = NOT_READY, 3315 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */ 3316 }, 3317 [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = { 3318 /* 3319 * From spc4r22 sections 5.7.7 and 5.7.8: 3320 * If a PERSISTENT RESERVE OUT command with a REGISTER service action 3321 * or a REGISTER AND IGNORE EXISTING KEY service action or 3322 * REGISTER AND MOVE service action is attempted, 3323 * but there are insufficient device server resources to complete the 3324 * operation, then the command shall be terminated with CHECK CONDITION 3325 * status, with the sense key set to ILLEGAL REQUEST, and the additional 3326 * sense code set to INSUFFICIENT REGISTRATION RESOURCES. 
3327 */ 3328 .key = ILLEGAL_REQUEST, 3329 .asc = 0x55, 3330 .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */ 3331 }, 3332 [TCM_INVALID_FIELD_IN_COMMAND_IU] = { 3333 .key = ILLEGAL_REQUEST, 3334 .asc = 0x0e, 3335 .ascq = 0x03, /* INVALID FIELD IN COMMAND INFORMATION UNIT */ 3336 }, 3337 }; 3338 3339 /** 3340 * translate_sense_reason - translate a sense reason into T10 key, asc and ascq 3341 * @cmd: SCSI command in which the resulting sense buffer or SCSI status will 3342 * be stored. 3343 * @reason: LIO sense reason code. If this argument has the value 3344 * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If 3345 * dequeuing a unit attention fails due to multiple commands being processed 3346 * concurrently, set the command status to BUSY. 3347 * 3348 * This function does not return a value; the resulting sense data or SCSI status is stored in @cmd. 3349 */ 3350 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) 3351 { 3352 const struct sense_detail *sd; 3353 u8 *buffer = cmd->sense_buffer; 3354 int r = (__force int)reason; 3355 u8 key, asc, ascq; 3356 bool desc_format = target_sense_desc_format(cmd->se_dev); 3357 3358 if (r < ARRAY_SIZE(sense_detail_table) && sense_detail_table[r].key) 3359 sd = &sense_detail_table[r]; 3360 else 3361 sd = &sense_detail_table[(__force int) 3362 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE]; 3363 3364 key = sd->key; 3365 if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) { 3366 if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc, 3367 &ascq)) { 3368 cmd->scsi_status = SAM_STAT_BUSY; 3369 return; 3370 } 3371 } else if (sd->asc == 0) { 3372 WARN_ON_ONCE(cmd->scsi_asc == 0); 3373 asc = cmd->scsi_asc; 3374 ascq = cmd->scsi_ascq; 3375 } else { 3376 asc = sd->asc; 3377 ascq = sd->ascq; 3378 } 3379 3380 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 3381 cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 3382 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 3383 scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq); 3384 if (sd->add_sense_info) 3385 WARN_ON_ONCE(scsi_set_sense_information(buffer, 3386 cmd->scsi_sense_length, 3387 cmd->sense_info) < 0); 3388 } 3389 3390 int 3391 transport_send_check_condition_and_sense(struct se_cmd *cmd, 3392 sense_reason_t reason, int from_transport) 3393 { 3394 unsigned long flags; 3395 3396 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); 3397 3398 spin_lock_irqsave(&cmd->t_state_lock, flags); 3399 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 3400 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3401 return 0; 3402 } 3403 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 3404 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3405 3406 if (!from_transport) 3407 translate_sense_reason(cmd, reason); 3408 3409 trace_target_cmd_complete(cmd); 3410 return cmd->se_tfo->queue_status(cmd); 3411 } 3412 EXPORT_SYMBOL(transport_send_check_condition_and_sense); 3413 3414 /** 3415 * target_send_busy - Send SCSI BUSY status back to the initiator 3416 * @cmd: SCSI command for which to send a BUSY reply. 3417 * 3418 * Note: Only call this function if target_submit_cmd*() failed. 
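 *
 * A minimal sketch of a call site (the fabric_cmd structure and
 * fabric_submit_to_core() are hypothetical, shown only to illustrate
 * replying BUSY when submission could not proceed):
 *
 *	if (fabric_submit_to_core(fabric_cmd) < 0) {
 *		ret = target_send_busy(&fabric_cmd->se_cmd);
 *		if (ret)
 *			pr_err("queue_status() for BUSY failed: %d\n", ret);
 *	}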
3419 */ 3420 int target_send_busy(struct se_cmd *cmd) 3421 { 3422 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); 3423 3424 cmd->scsi_status = SAM_STAT_BUSY; 3425 trace_target_cmd_complete(cmd); 3426 return cmd->se_tfo->queue_status(cmd); 3427 } 3428 EXPORT_SYMBOL(target_send_busy); 3429 3430 static void target_tmr_work(struct work_struct *work) 3431 { 3432 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 3433 struct se_device *dev = cmd->se_dev; 3434 struct se_tmr_req *tmr = cmd->se_tmr_req; 3435 int ret; 3436 3437 if (cmd->transport_state & CMD_T_ABORTED) 3438 goto aborted; 3439 3440 switch (tmr->function) { 3441 case TMR_ABORT_TASK: 3442 core_tmr_abort_task(dev, tmr, cmd->se_sess); 3443 break; 3444 case TMR_ABORT_TASK_SET: 3445 case TMR_CLEAR_ACA: 3446 case TMR_CLEAR_TASK_SET: 3447 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; 3448 break; 3449 case TMR_LUN_RESET: 3450 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); 3451 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : 3452 TMR_FUNCTION_REJECTED; 3453 if (tmr->response == TMR_FUNCTION_COMPLETE) { 3454 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 3455 cmd->orig_fe_lun, 0x29, 3456 ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED); 3457 } 3458 break; 3459 case TMR_TARGET_WARM_RESET: 3460 tmr->response = TMR_FUNCTION_REJECTED; 3461 break; 3462 case TMR_TARGET_COLD_RESET: 3463 tmr->response = TMR_FUNCTION_REJECTED; 3464 break; 3465 default: 3466 pr_err("Unknown TMR function: 0x%02x.\n", 3467 tmr->function); 3468 tmr->response = TMR_FUNCTION_REJECTED; 3469 break; 3470 } 3471 3472 if (cmd->transport_state & CMD_T_ABORTED) 3473 goto aborted; 3474 3475 cmd->se_tfo->queue_tm_rsp(cmd); 3476 3477 transport_lun_remove_cmd(cmd); 3478 transport_cmd_check_stop_to_fabric(cmd); 3479 return; 3480 3481 aborted: 3482 target_handle_abort(cmd); 3483 } 3484 3485 int transport_generic_handle_tmr( 3486 struct se_cmd *cmd) 3487 { 3488 unsigned long flags; 3489 bool aborted = false; 3490 3491 spin_lock_irqsave(&cmd->t_state_lock, flags); 3492 if (cmd->transport_state & CMD_T_ABORTED) { 3493 aborted = true; 3494 } else { 3495 cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 3496 cmd->transport_state |= CMD_T_ACTIVE; 3497 } 3498 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3499 3500 if (aborted) { 3501 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n", 3502 cmd->se_tmr_req->function, 3503 cmd->se_tmr_req->ref_task_tag, cmd->tag); 3504 target_handle_abort(cmd); 3505 return 0; 3506 } 3507 3508 INIT_WORK(&cmd->work, target_tmr_work); 3509 schedule_work(&cmd->work); 3510 return 0; 3511 } 3512 EXPORT_SYMBOL(transport_generic_handle_tmr); 3513 3514 bool 3515 target_check_wce(struct se_device *dev) 3516 { 3517 bool wce = false; 3518 3519 if (dev->transport->get_write_cache) 3520 wce = dev->transport->get_write_cache(dev); 3521 else if (dev->dev_attrib.emulate_write_cache > 0) 3522 wce = true; 3523 3524 return wce; 3525 } 3526 3527 bool 3528 target_check_fua(struct se_device *dev) 3529 { 3530 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0; 3531 } 3532
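/*
 * A minimal sketch of how a backend might honor FUA on a WRITE, under the
 * assumption that SCSI CDB parsing has already set SCF_FUA only when
 * target_check_fua() above returned true (my_backend_execute_write(),
 * submit_data() and flush_range() are hypothetical):
 *
 *	static sense_reason_t my_backend_execute_write(struct se_cmd *cmd)
 *	{
 *		sense_reason_t ret = submit_data(cmd);
 *
 *		if (!ret && (cmd->se_cmd_flags & SCF_FUA))
 *			ret = flush_range(cmd);
 *		return ret;
 *	}
 */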