// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev, int err, bool write_pending);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}
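
/*
 * Note: init_se_kmem_caches() and release_se_kmem_caches() are intended to be
 * invoked once from the target_core module init/exit path (the ConfigFS
 * subsystem setup in target_core_configfs.c), not per fabric or per device.
 */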

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}

void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}

static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
{
	struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);

	wake_up(&sess->cmd_list_wq);
}

/**
 * transport_init_session - initialize a session object
 * @se_sess: Session object pointer.
 *
 * The caller must have zero-initialized @se_sess before calling this function.
 */
int transport_init_session(struct se_session *se_sess)
{
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	init_waitqueue_head(&se_sess->cmd_list_wq);
	return percpu_ref_init(&se_sess->cmd_count,
			       target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
}
EXPORT_SYMBOL(transport_init_session);

void transport_uninit_session(struct se_session *se_sess)
{
	percpu_ref_exit(&se_sess->cmd_count);
}

/**
 * transport_alloc_session - allocate a session object and initialize it
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int ret;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	ret = transport_init_session(se_sess);
	if (ret < 0) {
		kmem_cache_free(se_sess_cache, se_sess);
		return ERR_PTR(ret);
	}
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_alloc_session);
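
/*
 * Typical fabric-side usage of the allocation helper above is a sketch like
 * the following (illustrative only; most fabric drivers go through
 * target_setup_session() further below instead of open-coding this):
 *
 *	struct se_session *sess;
 *
 *	sess = transport_alloc_session(TARGET_PROT_NORMAL);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 */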

/**
 * transport_alloc_session_tags - allocate target driver private data
 * @se_sess:  Session pointer.
 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	      each command.
 */
int transport_alloc_session_tags(struct se_session *se_sess,
				 unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
					 GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!se_sess->sess_cmd_map) {
		pr_err("Unable to allocate se_sess->sess_cmd_map\n");
		return -ENOMEM;
	}

	rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
				     false, GFP_KERNEL, NUMA_NO_NODE);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool, tag_num: %u\n",
		       tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);

/**
 * transport_init_session_tags - allocate a session and target driver private data
 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	      each command.
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
static struct se_session *
transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
			    enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	if (tag_num != 0 && !tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_num: %u, but zero tag_size\n",
		       tag_num);
		return ERR_PTR(-EINVAL);
	}
	if (!tag_num && tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_size: %u, but zero tag_num\n",
		       tag_size);
		return ERR_PTR(-EINVAL);
	}

	se_sess = transport_alloc_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}

/*
 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];
	unsigned long flags;

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate the active
	 * struct se_session.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * Determine if fabric allows for T10-PI feature bits exposed to
		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl node
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}

		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		 se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

struct se_session *
target_setup_session(struct se_portal_group *tpg,
		     unsigned int tag_num, unsigned int tag_size,
		     enum target_prot_op prot_op,
		     const char *initiatorname, void *private,
		     int (*callback)(struct se_portal_group *,
				     struct se_session *, void *))
{
	struct se_session *sess;

	/*
	 * If the fabric driver is using percpu-ida based pre allocation
	 * of I/O descriptor tags, go ahead and perform that setup now..
	 */
	if (tag_num != 0)
		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
	else
		sess = transport_alloc_session(prot_op);

	if (IS_ERR(sess))
		return sess;

	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
			(unsigned char *)initiatorname);
	if (!sess->se_node_acl) {
		transport_free_session(sess);
		return ERR_PTR(-EACCES);
	}
	/*
	 * Go ahead and perform any remaining fabric setup that is
	 * required before transport_register_session().
	 */
	if (callback != NULL) {
		int rc = callback(tpg, sess, private);
		if (rc) {
			transport_free_session(sess);
			return ERR_PTR(rc);
		}
	}

	transport_register_session(tpg, sess->se_node_acl, sess, private);
	return sess;
}
EXPORT_SYMBOL(target_setup_session);

ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
	struct se_session *se_sess;
	ssize_t len = 0;

	spin_lock_bh(&se_tpg->session_lock);
	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
		if (!se_sess->se_node_acl)
			continue;
		if (!se_sess->se_node_acl->dynamic_node_acl)
			continue;
		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
			break;

		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
				se_sess->se_node_acl->initiatorname);
		len += 1; /* Include NULL terminator */
	}
	spin_unlock_bh(&se_tpg->session_lock);

	return len;
}
EXPORT_SYMBOL(target_show_dynamic_sessions);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
			struct se_node_acl, acl_kref);
	struct se_portal_group *se_tpg = nacl->se_tpg;

	if (!nacl->dynamic_stop) {
		complete(&nacl->acl_free_comp);
		return;
	}

	mutex_lock(&se_tpg->acl_node_mutex);
	list_del_init(&nacl->acl_list);
	mutex_unlock(&se_tpg->acl_node_mutex);

	core_tpg_wait_for_nacl_pr_ref(nacl);
	core_free_device_list_for_node(nacl, se_tpg);
	kfree(nacl);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}
EXPORT_SYMBOL(target_put_nacl);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (!list_empty(&se_sess->sess_acl_list))
			list_del_init(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);
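
/*
 * A fabric driver tears a session down with the mirror-image sketch below
 * (illustrative only; target_remove_session() further down wraps exactly
 * these two calls):
 *
 *	transport_deregister_session_configfs(sess);
 *	transport_deregister_session(sess);
 */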

void transport_free_session(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl = se_sess->se_node_acl;

	/*
	 * Drop the se_node_acl->nacl_kref obtained from within
	 * core_tpg_get_initiator_node_acl().
	 */
	if (se_nacl) {
		struct se_portal_group *se_tpg = se_nacl->se_tpg;
		const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
		unsigned long flags;

		se_sess->se_node_acl = NULL;

		/*
		 * Also determine if we need to drop the extra ->cmd_kref if
		 * it had been previously dynamically generated, and
		 * the endpoint is not caching dynamic ACLs.
		 */
		mutex_lock(&se_tpg->acl_node_mutex);
		if (se_nacl->dynamic_node_acl &&
		    !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
			if (list_empty(&se_nacl->acl_sess_list))
				se_nacl->dynamic_stop = true;
			spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);

			if (se_nacl->dynamic_stop)
				list_del_init(&se_nacl->acl_list);
		}
		mutex_unlock(&se_tpg->acl_node_mutex);

		if (se_nacl->dynamic_stop)
			target_put_nacl(se_nacl);

		target_put_nacl(se_nacl);
	}
	if (se_sess->sess_cmd_map) {
		sbitmap_queue_free(&se_sess->sess_tag_pool);
		kvfree(se_sess->sess_cmd_map);
	}
	transport_uninit_session(se_sess);
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

static int target_release_res(struct se_device *dev, void *data)
{
	struct se_session *sess = data;

	if (dev->reservation_holder == sess)
		target_release_reservation(dev);
	return 0;
}

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	unsigned long flags;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Since the session is being removed, release SPC-2
	 * reservations held by the session that is disappearing.
	 */
	target_for_each_device(target_release_res, se_sess);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		 se_tpg->se_tpg_tfo->fabric_name);
	/*
	 * If the last kref is being dropped now for an explicit NodeACL, wake
	 * up the sleeping ->acl_free_comp caller, i.e. the configfs
	 * se_node_acl->acl_group removal context, from within
	 * transport_free_session() code.
	 *
	 * For dynamic ACLs, target_put_nacl() uses target_complete_nacl()
	 * to release all remaining generate_node_acl=1 created ACL resources.
	 */

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

void target_remove_session(struct se_session *se_sess)
{
	transport_deregister_session_configfs(se_sess);
	transport_deregister_session(se_sess);
}
EXPORT_SYMBOL(target_remove_session);

static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * This function is called by the target core after the target core has
 * finished processing a SCSI command or SCSI TMF. Both the regular command
 * processing code and the code for aborting commands can call this
 * function. CMD_T_STOP is set if and only if another thread is waiting
 * inside transport_wait_for_tasks() for t_transport_stop_comp.
 */
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	unsigned long flags;

	target_remove_from_state_list(cmd);

	/*
	 * Clear struct se_cmd->se_lun before the handoff to FE.
	 */
	cmd->se_lun = NULL;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			 __func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 1;
	}
	cmd->transport_state &= ~CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Some fabric modules like tcm_loop can release their internally
	 * allocated I/O reference and struct se_cmd now.
	 *
	 * Fabric modules are expected to return '1' here if the se_cmd being
	 * passed is released at this point, or zero if not being released.
	 */
	return cmd->se_tfo->check_stop_free(cmd);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;

	if (!lun)
		return;

	if (cmpxchg(&cmd->lun_ref_active, true, false))
		percpu_ref_put(&lun->lun_ref);
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd,
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}

/*
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd
 */
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return NULL;

	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;

	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
		 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return cmd->sense_buffer;
}

void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
{
	unsigned char *cmd_sense_buf;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd_sense_buf = transport_get_sense_buffer(cmd);
	if (!cmd_sense_buf) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
	memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
EXPORT_SYMBOL(transport_copy_sense_to_cmd);

static void target_handle_abort(struct se_cmd *cmd)
{
	bool tas = cmd->transport_state & CMD_T_TAS;
	bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
	int ret;

	pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);

	if (tas) {
		if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
			pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
				 cmd->t_task_cdb[0], cmd->tag);
			trace_target_cmd_complete(cmd);
			ret = cmd->se_tfo->queue_status(cmd);
			if (ret) {
				transport_handle_queue_full(cmd, cmd->se_dev,
							    ret, false);
				return;
			}
		} else {
			cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
			cmd->se_tfo->queue_tm_rsp(cmd);
		}
	} else {
		/*
		 * Allow the fabric driver to unmap any resources before
		 * releasing the descriptor via TFO->release_cmd().
		 */
		cmd->se_tfo->aborted_task(cmd);
		if (ack_kref)
			WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
		/*
		 * To do: establish a unit attention condition on the I_T
		 * nexus associated with cmd. See also the paragraph "Aborting
		 * commands" in SAM.
		 */
	}

	WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);

	transport_lun_remove_cmd(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
}

static void target_abort_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	target_handle_abort(cmd);
}

static bool target_cmd_interrupted(struct se_cmd *cmd)
{
	int post_ret;

	if (cmd->transport_state & CMD_T_ABORTED) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		INIT_WORK(&cmd->work, target_abort_work);
		queue_work(target_completion_wq, &cmd->work);
		return true;
	} else if (cmd->transport_state & CMD_T_STOP) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		complete_all(&cmd->t_transport_stop_comp);
		return true;
	}

	return false;
}
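
/*
 * Backend drivers report completion of a command by calling
 * target_complete_cmd() below from their completion context, e.g.:
 *
 *	target_complete_cmd(cmd, SAM_STAT_GOOD);
 *
 * A SAM_STAT_CHECK_CONDITION status routes the command through
 * target_complete_failure_work() unless sense data was already attached
 * via transport_copy_sense_to_cmd().
 */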

/* May be called from interrupt context so must not sleep. */
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	int success;
	unsigned long flags;

	if (target_cmd_interrupted(cmd))
		return;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	switch (cmd->scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
			success = 1;
		else
			success = 0;
		break;
	default:
		success = 1;
		break;
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	INIT_WORK(&cmd->work, success ? target_complete_ok_work :
			target_complete_failure_work);
	if (cmd->se_cmd_flags & SCF_USE_CPUID)
		queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
	else
		queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
	if ((scsi_status == SAM_STAT_GOOD ||
	     cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
	    length < cmd->data_length) {
		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = cmd->data_length - length;
		}

		cmd->data_length = length;
	}

	target_complete_cmd(cmd, scsi_status);
}
EXPORT_SYMBOL(target_complete_cmd_with_length);

static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list, &dev->state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);

void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					     qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec_mb(&dev->dev_qf_count);

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue context: %s\n",
			 cmd->se_tfo->fabric_name, cmd,
			 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
								    : "UNKNOWN");
"WRITE_PENDING" 933 : "UNKNOWN"); 934 935 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 936 transport_write_pending_qf(cmd); 937 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK || 938 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) 939 transport_complete_qf(cmd); 940 } 941 } 942 943 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 944 { 945 switch (cmd->data_direction) { 946 case DMA_NONE: 947 return "NONE"; 948 case DMA_FROM_DEVICE: 949 return "READ"; 950 case DMA_TO_DEVICE: 951 return "WRITE"; 952 case DMA_BIDIRECTIONAL: 953 return "BIDI"; 954 default: 955 break; 956 } 957 958 return "UNKNOWN"; 959 } 960 961 void transport_dump_dev_state( 962 struct se_device *dev, 963 char *b, 964 int *bl) 965 { 966 *bl += sprintf(b + *bl, "Status: "); 967 if (dev->export_count) 968 *bl += sprintf(b + *bl, "ACTIVATED"); 969 else 970 *bl += sprintf(b + *bl, "DEACTIVATED"); 971 972 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); 973 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", 974 dev->dev_attrib.block_size, 975 dev->dev_attrib.hw_max_sectors); 976 *bl += sprintf(b + *bl, " "); 977 } 978 979 void transport_dump_vpd_proto_id( 980 struct t10_vpd *vpd, 981 unsigned char *p_buf, 982 int p_buf_len) 983 { 984 unsigned char buf[VPD_TMP_BUF_SIZE]; 985 int len; 986 987 memset(buf, 0, VPD_TMP_BUF_SIZE); 988 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 989 990 switch (vpd->protocol_identifier) { 991 case 0x00: 992 sprintf(buf+len, "Fibre Channel\n"); 993 break; 994 case 0x10: 995 sprintf(buf+len, "Parallel SCSI\n"); 996 break; 997 case 0x20: 998 sprintf(buf+len, "SSA\n"); 999 break; 1000 case 0x30: 1001 sprintf(buf+len, "IEEE 1394\n"); 1002 break; 1003 case 0x40: 1004 sprintf(buf+len, "SCSI Remote Direct Memory Access" 1005 " Protocol\n"); 1006 break; 1007 case 0x50: 1008 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 1009 break; 1010 case 0x60: 1011 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 1012 break; 1013 case 0x70: 1014 sprintf(buf+len, "Automation/Drive Interface Transport" 1015 " Protocol\n"); 1016 break; 1017 case 0x80: 1018 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 1019 break; 1020 default: 1021 sprintf(buf+len, "Unknown 0x%02x\n", 1022 vpd->protocol_identifier); 1023 break; 1024 } 1025 1026 if (p_buf) 1027 strncpy(p_buf, buf, p_buf_len); 1028 else 1029 pr_debug("%s", buf); 1030 } 1031 1032 void 1033 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 1034 { 1035 /* 1036 * Check if the Protocol Identifier Valid (PIV) bit is set.. 

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
			vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		snprintf(buf, sizeof(buf),
			 "T10 VPD Binary Device Identifier: %s\n",
			 &vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		snprintf(buf, sizeof(buf),
			 "T10 VPD ASCII Device Identifier: %s\n",
			 &vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		snprintf(buf, sizeof(buf),
			 "T10 VPD UTF-8 Device Identifier: %s\n",
			 &vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported: 0x%02x",
			vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
					hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
					hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);

static sense_reason_t
target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
			       unsigned int size)
{
	u32 mtl;

	if (!cmd->se_tfo->max_data_sg_nents)
		return TCM_NO_SENSE;
	/*
	 * Check if fabric enforced maximum SGL entries per I/O descriptor
	 * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT +
	 * residual_count and reduce original cmd->data_length to maximum
	 * length based on single PAGE_SIZE entry scatter-lists.
	 */
	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
	if (cmd->data_length > mtl) {
		/*
		 * If an existing CDB overflow is present, calculate new residual
		 * based on CDB size minus fabric maximum transfer length.
		 *
		 * If an existing CDB underflow is present, calculate new residual
		 * based on original cmd->data_length minus fabric maximum transfer
		 * length.
		 *
		 * Otherwise, set the underflow residual based on cmd->data_length
		 * minus fabric maximum transfer length.
		 */
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			cmd->residual_count = (size - mtl);
		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			u32 orig_dl = size + cmd->residual_count;
			cmd->residual_count = (orig_dl - mtl);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - mtl);
		}
		cmd->data_length = mtl;
		/*
		 * Reset sbc_check_prot() calculated protection payload
		 * length based upon the new smaller MTL.
		 */
		if (cmd->prot_length) {
			u32 sectors = (mtl / dev->dev_attrib.block_size);
			cmd->prot_length = dev->prot_length * sectors;
		}
	}
	return TCM_NO_SENSE;
}

/**
 * target_cmd_size_check - Check whether there will be a residual.
 * @cmd: SCSI command.
 * @size: Data buffer size derived from CDB. The data buffer size provided by
 *   the SCSI transport driver is available in @cmd->data_length.
 *
 * Compare the data buffer size from the CDB with the data buffer limit from the
 * transport header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or
 * SCF_UNDERFLOW_BIT if necessary.
 *
 * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd().
 *
 * Return: TCM_NO_SENSE
 */
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length: %u does not match SCSI CDB Length: %u for SAM Opcode: 0x%02x\n",
				    cmd->se_tfo->fabric_name,
				    cmd->data_length, size, cmd->t_task_cdb[0]);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
				pr_err_ratelimited("Rejecting underflow/overflow for WRITE data CDB\n");
				return TCM_INVALID_CDB_FIELD;
			}
			/*
			 * Some fabric drivers like iscsi-target still expect to
			 * always reject overflow writes. Reject this case until
			 * full fabric driver level support for overflow writes
			 * is introduced tree-wide.
			 */
			if (size > cmd->data_length) {
				pr_err_ratelimited("Rejecting overflow for WRITE control CDB\n");
				return TCM_INVALID_CDB_FIELD;
			}
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_CDB.
		 */
		if (dev->dev_attrib.block_size != 512) {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op CDB on non 512-byte sector setup subsystem plugin: %s\n",
			       dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * For the overflow case keep the existing fabric provided
		 * ->data_length. Otherwise for the underflow case, reset
		 * ->data_length to the smaller SCSI expected data transfer
		 * length.
		 */
		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
			cmd->data_length = size;
		}
	}

	return target_check_max_data_sg_nents(cmd, dev, size);
}
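
/*
 * Worked example for the underflow path above: if a fabric driver passes
 * cmd->data_length = 4096 but the CDB only asks for size = 512, then
 * target_cmd_size_check() sets SCF_UNDERFLOW_BIT, residual_count = 3584,
 * and shrinks cmd->data_length to 512 before the command is executed.
 */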

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 *
 * Preserves the value of @cmd->tag.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	const struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer, u64 unpacked_lun)
{
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->t_transport_stop_comp);
	cmd->free_compl = NULL;
	cmd->abrt_compl = NULL;
	spin_lock_init(&cmd->t_state_lock);
	INIT_WORK(&cmd->work, NULL);
	kref_init(&cmd->cmd_kref);

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;
	cmd->orig_fe_lun = unpacked_lun;

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	if (cmd->sam_task_attr == TCM_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA emulation is not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	return 0;
}

sense_reason_t
target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
	sense_reason_t ret;

	cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
		       scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		ret = TCM_INVALID_CDB_FIELD;
		goto err;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now.. Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
			       scsi_command_size(cdb),
			       (unsigned long)sizeof(cmd->__t_task_cdb));
			ret = TCM_OUT_OF_RESOURCES;
			goto err;
		}
	}
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	trace_target_sequencer_start(cmd);
	return 0;

err:
	/*
	 * Copy the CDB here to allow trace_target_cmd_complete() to
	 * print the cdb to the trace buffers.
	 */
	memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb),
					 (unsigned int)TCM_MAX_COMMAND_SIZE));
	return ret;
}
EXPORT_SYMBOL(target_cmd_init_cdb);

sense_reason_t
target_cmd_parse_cdb(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;

	ret = dev->transport->parse_cdb(cmd);
	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
				    cmd->se_tfo->fabric_name,
				    cmd->se_sess->se_node_acl->initiatorname,
				    cmd->t_task_cdb[0]);
	if (ret)
		return ret;

	ret = transport_check_alloc_task_attr(cmd);
	if (ret)
		return ret;

	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
	return 0;
}
EXPORT_SYMBOL(target_cmd_parse_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	sense_reason_t ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
	 * outstanding descriptors are handled correctly during shutdown via
	 * transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret)
		transport_generic_request_failure(cmd, ret);
	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		return TCM_INVALID_CDB_FIELD;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;
	cmd->t_bidi_data_sg = sgl_bidi;
	cmd->t_bidi_data_nents = sgl_bidi_count;

	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}

/**
 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
 *			 se_cmd + use pre-allocated SGL memory.
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 * @sgl: struct scatterlist memory for unidirectional mapping
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
 * @sgl_prot: struct scatterlist memory protection information
 * @sgl_prot_count: scatterlist count for protection information
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure. All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 */
int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags,
		struct scatterlist *sgl, u32 sgl_count,
		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
		struct scatterlist *sgl_prot, u32 sgl_prot_count)
{
	struct se_portal_group *se_tpg;
	sense_reason_t rc;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation. From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      data_length, data_dir, task_attr, sense,
			      unpacked_lun);

	if (flags & TARGET_SCF_USE_CPUID)
		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
	else
		se_cmd->cpuid = WORK_CPU_UNBOUND;

	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list. A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret)
		return ret;
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;

	rc = target_cmd_init_cdb(se_cmd, cdb);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
		target_put_sess_cmd(se_cmd);
		return 0;
	}

	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	rc = transport_lookup_cmd_lun(se_cmd);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
		target_put_sess_cmd(se_cmd);
		return 0;
	}

	rc = target_cmd_parse_cdb(se_cmd);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd, rc);
		return 0;
	}

	/*
	 * Save pointers for SGLs containing protection information,
	 * if present.
	 */
	if (sgl_prot_count) {
		se_cmd->t_prot_sg = sgl_prot;
		se_cmd->t_prot_nents = sgl_prot_count;
		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
	}

	/*
	 * When a non zero sgl_count has been passed perform SGL passthrough
	 * mapping for pre-allocated fabric memory instead of having target
	 * core perform an internal SGL allocation..
	 */
	if (sgl_count != 0) {
		BUG_ON(!sgl);

		/*
		 * A work-around for tcm_loop as some userspace code via
		 * scsi-generic do not memset their associated read buffers,
		 * so go ahead and do that here for type non-data CDBs. Also
		 * note that this is currently guaranteed to be a single SGL
		 * for this case by target core in target_setup_cmd_from_cdb()
		 * -> transport_generic_cmd_sequencer().
		 */
		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
		    se_cmd->data_direction == DMA_FROM_DEVICE) {
			unsigned char *buf = NULL;

			if (sgl)
				buf = kmap(sg_page(sgl)) + sgl->offset;

			if (buf) {
				memset(buf, 0, sgl->length);
				kunmap(sg_page(sgl));
			}
		}

		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
						      sgl_bidi, sgl_bidi_count);
		if (rc != 0) {
			transport_generic_request_failure(se_cmd, rc);
			return 0;
		}
	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);
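
/*
 * A typical fabric submission path boils down to a sketch like the one
 * below (illustrative only; the tag, buffers and flags are driver specific,
 * and target_submit_cmd() is simply the no-SGL wrapper defined next):
 *
 *	se_cmd->tag = fabric_tag;
 *	if (target_submit_cmd(se_cmd, se_sess, cdb, sense, unpacked_lun,
 *			      data_length, TCM_SIMPLE_TAG, DMA_FROM_DEVICE,
 *			      TARGET_SCF_ACK_KREF))
 *		goto out_shutdown;	// active I/O shutdown failure
 */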

/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure. All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 *
 * It also assumes internal target core SGL memory allocation.
 */
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
			unpacked_lun, data_length, task_attr, data_dir,
			flags, NULL, 0, NULL, 0, NULL, 0);
}
EXPORT_SYMBOL(target_submit_cmd);

static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);

	transport_lun_remove_cmd(se_cmd);
	transport_cmd_check_stop_to_fabric(se_cmd);
}

static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
				       u64 *unpacked_lun)
{
	struct se_cmd *se_cmd;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
			continue;

		if (se_cmd->tag == tag) {
			*unpacked_lun = se_cmd->orig_fe_lun;
			ret = true;
			break;
		}
	}
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	return ret;
}

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *		       for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 **/

int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, u64 tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

	/* See target_submit_cmd for commentary */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret) {
		core_tmr_release_req(se_cmd->se_tmr_req);
		return ret;
	}
	/*
	 * If this is ABORT_TASK with no explicit fabric provided LUN,
	 * go ahead and search active session tags for a match to figure
	 * out unpacked_lun for the original se_cmd.
	 */
1841 */ 1842 if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) { 1843 if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun)) 1844 goto failure; 1845 } 1846 1847 ret = transport_lookup_tmr_lun(se_cmd); 1848 if (ret) 1849 goto failure; 1850 1851 transport_generic_handle_tmr(se_cmd); 1852 return 0; 1853 1854 /* 1855 * For callback during failure handling, push this work off 1856 * to process context with TMR_LUN_DOES_NOT_EXIST status. 1857 */ 1858 failure: 1859 INIT_WORK(&se_cmd->work, target_complete_tmr_failure); 1860 schedule_work(&se_cmd->work); 1861 return 0; 1862 } 1863 EXPORT_SYMBOL(target_submit_tmr); 1864 1865 /* 1866 * Handle SAM-esque emulation for generic transport request failures. 1867 */ 1868 void transport_generic_request_failure(struct se_cmd *cmd, 1869 sense_reason_t sense_reason) 1870 { 1871 int ret = 0, post_ret; 1872 1873 pr_debug("-----[ Storage Engine Exception; sense_reason %d\n", 1874 sense_reason); 1875 target_show_cmd("-----[ ", cmd); 1876 1877 /* 1878 * For SAM Task Attribute emulation for failed struct se_cmd 1879 */ 1880 transport_complete_task_attr(cmd); 1881 1882 if (cmd->transport_complete_callback) 1883 cmd->transport_complete_callback(cmd, false, &post_ret); 1884 1885 if (cmd->transport_state & CMD_T_ABORTED) { 1886 INIT_WORK(&cmd->work, target_abort_work); 1887 queue_work(target_completion_wq, &cmd->work); 1888 return; 1889 } 1890 1891 switch (sense_reason) { 1892 case TCM_NON_EXISTENT_LUN: 1893 case TCM_UNSUPPORTED_SCSI_OPCODE: 1894 case TCM_INVALID_CDB_FIELD: 1895 case TCM_INVALID_PARAMETER_LIST: 1896 case TCM_PARAMETER_LIST_LENGTH_ERROR: 1897 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 1898 case TCM_UNKNOWN_MODE_PAGE: 1899 case TCM_WRITE_PROTECTED: 1900 case TCM_ADDRESS_OUT_OF_RANGE: 1901 case TCM_CHECK_CONDITION_ABORT_CMD: 1902 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1903 case TCM_CHECK_CONDITION_NOT_READY: 1904 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: 1905 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: 1906 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: 1907 case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: 1908 case TCM_TOO_MANY_TARGET_DESCS: 1909 case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE: 1910 case TCM_TOO_MANY_SEGMENT_DESCS: 1911 case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE: 1912 break; 1913 case TCM_OUT_OF_RESOURCES: 1914 cmd->scsi_status = SAM_STAT_TASK_SET_FULL; 1915 goto queue_status; 1916 case TCM_LUN_BUSY: 1917 cmd->scsi_status = SAM_STAT_BUSY; 1918 goto queue_status; 1919 case TCM_RESERVATION_CONFLICT: 1920 /* 1921 * No SENSE Data payload for this case, set SCSI Status 1922 * and queue the response to $FABRIC_MOD. 1923 * 1924 * Uses linux/include/scsi/scsi.h SAM status codes defs 1925 */ 1926 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1927 /* 1928 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 1929 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 1930 * CONFLICT STATUS. 
1931 * 1932 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1933 */ 1934 if (cmd->se_sess && 1935 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl 1936 == TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) { 1937 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 1938 cmd->orig_fe_lun, 0x2C, 1939 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1940 } 1941 1942 goto queue_status; 1943 default: 1944 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 1945 cmd->t_task_cdb[0], sense_reason); 1946 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1947 break; 1948 } 1949 1950 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); 1951 if (ret) 1952 goto queue_full; 1953 1954 check_stop: 1955 transport_lun_remove_cmd(cmd); 1956 transport_cmd_check_stop_to_fabric(cmd); 1957 return; 1958 1959 queue_status: 1960 trace_target_cmd_complete(cmd); 1961 ret = cmd->se_tfo->queue_status(cmd); 1962 if (!ret) 1963 goto check_stop; 1964 queue_full: 1965 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 1966 } 1967 EXPORT_SYMBOL(transport_generic_request_failure); 1968 1969 void __target_execute_cmd(struct se_cmd *cmd, bool do_checks) 1970 { 1971 sense_reason_t ret; 1972 1973 if (!cmd->execute_cmd) { 1974 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1975 goto err; 1976 } 1977 if (do_checks) { 1978 /* 1979 * Check for an existing UNIT ATTENTION condition after 1980 * target_handle_task_attr() has done SAM task attr 1981 * checking, and possibly have already defered execution 1982 * out to target_restart_delayed_cmds() context. 1983 */ 1984 ret = target_scsi3_ua_check(cmd); 1985 if (ret) 1986 goto err; 1987 1988 ret = target_alua_state_check(cmd); 1989 if (ret) 1990 goto err; 1991 1992 ret = target_check_reservation(cmd); 1993 if (ret) { 1994 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1995 goto err; 1996 } 1997 } 1998 1999 ret = cmd->execute_cmd(cmd); 2000 if (!ret) 2001 return; 2002 err: 2003 spin_lock_irq(&cmd->t_state_lock); 2004 cmd->transport_state &= ~CMD_T_SENT; 2005 spin_unlock_irq(&cmd->t_state_lock); 2006 2007 transport_generic_request_failure(cmd, ret); 2008 } 2009 2010 static int target_write_prot_action(struct se_cmd *cmd) 2011 { 2012 u32 sectors; 2013 /* 2014 * Perform WRITE_INSERT of PI using software emulation when backend 2015 * device has PI enabled, if the transport has not already generated 2016 * PI using hardware WRITE_INSERT offload. 
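	 *
	 * Worked example (assuming a 512-byte block_size): a 4 KiB WRITE
	 * covers 4096 >> ilog2(512) = 8 sectors, so the software path below
	 * generates or verifies eight protection tuples.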
	 */
	switch (cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
			sbc_dif_generate(cmd);
		break;
	case TARGET_PROT_DOUT_STRIP:
		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
			break;

		sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
		cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
					     sectors, 0, cmd->t_prot_sg, 0);
		if (unlikely(cmd->pi_err)) {
			spin_lock_irq(&cmd->t_state_lock);
			cmd->transport_state &= ~CMD_T_SENT;
			spin_unlock_irq(&cmd->t_state_lock);
			transport_generic_request_failure(cmd, cmd->pi_err);
			return -1;
		}
		break;
	default:
		break;
	}

	return 0;
}

static bool target_handle_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return false;

	cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;

	/*
	 * HEAD_OF_QUEUE commands bypass the ordered/simple accounting below
	 * and are executed immediately, i.e. effectively moved to the front
	 * of the queue.
	 */
	switch (cmd->sam_task_attr) {
	case TCM_HEAD_TAG:
		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
			 cmd->t_task_cdb[0]);
		return false;
	case TCM_ORDERED_TAG:
		atomic_inc_mb(&dev->dev_ordered_sync);

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
			 cmd->t_task_cdb[0]);

		/*
		 * Execute an ORDERED command if no other older commands
		 * exist that need to be completed first.
		 */
		if (!atomic_read(&dev->simple_cmds))
			return false;
		break;
	default:
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc_mb(&dev->simple_cmds);
		break;
	}

	if (atomic_read(&dev->dev_ordered_sync) == 0)
		return false;

	spin_lock(&dev->delayed_cmd_lock);
	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
	spin_unlock(&dev->delayed_cmd_lock);

	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
		 cmd->t_task_cdb[0], cmd->sam_task_attr);
	return true;
}

void target_execute_cmd(struct se_cmd *cmd)
{
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 *
	 * If the received CDB has already been aborted stop processing it here.
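	 *
	 * Illustrative flow note: once past these checks the command is
	 * marked CMD_T_ACTIVE|CMD_T_SENT and, unless target_write_prot_action()
	 * or target_handle_task_attr() defers it (e.g. an ORDERED command
	 * arriving while SIMPLE commands are still in flight is parked on
	 * dev->delayed_cmd_list), it is handed to __target_execute_cmd().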
2103 */ 2104 if (target_cmd_interrupted(cmd)) 2105 return; 2106 2107 spin_lock_irq(&cmd->t_state_lock); 2108 cmd->t_state = TRANSPORT_PROCESSING; 2109 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; 2110 spin_unlock_irq(&cmd->t_state_lock); 2111 2112 if (target_write_prot_action(cmd)) 2113 return; 2114 2115 if (target_handle_task_attr(cmd)) { 2116 spin_lock_irq(&cmd->t_state_lock); 2117 cmd->transport_state &= ~CMD_T_SENT; 2118 spin_unlock_irq(&cmd->t_state_lock); 2119 return; 2120 } 2121 2122 __target_execute_cmd(cmd, true); 2123 } 2124 EXPORT_SYMBOL(target_execute_cmd); 2125 2126 /* 2127 * Process all commands up to the last received ORDERED task attribute which 2128 * requires another blocking boundary 2129 */ 2130 static void target_restart_delayed_cmds(struct se_device *dev) 2131 { 2132 for (;;) { 2133 struct se_cmd *cmd; 2134 2135 spin_lock(&dev->delayed_cmd_lock); 2136 if (list_empty(&dev->delayed_cmd_list)) { 2137 spin_unlock(&dev->delayed_cmd_lock); 2138 break; 2139 } 2140 2141 cmd = list_entry(dev->delayed_cmd_list.next, 2142 struct se_cmd, se_delayed_node); 2143 list_del(&cmd->se_delayed_node); 2144 spin_unlock(&dev->delayed_cmd_lock); 2145 2146 cmd->transport_state |= CMD_T_SENT; 2147 2148 __target_execute_cmd(cmd, true); 2149 2150 if (cmd->sam_task_attr == TCM_ORDERED_TAG) 2151 break; 2152 } 2153 } 2154 2155 /* 2156 * Called from I/O completion to determine which dormant/delayed 2157 * and ordered cmds need to have their tasks added to the execution queue. 2158 */ 2159 static void transport_complete_task_attr(struct se_cmd *cmd) 2160 { 2161 struct se_device *dev = cmd->se_dev; 2162 2163 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 2164 return; 2165 2166 if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET)) 2167 goto restart; 2168 2169 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { 2170 atomic_dec_mb(&dev->simple_cmds); 2171 dev->dev_cur_ordered_id++; 2172 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { 2173 dev->dev_cur_ordered_id++; 2174 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", 2175 dev->dev_cur_ordered_id); 2176 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { 2177 atomic_dec_mb(&dev->dev_ordered_sync); 2178 2179 dev->dev_cur_ordered_id++; 2180 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", 2181 dev->dev_cur_ordered_id); 2182 } 2183 cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; 2184 2185 restart: 2186 target_restart_delayed_cmds(dev); 2187 } 2188 2189 static void transport_complete_qf(struct se_cmd *cmd) 2190 { 2191 int ret = 0; 2192 2193 transport_complete_task_attr(cmd); 2194 /* 2195 * If a fabric driver ->write_pending() or ->queue_data_in() callback 2196 * has returned neither -ENOMEM or -EAGAIN, assume it's fatal and 2197 * the same callbacks should not be retried. Return CHECK_CONDITION 2198 * if a scsi_status is not already set. 2199 * 2200 * If a fabric driver ->queue_status() has returned non zero, always 2201 * keep retrying no matter what.. 2202 */ 2203 if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) { 2204 if (cmd->scsi_status) 2205 goto queue_status; 2206 2207 translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 2208 goto queue_status; 2209 } 2210 2211 /* 2212 * Check if we need to send a sense buffer from 2213 * the struct se_cmd in question. We do NOT want 2214 * to take this path of the IO has been marked as 2215 * needing to be treated like a "normal read". This 2216 * is the case if it's a tape read, and either the 2217 * FM, EOM, or ILI bits are set, but there is no 2218 * sense data. 
2219 */ 2220 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && 2221 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 2222 goto queue_status; 2223 2224 switch (cmd->data_direction) { 2225 case DMA_FROM_DEVICE: 2226 /* queue status if not treating this as a normal read */ 2227 if (cmd->scsi_status && 2228 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) 2229 goto queue_status; 2230 2231 trace_target_cmd_complete(cmd); 2232 ret = cmd->se_tfo->queue_data_in(cmd); 2233 break; 2234 case DMA_TO_DEVICE: 2235 if (cmd->se_cmd_flags & SCF_BIDI) { 2236 ret = cmd->se_tfo->queue_data_in(cmd); 2237 break; 2238 } 2239 /* fall through */ 2240 case DMA_NONE: 2241 queue_status: 2242 trace_target_cmd_complete(cmd); 2243 ret = cmd->se_tfo->queue_status(cmd); 2244 break; 2245 default: 2246 break; 2247 } 2248 2249 if (ret < 0) { 2250 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 2251 return; 2252 } 2253 transport_lun_remove_cmd(cmd); 2254 transport_cmd_check_stop_to_fabric(cmd); 2255 } 2256 2257 static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev, 2258 int err, bool write_pending) 2259 { 2260 /* 2261 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or 2262 * ->queue_data_in() callbacks from new process context. 2263 * 2264 * Otherwise for other errors, transport_complete_qf() will send 2265 * CHECK_CONDITION via ->queue_status() instead of attempting to 2266 * retry associated fabric driver data-transfer callbacks. 2267 */ 2268 if (err == -EAGAIN || err == -ENOMEM) { 2269 cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP : 2270 TRANSPORT_COMPLETE_QF_OK; 2271 } else { 2272 pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err); 2273 cmd->t_state = TRANSPORT_COMPLETE_QF_ERR; 2274 } 2275 2276 spin_lock_irq(&dev->qf_cmd_lock); 2277 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 2278 atomic_inc_mb(&dev->dev_qf_count); 2279 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 2280 2281 schedule_work(&cmd->se_dev->qf_work_queue); 2282 } 2283 2284 static bool target_read_prot_action(struct se_cmd *cmd) 2285 { 2286 switch (cmd->prot_op) { 2287 case TARGET_PROT_DIN_STRIP: 2288 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { 2289 u32 sectors = cmd->data_length >> 2290 ilog2(cmd->se_dev->dev_attrib.block_size); 2291 2292 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 2293 sectors, 0, cmd->t_prot_sg, 2294 0); 2295 if (cmd->pi_err) 2296 return true; 2297 } 2298 break; 2299 case TARGET_PROT_DIN_INSERT: 2300 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) 2301 break; 2302 2303 sbc_dif_generate(cmd); 2304 break; 2305 default: 2306 break; 2307 } 2308 2309 return false; 2310 } 2311 2312 static void target_complete_ok_work(struct work_struct *work) 2313 { 2314 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2315 int ret; 2316 2317 /* 2318 * Check if we need to move delayed/dormant tasks from cmds on the 2319 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 2320 * Attribute. 2321 */ 2322 transport_complete_task_attr(cmd); 2323 2324 /* 2325 * Check to schedule QUEUE_FULL work, or execute an existing 2326 * cmd->transport_qf_callback() 2327 */ 2328 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 2329 schedule_work(&cmd->se_dev->qf_work_queue); 2330 2331 /* 2332 * Check if we need to send a sense buffer from 2333 * the struct se_cmd in question. We do NOT want 2334 * to take this path of the IO has been marked as 2335 * needing to be treated like a "normal read". 
This 2336 * is the case if it's a tape read, and either the 2337 * FM, EOM, or ILI bits are set, but there is no 2338 * sense data. 2339 */ 2340 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && 2341 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 2342 WARN_ON(!cmd->scsi_status); 2343 ret = transport_send_check_condition_and_sense( 2344 cmd, 0, 1); 2345 if (ret) 2346 goto queue_full; 2347 2348 transport_lun_remove_cmd(cmd); 2349 transport_cmd_check_stop_to_fabric(cmd); 2350 return; 2351 } 2352 /* 2353 * Check for a callback, used by amongst other things 2354 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. 2355 */ 2356 if (cmd->transport_complete_callback) { 2357 sense_reason_t rc; 2358 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); 2359 bool zero_dl = !(cmd->data_length); 2360 int post_ret = 0; 2361 2362 rc = cmd->transport_complete_callback(cmd, true, &post_ret); 2363 if (!rc && !post_ret) { 2364 if (caw && zero_dl) 2365 goto queue_rsp; 2366 2367 return; 2368 } else if (rc) { 2369 ret = transport_send_check_condition_and_sense(cmd, 2370 rc, 0); 2371 if (ret) 2372 goto queue_full; 2373 2374 transport_lun_remove_cmd(cmd); 2375 transport_cmd_check_stop_to_fabric(cmd); 2376 return; 2377 } 2378 } 2379 2380 queue_rsp: 2381 switch (cmd->data_direction) { 2382 case DMA_FROM_DEVICE: 2383 /* 2384 * if this is a READ-type IO, but SCSI status 2385 * is set, then skip returning data and just 2386 * return the status -- unless this IO is marked 2387 * as needing to be treated as a normal read, 2388 * in which case we want to go ahead and return 2389 * the data. This happens, for example, for tape 2390 * reads with the FM, EOM, or ILI bits set, with 2391 * no sense data. 2392 */ 2393 if (cmd->scsi_status && 2394 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) 2395 goto queue_status; 2396 2397 atomic_long_add(cmd->data_length, 2398 &cmd->se_lun->lun_stats.tx_data_octets); 2399 /* 2400 * Perform READ_STRIP of PI using software emulation when 2401 * backend had PI enabled, if the transport will not be 2402 * performing hardware READ_STRIP offload. 
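		 *
		 * (A fabric that performs READ_STRIP in hardware is expected
		 * to advertise this by setting TARGET_PROT_DIN_STRIP in
		 * se_sess->sup_prot_ops when the session is set up, in which
		 * case target_read_prot_action() skips the software verify.)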
2403 */ 2404 if (target_read_prot_action(cmd)) { 2405 ret = transport_send_check_condition_and_sense(cmd, 2406 cmd->pi_err, 0); 2407 if (ret) 2408 goto queue_full; 2409 2410 transport_lun_remove_cmd(cmd); 2411 transport_cmd_check_stop_to_fabric(cmd); 2412 return; 2413 } 2414 2415 trace_target_cmd_complete(cmd); 2416 ret = cmd->se_tfo->queue_data_in(cmd); 2417 if (ret) 2418 goto queue_full; 2419 break; 2420 case DMA_TO_DEVICE: 2421 atomic_long_add(cmd->data_length, 2422 &cmd->se_lun->lun_stats.rx_data_octets); 2423 /* 2424 * Check if we need to send READ payload for BIDI-COMMAND 2425 */ 2426 if (cmd->se_cmd_flags & SCF_BIDI) { 2427 atomic_long_add(cmd->data_length, 2428 &cmd->se_lun->lun_stats.tx_data_octets); 2429 ret = cmd->se_tfo->queue_data_in(cmd); 2430 if (ret) 2431 goto queue_full; 2432 break; 2433 } 2434 /* fall through */ 2435 case DMA_NONE: 2436 queue_status: 2437 trace_target_cmd_complete(cmd); 2438 ret = cmd->se_tfo->queue_status(cmd); 2439 if (ret) 2440 goto queue_full; 2441 break; 2442 default: 2443 break; 2444 } 2445 2446 transport_lun_remove_cmd(cmd); 2447 transport_cmd_check_stop_to_fabric(cmd); 2448 return; 2449 2450 queue_full: 2451 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 2452 " data_direction: %d\n", cmd, cmd->data_direction); 2453 2454 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 2455 } 2456 2457 void target_free_sgl(struct scatterlist *sgl, int nents) 2458 { 2459 sgl_free_n_order(sgl, nents, 0); 2460 } 2461 EXPORT_SYMBOL(target_free_sgl); 2462 2463 static inline void transport_reset_sgl_orig(struct se_cmd *cmd) 2464 { 2465 /* 2466 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE 2467 * emulation, and free + reset pointers if necessary.. 2468 */ 2469 if (!cmd->t_data_sg_orig) 2470 return; 2471 2472 kfree(cmd->t_data_sg); 2473 cmd->t_data_sg = cmd->t_data_sg_orig; 2474 cmd->t_data_sg_orig = NULL; 2475 cmd->t_data_nents = cmd->t_data_nents_orig; 2476 cmd->t_data_nents_orig = 0; 2477 } 2478 2479 static inline void transport_free_pages(struct se_cmd *cmd) 2480 { 2481 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2482 target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); 2483 cmd->t_prot_sg = NULL; 2484 cmd->t_prot_nents = 0; 2485 } 2486 2487 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { 2488 /* 2489 * Release special case READ buffer payload required for 2490 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE 2491 */ 2492 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { 2493 target_free_sgl(cmd->t_bidi_data_sg, 2494 cmd->t_bidi_data_nents); 2495 cmd->t_bidi_data_sg = NULL; 2496 cmd->t_bidi_data_nents = 0; 2497 } 2498 transport_reset_sgl_orig(cmd); 2499 return; 2500 } 2501 transport_reset_sgl_orig(cmd); 2502 2503 target_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2504 cmd->t_data_sg = NULL; 2505 cmd->t_data_nents = 0; 2506 2507 target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 2508 cmd->t_bidi_data_sg = NULL; 2509 cmd->t_bidi_data_nents = 0; 2510 } 2511 2512 void *transport_kmap_data_sg(struct se_cmd *cmd) 2513 { 2514 struct scatterlist *sg = cmd->t_data_sg; 2515 struct page **pages; 2516 int i; 2517 2518 /* 2519 * We need to take into account a possible offset here for fabrics like 2520 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 2521 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 2522 */ 2523 if (!cmd->t_data_nents) 2524 return NULL; 2525 2526 BUG_ON(!sg); 2527 if (cmd->t_data_nents == 1) 2528 return kmap(sg_page(sg)) + 
sg->offset; 2529 2530 /* >1 page. use vmap */ 2531 pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL); 2532 if (!pages) 2533 return NULL; 2534 2535 /* convert sg[] to pages[] */ 2536 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 2537 pages[i] = sg_page(sg); 2538 } 2539 2540 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 2541 kfree(pages); 2542 if (!cmd->t_data_vmap) 2543 return NULL; 2544 2545 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 2546 } 2547 EXPORT_SYMBOL(transport_kmap_data_sg); 2548 2549 void transport_kunmap_data_sg(struct se_cmd *cmd) 2550 { 2551 if (!cmd->t_data_nents) { 2552 return; 2553 } else if (cmd->t_data_nents == 1) { 2554 kunmap(sg_page(cmd->t_data_sg)); 2555 return; 2556 } 2557 2558 vunmap(cmd->t_data_vmap); 2559 cmd->t_data_vmap = NULL; 2560 } 2561 EXPORT_SYMBOL(transport_kunmap_data_sg); 2562 2563 int 2564 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, 2565 bool zero_page, bool chainable) 2566 { 2567 gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0); 2568 2569 *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents); 2570 return *sgl ? 0 : -ENOMEM; 2571 } 2572 EXPORT_SYMBOL(target_alloc_sgl); 2573 2574 /* 2575 * Allocate any required resources to execute the command. For writes we 2576 * might not have the payload yet, so notify the fabric via a call to 2577 * ->write_pending instead. Otherwise place it on the execution queue. 2578 */ 2579 sense_reason_t 2580 transport_generic_new_cmd(struct se_cmd *cmd) 2581 { 2582 unsigned long flags; 2583 int ret = 0; 2584 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); 2585 2586 if (cmd->prot_op != TARGET_PROT_NORMAL && 2587 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2588 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, 2589 cmd->prot_length, true, false); 2590 if (ret < 0) 2591 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2592 } 2593 2594 /* 2595 * Determine if the TCM fabric module has already allocated physical 2596 * memory, and is directly calling transport_generic_map_mem_to_cmd() 2597 * beforehand. 2598 */ 2599 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 2600 cmd->data_length) { 2601 2602 if ((cmd->se_cmd_flags & SCF_BIDI) || 2603 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { 2604 u32 bidi_length; 2605 2606 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) 2607 bidi_length = cmd->t_task_nolb * 2608 cmd->se_dev->dev_attrib.block_size; 2609 else 2610 bidi_length = cmd->data_length; 2611 2612 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2613 &cmd->t_bidi_data_nents, 2614 bidi_length, zero_flag, false); 2615 if (ret < 0) 2616 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2617 } 2618 2619 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2620 cmd->data_length, zero_flag, false); 2621 if (ret < 0) 2622 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2623 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 2624 cmd->data_length) { 2625 /* 2626 * Special case for COMPARE_AND_WRITE with fabrics 2627 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. 
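		 *
		 * Worked example (illustrative numbers): COMPARE AND WRITE
		 * with NUMBER OF LOGICAL BLOCKS = 1 on a 4096-byte block
		 * device receives 2 * 4096 bytes of data-out from the fabric
		 * (verify + write images), while the t_bidi_data_sg allocated
		 * below holds the 1 * 4096 = 4096 bytes read back from media
		 * for the compare step.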
2628 */ 2629 u32 caw_length = cmd->t_task_nolb * 2630 cmd->se_dev->dev_attrib.block_size; 2631 2632 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2633 &cmd->t_bidi_data_nents, 2634 caw_length, zero_flag, false); 2635 if (ret < 0) 2636 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2637 } 2638 /* 2639 * If this command is not a write we can execute it right here, 2640 * for write buffers we need to notify the fabric driver first 2641 * and let it call back once the write buffers are ready. 2642 */ 2643 target_add_to_state_list(cmd); 2644 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { 2645 target_execute_cmd(cmd); 2646 return 0; 2647 } 2648 2649 spin_lock_irqsave(&cmd->t_state_lock, flags); 2650 cmd->t_state = TRANSPORT_WRITE_PENDING; 2651 /* 2652 * Determine if frontend context caller is requesting the stopping of 2653 * this command for frontend exceptions. 2654 */ 2655 if (cmd->transport_state & CMD_T_STOP && 2656 !cmd->se_tfo->write_pending_must_be_called) { 2657 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 2658 __func__, __LINE__, cmd->tag); 2659 2660 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2661 2662 complete_all(&cmd->t_transport_stop_comp); 2663 return 0; 2664 } 2665 cmd->transport_state &= ~CMD_T_ACTIVE; 2666 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2667 2668 ret = cmd->se_tfo->write_pending(cmd); 2669 if (ret) 2670 goto queue_full; 2671 2672 return 0; 2673 2674 queue_full: 2675 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 2676 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); 2677 return 0; 2678 } 2679 EXPORT_SYMBOL(transport_generic_new_cmd); 2680 2681 static void transport_write_pending_qf(struct se_cmd *cmd) 2682 { 2683 unsigned long flags; 2684 int ret; 2685 bool stop; 2686 2687 spin_lock_irqsave(&cmd->t_state_lock, flags); 2688 stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED)); 2689 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2690 2691 if (stop) { 2692 pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n", 2693 __func__, __LINE__, cmd->tag); 2694 complete_all(&cmd->t_transport_stop_comp); 2695 return; 2696 } 2697 2698 ret = cmd->se_tfo->write_pending(cmd); 2699 if (ret) { 2700 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", 2701 cmd); 2702 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); 2703 } 2704 } 2705 2706 static bool 2707 __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, 2708 unsigned long *flags); 2709 2710 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) 2711 { 2712 unsigned long flags; 2713 2714 spin_lock_irqsave(&cmd->t_state_lock, flags); 2715 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); 2716 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2717 } 2718 2719 /* 2720 * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has 2721 * finished. 2722 */ 2723 void target_put_cmd_and_wait(struct se_cmd *cmd) 2724 { 2725 DECLARE_COMPLETION_ONSTACK(compl); 2726 2727 WARN_ON_ONCE(cmd->abrt_compl); 2728 cmd->abrt_compl = &compl; 2729 target_put_sess_cmd(cmd); 2730 wait_for_completion(&compl); 2731 } 2732 2733 /* 2734 * This function is called by frontend drivers after processing of a command 2735 * has finished. 
2736 * 2737 * The protocol for ensuring that either the regular frontend command 2738 * processing flow or target_handle_abort() code drops one reference is as 2739 * follows: 2740 * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause 2741 * the frontend driver to call this function synchronously or asynchronously. 2742 * That will cause one reference to be dropped. 2743 * - During regular command processing the target core sets CMD_T_COMPLETE 2744 * before invoking one of the .queue_*() functions. 2745 * - The code that aborts commands skips commands and TMFs for which 2746 * CMD_T_COMPLETE has been set. 2747 * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for 2748 * commands that will be aborted. 2749 * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set 2750 * transport_generic_free_cmd() skips its call to target_put_sess_cmd(). 2751 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will 2752 * be called and will drop a reference. 2753 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task() 2754 * will be called. target_handle_abort() will drop the final reference. 2755 */ 2756 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2757 { 2758 DECLARE_COMPLETION_ONSTACK(compl); 2759 int ret = 0; 2760 bool aborted = false, tas = false; 2761 2762 if (wait_for_tasks) 2763 target_wait_free_cmd(cmd, &aborted, &tas); 2764 2765 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) { 2766 /* 2767 * Handle WRITE failure case where transport_generic_new_cmd() 2768 * has already added se_cmd to state_list, but fabric has 2769 * failed command before I/O submission. 2770 */ 2771 if (cmd->state_active) 2772 target_remove_from_state_list(cmd); 2773 2774 if (cmd->se_lun) 2775 transport_lun_remove_cmd(cmd); 2776 } 2777 if (aborted) 2778 cmd->free_compl = &compl; 2779 ret = target_put_sess_cmd(cmd); 2780 if (aborted) { 2781 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); 2782 wait_for_completion(&compl); 2783 ret = 1; 2784 } 2785 return ret; 2786 } 2787 EXPORT_SYMBOL(transport_generic_free_cmd); 2788 2789 /** 2790 * target_get_sess_cmd - Add command to active ->sess_cmd_list 2791 * @se_cmd: command descriptor to add 2792 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() 2793 */ 2794 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) 2795 { 2796 struct se_session *se_sess = se_cmd->se_sess; 2797 unsigned long flags; 2798 int ret = 0; 2799 2800 /* 2801 * Add a second kref if the fabric caller is expecting to handle 2802 * fabric acknowledgement that requires two target_put_sess_cmd() 2803 * invocations before se_cmd descriptor release. 
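	 *
	 * In practice a fabric passing TARGET_SCF_ACK_KREF ends up with a
	 * pairing roughly like the following (sketch only; exactly where the
	 * second put happens is a fabric-specific choice, e.g. when the
	 * initiator acknowledges the response):
	 *
	 *	target_submit_cmd(..., TARGET_SCF_ACK_KREF);	cmd_kref == 2
	 *	...
	 *	transport_generic_free_cmd(se_cmd, 0);		first put
	 *	...
	 *	target_put_sess_cmd(se_cmd);			final put,
	 *							->release_cmd()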
2804 */ 2805 if (ack_kref) { 2806 if (!kref_get_unless_zero(&se_cmd->cmd_kref)) 2807 return -EINVAL; 2808 2809 se_cmd->se_cmd_flags |= SCF_ACK_KREF; 2810 } 2811 2812 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2813 if (se_sess->sess_tearing_down) { 2814 ret = -ESHUTDOWN; 2815 goto out; 2816 } 2817 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2818 percpu_ref_get(&se_sess->cmd_count); 2819 out: 2820 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2821 2822 if (ret && ack_kref) 2823 target_put_sess_cmd(se_cmd); 2824 2825 return ret; 2826 } 2827 EXPORT_SYMBOL(target_get_sess_cmd); 2828 2829 static void target_free_cmd_mem(struct se_cmd *cmd) 2830 { 2831 transport_free_pages(cmd); 2832 2833 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 2834 core_tmr_release_req(cmd->se_tmr_req); 2835 if (cmd->t_task_cdb != cmd->__t_task_cdb) 2836 kfree(cmd->t_task_cdb); 2837 } 2838 2839 static void target_release_cmd_kref(struct kref *kref) 2840 { 2841 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2842 struct se_session *se_sess = se_cmd->se_sess; 2843 struct completion *free_compl = se_cmd->free_compl; 2844 struct completion *abrt_compl = se_cmd->abrt_compl; 2845 unsigned long flags; 2846 2847 if (se_sess) { 2848 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2849 list_del_init(&se_cmd->se_cmd_list); 2850 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2851 } 2852 2853 target_free_cmd_mem(se_cmd); 2854 se_cmd->se_tfo->release_cmd(se_cmd); 2855 if (free_compl) 2856 complete(free_compl); 2857 if (abrt_compl) 2858 complete(abrt_compl); 2859 2860 percpu_ref_put(&se_sess->cmd_count); 2861 } 2862 2863 /** 2864 * target_put_sess_cmd - decrease the command reference count 2865 * @se_cmd: command to drop a reference from 2866 * 2867 * Returns 1 if and only if this target_put_sess_cmd() call caused the 2868 * refcount to drop to zero. Returns zero otherwise. 2869 */ 2870 int target_put_sess_cmd(struct se_cmd *se_cmd) 2871 { 2872 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); 2873 } 2874 EXPORT_SYMBOL(target_put_sess_cmd); 2875 2876 static const char *data_dir_name(enum dma_data_direction d) 2877 { 2878 switch (d) { 2879 case DMA_BIDIRECTIONAL: return "BIDI"; 2880 case DMA_TO_DEVICE: return "WRITE"; 2881 case DMA_FROM_DEVICE: return "READ"; 2882 case DMA_NONE: return "NONE"; 2883 } 2884 2885 return "(?)"; 2886 } 2887 2888 static const char *cmd_state_name(enum transport_state_table t) 2889 { 2890 switch (t) { 2891 case TRANSPORT_NO_STATE: return "NO_STATE"; 2892 case TRANSPORT_NEW_CMD: return "NEW_CMD"; 2893 case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING"; 2894 case TRANSPORT_PROCESSING: return "PROCESSING"; 2895 case TRANSPORT_COMPLETE: return "COMPLETE"; 2896 case TRANSPORT_ISTATE_PROCESSING: 2897 return "ISTATE_PROCESSING"; 2898 case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP"; 2899 case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK"; 2900 case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR"; 2901 } 2902 2903 return "(?)"; 2904 } 2905 2906 static void target_append_str(char **str, const char *txt) 2907 { 2908 char *prev = *str; 2909 2910 *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) : 2911 kstrdup(txt, GFP_ATOMIC); 2912 kfree(prev); 2913 } 2914 2915 /* 2916 * Convert a transport state bitmask into a string. The caller is 2917 * responsible for freeing the returned pointer. 
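 *
 * For example, a command whose transport_state has CMD_T_ABORTED,
 * CMD_T_ACTIVE and CMD_T_SENT set is rendered as "aborted,active,sent";
 * a mask with none of the known bits set yields NULL, which the
 * pr_debug("%s") callers below print as "(null)".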
2918 */ 2919 static char *target_ts_to_str(u32 ts) 2920 { 2921 char *str = NULL; 2922 2923 if (ts & CMD_T_ABORTED) 2924 target_append_str(&str, "aborted"); 2925 if (ts & CMD_T_ACTIVE) 2926 target_append_str(&str, "active"); 2927 if (ts & CMD_T_COMPLETE) 2928 target_append_str(&str, "complete"); 2929 if (ts & CMD_T_SENT) 2930 target_append_str(&str, "sent"); 2931 if (ts & CMD_T_STOP) 2932 target_append_str(&str, "stop"); 2933 if (ts & CMD_T_FABRIC_STOP) 2934 target_append_str(&str, "fabric_stop"); 2935 2936 return str; 2937 } 2938 2939 static const char *target_tmf_name(enum tcm_tmreq_table tmf) 2940 { 2941 switch (tmf) { 2942 case TMR_ABORT_TASK: return "ABORT_TASK"; 2943 case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET"; 2944 case TMR_CLEAR_ACA: return "CLEAR_ACA"; 2945 case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET"; 2946 case TMR_LUN_RESET: return "LUN_RESET"; 2947 case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET"; 2948 case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET"; 2949 case TMR_LUN_RESET_PRO: return "LUN_RESET_PRO"; 2950 case TMR_UNKNOWN: break; 2951 } 2952 return "(?)"; 2953 } 2954 2955 void target_show_cmd(const char *pfx, struct se_cmd *cmd) 2956 { 2957 char *ts_str = target_ts_to_str(cmd->transport_state); 2958 const u8 *cdb = cmd->t_task_cdb; 2959 struct se_tmr_req *tmf = cmd->se_tmr_req; 2960 2961 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2962 pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n", 2963 pfx, cdb[0], cdb[1], cmd->tag, 2964 data_dir_name(cmd->data_direction), 2965 cmd->se_tfo->get_cmd_state(cmd), 2966 cmd_state_name(cmd->t_state), cmd->data_length, 2967 kref_read(&cmd->cmd_kref), ts_str); 2968 } else { 2969 pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n", 2970 pfx, target_tmf_name(tmf->function), cmd->tag, 2971 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd), 2972 cmd_state_name(cmd->t_state), 2973 kref_read(&cmd->cmd_kref), ts_str); 2974 } 2975 kfree(ts_str); 2976 } 2977 EXPORT_SYMBOL(target_show_cmd); 2978 2979 /** 2980 * target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued. 2981 * @se_sess: session to flag 2982 */ 2983 void target_sess_cmd_list_set_waiting(struct se_session *se_sess) 2984 { 2985 unsigned long flags; 2986 2987 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2988 se_sess->sess_tearing_down = 1; 2989 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2990 2991 percpu_ref_kill(&se_sess->cmd_count); 2992 } 2993 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); 2994 2995 /** 2996 * target_wait_for_sess_cmds - Wait for outstanding commands 2997 * @se_sess: session to wait for active I/O 2998 */ 2999 void target_wait_for_sess_cmds(struct se_session *se_sess) 3000 { 3001 struct se_cmd *cmd; 3002 int ret; 3003 3004 WARN_ON_ONCE(!se_sess->sess_tearing_down); 3005 3006 do { 3007 ret = wait_event_timeout(se_sess->cmd_list_wq, 3008 percpu_ref_is_zero(&se_sess->cmd_count), 3009 180 * HZ); 3010 list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list) 3011 target_show_cmd("session shutdown: still waiting for ", 3012 cmd); 3013 } while (ret <= 0); 3014 } 3015 EXPORT_SYMBOL(target_wait_for_sess_cmds); 3016 3017 /* 3018 * Prevent that new percpu_ref_tryget_live() calls succeed and wait until 3019 * all references to the LUN have been released. Called during LUN shutdown. 
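 *
 * Each in-flight command pins the LUN through the percpu_ref taken in
 * transport_lookup_cmd_lun() and dropped by transport_lun_remove_cmd();
 * once the reference is killed here, new lookups fail and
 * lun_shutdown_comp completes when the last outstanding reference is
 * dropped.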
3020 */ 3021 void transport_clear_lun_ref(struct se_lun *lun) 3022 { 3023 percpu_ref_kill(&lun->lun_ref); 3024 wait_for_completion(&lun->lun_shutdown_comp); 3025 } 3026 3027 static bool 3028 __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, 3029 bool *aborted, bool *tas, unsigned long *flags) 3030 __releases(&cmd->t_state_lock) 3031 __acquires(&cmd->t_state_lock) 3032 { 3033 3034 assert_spin_locked(&cmd->t_state_lock); 3035 WARN_ON_ONCE(!irqs_disabled()); 3036 3037 if (fabric_stop) 3038 cmd->transport_state |= CMD_T_FABRIC_STOP; 3039 3040 if (cmd->transport_state & CMD_T_ABORTED) 3041 *aborted = true; 3042 3043 if (cmd->transport_state & CMD_T_TAS) 3044 *tas = true; 3045 3046 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 3047 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 3048 return false; 3049 3050 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 3051 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 3052 return false; 3053 3054 if (!(cmd->transport_state & CMD_T_ACTIVE)) 3055 return false; 3056 3057 if (fabric_stop && *aborted) 3058 return false; 3059 3060 cmd->transport_state |= CMD_T_STOP; 3061 3062 target_show_cmd("wait_for_tasks: Stopping ", cmd); 3063 3064 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 3065 3066 while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp, 3067 180 * HZ)) 3068 target_show_cmd("wait for tasks: ", cmd); 3069 3070 spin_lock_irqsave(&cmd->t_state_lock, *flags); 3071 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 3072 3073 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->" 3074 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); 3075 3076 return true; 3077 } 3078 3079 /** 3080 * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp 3081 * @cmd: command to wait on 3082 */ 3083 bool transport_wait_for_tasks(struct se_cmd *cmd) 3084 { 3085 unsigned long flags; 3086 bool ret, aborted = false, tas = false; 3087 3088 spin_lock_irqsave(&cmd->t_state_lock, flags); 3089 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); 3090 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3091 3092 return ret; 3093 } 3094 EXPORT_SYMBOL(transport_wait_for_tasks); 3095 3096 struct sense_info { 3097 u8 key; 3098 u8 asc; 3099 u8 ascq; 3100 bool add_sector_info; 3101 }; 3102 3103 static const struct sense_info sense_info_table[] = { 3104 [TCM_NO_SENSE] = { 3105 .key = NOT_READY 3106 }, 3107 [TCM_NON_EXISTENT_LUN] = { 3108 .key = ILLEGAL_REQUEST, 3109 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */ 3110 }, 3111 [TCM_UNSUPPORTED_SCSI_OPCODE] = { 3112 .key = ILLEGAL_REQUEST, 3113 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 3114 }, 3115 [TCM_SECTOR_COUNT_TOO_MANY] = { 3116 .key = ILLEGAL_REQUEST, 3117 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 3118 }, 3119 [TCM_UNKNOWN_MODE_PAGE] = { 3120 .key = ILLEGAL_REQUEST, 3121 .asc = 0x24, /* INVALID FIELD IN CDB */ 3122 }, 3123 [TCM_CHECK_CONDITION_ABORT_CMD] = { 3124 .key = ABORTED_COMMAND, 3125 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */ 3126 .ascq = 0x03, 3127 }, 3128 [TCM_INCORRECT_AMOUNT_OF_DATA] = { 3129 .key = ABORTED_COMMAND, 3130 .asc = 0x0c, /* WRITE ERROR */ 3131 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */ 3132 }, 3133 [TCM_INVALID_CDB_FIELD] = { 3134 .key = ILLEGAL_REQUEST, 3135 .asc = 0x24, /* INVALID FIELD IN CDB */ 3136 }, 3137 [TCM_INVALID_PARAMETER_LIST] = { 3138 .key = ILLEGAL_REQUEST, 3139 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ 3140 }, 3141 [TCM_TOO_MANY_TARGET_DESCS] = { 3142 .key = ILLEGAL_REQUEST, 3143 .asc = 
		0x26,
		.ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
	},
	[TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
		.ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
	},
	[TCM_TOO_MANY_SEGMENT_DESCS] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
		.ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
	},
	[TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
		.ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
	},
	[TCM_PARAMETER_LIST_LENGTH_ERROR] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
	},
	[TCM_UNEXPECTED_UNSOLICITED_DATA] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x0c, /* WRITE ERROR */
		.ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
	},
	[TCM_SERVICE_CRC_ERROR] = {
		.key = ABORTED_COMMAND,
		.asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
		.ascq = 0x05, /* N/A */
	},
	[TCM_SNACK_REJECTED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x11, /* READ ERROR */
		.ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
	},
	[TCM_WRITE_PROTECTED] = {
		.key = DATA_PROTECT,
		.asc = 0x27, /* WRITE PROTECTED */
	},
	[TCM_ADDRESS_OUT_OF_RANGE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
	},
	[TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
		.key = UNIT_ATTENTION,
	},
	[TCM_CHECK_CONDITION_NOT_READY] = {
		.key = NOT_READY,
	},
	[TCM_MISCOMPARE_VERIFY] = {
		.key = MISCOMPARE,
		.asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
		.ascq = 0x00,
	},
	[TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
		.add_sector_info = true,
	},
	[TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
		.add_sector_info = true,
	},
	[TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
		.add_sector_info = true,
	},
	[TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
		.key = COPY_ABORTED,
		.asc = 0x0d,
		.ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
	},
	[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
		/*
		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
		 * Solaris initiators. Returning NOT READY instead means the
		 * operations will be retried a finite number of times and we
		 * can survive intermittent errors.
		 */
		.key = NOT_READY,
		.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
	},
	[TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
		/*
		 * From spc4r22, sections 5.7.7 and 5.7.8:
		 * If a PERSISTENT RESERVE OUT command with a REGISTER service
		 * action, a REGISTER AND IGNORE EXISTING KEY service action or
		 * a REGISTER AND MOVE service action is attempted, but there
		 * are insufficient device server resources to complete the
		 * operation, then the command shall be terminated with CHECK
		 * CONDITION status, with the sense key set to ILLEGAL REQUEST,
		 * and the additional sense code set to INSUFFICIENT
		 * REGISTRATION RESOURCES.
		 */
		.key = ILLEGAL_REQUEST,
		.asc = 0x55,
		.ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
	},
};

/**
 * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
 * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
 *   be stored.
 * @reason: LIO sense reason code. If this argument has the value
 *   TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
 *   dequeuing a unit attention fails due to multiple commands being processed
 *   concurrently, set the command status to BUSY.
 */
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
	const struct sense_info *si;
	u8 *buffer = cmd->sense_buffer;
	int r = (__force int)reason;
	u8 key, asc, ascq;
	bool desc_format = target_sense_desc_format(cmd->se_dev);

	if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
		si = &sense_info_table[r];
	else
		si = &sense_info_table[(__force int)
				       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];

	key = si->key;
	if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
		if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
						       &ascq)) {
			cmd->scsi_status = SAM_STAT_BUSY;
			return;
		}
	} else if (si->asc == 0) {
		WARN_ON_ONCE(cmd->scsi_asc == 0);
		asc = cmd->scsi_asc;
		ascq = cmd->scsi_ascq;
	} else {
		asc = si->asc;
		ascq = si->ascq;
	}

	cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
	scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
	if (si->add_sector_info)
		WARN_ON_ONCE(scsi_set_sense_information(buffer,
							cmd->scsi_sense_length,
							cmd->bad_sector) < 0);
}

int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
		sense_reason_t reason, int from_transport)
{
	unsigned long flags;

	WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!from_transport)
		translate_sense_reason(cmd, reason);

	trace_target_cmd_complete(cmd);
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);

/**
 * target_send_busy - Send SCSI BUSY status back to the initiator
 * @cmd: SCSI command for which to send a BUSY reply.
 *
 * Note: Only call this function if target_submit_cmd*() failed.
3330 */ 3331 int target_send_busy(struct se_cmd *cmd) 3332 { 3333 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); 3334 3335 cmd->scsi_status = SAM_STAT_BUSY; 3336 trace_target_cmd_complete(cmd); 3337 return cmd->se_tfo->queue_status(cmd); 3338 } 3339 EXPORT_SYMBOL(target_send_busy); 3340 3341 static void target_tmr_work(struct work_struct *work) 3342 { 3343 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 3344 struct se_device *dev = cmd->se_dev; 3345 struct se_tmr_req *tmr = cmd->se_tmr_req; 3346 int ret; 3347 3348 if (cmd->transport_state & CMD_T_ABORTED) 3349 goto aborted; 3350 3351 switch (tmr->function) { 3352 case TMR_ABORT_TASK: 3353 core_tmr_abort_task(dev, tmr, cmd->se_sess); 3354 break; 3355 case TMR_ABORT_TASK_SET: 3356 case TMR_CLEAR_ACA: 3357 case TMR_CLEAR_TASK_SET: 3358 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; 3359 break; 3360 case TMR_LUN_RESET: 3361 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); 3362 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : 3363 TMR_FUNCTION_REJECTED; 3364 if (tmr->response == TMR_FUNCTION_COMPLETE) { 3365 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 3366 cmd->orig_fe_lun, 0x29, 3367 ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED); 3368 } 3369 break; 3370 case TMR_TARGET_WARM_RESET: 3371 tmr->response = TMR_FUNCTION_REJECTED; 3372 break; 3373 case TMR_TARGET_COLD_RESET: 3374 tmr->response = TMR_FUNCTION_REJECTED; 3375 break; 3376 default: 3377 pr_err("Unknown TMR function: 0x%02x.\n", 3378 tmr->function); 3379 tmr->response = TMR_FUNCTION_REJECTED; 3380 break; 3381 } 3382 3383 if (cmd->transport_state & CMD_T_ABORTED) 3384 goto aborted; 3385 3386 cmd->se_tfo->queue_tm_rsp(cmd); 3387 3388 transport_lun_remove_cmd(cmd); 3389 transport_cmd_check_stop_to_fabric(cmd); 3390 return; 3391 3392 aborted: 3393 target_handle_abort(cmd); 3394 } 3395 3396 int transport_generic_handle_tmr( 3397 struct se_cmd *cmd) 3398 { 3399 unsigned long flags; 3400 bool aborted = false; 3401 3402 spin_lock_irqsave(&cmd->t_state_lock, flags); 3403 if (cmd->transport_state & CMD_T_ABORTED) { 3404 aborted = true; 3405 } else { 3406 cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 3407 cmd->transport_state |= CMD_T_ACTIVE; 3408 } 3409 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3410 3411 if (aborted) { 3412 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n", 3413 cmd->se_tmr_req->function, 3414 cmd->se_tmr_req->ref_task_tag, cmd->tag); 3415 target_handle_abort(cmd); 3416 return 0; 3417 } 3418 3419 INIT_WORK(&cmd->work, target_tmr_work); 3420 schedule_work(&cmd->work); 3421 return 0; 3422 } 3423 EXPORT_SYMBOL(transport_generic_handle_tmr); 3424 3425 bool 3426 target_check_wce(struct se_device *dev) 3427 { 3428 bool wce = false; 3429 3430 if (dev->transport->get_write_cache) 3431 wce = dev->transport->get_write_cache(dev); 3432 else if (dev->dev_attrib.emulate_write_cache > 0) 3433 wce = true; 3434 3435 return wce; 3436 } 3437 3438 bool 3439 target_check_fua(struct se_device *dev) 3440 { 3441 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0; 3442 } 3443
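
/*
 * Usage note (summary only, no new mechanism): backends report a volatile
 * write cache either via their ->get_write_cache() method or by setting
 * dev_attrib.emulate_write_cache, which target_check_wce() above folds
 * into a single answer; target_check_fua() additionally requires
 * dev_attrib.emulate_fua_write, and the SBC/SPC emulation code consults
 * these helpers when reporting caching behaviour and honouring FUA bits
 * on incoming writes.
 */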