1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /******************************************************************************* 3 * Filename: target_core_transport.c 4 * 5 * This file contains the Generic Target Engine Core. 6 * 7 * (c) Copyright 2002-2013 Datera, Inc. 8 * 9 * Nicholas A. Bellinger <nab@kernel.org> 10 * 11 ******************************************************************************/ 12 13 #include <linux/net.h> 14 #include <linux/delay.h> 15 #include <linux/string.h> 16 #include <linux/timer.h> 17 #include <linux/slab.h> 18 #include <linux/spinlock.h> 19 #include <linux/kthread.h> 20 #include <linux/in.h> 21 #include <linux/cdrom.h> 22 #include <linux/module.h> 23 #include <linux/ratelimit.h> 24 #include <linux/vmalloc.h> 25 #include <asm/unaligned.h> 26 #include <net/sock.h> 27 #include <net/tcp.h> 28 #include <scsi/scsi_proto.h> 29 #include <scsi/scsi_common.h> 30 31 #include <target/target_core_base.h> 32 #include <target/target_core_backend.h> 33 #include <target/target_core_fabric.h> 34 35 #include "target_core_internal.h" 36 #include "target_core_alua.h" 37 #include "target_core_pr.h" 38 #include "target_core_ua.h" 39 40 #define CREATE_TRACE_POINTS 41 #include <trace/events/target.h> 42 43 static struct workqueue_struct *target_completion_wq; 44 static struct kmem_cache *se_sess_cache; 45 struct kmem_cache *se_ua_cache; 46 struct kmem_cache *t10_pr_reg_cache; 47 struct kmem_cache *t10_alua_lu_gp_cache; 48 struct kmem_cache *t10_alua_lu_gp_mem_cache; 49 struct kmem_cache *t10_alua_tg_pt_gp_cache; 50 struct kmem_cache *t10_alua_lba_map_cache; 51 struct kmem_cache *t10_alua_lba_map_mem_cache; 52 53 static void transport_complete_task_attr(struct se_cmd *cmd); 54 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason); 55 static void transport_handle_queue_full(struct se_cmd *cmd, 56 struct se_device *dev, int err, bool write_pending); 57 static void target_complete_ok_work(struct work_struct *work); 58 59 int init_se_kmem_caches(void) 60 { 61 se_sess_cache = kmem_cache_create("se_sess_cache", 62 sizeof(struct se_session), __alignof__(struct se_session), 63 0, NULL); 64 if (!se_sess_cache) { 65 pr_err("kmem_cache_create() for struct se_session" 66 " failed\n"); 67 goto out; 68 } 69 se_ua_cache = kmem_cache_create("se_ua_cache", 70 sizeof(struct se_ua), __alignof__(struct se_ua), 71 0, NULL); 72 if (!se_ua_cache) { 73 pr_err("kmem_cache_create() for struct se_ua failed\n"); 74 goto out_free_sess_cache; 75 } 76 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", 77 sizeof(struct t10_pr_registration), 78 __alignof__(struct t10_pr_registration), 0, NULL); 79 if (!t10_pr_reg_cache) { 80 pr_err("kmem_cache_create() for struct t10_pr_registration" 81 " failed\n"); 82 goto out_free_ua_cache; 83 } 84 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", 85 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), 86 0, NULL); 87 if (!t10_alua_lu_gp_cache) { 88 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" 89 " failed\n"); 90 goto out_free_pr_reg_cache; 91 } 92 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", 93 sizeof(struct t10_alua_lu_gp_member), 94 __alignof__(struct t10_alua_lu_gp_member), 0, NULL); 95 if (!t10_alua_lu_gp_mem_cache) { 96 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" 97 "cache failed\n"); 98 goto out_free_lu_gp_cache; 99 } 100 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", 101 sizeof(struct t10_alua_tg_pt_gp), 102 __alignof__(struct 
t10_alua_tg_pt_gp), 0, NULL); 103 if (!t10_alua_tg_pt_gp_cache) { 104 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 105 "cache failed\n"); 106 goto out_free_lu_gp_mem_cache; 107 } 108 t10_alua_lba_map_cache = kmem_cache_create( 109 "t10_alua_lba_map_cache", 110 sizeof(struct t10_alua_lba_map), 111 __alignof__(struct t10_alua_lba_map), 0, NULL); 112 if (!t10_alua_lba_map_cache) { 113 pr_err("kmem_cache_create() for t10_alua_lba_map_" 114 "cache failed\n"); 115 goto out_free_tg_pt_gp_cache; 116 } 117 t10_alua_lba_map_mem_cache = kmem_cache_create( 118 "t10_alua_lba_map_mem_cache", 119 sizeof(struct t10_alua_lba_map_member), 120 __alignof__(struct t10_alua_lba_map_member), 0, NULL); 121 if (!t10_alua_lba_map_mem_cache) { 122 pr_err("kmem_cache_create() for t10_alua_lba_map_mem_" 123 "cache failed\n"); 124 goto out_free_lba_map_cache; 125 } 126 127 target_completion_wq = alloc_workqueue("target_completion", 128 WQ_MEM_RECLAIM, 0); 129 if (!target_completion_wq) 130 goto out_free_lba_map_mem_cache; 131 132 return 0; 133 134 out_free_lba_map_mem_cache: 135 kmem_cache_destroy(t10_alua_lba_map_mem_cache); 136 out_free_lba_map_cache: 137 kmem_cache_destroy(t10_alua_lba_map_cache); 138 out_free_tg_pt_gp_cache: 139 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 140 out_free_lu_gp_mem_cache: 141 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 142 out_free_lu_gp_cache: 143 kmem_cache_destroy(t10_alua_lu_gp_cache); 144 out_free_pr_reg_cache: 145 kmem_cache_destroy(t10_pr_reg_cache); 146 out_free_ua_cache: 147 kmem_cache_destroy(se_ua_cache); 148 out_free_sess_cache: 149 kmem_cache_destroy(se_sess_cache); 150 out: 151 return -ENOMEM; 152 } 153 154 void release_se_kmem_caches(void) 155 { 156 destroy_workqueue(target_completion_wq); 157 kmem_cache_destroy(se_sess_cache); 158 kmem_cache_destroy(se_ua_cache); 159 kmem_cache_destroy(t10_pr_reg_cache); 160 kmem_cache_destroy(t10_alua_lu_gp_cache); 161 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 162 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 163 kmem_cache_destroy(t10_alua_lba_map_cache); 164 kmem_cache_destroy(t10_alua_lba_map_mem_cache); 165 } 166 167 /* This code ensures unique mib indexes are handed out. 
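 * Callers pass one of the scsi_index_t values (anything below
 * SCSI_INDEX_TYPE_MAX) to scsi_get_new_index() and get back a
 * monotonically increasing index for that type.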
*/ 168 static DEFINE_SPINLOCK(scsi_mib_index_lock); 169 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; 170 171 /* 172 * Allocate a new row index for the entry type specified 173 */ 174 u32 scsi_get_new_index(scsi_index_t type) 175 { 176 u32 new_index; 177 178 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); 179 180 spin_lock(&scsi_mib_index_lock); 181 new_index = ++scsi_mib_index[type]; 182 spin_unlock(&scsi_mib_index_lock); 183 184 return new_index; 185 } 186 187 void transport_subsystem_check_init(void) 188 { 189 int ret; 190 static int sub_api_initialized; 191 192 if (sub_api_initialized) 193 return; 194 195 ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock"); 196 if (ret != 0) 197 pr_err("Unable to load target_core_iblock\n"); 198 199 ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file"); 200 if (ret != 0) 201 pr_err("Unable to load target_core_file\n"); 202 203 ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi"); 204 if (ret != 0) 205 pr_err("Unable to load target_core_pscsi\n"); 206 207 ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user"); 208 if (ret != 0) 209 pr_err("Unable to load target_core_user\n"); 210 211 sub_api_initialized = 1; 212 } 213 214 static void target_release_sess_cmd_refcnt(struct percpu_ref *ref) 215 { 216 struct se_session *sess = container_of(ref, typeof(*sess), cmd_count); 217 218 wake_up(&sess->cmd_list_wq); 219 } 220 221 /** 222 * transport_init_session - initialize a session object 223 * @se_sess: Session object pointer. 224 * 225 * The caller must have zero-initialized @se_sess before calling this function. 226 */ 227 int transport_init_session(struct se_session *se_sess) 228 { 229 INIT_LIST_HEAD(&se_sess->sess_list); 230 INIT_LIST_HEAD(&se_sess->sess_acl_list); 231 INIT_LIST_HEAD(&se_sess->sess_cmd_list); 232 spin_lock_init(&se_sess->sess_cmd_lock); 233 init_waitqueue_head(&se_sess->cmd_list_wq); 234 return percpu_ref_init(&se_sess->cmd_count, 235 target_release_sess_cmd_refcnt, 0, GFP_KERNEL); 236 } 237 EXPORT_SYMBOL(transport_init_session); 238 239 void transport_uninit_session(struct se_session *se_sess) 240 { 241 percpu_ref_exit(&se_sess->cmd_count); 242 } 243 244 /** 245 * transport_alloc_session - allocate a session object and initialize it 246 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported. 247 */ 248 struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops) 249 { 250 struct se_session *se_sess; 251 int ret; 252 253 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 254 if (!se_sess) { 255 pr_err("Unable to allocate struct se_session from" 256 " se_sess_cache\n"); 257 return ERR_PTR(-ENOMEM); 258 } 259 ret = transport_init_session(se_sess); 260 if (ret < 0) { 261 kmem_cache_free(se_sess_cache, se_sess); 262 return ERR_PTR(ret); 263 } 264 se_sess->sup_prot_ops = sup_prot_ops; 265 266 return se_sess; 267 } 268 EXPORT_SYMBOL(transport_alloc_session); 269 270 /** 271 * transport_alloc_session_tags - allocate target driver private data 272 * @se_sess: Session pointer. 273 * @tag_num: Maximum number of in-flight commands between initiator and target. 274 * @tag_size: Size in bytes of the private data a target driver associates with 275 * each command. 
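 *
 * A minimal usage sketch (the fabric driver and its per-command structure
 * "struct my_fabric_cmd" are hypothetical, not part of target core):
 *
 *	rc = transport_alloc_session_tags(se_sess, 128,
 *					  sizeof(struct my_fabric_cmd));
 *	if (rc < 0)
 *		goto free_session;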
 */
int transport_alloc_session_tags(struct se_session *se_sess,
		unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
					GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!se_sess->sess_cmd_map) {
		pr_err("Unable to allocate se_sess->sess_cmd_map\n");
		return -ENOMEM;
	}

	rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
			false, GFP_KERNEL, NUMA_NO_NODE);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool,"
			" tag_num: %u\n", tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);

/**
 * transport_init_session_tags - allocate a session and target driver private data
 * @tag_num: Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	      each command.
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
static struct se_session *
transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
			    enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	if (tag_num != 0 && !tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_num:"
		       " %u, but zero tag_size\n", tag_num);
		return ERR_PTR(-EINVAL);
	}
	if (!tag_num && tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_size:"
		       " %u, but zero tag_num\n", tag_size);
		return ERR_PTR(-EINVAL);
	}

	se_sess = transport_alloc_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}

/*
 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];
	unsigned long flags;

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active se_sessions.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * Determine if fabric allows for T10-PI feature bits exposed to
		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl node
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}

		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

struct se_session *
target_setup_session(struct se_portal_group *tpg,
		     unsigned int tag_num, unsigned int tag_size,
		     enum target_prot_op prot_op,
		     const char *initiatorname, void *private,
		     int (*callback)(struct se_portal_group *,
				     struct se_session *, void *))
{
	struct se_session *sess;

	/*
	 * If the fabric driver is using percpu-ida based pre-allocation
	 * of I/O descriptor tags, go ahead and perform that setup now.
	 */
	if (tag_num != 0)
		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
	else
		sess = transport_alloc_session(prot_op);

	if (IS_ERR(sess))
		return sess;

	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
			(unsigned char *)initiatorname);
	if (!sess->se_node_acl) {
		transport_free_session(sess);
		return ERR_PTR(-EACCES);
	}
	/*
	 * Go ahead and perform any remaining fabric setup that is
	 * required before transport_register_session().
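	 *
	 * An illustrative callback (hypothetical fabric driver; the names are
	 * not part of target core) might simply hang its private connection
	 * state off the new session:
	 *
	 *	static int my_fabric_session_cb(struct se_portal_group *tpg,
	 *					struct se_session *sess, void *priv)
	 *	{
	 *		struct my_fabric_conn *conn = priv;
	 *
	 *		conn->sess = sess;
	 *		return 0;
	 *	}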
452 */ 453 if (callback != NULL) { 454 int rc = callback(tpg, sess, private); 455 if (rc) { 456 transport_free_session(sess); 457 return ERR_PTR(rc); 458 } 459 } 460 461 transport_register_session(tpg, sess->se_node_acl, sess, private); 462 return sess; 463 } 464 EXPORT_SYMBOL(target_setup_session); 465 466 ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) 467 { 468 struct se_session *se_sess; 469 ssize_t len = 0; 470 471 spin_lock_bh(&se_tpg->session_lock); 472 list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { 473 if (!se_sess->se_node_acl) 474 continue; 475 if (!se_sess->se_node_acl->dynamic_node_acl) 476 continue; 477 if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE) 478 break; 479 480 len += snprintf(page + len, PAGE_SIZE - len, "%s\n", 481 se_sess->se_node_acl->initiatorname); 482 len += 1; /* Include NULL terminator */ 483 } 484 spin_unlock_bh(&se_tpg->session_lock); 485 486 return len; 487 } 488 EXPORT_SYMBOL(target_show_dynamic_sessions); 489 490 static void target_complete_nacl(struct kref *kref) 491 { 492 struct se_node_acl *nacl = container_of(kref, 493 struct se_node_acl, acl_kref); 494 struct se_portal_group *se_tpg = nacl->se_tpg; 495 496 if (!nacl->dynamic_stop) { 497 complete(&nacl->acl_free_comp); 498 return; 499 } 500 501 mutex_lock(&se_tpg->acl_node_mutex); 502 list_del_init(&nacl->acl_list); 503 mutex_unlock(&se_tpg->acl_node_mutex); 504 505 core_tpg_wait_for_nacl_pr_ref(nacl); 506 core_free_device_list_for_node(nacl, se_tpg); 507 kfree(nacl); 508 } 509 510 void target_put_nacl(struct se_node_acl *nacl) 511 { 512 kref_put(&nacl->acl_kref, target_complete_nacl); 513 } 514 EXPORT_SYMBOL(target_put_nacl); 515 516 void transport_deregister_session_configfs(struct se_session *se_sess) 517 { 518 struct se_node_acl *se_nacl; 519 unsigned long flags; 520 /* 521 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 522 */ 523 se_nacl = se_sess->se_node_acl; 524 if (se_nacl) { 525 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 526 if (!list_empty(&se_sess->sess_acl_list)) 527 list_del_init(&se_sess->sess_acl_list); 528 /* 529 * If the session list is empty, then clear the pointer. 530 * Otherwise, set the struct se_session pointer from the tail 531 * element of the per struct se_node_acl active session list. 532 */ 533 if (list_empty(&se_nacl->acl_sess_list)) 534 se_nacl->nacl_sess = NULL; 535 else { 536 se_nacl->nacl_sess = container_of( 537 se_nacl->acl_sess_list.prev, 538 struct se_session, sess_acl_list); 539 } 540 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 541 } 542 } 543 EXPORT_SYMBOL(transport_deregister_session_configfs); 544 545 void transport_free_session(struct se_session *se_sess) 546 { 547 struct se_node_acl *se_nacl = se_sess->se_node_acl; 548 549 /* 550 * Drop the se_node_acl->nacl_kref obtained from within 551 * core_tpg_get_initiator_node_acl(). 552 */ 553 if (se_nacl) { 554 struct se_portal_group *se_tpg = se_nacl->se_tpg; 555 const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo; 556 unsigned long flags; 557 558 se_sess->se_node_acl = NULL; 559 560 /* 561 * Also determine if we need to drop the extra ->cmd_kref if 562 * it had been previously dynamically generated, and 563 * the endpoint is not caching dynamic ACLs. 
564 */ 565 mutex_lock(&se_tpg->acl_node_mutex); 566 if (se_nacl->dynamic_node_acl && 567 !se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 568 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 569 if (list_empty(&se_nacl->acl_sess_list)) 570 se_nacl->dynamic_stop = true; 571 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 572 573 if (se_nacl->dynamic_stop) 574 list_del_init(&se_nacl->acl_list); 575 } 576 mutex_unlock(&se_tpg->acl_node_mutex); 577 578 if (se_nacl->dynamic_stop) 579 target_put_nacl(se_nacl); 580 581 target_put_nacl(se_nacl); 582 } 583 if (se_sess->sess_cmd_map) { 584 sbitmap_queue_free(&se_sess->sess_tag_pool); 585 kvfree(se_sess->sess_cmd_map); 586 } 587 transport_uninit_session(se_sess); 588 kmem_cache_free(se_sess_cache, se_sess); 589 } 590 EXPORT_SYMBOL(transport_free_session); 591 592 static int target_release_res(struct se_device *dev, void *data) 593 { 594 struct se_session *sess = data; 595 596 if (dev->reservation_holder == sess) 597 target_release_reservation(dev); 598 return 0; 599 } 600 601 void transport_deregister_session(struct se_session *se_sess) 602 { 603 struct se_portal_group *se_tpg = se_sess->se_tpg; 604 unsigned long flags; 605 606 if (!se_tpg) { 607 transport_free_session(se_sess); 608 return; 609 } 610 611 spin_lock_irqsave(&se_tpg->session_lock, flags); 612 list_del(&se_sess->sess_list); 613 se_sess->se_tpg = NULL; 614 se_sess->fabric_sess_ptr = NULL; 615 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 616 617 /* 618 * Since the session is being removed, release SPC-2 619 * reservations held by the session that is disappearing. 620 */ 621 target_for_each_device(target_release_res, se_sess); 622 623 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 624 se_tpg->se_tpg_tfo->fabric_name); 625 /* 626 * If last kref is dropping now for an explicit NodeACL, awake sleeping 627 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 628 * removal context from within transport_free_session() code. 629 * 630 * For dynamic ACL, target_put_nacl() uses target_complete_nacl() 631 * to release all remaining generate_node_acl=1 created ACL resources. 632 */ 633 634 transport_free_session(se_sess); 635 } 636 EXPORT_SYMBOL(transport_deregister_session); 637 638 void target_remove_session(struct se_session *se_sess) 639 { 640 transport_deregister_session_configfs(se_sess); 641 transport_deregister_session(se_sess); 642 } 643 EXPORT_SYMBOL(target_remove_session); 644 645 static void target_remove_from_state_list(struct se_cmd *cmd) 646 { 647 struct se_device *dev = cmd->se_dev; 648 unsigned long flags; 649 650 if (!dev) 651 return; 652 653 spin_lock_irqsave(&dev->execute_task_lock, flags); 654 if (cmd->state_active) { 655 list_del(&cmd->state_list); 656 cmd->state_active = false; 657 } 658 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 659 } 660 661 /* 662 * This function is called by the target core after the target core has 663 * finished processing a SCSI command or SCSI TMF. Both the regular command 664 * processing code and the code for aborting commands can call this 665 * function. CMD_T_STOP is set if and only if another thread is waiting 666 * inside transport_wait_for_tasks() for t_transport_stop_comp. 667 */ 668 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) 669 { 670 unsigned long flags; 671 672 target_remove_from_state_list(cmd); 673 674 /* 675 * Clear struct se_cmd->se_lun before the handoff to FE. 
676 */ 677 cmd->se_lun = NULL; 678 679 spin_lock_irqsave(&cmd->t_state_lock, flags); 680 /* 681 * Determine if frontend context caller is requesting the stopping of 682 * this command for frontend exceptions. 683 */ 684 if (cmd->transport_state & CMD_T_STOP) { 685 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 686 __func__, __LINE__, cmd->tag); 687 688 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 689 690 complete_all(&cmd->t_transport_stop_comp); 691 return 1; 692 } 693 cmd->transport_state &= ~CMD_T_ACTIVE; 694 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 695 696 /* 697 * Some fabric modules like tcm_loop can release their internally 698 * allocated I/O reference and struct se_cmd now. 699 * 700 * Fabric modules are expected to return '1' here if the se_cmd being 701 * passed is released at this point, or zero if not being released. 702 */ 703 return cmd->se_tfo->check_stop_free(cmd); 704 } 705 706 static void transport_lun_remove_cmd(struct se_cmd *cmd) 707 { 708 struct se_lun *lun = cmd->se_lun; 709 710 if (!lun) 711 return; 712 713 if (cmpxchg(&cmd->lun_ref_active, true, false)) 714 percpu_ref_put(&lun->lun_ref); 715 } 716 717 static void target_complete_failure_work(struct work_struct *work) 718 { 719 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 720 721 transport_generic_request_failure(cmd, 722 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 723 } 724 725 /* 726 * Used when asking transport to copy Sense Data from the underlying 727 * Linux/SCSI struct scsi_cmnd 728 */ 729 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) 730 { 731 struct se_device *dev = cmd->se_dev; 732 733 WARN_ON(!cmd->se_lun); 734 735 if (!dev) 736 return NULL; 737 738 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) 739 return NULL; 740 741 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 742 743 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", 744 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); 745 return cmd->sense_buffer; 746 } 747 748 void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense) 749 { 750 unsigned char *cmd_sense_buf; 751 unsigned long flags; 752 753 spin_lock_irqsave(&cmd->t_state_lock, flags); 754 cmd_sense_buf = transport_get_sense_buffer(cmd); 755 if (!cmd_sense_buf) { 756 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 757 return; 758 } 759 760 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; 761 memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length); 762 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 763 } 764 EXPORT_SYMBOL(transport_copy_sense_to_cmd); 765 766 static void target_handle_abort(struct se_cmd *cmd) 767 { 768 bool tas = cmd->transport_state & CMD_T_TAS; 769 bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF; 770 int ret; 771 772 pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas); 773 774 if (tas) { 775 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 776 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 777 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n", 778 cmd->t_task_cdb[0], cmd->tag); 779 trace_target_cmd_complete(cmd); 780 ret = cmd->se_tfo->queue_status(cmd); 781 if (ret) { 782 transport_handle_queue_full(cmd, cmd->se_dev, 783 ret, false); 784 return; 785 } 786 } else { 787 cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED; 788 cmd->se_tfo->queue_tm_rsp(cmd); 789 } 790 } else { 791 /* 792 * Allow the fabric driver to unmap any resources before 793 * releasing the descriptor via TFO->release_cmd(). 
794 */ 795 cmd->se_tfo->aborted_task(cmd); 796 if (ack_kref) 797 WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0); 798 /* 799 * To do: establish a unit attention condition on the I_T 800 * nexus associated with cmd. See also the paragraph "Aborting 801 * commands" in SAM. 802 */ 803 } 804 805 WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0); 806 807 transport_lun_remove_cmd(cmd); 808 809 transport_cmd_check_stop_to_fabric(cmd); 810 } 811 812 static void target_abort_work(struct work_struct *work) 813 { 814 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 815 816 target_handle_abort(cmd); 817 } 818 819 static bool target_cmd_interrupted(struct se_cmd *cmd) 820 { 821 int post_ret; 822 823 if (cmd->transport_state & CMD_T_ABORTED) { 824 if (cmd->transport_complete_callback) 825 cmd->transport_complete_callback(cmd, false, &post_ret); 826 INIT_WORK(&cmd->work, target_abort_work); 827 queue_work(target_completion_wq, &cmd->work); 828 return true; 829 } else if (cmd->transport_state & CMD_T_STOP) { 830 if (cmd->transport_complete_callback) 831 cmd->transport_complete_callback(cmd, false, &post_ret); 832 complete_all(&cmd->t_transport_stop_comp); 833 return true; 834 } 835 836 return false; 837 } 838 839 /* May be called from interrupt context so must not sleep. */ 840 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 841 { 842 int success; 843 unsigned long flags; 844 845 if (target_cmd_interrupted(cmd)) 846 return; 847 848 cmd->scsi_status = scsi_status; 849 850 spin_lock_irqsave(&cmd->t_state_lock, flags); 851 switch (cmd->scsi_status) { 852 case SAM_STAT_CHECK_CONDITION: 853 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 854 success = 1; 855 else 856 success = 0; 857 break; 858 default: 859 success = 1; 860 break; 861 } 862 863 cmd->t_state = TRANSPORT_COMPLETE; 864 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 865 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 866 867 INIT_WORK(&cmd->work, success ? 
target_complete_ok_work : 868 target_complete_failure_work); 869 if (cmd->se_cmd_flags & SCF_USE_CPUID) 870 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); 871 else 872 queue_work(target_completion_wq, &cmd->work); 873 } 874 EXPORT_SYMBOL(target_complete_cmd); 875 876 void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) 877 { 878 if ((scsi_status == SAM_STAT_GOOD || 879 cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && 880 length < cmd->data_length) { 881 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 882 cmd->residual_count += cmd->data_length - length; 883 } else { 884 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 885 cmd->residual_count = cmd->data_length - length; 886 } 887 888 cmd->data_length = length; 889 } 890 891 target_complete_cmd(cmd, scsi_status); 892 } 893 EXPORT_SYMBOL(target_complete_cmd_with_length); 894 895 static void target_add_to_state_list(struct se_cmd *cmd) 896 { 897 struct se_device *dev = cmd->se_dev; 898 unsigned long flags; 899 900 spin_lock_irqsave(&dev->execute_task_lock, flags); 901 if (!cmd->state_active) { 902 list_add_tail(&cmd->state_list, &dev->state_list); 903 cmd->state_active = true; 904 } 905 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 906 } 907 908 /* 909 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status 910 */ 911 static void transport_write_pending_qf(struct se_cmd *cmd); 912 static void transport_complete_qf(struct se_cmd *cmd); 913 914 void target_qf_do_work(struct work_struct *work) 915 { 916 struct se_device *dev = container_of(work, struct se_device, 917 qf_work_queue); 918 LIST_HEAD(qf_cmd_list); 919 struct se_cmd *cmd, *cmd_tmp; 920 921 spin_lock_irq(&dev->qf_cmd_lock); 922 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); 923 spin_unlock_irq(&dev->qf_cmd_lock); 924 925 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 926 list_del(&cmd->se_qf_node); 927 atomic_dec_mb(&dev->dev_qf_count); 928 929 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 930 " context: %s\n", cmd->se_tfo->fabric_name, cmd, 931 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : 932 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? 
"WRITE_PENDING" 933 : "UNKNOWN"); 934 935 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 936 transport_write_pending_qf(cmd); 937 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK || 938 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) 939 transport_complete_qf(cmd); 940 } 941 } 942 943 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 944 { 945 switch (cmd->data_direction) { 946 case DMA_NONE: 947 return "NONE"; 948 case DMA_FROM_DEVICE: 949 return "READ"; 950 case DMA_TO_DEVICE: 951 return "WRITE"; 952 case DMA_BIDIRECTIONAL: 953 return "BIDI"; 954 default: 955 break; 956 } 957 958 return "UNKNOWN"; 959 } 960 961 void transport_dump_dev_state( 962 struct se_device *dev, 963 char *b, 964 int *bl) 965 { 966 *bl += sprintf(b + *bl, "Status: "); 967 if (dev->export_count) 968 *bl += sprintf(b + *bl, "ACTIVATED"); 969 else 970 *bl += sprintf(b + *bl, "DEACTIVATED"); 971 972 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); 973 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", 974 dev->dev_attrib.block_size, 975 dev->dev_attrib.hw_max_sectors); 976 *bl += sprintf(b + *bl, " "); 977 } 978 979 void transport_dump_vpd_proto_id( 980 struct t10_vpd *vpd, 981 unsigned char *p_buf, 982 int p_buf_len) 983 { 984 unsigned char buf[VPD_TMP_BUF_SIZE]; 985 int len; 986 987 memset(buf, 0, VPD_TMP_BUF_SIZE); 988 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 989 990 switch (vpd->protocol_identifier) { 991 case 0x00: 992 sprintf(buf+len, "Fibre Channel\n"); 993 break; 994 case 0x10: 995 sprintf(buf+len, "Parallel SCSI\n"); 996 break; 997 case 0x20: 998 sprintf(buf+len, "SSA\n"); 999 break; 1000 case 0x30: 1001 sprintf(buf+len, "IEEE 1394\n"); 1002 break; 1003 case 0x40: 1004 sprintf(buf+len, "SCSI Remote Direct Memory Access" 1005 " Protocol\n"); 1006 break; 1007 case 0x50: 1008 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 1009 break; 1010 case 0x60: 1011 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 1012 break; 1013 case 0x70: 1014 sprintf(buf+len, "Automation/Drive Interface Transport" 1015 " Protocol\n"); 1016 break; 1017 case 0x80: 1018 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 1019 break; 1020 default: 1021 sprintf(buf+len, "Unknown 0x%02x\n", 1022 vpd->protocol_identifier); 1023 break; 1024 } 1025 1026 if (p_buf) 1027 strncpy(p_buf, buf, p_buf_len); 1028 else 1029 pr_debug("%s", buf); 1030 } 1031 1032 void 1033 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 1034 { 1035 /* 1036 * Check if the Protocol Identifier Valid (PIV) bit is set.. 
1037 * 1038 * from spc3r23.pdf section 7.5.1 1039 */ 1040 if (page_83[1] & 0x80) { 1041 vpd->protocol_identifier = (page_83[0] & 0xf0); 1042 vpd->protocol_identifier_set = 1; 1043 transport_dump_vpd_proto_id(vpd, NULL, 0); 1044 } 1045 } 1046 EXPORT_SYMBOL(transport_set_vpd_proto_id); 1047 1048 int transport_dump_vpd_assoc( 1049 struct t10_vpd *vpd, 1050 unsigned char *p_buf, 1051 int p_buf_len) 1052 { 1053 unsigned char buf[VPD_TMP_BUF_SIZE]; 1054 int ret = 0; 1055 int len; 1056 1057 memset(buf, 0, VPD_TMP_BUF_SIZE); 1058 len = sprintf(buf, "T10 VPD Identifier Association: "); 1059 1060 switch (vpd->association) { 1061 case 0x00: 1062 sprintf(buf+len, "addressed logical unit\n"); 1063 break; 1064 case 0x10: 1065 sprintf(buf+len, "target port\n"); 1066 break; 1067 case 0x20: 1068 sprintf(buf+len, "SCSI target device\n"); 1069 break; 1070 default: 1071 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); 1072 ret = -EINVAL; 1073 break; 1074 } 1075 1076 if (p_buf) 1077 strncpy(p_buf, buf, p_buf_len); 1078 else 1079 pr_debug("%s", buf); 1080 1081 return ret; 1082 } 1083 1084 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) 1085 { 1086 /* 1087 * The VPD identification association.. 1088 * 1089 * from spc3r23.pdf Section 7.6.3.1 Table 297 1090 */ 1091 vpd->association = (page_83[1] & 0x30); 1092 return transport_dump_vpd_assoc(vpd, NULL, 0); 1093 } 1094 EXPORT_SYMBOL(transport_set_vpd_assoc); 1095 1096 int transport_dump_vpd_ident_type( 1097 struct t10_vpd *vpd, 1098 unsigned char *p_buf, 1099 int p_buf_len) 1100 { 1101 unsigned char buf[VPD_TMP_BUF_SIZE]; 1102 int ret = 0; 1103 int len; 1104 1105 memset(buf, 0, VPD_TMP_BUF_SIZE); 1106 len = sprintf(buf, "T10 VPD Identifier Type: "); 1107 1108 switch (vpd->device_identifier_type) { 1109 case 0x00: 1110 sprintf(buf+len, "Vendor specific\n"); 1111 break; 1112 case 0x01: 1113 sprintf(buf+len, "T10 Vendor ID based\n"); 1114 break; 1115 case 0x02: 1116 sprintf(buf+len, "EUI-64 based\n"); 1117 break; 1118 case 0x03: 1119 sprintf(buf+len, "NAA\n"); 1120 break; 1121 case 0x04: 1122 sprintf(buf+len, "Relative target port identifier\n"); 1123 break; 1124 case 0x08: 1125 sprintf(buf+len, "SCSI name string\n"); 1126 break; 1127 default: 1128 sprintf(buf+len, "Unsupported: 0x%02x\n", 1129 vpd->device_identifier_type); 1130 ret = -EINVAL; 1131 break; 1132 } 1133 1134 if (p_buf) { 1135 if (p_buf_len < strlen(buf)+1) 1136 return -EINVAL; 1137 strncpy(p_buf, buf, p_buf_len); 1138 } else { 1139 pr_debug("%s", buf); 1140 } 1141 1142 return ret; 1143 } 1144 1145 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) 1146 { 1147 /* 1148 * The VPD identifier type.. 
1149 * 1150 * from spc3r23.pdf Section 7.6.3.1 Table 298 1151 */ 1152 vpd->device_identifier_type = (page_83[1] & 0x0f); 1153 return transport_dump_vpd_ident_type(vpd, NULL, 0); 1154 } 1155 EXPORT_SYMBOL(transport_set_vpd_ident_type); 1156 1157 int transport_dump_vpd_ident( 1158 struct t10_vpd *vpd, 1159 unsigned char *p_buf, 1160 int p_buf_len) 1161 { 1162 unsigned char buf[VPD_TMP_BUF_SIZE]; 1163 int ret = 0; 1164 1165 memset(buf, 0, VPD_TMP_BUF_SIZE); 1166 1167 switch (vpd->device_identifier_code_set) { 1168 case 0x01: /* Binary */ 1169 snprintf(buf, sizeof(buf), 1170 "T10 VPD Binary Device Identifier: %s\n", 1171 &vpd->device_identifier[0]); 1172 break; 1173 case 0x02: /* ASCII */ 1174 snprintf(buf, sizeof(buf), 1175 "T10 VPD ASCII Device Identifier: %s\n", 1176 &vpd->device_identifier[0]); 1177 break; 1178 case 0x03: /* UTF-8 */ 1179 snprintf(buf, sizeof(buf), 1180 "T10 VPD UTF-8 Device Identifier: %s\n", 1181 &vpd->device_identifier[0]); 1182 break; 1183 default: 1184 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" 1185 " 0x%02x", vpd->device_identifier_code_set); 1186 ret = -EINVAL; 1187 break; 1188 } 1189 1190 if (p_buf) 1191 strncpy(p_buf, buf, p_buf_len); 1192 else 1193 pr_debug("%s", buf); 1194 1195 return ret; 1196 } 1197 1198 int 1199 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) 1200 { 1201 static const char hex_str[] = "0123456789abcdef"; 1202 int j = 0, i = 4; /* offset to start of the identifier */ 1203 1204 /* 1205 * The VPD Code Set (encoding) 1206 * 1207 * from spc3r23.pdf Section 7.6.3.1 Table 296 1208 */ 1209 vpd->device_identifier_code_set = (page_83[0] & 0x0f); 1210 switch (vpd->device_identifier_code_set) { 1211 case 0x01: /* Binary */ 1212 vpd->device_identifier[j++] = 1213 hex_str[vpd->device_identifier_type]; 1214 while (i < (4 + page_83[3])) { 1215 vpd->device_identifier[j++] = 1216 hex_str[(page_83[i] & 0xf0) >> 4]; 1217 vpd->device_identifier[j++] = 1218 hex_str[page_83[i] & 0x0f]; 1219 i++; 1220 } 1221 break; 1222 case 0x02: /* ASCII */ 1223 case 0x03: /* UTF-8 */ 1224 while (i < (4 + page_83[3])) 1225 vpd->device_identifier[j++] = page_83[i++]; 1226 break; 1227 default: 1228 break; 1229 } 1230 1231 return transport_dump_vpd_ident(vpd, NULL, 0); 1232 } 1233 EXPORT_SYMBOL(transport_set_vpd_ident); 1234 1235 static sense_reason_t 1236 target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev, 1237 unsigned int size) 1238 { 1239 u32 mtl; 1240 1241 if (!cmd->se_tfo->max_data_sg_nents) 1242 return TCM_NO_SENSE; 1243 /* 1244 * Check if fabric enforced maximum SGL entries per I/O descriptor 1245 * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT + 1246 * residual_count and reduce original cmd->data_length to maximum 1247 * length based on single PAGE_SIZE entry scatter-lists. 1248 */ 1249 mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE); 1250 if (cmd->data_length > mtl) { 1251 /* 1252 * If an existing CDB overflow is present, calculate new residual 1253 * based on CDB size minus fabric maximum transfer length. 1254 * 1255 * If an existing CDB underflow is present, calculate new residual 1256 * based on original cmd->data_length minus fabric maximum transfer 1257 * length. 1258 * 1259 * Otherwise, set the underflow residual based on cmd->data_length 1260 * minus fabric maximum transfer length. 
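		 *
		 * Worked example (assuming 4 KiB pages): a fabric reporting
		 * max_data_sg_nents = 32 gives mtl = 32 * 4096 = 131072 bytes,
		 * so a 262144 byte request is shrunk to data_length = 131072
		 * with residual_count = 131072.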
1261 */ 1262 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1263 cmd->residual_count = (size - mtl); 1264 } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 1265 u32 orig_dl = size + cmd->residual_count; 1266 cmd->residual_count = (orig_dl - mtl); 1267 } else { 1268 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1269 cmd->residual_count = (cmd->data_length - mtl); 1270 } 1271 cmd->data_length = mtl; 1272 /* 1273 * Reset sbc_check_prot() calculated protection payload 1274 * length based upon the new smaller MTL. 1275 */ 1276 if (cmd->prot_length) { 1277 u32 sectors = (mtl / dev->dev_attrib.block_size); 1278 cmd->prot_length = dev->prot_length * sectors; 1279 } 1280 } 1281 return TCM_NO_SENSE; 1282 } 1283 1284 /** 1285 * target_cmd_size_check - Check whether there will be a residual. 1286 * @cmd: SCSI command. 1287 * @size: Data buffer size derived from CDB. The data buffer size provided by 1288 * the SCSI transport driver is available in @cmd->data_length. 1289 * 1290 * Compare the data buffer size from the CDB with the data buffer limit from the transport 1291 * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary. 1292 * 1293 * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd(). 1294 * 1295 * Return: TCM_NO_SENSE 1296 */ 1297 sense_reason_t 1298 target_cmd_size_check(struct se_cmd *cmd, unsigned int size) 1299 { 1300 struct se_device *dev = cmd->se_dev; 1301 1302 if (cmd->unknown_data_length) { 1303 cmd->data_length = size; 1304 } else if (size != cmd->data_length) { 1305 pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:" 1306 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 1307 " 0x%02x\n", cmd->se_tfo->fabric_name, 1308 cmd->data_length, size, cmd->t_task_cdb[0]); 1309 1310 if (cmd->data_direction == DMA_TO_DEVICE) { 1311 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1312 pr_err_ratelimited("Rejecting underflow/overflow" 1313 " for WRITE data CDB\n"); 1314 return TCM_INVALID_CDB_FIELD; 1315 } 1316 /* 1317 * Some fabric drivers like iscsi-target still expect to 1318 * always reject overflow writes. Reject this case until 1319 * full fabric driver level support for overflow writes 1320 * is introduced tree-wide. 1321 */ 1322 if (size > cmd->data_length) { 1323 pr_err_ratelimited("Rejecting overflow for" 1324 " WRITE control CDB\n"); 1325 return TCM_INVALID_CDB_FIELD; 1326 } 1327 } 1328 /* 1329 * Reject READ_* or WRITE_* with overflow/underflow for 1330 * type SCF_SCSI_DATA_CDB. 1331 */ 1332 if (dev->dev_attrib.block_size != 512) { 1333 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 1334 " CDB on non 512-byte sector setup subsystem" 1335 " plugin: %s\n", dev->transport->name); 1336 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ 1337 return TCM_INVALID_CDB_FIELD; 1338 } 1339 /* 1340 * For the overflow case keep the existing fabric provided 1341 * ->data_length. Otherwise for the underflow case, reset 1342 * ->data_length to the smaller SCSI expected data transfer 1343 * length. 1344 */ 1345 if (size > cmd->data_length) { 1346 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; 1347 cmd->residual_count = (size - cmd->data_length); 1348 } else { 1349 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1350 cmd->residual_count = (cmd->data_length - size); 1351 cmd->data_length = size; 1352 } 1353 } 1354 1355 return target_check_max_data_sg_nents(cmd, dev, size); 1356 1357 } 1358 1359 /* 1360 * Used by fabric modules containing a local struct se_cmd within their 1361 * fabric dependent per I/O descriptor. 
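 *
 * e.g. a fabric typically embeds the se_cmd in its own per-command
 * structure (illustrative only, not a real driver):
 *
 *	struct my_fabric_cmd {
 *		struct se_cmd	se_cmd;
 *		...			(fabric private state)
 *	};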
1362 * 1363 * Preserves the value of @cmd->tag. 1364 */ 1365 void transport_init_se_cmd( 1366 struct se_cmd *cmd, 1367 const struct target_core_fabric_ops *tfo, 1368 struct se_session *se_sess, 1369 u32 data_length, 1370 int data_direction, 1371 int task_attr, 1372 unsigned char *sense_buffer, u64 unpacked_lun) 1373 { 1374 INIT_LIST_HEAD(&cmd->se_delayed_node); 1375 INIT_LIST_HEAD(&cmd->se_qf_node); 1376 INIT_LIST_HEAD(&cmd->se_cmd_list); 1377 INIT_LIST_HEAD(&cmd->state_list); 1378 init_completion(&cmd->t_transport_stop_comp); 1379 cmd->free_compl = NULL; 1380 cmd->abrt_compl = NULL; 1381 spin_lock_init(&cmd->t_state_lock); 1382 INIT_WORK(&cmd->work, NULL); 1383 kref_init(&cmd->cmd_kref); 1384 1385 cmd->se_tfo = tfo; 1386 cmd->se_sess = se_sess; 1387 cmd->data_length = data_length; 1388 cmd->data_direction = data_direction; 1389 cmd->sam_task_attr = task_attr; 1390 cmd->sense_buffer = sense_buffer; 1391 cmd->orig_fe_lun = unpacked_lun; 1392 1393 cmd->state_active = false; 1394 } 1395 EXPORT_SYMBOL(transport_init_se_cmd); 1396 1397 static sense_reason_t 1398 transport_check_alloc_task_attr(struct se_cmd *cmd) 1399 { 1400 struct se_device *dev = cmd->se_dev; 1401 1402 /* 1403 * Check if SAM Task Attribute emulation is enabled for this 1404 * struct se_device storage object 1405 */ 1406 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1407 return 0; 1408 1409 if (cmd->sam_task_attr == TCM_ACA_TAG) { 1410 pr_debug("SAM Task Attribute ACA" 1411 " emulation is not supported\n"); 1412 return TCM_INVALID_CDB_FIELD; 1413 } 1414 1415 return 0; 1416 } 1417 1418 sense_reason_t 1419 target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb) 1420 { 1421 sense_reason_t ret; 1422 1423 cmd->t_task_cdb = &cmd->__t_task_cdb[0]; 1424 /* 1425 * Ensure that the received CDB is less than the max (252 + 8) bytes 1426 * for VARIABLE_LENGTH_CMD 1427 */ 1428 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { 1429 pr_err("Received SCSI CDB with command_size: %d that" 1430 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1431 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1432 ret = TCM_INVALID_CDB_FIELD; 1433 goto err; 1434 } 1435 /* 1436 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, 1437 * allocate the additional extended CDB buffer now.. Otherwise 1438 * setup the pointer from __t_task_cdb to t_task_cdb. 1439 */ 1440 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { 1441 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), 1442 GFP_KERNEL); 1443 if (!cmd->t_task_cdb) { 1444 pr_err("Unable to allocate cmd->t_task_cdb" 1445 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1446 scsi_command_size(cdb), 1447 (unsigned long)sizeof(cmd->__t_task_cdb)); 1448 ret = TCM_OUT_OF_RESOURCES; 1449 goto err; 1450 } 1451 } 1452 /* 1453 * Copy the original CDB into cmd-> 1454 */ 1455 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); 1456 1457 trace_target_sequencer_start(cmd); 1458 return 0; 1459 1460 err: 1461 /* 1462 * Copy the CDB here to allow trace_target_cmd_complete() to 1463 * print the cdb to the trace buffers. 
1464 */ 1465 memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb), 1466 (unsigned int)TCM_MAX_COMMAND_SIZE)); 1467 return ret; 1468 } 1469 EXPORT_SYMBOL(target_cmd_init_cdb); 1470 1471 sense_reason_t 1472 target_cmd_parse_cdb(struct se_cmd *cmd) 1473 { 1474 struct se_device *dev = cmd->se_dev; 1475 sense_reason_t ret; 1476 1477 ret = dev->transport->parse_cdb(cmd); 1478 if (ret == TCM_UNSUPPORTED_SCSI_OPCODE) 1479 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", 1480 cmd->se_tfo->fabric_name, 1481 cmd->se_sess->se_node_acl->initiatorname, 1482 cmd->t_task_cdb[0]); 1483 if (ret) 1484 return ret; 1485 1486 ret = transport_check_alloc_task_attr(cmd); 1487 if (ret) 1488 return ret; 1489 1490 cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 1491 atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus); 1492 return 0; 1493 } 1494 EXPORT_SYMBOL(target_cmd_parse_cdb); 1495 1496 /* 1497 * Used by fabric module frontends to queue tasks directly. 1498 * May only be used from process context. 1499 */ 1500 int transport_handle_cdb_direct( 1501 struct se_cmd *cmd) 1502 { 1503 sense_reason_t ret; 1504 1505 if (!cmd->se_lun) { 1506 dump_stack(); 1507 pr_err("cmd->se_lun is NULL\n"); 1508 return -EINVAL; 1509 } 1510 if (in_interrupt()) { 1511 dump_stack(); 1512 pr_err("transport_generic_handle_cdb cannot be called" 1513 " from interrupt context\n"); 1514 return -EINVAL; 1515 } 1516 /* 1517 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that 1518 * outstanding descriptors are handled correctly during shutdown via 1519 * transport_wait_for_tasks() 1520 * 1521 * Also, we don't take cmd->t_state_lock here as we only expect 1522 * this to be called for initial descriptor submission. 1523 */ 1524 cmd->t_state = TRANSPORT_NEW_CMD; 1525 cmd->transport_state |= CMD_T_ACTIVE; 1526 1527 /* 1528 * transport_generic_new_cmd() is already handling QUEUE_FULL, 1529 * so follow TRANSPORT_NEW_CMD processing thread context usage 1530 * and call transport_generic_request_failure() if necessary.. 1531 */ 1532 ret = transport_generic_new_cmd(cmd); 1533 if (ret) 1534 transport_generic_request_failure(cmd, ret); 1535 return 0; 1536 } 1537 EXPORT_SYMBOL(transport_handle_cdb_direct); 1538 1539 sense_reason_t 1540 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 1541 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1542 { 1543 if (!sgl || !sgl_count) 1544 return 0; 1545 1546 /* 1547 * Reject SCSI data overflow with map_mem_to_cmd() as incoming 1548 * scatterlists already have been set to follow what the fabric 1549 * passes for the original expected data transfer length. 1550 */ 1551 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1552 pr_warn("Rejecting SCSI DATA overflow for fabric using" 1553 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); 1554 return TCM_INVALID_CDB_FIELD; 1555 } 1556 1557 cmd->t_data_sg = sgl; 1558 cmd->t_data_nents = sgl_count; 1559 cmd->t_bidi_data_sg = sgl_bidi; 1560 cmd->t_bidi_data_nents = sgl_bidi_count; 1561 1562 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1563 return 0; 1564 } 1565 1566 /** 1567 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized 1568 * se_cmd + use pre-allocated SGL memory. 
1569 * 1570 * @se_cmd: command descriptor to submit 1571 * @se_sess: associated se_sess for endpoint 1572 * @cdb: pointer to SCSI CDB 1573 * @sense: pointer to SCSI sense buffer 1574 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1575 * @data_length: fabric expected data transfer length 1576 * @task_attr: SAM task attribute 1577 * @data_dir: DMA data direction 1578 * @flags: flags for command submission from target_sc_flags_tables 1579 * @sgl: struct scatterlist memory for unidirectional mapping 1580 * @sgl_count: scatterlist count for unidirectional mapping 1581 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping 1582 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping 1583 * @sgl_prot: struct scatterlist memory protection information 1584 * @sgl_prot_count: scatterlist count for protection information 1585 * 1586 * Task tags are supported if the caller has set @se_cmd->tag. 1587 * 1588 * Returns non zero to signal active I/O shutdown failure. All other 1589 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1590 * but still return zero here. 1591 * 1592 * This may only be called from process context, and also currently 1593 * assumes internal allocation of fabric payload buffer by target-core. 1594 */ 1595 int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess, 1596 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, 1597 u32 data_length, int task_attr, int data_dir, int flags, 1598 struct scatterlist *sgl, u32 sgl_count, 1599 struct scatterlist *sgl_bidi, u32 sgl_bidi_count, 1600 struct scatterlist *sgl_prot, u32 sgl_prot_count) 1601 { 1602 struct se_portal_group *se_tpg; 1603 sense_reason_t rc; 1604 int ret; 1605 1606 se_tpg = se_sess->se_tpg; 1607 BUG_ON(!se_tpg); 1608 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); 1609 BUG_ON(in_interrupt()); 1610 /* 1611 * Initialize se_cmd for target operation. From this point 1612 * exceptions are handled by sending exception status via 1613 * target_core_fabric_ops->queue_status() callback 1614 */ 1615 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1616 data_length, data_dir, task_attr, sense, 1617 unpacked_lun); 1618 1619 if (flags & TARGET_SCF_USE_CPUID) 1620 se_cmd->se_cmd_flags |= SCF_USE_CPUID; 1621 else 1622 se_cmd->cpuid = WORK_CPU_UNBOUND; 1623 1624 if (flags & TARGET_SCF_UNKNOWN_SIZE) 1625 se_cmd->unknown_data_length = 1; 1626 /* 1627 * Obtain struct se_cmd->cmd_kref reference and add new cmd to 1628 * se_sess->sess_cmd_list. A second kref_get here is necessary 1629 * for fabrics using TARGET_SCF_ACK_KREF that expect a second 1630 * kref_put() to happen during fabric packet acknowledgement. 
1631 */ 1632 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); 1633 if (ret) 1634 return ret; 1635 /* 1636 * Signal bidirectional data payloads to target-core 1637 */ 1638 if (flags & TARGET_SCF_BIDI_OP) 1639 se_cmd->se_cmd_flags |= SCF_BIDI; 1640 1641 rc = target_cmd_init_cdb(se_cmd, cdb); 1642 if (rc) { 1643 transport_send_check_condition_and_sense(se_cmd, rc, 0); 1644 target_put_sess_cmd(se_cmd); 1645 return 0; 1646 } 1647 1648 /* 1649 * Locate se_lun pointer and attach it to struct se_cmd 1650 */ 1651 rc = transport_lookup_cmd_lun(se_cmd); 1652 if (rc) { 1653 transport_send_check_condition_and_sense(se_cmd, rc, 0); 1654 target_put_sess_cmd(se_cmd); 1655 return 0; 1656 } 1657 1658 rc = target_cmd_parse_cdb(se_cmd); 1659 if (rc != 0) { 1660 transport_generic_request_failure(se_cmd, rc); 1661 return 0; 1662 } 1663 1664 /* 1665 * Save pointers for SGLs containing protection information, 1666 * if present. 1667 */ 1668 if (sgl_prot_count) { 1669 se_cmd->t_prot_sg = sgl_prot; 1670 se_cmd->t_prot_nents = sgl_prot_count; 1671 se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC; 1672 } 1673 1674 /* 1675 * When a non zero sgl_count has been passed perform SGL passthrough 1676 * mapping for pre-allocated fabric memory instead of having target 1677 * core perform an internal SGL allocation.. 1678 */ 1679 if (sgl_count != 0) { 1680 BUG_ON(!sgl); 1681 1682 /* 1683 * A work-around for tcm_loop as some userspace code via 1684 * scsi-generic do not memset their associated read buffers, 1685 * so go ahead and do that here for type non-data CDBs. Also 1686 * note that this is currently guaranteed to be a single SGL 1687 * for this case by target core in target_setup_cmd_from_cdb() 1688 * -> transport_generic_cmd_sequencer(). 1689 */ 1690 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && 1691 se_cmd->data_direction == DMA_FROM_DEVICE) { 1692 unsigned char *buf = NULL; 1693 1694 if (sgl) 1695 buf = kmap(sg_page(sgl)) + sgl->offset; 1696 1697 if (buf) { 1698 memset(buf, 0, sgl->length); 1699 kunmap(sg_page(sgl)); 1700 } 1701 } 1702 1703 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count, 1704 sgl_bidi, sgl_bidi_count); 1705 if (rc != 0) { 1706 transport_generic_request_failure(se_cmd, rc); 1707 return 0; 1708 } 1709 } 1710 1711 /* 1712 * Check if we need to delay processing because of ALUA 1713 * Active/NonOptimized primary access state.. 1714 */ 1715 core_alua_check_nonop_delay(se_cmd); 1716 1717 transport_handle_cdb_direct(se_cmd); 1718 return 0; 1719 } 1720 EXPORT_SYMBOL(target_submit_cmd_map_sgls); 1721 1722 /** 1723 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd 1724 * 1725 * @se_cmd: command descriptor to submit 1726 * @se_sess: associated se_sess for endpoint 1727 * @cdb: pointer to SCSI CDB 1728 * @sense: pointer to SCSI sense buffer 1729 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1730 * @data_length: fabric expected data transfer length 1731 * @task_attr: SAM task attribute 1732 * @data_dir: DMA data direction 1733 * @flags: flags for command submission from target_sc_flags_tables 1734 * 1735 * Task tags are supported if the caller has set @se_cmd->tag. 1736 * 1737 * Returns non zero to signal active I/O shutdown failure. All other 1738 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1739 * but still return zero here. 1740 * 1741 * This may only be called from process context, and also currently 1742 * assumes internal allocation of fabric payload buffer by target-core. 
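 *
 * A minimal call sketch (the tag value and buffers are fabric specific and
 * purely illustrative):
 *
 *	se_cmd->tag = my_tag;
 *	rc = target_submit_cmd(se_cmd, se_sess, cdb, sense_buf, unpacked_lun,
 *			       data_length, TCM_SIMPLE_TAG, DMA_FROM_DEVICE,
 *			       TARGET_SCF_ACK_KREF);
 *	if (rc)
 *		return rc;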
 *
 * It also assumes internal target core SGL memory allocation.
 */
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
			unpacked_lun, data_length, task_attr, data_dir,
			flags, NULL, 0, NULL, 0, NULL, 0);
}
EXPORT_SYMBOL(target_submit_cmd);

static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);

	transport_lun_remove_cmd(se_cmd);
	transport_cmd_check_stop_to_fabric(se_cmd);
}

static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
				       u64 *unpacked_lun)
{
	struct se_cmd *se_cmd;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
			continue;

		if (se_cmd->tag == tag) {
			*unpacked_lun = se_cmd->orig_fe_lun;
			ret = true;
			break;
		}
	}
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	return ret;
}

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *		       for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 */
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, u64 tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

	/* See target_submit_cmd for commentary */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret) {
		core_tmr_release_req(se_cmd->se_tmr_req);
		return ret;
	}
	/*
	 * If this is ABORT_TASK with no explicit fabric provided LUN,
	 * go ahead and search active session tags for a match to figure
	 * out unpacked_lun for the original se_cmd.
1841 */ 1842 if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) { 1843 if (!target_lookup_lun_from_tag(se_sess, tag, 1844 &se_cmd->orig_fe_lun)) 1845 goto failure; 1846 } 1847 1848 ret = transport_lookup_tmr_lun(se_cmd); 1849 if (ret) 1850 goto failure; 1851 1852 transport_generic_handle_tmr(se_cmd); 1853 return 0; 1854 1855 /* 1856 * For callback during failure handling, push this work off 1857 * to process context with TMR_LUN_DOES_NOT_EXIST status. 1858 */ 1859 failure: 1860 INIT_WORK(&se_cmd->work, target_complete_tmr_failure); 1861 schedule_work(&se_cmd->work); 1862 return 0; 1863 } 1864 EXPORT_SYMBOL(target_submit_tmr); 1865 1866 /* 1867 * Handle SAM-esque emulation for generic transport request failures. 1868 */ 1869 void transport_generic_request_failure(struct se_cmd *cmd, 1870 sense_reason_t sense_reason) 1871 { 1872 int ret = 0, post_ret; 1873 1874 pr_debug("-----[ Storage Engine Exception; sense_reason %d\n", 1875 sense_reason); 1876 target_show_cmd("-----[ ", cmd); 1877 1878 /* 1879 * For SAM Task Attribute emulation for failed struct se_cmd 1880 */ 1881 transport_complete_task_attr(cmd); 1882 1883 if (cmd->transport_complete_callback) 1884 cmd->transport_complete_callback(cmd, false, &post_ret); 1885 1886 if (cmd->transport_state & CMD_T_ABORTED) { 1887 INIT_WORK(&cmd->work, target_abort_work); 1888 queue_work(target_completion_wq, &cmd->work); 1889 return; 1890 } 1891 1892 switch (sense_reason) { 1893 case TCM_NON_EXISTENT_LUN: 1894 case TCM_UNSUPPORTED_SCSI_OPCODE: 1895 case TCM_INVALID_CDB_FIELD: 1896 case TCM_INVALID_PARAMETER_LIST: 1897 case TCM_PARAMETER_LIST_LENGTH_ERROR: 1898 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 1899 case TCM_UNKNOWN_MODE_PAGE: 1900 case TCM_WRITE_PROTECTED: 1901 case TCM_ADDRESS_OUT_OF_RANGE: 1902 case TCM_CHECK_CONDITION_ABORT_CMD: 1903 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1904 case TCM_CHECK_CONDITION_NOT_READY: 1905 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: 1906 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: 1907 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: 1908 case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: 1909 case TCM_TOO_MANY_TARGET_DESCS: 1910 case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE: 1911 case TCM_TOO_MANY_SEGMENT_DESCS: 1912 case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE: 1913 break; 1914 case TCM_OUT_OF_RESOURCES: 1915 cmd->scsi_status = SAM_STAT_TASK_SET_FULL; 1916 goto queue_status; 1917 case TCM_LUN_BUSY: 1918 cmd->scsi_status = SAM_STAT_BUSY; 1919 goto queue_status; 1920 case TCM_RESERVATION_CONFLICT: 1921 /* 1922 * No SENSE Data payload for this case, set SCSI Status 1923 * and queue the response to $FABRIC_MOD. 1924 * 1925 * Uses linux/include/scsi/scsi.h SAM status codes defs 1926 */ 1927 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1928 /* 1929 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 1930 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 1931 * CONFLICT STATUS. 
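 *
 * In practice the initiator therefore sees SAM_STAT_RESERVATION_CONFLICT
 * with no sense payload for this command, and when the interlock mode
 * below is configured, the next command on the same I_T nexus is answered
 * with a UNIT ATTENTION carrying ASC 0x2C /
 * ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS queued via
 * target_ua_allocate_lun().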
1932 * 1933 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1934 */ 1935 if (cmd->se_sess && 1936 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl 1937 == TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) { 1938 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 1939 cmd->orig_fe_lun, 0x2C, 1940 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1941 } 1942 1943 goto queue_status; 1944 default: 1945 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 1946 cmd->t_task_cdb[0], sense_reason); 1947 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1948 break; 1949 } 1950 1951 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); 1952 if (ret) 1953 goto queue_full; 1954 1955 check_stop: 1956 transport_lun_remove_cmd(cmd); 1957 transport_cmd_check_stop_to_fabric(cmd); 1958 return; 1959 1960 queue_status: 1961 trace_target_cmd_complete(cmd); 1962 ret = cmd->se_tfo->queue_status(cmd); 1963 if (!ret) 1964 goto check_stop; 1965 queue_full: 1966 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 1967 } 1968 EXPORT_SYMBOL(transport_generic_request_failure); 1969 1970 void __target_execute_cmd(struct se_cmd *cmd, bool do_checks) 1971 { 1972 sense_reason_t ret; 1973 1974 if (!cmd->execute_cmd) { 1975 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1976 goto err; 1977 } 1978 if (do_checks) { 1979 /* 1980 * Check for an existing UNIT ATTENTION condition after 1981 * target_handle_task_attr() has done SAM task attr 1982 * checking, and possibly have already defered execution 1983 * out to target_restart_delayed_cmds() context. 1984 */ 1985 ret = target_scsi3_ua_check(cmd); 1986 if (ret) 1987 goto err; 1988 1989 ret = target_alua_state_check(cmd); 1990 if (ret) 1991 goto err; 1992 1993 ret = target_check_reservation(cmd); 1994 if (ret) { 1995 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1996 goto err; 1997 } 1998 } 1999 2000 ret = cmd->execute_cmd(cmd); 2001 if (!ret) 2002 return; 2003 err: 2004 spin_lock_irq(&cmd->t_state_lock); 2005 cmd->transport_state &= ~CMD_T_SENT; 2006 spin_unlock_irq(&cmd->t_state_lock); 2007 2008 transport_generic_request_failure(cmd, ret); 2009 } 2010 2011 static int target_write_prot_action(struct se_cmd *cmd) 2012 { 2013 u32 sectors; 2014 /* 2015 * Perform WRITE_INSERT of PI using software emulation when backend 2016 * device has PI enabled, if the transport has not already generated 2017 * PI using hardware WRITE_INSERT offload. 
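 *
 * As an assumed, concrete example: with a 512-byte block_size and a 64KB
 * WRITE, the DOUT_STRIP branch below verifies
 *
 *	sectors = cmd->data_length >> ilog2(block_size) = 65536 >> 9 = 128
 *
 * protection intervals via sbc_dif_verify(), while DOUT_INSERT only falls
 * back to sbc_dif_generate() when the fabric did not offload WRITE_INSERT
 * in hardware.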
2018 */ 2019 switch (cmd->prot_op) { 2020 case TARGET_PROT_DOUT_INSERT: 2021 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) 2022 sbc_dif_generate(cmd); 2023 break; 2024 case TARGET_PROT_DOUT_STRIP: 2025 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP) 2026 break; 2027 2028 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); 2029 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 2030 sectors, 0, cmd->t_prot_sg, 0); 2031 if (unlikely(cmd->pi_err)) { 2032 spin_lock_irq(&cmd->t_state_lock); 2033 cmd->transport_state &= ~CMD_T_SENT; 2034 spin_unlock_irq(&cmd->t_state_lock); 2035 transport_generic_request_failure(cmd, cmd->pi_err); 2036 return -1; 2037 } 2038 break; 2039 default: 2040 break; 2041 } 2042 2043 return 0; 2044 } 2045 2046 static bool target_handle_task_attr(struct se_cmd *cmd) 2047 { 2048 struct se_device *dev = cmd->se_dev; 2049 2050 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 2051 return false; 2052 2053 cmd->se_cmd_flags |= SCF_TASK_ATTR_SET; 2054 2055 /* 2056 * Check for a HEAD_OF_QUEUE task attribute, and if set return false 2057 * so the passed struct se_cmd is executed immediately, ahead of any queued commands. 2058 */ 2059 switch (cmd->sam_task_attr) { 2060 case TCM_HEAD_TAG: 2061 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n", 2062 cmd->t_task_cdb[0]); 2063 return false; 2064 case TCM_ORDERED_TAG: 2065 atomic_inc_mb(&dev->dev_ordered_sync); 2066 2067 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n", 2068 cmd->t_task_cdb[0]); 2069 2070 /* 2071 * Execute an ORDERED command if no other older commands 2072 * exist that need to be completed first. 2073 */ 2074 if (!atomic_read(&dev->simple_cmds)) 2075 return false; 2076 break; 2077 default: 2078 /* 2079 * For SIMPLE and UNTAGGED Task Attribute commands 2080 */ 2081 atomic_inc_mb(&dev->simple_cmds); 2082 break; 2083 } 2084 2085 if (atomic_read(&dev->dev_ordered_sync) == 0) 2086 return false; 2087 2088 spin_lock(&dev->delayed_cmd_lock); 2089 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); 2090 spin_unlock(&dev->delayed_cmd_lock); 2091 2092 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n", 2093 cmd->t_task_cdb[0], cmd->sam_task_attr); 2094 return true; 2095 } 2096 2097 void target_execute_cmd(struct se_cmd *cmd) 2098 { 2099 /* 2100 * Determine if frontend context caller is requesting the stopping of 2101 * this command for frontend exceptions. 2102 * 2103 * If the received CDB has already been aborted, stop processing it here.
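 *
 * The task attribute gating done via target_handle_task_attr() below can
 * be summarised with an assumed example: while a TCM_SIMPLE_TAG command
 * is still outstanding (dev->simple_cmds != 0), an incoming
 * TCM_ORDERED_TAG command is parked on dev->delayed_cmd_list and this
 * function returns without executing it; once the SIMPLE command
 * completes, transport_complete_task_attr() calls
 * target_restart_delayed_cmds() and the ORDERED command is finally passed
 * to __target_execute_cmd().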
2104 */ 2105 if (target_cmd_interrupted(cmd)) 2106 return; 2107 2108 spin_lock_irq(&cmd->t_state_lock); 2109 cmd->t_state = TRANSPORT_PROCESSING; 2110 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; 2111 spin_unlock_irq(&cmd->t_state_lock); 2112 2113 if (target_write_prot_action(cmd)) 2114 return; 2115 2116 if (target_handle_task_attr(cmd)) { 2117 spin_lock_irq(&cmd->t_state_lock); 2118 cmd->transport_state &= ~CMD_T_SENT; 2119 spin_unlock_irq(&cmd->t_state_lock); 2120 return; 2121 } 2122 2123 __target_execute_cmd(cmd, true); 2124 } 2125 EXPORT_SYMBOL(target_execute_cmd); 2126 2127 /* 2128 * Process all commands up to the last received ORDERED task attribute which 2129 * requires another blocking boundary 2130 */ 2131 static void target_restart_delayed_cmds(struct se_device *dev) 2132 { 2133 for (;;) { 2134 struct se_cmd *cmd; 2135 2136 spin_lock(&dev->delayed_cmd_lock); 2137 if (list_empty(&dev->delayed_cmd_list)) { 2138 spin_unlock(&dev->delayed_cmd_lock); 2139 break; 2140 } 2141 2142 cmd = list_entry(dev->delayed_cmd_list.next, 2143 struct se_cmd, se_delayed_node); 2144 list_del(&cmd->se_delayed_node); 2145 spin_unlock(&dev->delayed_cmd_lock); 2146 2147 cmd->transport_state |= CMD_T_SENT; 2148 2149 __target_execute_cmd(cmd, true); 2150 2151 if (cmd->sam_task_attr == TCM_ORDERED_TAG) 2152 break; 2153 } 2154 } 2155 2156 /* 2157 * Called from I/O completion to determine which dormant/delayed 2158 * and ordered cmds need to have their tasks added to the execution queue. 2159 */ 2160 static void transport_complete_task_attr(struct se_cmd *cmd) 2161 { 2162 struct se_device *dev = cmd->se_dev; 2163 2164 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 2165 return; 2166 2167 if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET)) 2168 goto restart; 2169 2170 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { 2171 atomic_dec_mb(&dev->simple_cmds); 2172 dev->dev_cur_ordered_id++; 2173 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { 2174 dev->dev_cur_ordered_id++; 2175 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", 2176 dev->dev_cur_ordered_id); 2177 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { 2178 atomic_dec_mb(&dev->dev_ordered_sync); 2179 2180 dev->dev_cur_ordered_id++; 2181 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", 2182 dev->dev_cur_ordered_id); 2183 } 2184 cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; 2185 2186 restart: 2187 target_restart_delayed_cmds(dev); 2188 } 2189 2190 static void transport_complete_qf(struct se_cmd *cmd) 2191 { 2192 int ret = 0; 2193 2194 transport_complete_task_attr(cmd); 2195 /* 2196 * If a fabric driver ->write_pending() or ->queue_data_in() callback 2197 * has returned neither -ENOMEM or -EAGAIN, assume it's fatal and 2198 * the same callbacks should not be retried. Return CHECK_CONDITION 2199 * if a scsi_status is not already set. 2200 * 2201 * If a fabric driver ->queue_status() has returned non zero, always 2202 * keep retrying no matter what.. 2203 */ 2204 if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) { 2205 if (cmd->scsi_status) 2206 goto queue_status; 2207 2208 translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 2209 goto queue_status; 2210 } 2211 2212 /* 2213 * Check if we need to send a sense buffer from 2214 * the struct se_cmd in question. We do NOT want 2215 * to take this path of the IO has been marked as 2216 * needing to be treated like a "normal read". This 2217 * is the case if it's a tape read, and either the 2218 * FM, EOM, or ILI bits are set, but there is no 2219 * sense data. 
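 *
 * For instance, a tape READ that stops at a filemark may complete with
 * the FM bit set and sense key NO SENSE; such a command is flagged
 * SCF_TREAT_READ_AS_NORMAL so the data that was read is still returned
 * through ->queue_data_in() below instead of being short-circuited to
 * ->queue_status().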
2220 */ 2221 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && 2222 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 2223 goto queue_status; 2224 2225 switch (cmd->data_direction) { 2226 case DMA_FROM_DEVICE: 2227 /* queue status if not treating this as a normal read */ 2228 if (cmd->scsi_status && 2229 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) 2230 goto queue_status; 2231 2232 trace_target_cmd_complete(cmd); 2233 ret = cmd->se_tfo->queue_data_in(cmd); 2234 break; 2235 case DMA_TO_DEVICE: 2236 if (cmd->se_cmd_flags & SCF_BIDI) { 2237 ret = cmd->se_tfo->queue_data_in(cmd); 2238 break; 2239 } 2240 fallthrough; 2241 case DMA_NONE: 2242 queue_status: 2243 trace_target_cmd_complete(cmd); 2244 ret = cmd->se_tfo->queue_status(cmd); 2245 break; 2246 default: 2247 break; 2248 } 2249 2250 if (ret < 0) { 2251 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 2252 return; 2253 } 2254 transport_lun_remove_cmd(cmd); 2255 transport_cmd_check_stop_to_fabric(cmd); 2256 } 2257 2258 static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev, 2259 int err, bool write_pending) 2260 { 2261 /* 2262 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or 2263 * ->queue_data_in() callbacks from new process context. 2264 * 2265 * Otherwise for other errors, transport_complete_qf() will send 2266 * CHECK_CONDITION via ->queue_status() instead of attempting to 2267 * retry associated fabric driver data-transfer callbacks. 2268 */ 2269 if (err == -EAGAIN || err == -ENOMEM) { 2270 cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP : 2271 TRANSPORT_COMPLETE_QF_OK; 2272 } else { 2273 pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err); 2274 cmd->t_state = TRANSPORT_COMPLETE_QF_ERR; 2275 } 2276 2277 spin_lock_irq(&dev->qf_cmd_lock); 2278 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 2279 atomic_inc_mb(&dev->dev_qf_count); 2280 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 2281 2282 schedule_work(&cmd->se_dev->qf_work_queue); 2283 } 2284 2285 static bool target_read_prot_action(struct se_cmd *cmd) 2286 { 2287 switch (cmd->prot_op) { 2288 case TARGET_PROT_DIN_STRIP: 2289 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { 2290 u32 sectors = cmd->data_length >> 2291 ilog2(cmd->se_dev->dev_attrib.block_size); 2292 2293 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 2294 sectors, 0, cmd->t_prot_sg, 2295 0); 2296 if (cmd->pi_err) 2297 return true; 2298 } 2299 break; 2300 case TARGET_PROT_DIN_INSERT: 2301 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) 2302 break; 2303 2304 sbc_dif_generate(cmd); 2305 break; 2306 default: 2307 break; 2308 } 2309 2310 return false; 2311 } 2312 2313 static void target_complete_ok_work(struct work_struct *work) 2314 { 2315 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2316 int ret; 2317 2318 /* 2319 * Check if we need to move delayed/dormant tasks from cmds on the 2320 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 2321 * Attribute. 2322 */ 2323 transport_complete_task_attr(cmd); 2324 2325 /* 2326 * Check to schedule QUEUE_FULL work, or execute an existing 2327 * cmd->transport_qf_callback() 2328 */ 2329 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 2330 schedule_work(&cmd->se_dev->qf_work_queue); 2331 2332 /* 2333 * Check if we need to send a sense buffer from 2334 * the struct se_cmd in question. We do NOT want 2335 * to take this path of the IO has been marked as 2336 * needing to be treated like a "normal read". 
This 2337 * is the case if it's a tape read, and either the 2338 * FM, EOM, or ILI bits are set, but there is no 2339 * sense data. 2340 */ 2341 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && 2342 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 2343 WARN_ON(!cmd->scsi_status); 2344 ret = transport_send_check_condition_and_sense( 2345 cmd, 0, 1); 2346 if (ret) 2347 goto queue_full; 2348 2349 transport_lun_remove_cmd(cmd); 2350 transport_cmd_check_stop_to_fabric(cmd); 2351 return; 2352 } 2353 /* 2354 * Check for a callback, used by amongst other things 2355 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. 2356 */ 2357 if (cmd->transport_complete_callback) { 2358 sense_reason_t rc; 2359 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); 2360 bool zero_dl = !(cmd->data_length); 2361 int post_ret = 0; 2362 2363 rc = cmd->transport_complete_callback(cmd, true, &post_ret); 2364 if (!rc && !post_ret) { 2365 if (caw && zero_dl) 2366 goto queue_rsp; 2367 2368 return; 2369 } else if (rc) { 2370 ret = transport_send_check_condition_and_sense(cmd, 2371 rc, 0); 2372 if (ret) 2373 goto queue_full; 2374 2375 transport_lun_remove_cmd(cmd); 2376 transport_cmd_check_stop_to_fabric(cmd); 2377 return; 2378 } 2379 } 2380 2381 queue_rsp: 2382 switch (cmd->data_direction) { 2383 case DMA_FROM_DEVICE: 2384 /* 2385 * if this is a READ-type IO, but SCSI status 2386 * is set, then skip returning data and just 2387 * return the status -- unless this IO is marked 2388 * as needing to be treated as a normal read, 2389 * in which case we want to go ahead and return 2390 * the data. This happens, for example, for tape 2391 * reads with the FM, EOM, or ILI bits set, with 2392 * no sense data. 2393 */ 2394 if (cmd->scsi_status && 2395 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) 2396 goto queue_status; 2397 2398 atomic_long_add(cmd->data_length, 2399 &cmd->se_lun->lun_stats.tx_data_octets); 2400 /* 2401 * Perform READ_STRIP of PI using software emulation when 2402 * backend had PI enabled, if the transport will not be 2403 * performing hardware READ_STRIP offload. 
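 *
 * In other words, this is the read-side mirror of
 * target_write_prot_action(): for TARGET_PROT_DIN_STRIP without fabric
 * offload, target_read_prot_action() verifies the protection intervals
 * with sbc_dif_verify() and any resulting pi_err is turned into a
 * CHECK CONDITION below, while TARGET_PROT_DIN_INSERT without offload
 * generates fresh PI via sbc_dif_generate().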
2404 */ 2405 if (target_read_prot_action(cmd)) { 2406 ret = transport_send_check_condition_and_sense(cmd, 2407 cmd->pi_err, 0); 2408 if (ret) 2409 goto queue_full; 2410 2411 transport_lun_remove_cmd(cmd); 2412 transport_cmd_check_stop_to_fabric(cmd); 2413 return; 2414 } 2415 2416 trace_target_cmd_complete(cmd); 2417 ret = cmd->se_tfo->queue_data_in(cmd); 2418 if (ret) 2419 goto queue_full; 2420 break; 2421 case DMA_TO_DEVICE: 2422 atomic_long_add(cmd->data_length, 2423 &cmd->se_lun->lun_stats.rx_data_octets); 2424 /* 2425 * Check if we need to send READ payload for BIDI-COMMAND 2426 */ 2427 if (cmd->se_cmd_flags & SCF_BIDI) { 2428 atomic_long_add(cmd->data_length, 2429 &cmd->se_lun->lun_stats.tx_data_octets); 2430 ret = cmd->se_tfo->queue_data_in(cmd); 2431 if (ret) 2432 goto queue_full; 2433 break; 2434 } 2435 fallthrough; 2436 case DMA_NONE: 2437 queue_status: 2438 trace_target_cmd_complete(cmd); 2439 ret = cmd->se_tfo->queue_status(cmd); 2440 if (ret) 2441 goto queue_full; 2442 break; 2443 default: 2444 break; 2445 } 2446 2447 transport_lun_remove_cmd(cmd); 2448 transport_cmd_check_stop_to_fabric(cmd); 2449 return; 2450 2451 queue_full: 2452 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 2453 " data_direction: %d\n", cmd, cmd->data_direction); 2454 2455 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 2456 } 2457 2458 void target_free_sgl(struct scatterlist *sgl, int nents) 2459 { 2460 sgl_free_n_order(sgl, nents, 0); 2461 } 2462 EXPORT_SYMBOL(target_free_sgl); 2463 2464 static inline void transport_reset_sgl_orig(struct se_cmd *cmd) 2465 { 2466 /* 2467 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE 2468 * emulation, and free + reset pointers if necessary.. 2469 */ 2470 if (!cmd->t_data_sg_orig) 2471 return; 2472 2473 kfree(cmd->t_data_sg); 2474 cmd->t_data_sg = cmd->t_data_sg_orig; 2475 cmd->t_data_sg_orig = NULL; 2476 cmd->t_data_nents = cmd->t_data_nents_orig; 2477 cmd->t_data_nents_orig = 0; 2478 } 2479 2480 static inline void transport_free_pages(struct se_cmd *cmd) 2481 { 2482 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2483 target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); 2484 cmd->t_prot_sg = NULL; 2485 cmd->t_prot_nents = 0; 2486 } 2487 2488 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { 2489 /* 2490 * Release special case READ buffer payload required for 2491 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE 2492 */ 2493 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { 2494 target_free_sgl(cmd->t_bidi_data_sg, 2495 cmd->t_bidi_data_nents); 2496 cmd->t_bidi_data_sg = NULL; 2497 cmd->t_bidi_data_nents = 0; 2498 } 2499 transport_reset_sgl_orig(cmd); 2500 return; 2501 } 2502 transport_reset_sgl_orig(cmd); 2503 2504 target_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2505 cmd->t_data_sg = NULL; 2506 cmd->t_data_nents = 0; 2507 2508 target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 2509 cmd->t_bidi_data_sg = NULL; 2510 cmd->t_bidi_data_nents = 0; 2511 } 2512 2513 void *transport_kmap_data_sg(struct se_cmd *cmd) 2514 { 2515 struct scatterlist *sg = cmd->t_data_sg; 2516 struct page **pages; 2517 int i; 2518 2519 /* 2520 * We need to take into account a possible offset here for fabrics like 2521 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 2522 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 2523 */ 2524 if (!cmd->t_data_nents) 2525 return NULL; 2526 2527 BUG_ON(!sg); 2528 if (cmd->t_data_nents == 1) 2529 return kmap(sg_page(sg)) + sg->offset; 
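	/*
	 * Typical, illustrative use of this helper by a CDB emulation
	 * routine (rbuf, local_buf and len are placeholder names):
	 *
	 *	unsigned char *rbuf = transport_kmap_data_sg(cmd);
	 *
	 *	if (rbuf) {
	 *		memcpy(rbuf, local_buf, min_t(u32, len, cmd->data_length));
	 *		transport_kunmap_data_sg(cmd);
	 *	}
	 *
	 * The single-page fast path above hands back a kmap()'d pointer, so
	 * every successful call must be paired with transport_kunmap_data_sg().
	 */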
2530 2531 /* >1 page. use vmap */ 2532 pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL); 2533 if (!pages) 2534 return NULL; 2535 2536 /* convert sg[] to pages[] */ 2537 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 2538 pages[i] = sg_page(sg); 2539 } 2540 2541 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 2542 kfree(pages); 2543 if (!cmd->t_data_vmap) 2544 return NULL; 2545 2546 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 2547 } 2548 EXPORT_SYMBOL(transport_kmap_data_sg); 2549 2550 void transport_kunmap_data_sg(struct se_cmd *cmd) 2551 { 2552 if (!cmd->t_data_nents) { 2553 return; 2554 } else if (cmd->t_data_nents == 1) { 2555 kunmap(sg_page(cmd->t_data_sg)); 2556 return; 2557 } 2558 2559 vunmap(cmd->t_data_vmap); 2560 cmd->t_data_vmap = NULL; 2561 } 2562 EXPORT_SYMBOL(transport_kunmap_data_sg); 2563 2564 int 2565 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, 2566 bool zero_page, bool chainable) 2567 { 2568 gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0); 2569 2570 *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents); 2571 return *sgl ? 0 : -ENOMEM; 2572 } 2573 EXPORT_SYMBOL(target_alloc_sgl); 2574 2575 /* 2576 * Allocate any required resources to execute the command. For writes we 2577 * might not have the payload yet, so notify the fabric via a call to 2578 * ->write_pending instead. Otherwise place it on the execution queue. 2579 */ 2580 sense_reason_t 2581 transport_generic_new_cmd(struct se_cmd *cmd) 2582 { 2583 unsigned long flags; 2584 int ret = 0; 2585 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); 2586 2587 if (cmd->prot_op != TARGET_PROT_NORMAL && 2588 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2589 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, 2590 cmd->prot_length, true, false); 2591 if (ret < 0) 2592 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2593 } 2594 2595 /* 2596 * Determine if the TCM fabric module has already allocated physical 2597 * memory, and is directly calling transport_generic_map_mem_to_cmd() 2598 * beforehand. 2599 */ 2600 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 2601 cmd->data_length) { 2602 2603 if ((cmd->se_cmd_flags & SCF_BIDI) || 2604 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { 2605 u32 bidi_length; 2606 2607 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) 2608 bidi_length = cmd->t_task_nolb * 2609 cmd->se_dev->dev_attrib.block_size; 2610 else 2611 bidi_length = cmd->data_length; 2612 2613 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2614 &cmd->t_bidi_data_nents, 2615 bidi_length, zero_flag, false); 2616 if (ret < 0) 2617 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2618 } 2619 2620 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2621 cmd->data_length, zero_flag, false); 2622 if (ret < 0) 2623 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2624 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 2625 cmd->data_length) { 2626 /* 2627 * Special case for COMPARE_AND_WRITE with fabrics 2628 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. 
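 *
 * As an assumed example: a COMPARE_AND_WRITE with NUMBER OF LOGICAL
 * BLOCKS = 8 on a 512-byte block_size device carries an 8KiB data-out
 * payload (compare data followed by write data), while
 *
 *	caw_length = cmd->t_task_nolb * block_size = 8 * 512 = 4096
 *
 * below sizes the separate t_bidi_data_sg buffer that the
 * COMPARE_AND_WRITE emulation uses for the read-and-compare step.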
2629 */ 2630 u32 caw_length = cmd->t_task_nolb * 2631 cmd->se_dev->dev_attrib.block_size; 2632 2633 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2634 &cmd->t_bidi_data_nents, 2635 caw_length, zero_flag, false); 2636 if (ret < 0) 2637 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2638 } 2639 /* 2640 * If this command is not a write we can execute it right here, 2641 * for write buffers we need to notify the fabric driver first 2642 * and let it call back once the write buffers are ready. 2643 */ 2644 target_add_to_state_list(cmd); 2645 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { 2646 target_execute_cmd(cmd); 2647 return 0; 2648 } 2649 2650 spin_lock_irqsave(&cmd->t_state_lock, flags); 2651 cmd->t_state = TRANSPORT_WRITE_PENDING; 2652 /* 2653 * Determine if frontend context caller is requesting the stopping of 2654 * this command for frontend exceptions. 2655 */ 2656 if (cmd->transport_state & CMD_T_STOP && 2657 !cmd->se_tfo->write_pending_must_be_called) { 2658 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 2659 __func__, __LINE__, cmd->tag); 2660 2661 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2662 2663 complete_all(&cmd->t_transport_stop_comp); 2664 return 0; 2665 } 2666 cmd->transport_state &= ~CMD_T_ACTIVE; 2667 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2668 2669 ret = cmd->se_tfo->write_pending(cmd); 2670 if (ret) 2671 goto queue_full; 2672 2673 return 0; 2674 2675 queue_full: 2676 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 2677 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); 2678 return 0; 2679 } 2680 EXPORT_SYMBOL(transport_generic_new_cmd); 2681 2682 static void transport_write_pending_qf(struct se_cmd *cmd) 2683 { 2684 unsigned long flags; 2685 int ret; 2686 bool stop; 2687 2688 spin_lock_irqsave(&cmd->t_state_lock, flags); 2689 stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED)); 2690 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2691 2692 if (stop) { 2693 pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n", 2694 __func__, __LINE__, cmd->tag); 2695 complete_all(&cmd->t_transport_stop_comp); 2696 return; 2697 } 2698 2699 ret = cmd->se_tfo->write_pending(cmd); 2700 if (ret) { 2701 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", 2702 cmd); 2703 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); 2704 } 2705 } 2706 2707 static bool 2708 __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, 2709 unsigned long *flags); 2710 2711 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) 2712 { 2713 unsigned long flags; 2714 2715 spin_lock_irqsave(&cmd->t_state_lock, flags); 2716 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); 2717 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2718 } 2719 2720 /* 2721 * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has 2722 * finished. 2723 */ 2724 void target_put_cmd_and_wait(struct se_cmd *cmd) 2725 { 2726 DECLARE_COMPLETION_ONSTACK(compl); 2727 2728 WARN_ON_ONCE(cmd->abrt_compl); 2729 cmd->abrt_compl = &compl; 2730 target_put_sess_cmd(cmd); 2731 wait_for_completion(&compl); 2732 } 2733 2734 /* 2735 * This function is called by frontend drivers after processing of a command 2736 * has finished. 
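 *
 * A minimal, illustrative release from a fabric driver's response
 * completion handler (assuming no additional ACK_KREF reference is still
 * held) is simply:
 *
 *	transport_generic_free_cmd(se_cmd, false);
 *
 * with wait_for_tasks passed as true only on exception paths that must
 * block until the command has actually been stopped.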
2737 * 2738 * The protocol for ensuring that either the regular frontend command 2739 * processing flow or target_handle_abort() code drops one reference is as 2740 * follows: 2741 * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause 2742 * the frontend driver to call this function synchronously or asynchronously. 2743 * That will cause one reference to be dropped. 2744 * - During regular command processing the target core sets CMD_T_COMPLETE 2745 * before invoking one of the .queue_*() functions. 2746 * - The code that aborts commands skips commands and TMFs for which 2747 * CMD_T_COMPLETE has been set. 2748 * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for 2749 * commands that will be aborted. 2750 * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set 2751 * transport_generic_free_cmd() skips its call to target_put_sess_cmd(). 2752 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will 2753 * be called and will drop a reference. 2754 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task() 2755 * will be called. target_handle_abort() will drop the final reference. 2756 */ 2757 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2758 { 2759 DECLARE_COMPLETION_ONSTACK(compl); 2760 int ret = 0; 2761 bool aborted = false, tas = false; 2762 2763 if (wait_for_tasks) 2764 target_wait_free_cmd(cmd, &aborted, &tas); 2765 2766 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) { 2767 /* 2768 * Handle WRITE failure case where transport_generic_new_cmd() 2769 * has already added se_cmd to state_list, but fabric has 2770 * failed command before I/O submission. 2771 */ 2772 if (cmd->state_active) 2773 target_remove_from_state_list(cmd); 2774 2775 if (cmd->se_lun) 2776 transport_lun_remove_cmd(cmd); 2777 } 2778 if (aborted) 2779 cmd->free_compl = &compl; 2780 ret = target_put_sess_cmd(cmd); 2781 if (aborted) { 2782 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); 2783 wait_for_completion(&compl); 2784 ret = 1; 2785 } 2786 return ret; 2787 } 2788 EXPORT_SYMBOL(transport_generic_free_cmd); 2789 2790 /** 2791 * target_get_sess_cmd - Add command to active ->sess_cmd_list 2792 * @se_cmd: command descriptor to add 2793 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() 2794 */ 2795 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) 2796 { 2797 struct se_session *se_sess = se_cmd->se_sess; 2798 unsigned long flags; 2799 int ret = 0; 2800 2801 /* 2802 * Add a second kref if the fabric caller is expecting to handle 2803 * fabric acknowledgement that requires two target_put_sess_cmd() 2804 * invocations before se_cmd descriptor release. 
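 *
 * With ack_kref an assumed reference lifetime therefore looks like:
 *
 *	kref_init() in transport_init_se_cmd()            -> 1
 *	target_get_sess_cmd(se_cmd, true)                 -> 2
 *	target_put_sess_cmd() from the completion path    -> 1
 *	target_put_sess_cmd() from the fabric ack         -> 0, release_cmd()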
2805 */ 2806 if (ack_kref) { 2807 if (!kref_get_unless_zero(&se_cmd->cmd_kref)) 2808 return -EINVAL; 2809 2810 se_cmd->se_cmd_flags |= SCF_ACK_KREF; 2811 } 2812 2813 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2814 if (se_sess->sess_tearing_down) { 2815 ret = -ESHUTDOWN; 2816 goto out; 2817 } 2818 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2819 percpu_ref_get(&se_sess->cmd_count); 2820 out: 2821 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2822 2823 if (ret && ack_kref) 2824 target_put_sess_cmd(se_cmd); 2825 2826 return ret; 2827 } 2828 EXPORT_SYMBOL(target_get_sess_cmd); 2829 2830 static void target_free_cmd_mem(struct se_cmd *cmd) 2831 { 2832 transport_free_pages(cmd); 2833 2834 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 2835 core_tmr_release_req(cmd->se_tmr_req); 2836 if (cmd->t_task_cdb != cmd->__t_task_cdb) 2837 kfree(cmd->t_task_cdb); 2838 } 2839 2840 static void target_release_cmd_kref(struct kref *kref) 2841 { 2842 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2843 struct se_session *se_sess = se_cmd->se_sess; 2844 struct completion *free_compl = se_cmd->free_compl; 2845 struct completion *abrt_compl = se_cmd->abrt_compl; 2846 unsigned long flags; 2847 2848 if (se_sess) { 2849 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2850 list_del_init(&se_cmd->se_cmd_list); 2851 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2852 } 2853 2854 target_free_cmd_mem(se_cmd); 2855 se_cmd->se_tfo->release_cmd(se_cmd); 2856 if (free_compl) 2857 complete(free_compl); 2858 if (abrt_compl) 2859 complete(abrt_compl); 2860 2861 percpu_ref_put(&se_sess->cmd_count); 2862 } 2863 2864 /** 2865 * target_put_sess_cmd - decrease the command reference count 2866 * @se_cmd: command to drop a reference from 2867 * 2868 * Returns 1 if and only if this target_put_sess_cmd() call caused the 2869 * refcount to drop to zero. Returns zero otherwise. 2870 */ 2871 int target_put_sess_cmd(struct se_cmd *se_cmd) 2872 { 2873 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); 2874 } 2875 EXPORT_SYMBOL(target_put_sess_cmd); 2876 2877 static const char *data_dir_name(enum dma_data_direction d) 2878 { 2879 switch (d) { 2880 case DMA_BIDIRECTIONAL: return "BIDI"; 2881 case DMA_TO_DEVICE: return "WRITE"; 2882 case DMA_FROM_DEVICE: return "READ"; 2883 case DMA_NONE: return "NONE"; 2884 } 2885 2886 return "(?)"; 2887 } 2888 2889 static const char *cmd_state_name(enum transport_state_table t) 2890 { 2891 switch (t) { 2892 case TRANSPORT_NO_STATE: return "NO_STATE"; 2893 case TRANSPORT_NEW_CMD: return "NEW_CMD"; 2894 case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING"; 2895 case TRANSPORT_PROCESSING: return "PROCESSING"; 2896 case TRANSPORT_COMPLETE: return "COMPLETE"; 2897 case TRANSPORT_ISTATE_PROCESSING: 2898 return "ISTATE_PROCESSING"; 2899 case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP"; 2900 case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK"; 2901 case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR"; 2902 } 2903 2904 return "(?)"; 2905 } 2906 2907 static void target_append_str(char **str, const char *txt) 2908 { 2909 char *prev = *str; 2910 2911 *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) : 2912 kstrdup(txt, GFP_ATOMIC); 2913 kfree(prev); 2914 } 2915 2916 /* 2917 * Convert a transport state bitmask into a string. The caller is 2918 * responsible for freeing the returned pointer. 
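 *
 * For example, a command with CMD_T_ACTIVE | CMD_T_SENT set is rendered
 * as the kfree()-able string "active,sent", while a mask of 0 yields
 * NULL, which the pr_debug() callers below print as "(null)".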
2919 */ 2920 static char *target_ts_to_str(u32 ts) 2921 { 2922 char *str = NULL; 2923 2924 if (ts & CMD_T_ABORTED) 2925 target_append_str(&str, "aborted"); 2926 if (ts & CMD_T_ACTIVE) 2927 target_append_str(&str, "active"); 2928 if (ts & CMD_T_COMPLETE) 2929 target_append_str(&str, "complete"); 2930 if (ts & CMD_T_SENT) 2931 target_append_str(&str, "sent"); 2932 if (ts & CMD_T_STOP) 2933 target_append_str(&str, "stop"); 2934 if (ts & CMD_T_FABRIC_STOP) 2935 target_append_str(&str, "fabric_stop"); 2936 2937 return str; 2938 } 2939 2940 static const char *target_tmf_name(enum tcm_tmreq_table tmf) 2941 { 2942 switch (tmf) { 2943 case TMR_ABORT_TASK: return "ABORT_TASK"; 2944 case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET"; 2945 case TMR_CLEAR_ACA: return "CLEAR_ACA"; 2946 case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET"; 2947 case TMR_LUN_RESET: return "LUN_RESET"; 2948 case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET"; 2949 case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET"; 2950 case TMR_LUN_RESET_PRO: return "LUN_RESET_PRO"; 2951 case TMR_UNKNOWN: break; 2952 } 2953 return "(?)"; 2954 } 2955 2956 void target_show_cmd(const char *pfx, struct se_cmd *cmd) 2957 { 2958 char *ts_str = target_ts_to_str(cmd->transport_state); 2959 const u8 *cdb = cmd->t_task_cdb; 2960 struct se_tmr_req *tmf = cmd->se_tmr_req; 2961 2962 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2963 pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n", 2964 pfx, cdb[0], cdb[1], cmd->tag, 2965 data_dir_name(cmd->data_direction), 2966 cmd->se_tfo->get_cmd_state(cmd), 2967 cmd_state_name(cmd->t_state), cmd->data_length, 2968 kref_read(&cmd->cmd_kref), ts_str); 2969 } else { 2970 pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n", 2971 pfx, target_tmf_name(tmf->function), cmd->tag, 2972 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd), 2973 cmd_state_name(cmd->t_state), 2974 kref_read(&cmd->cmd_kref), ts_str); 2975 } 2976 kfree(ts_str); 2977 } 2978 EXPORT_SYMBOL(target_show_cmd); 2979 2980 /** 2981 * target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued. 2982 * @se_sess: session to flag 2983 */ 2984 void target_sess_cmd_list_set_waiting(struct se_session *se_sess) 2985 { 2986 unsigned long flags; 2987 2988 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2989 se_sess->sess_tearing_down = 1; 2990 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2991 2992 percpu_ref_kill(&se_sess->cmd_count); 2993 } 2994 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); 2995 2996 /** 2997 * target_wait_for_sess_cmds - Wait for outstanding commands 2998 * @se_sess: session to wait for active I/O 2999 */ 3000 void target_wait_for_sess_cmds(struct se_session *se_sess) 3001 { 3002 struct se_cmd *cmd; 3003 int ret; 3004 3005 WARN_ON_ONCE(!se_sess->sess_tearing_down); 3006 3007 do { 3008 ret = wait_event_timeout(se_sess->cmd_list_wq, 3009 percpu_ref_is_zero(&se_sess->cmd_count), 3010 180 * HZ); 3011 list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list) 3012 target_show_cmd("session shutdown: still waiting for ", 3013 cmd); 3014 } while (ret <= 0); 3015 } 3016 EXPORT_SYMBOL(target_wait_for_sess_cmds); 3017 3018 /* 3019 * Prevent that new percpu_ref_tryget_live() calls succeed and wait until 3020 * all references to the LUN have been released. Called during LUN shutdown. 
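 *
 * The references being drained here are the per-command LUN refs: one is
 * taken via percpu_ref_tryget_live() when a command is mapped to the LUN
 * (see transport_lookup_cmd_lun()) and dropped again in
 * transport_lun_remove_cmd(), so this wait finishes once every in-flight
 * command on the LUN has completed its remove path.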
3021 */ 3022 void transport_clear_lun_ref(struct se_lun *lun) 3023 { 3024 percpu_ref_kill(&lun->lun_ref); 3025 wait_for_completion(&lun->lun_shutdown_comp); 3026 } 3027 3028 static bool 3029 __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, 3030 bool *aborted, bool *tas, unsigned long *flags) 3031 __releases(&cmd->t_state_lock) 3032 __acquires(&cmd->t_state_lock) 3033 { 3034 3035 assert_spin_locked(&cmd->t_state_lock); 3036 WARN_ON_ONCE(!irqs_disabled()); 3037 3038 if (fabric_stop) 3039 cmd->transport_state |= CMD_T_FABRIC_STOP; 3040 3041 if (cmd->transport_state & CMD_T_ABORTED) 3042 *aborted = true; 3043 3044 if (cmd->transport_state & CMD_T_TAS) 3045 *tas = true; 3046 3047 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 3048 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 3049 return false; 3050 3051 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 3052 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 3053 return false; 3054 3055 if (!(cmd->transport_state & CMD_T_ACTIVE)) 3056 return false; 3057 3058 if (fabric_stop && *aborted) 3059 return false; 3060 3061 cmd->transport_state |= CMD_T_STOP; 3062 3063 target_show_cmd("wait_for_tasks: Stopping ", cmd); 3064 3065 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 3066 3067 while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp, 3068 180 * HZ)) 3069 target_show_cmd("wait for tasks: ", cmd); 3070 3071 spin_lock_irqsave(&cmd->t_state_lock, *flags); 3072 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 3073 3074 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->" 3075 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); 3076 3077 return true; 3078 } 3079 3080 /** 3081 * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp 3082 * @cmd: command to wait on 3083 */ 3084 bool transport_wait_for_tasks(struct se_cmd *cmd) 3085 { 3086 unsigned long flags; 3087 bool ret, aborted = false, tas = false; 3088 3089 spin_lock_irqsave(&cmd->t_state_lock, flags); 3090 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); 3091 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3092 3093 return ret; 3094 } 3095 EXPORT_SYMBOL(transport_wait_for_tasks); 3096 3097 struct sense_info { 3098 u8 key; 3099 u8 asc; 3100 u8 ascq; 3101 bool add_sector_info; 3102 }; 3103 3104 static const struct sense_info sense_info_table[] = { 3105 [TCM_NO_SENSE] = { 3106 .key = NOT_READY 3107 }, 3108 [TCM_NON_EXISTENT_LUN] = { 3109 .key = ILLEGAL_REQUEST, 3110 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */ 3111 }, 3112 [TCM_UNSUPPORTED_SCSI_OPCODE] = { 3113 .key = ILLEGAL_REQUEST, 3114 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 3115 }, 3116 [TCM_SECTOR_COUNT_TOO_MANY] = { 3117 .key = ILLEGAL_REQUEST, 3118 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 3119 }, 3120 [TCM_UNKNOWN_MODE_PAGE] = { 3121 .key = ILLEGAL_REQUEST, 3122 .asc = 0x24, /* INVALID FIELD IN CDB */ 3123 }, 3124 [TCM_CHECK_CONDITION_ABORT_CMD] = { 3125 .key = ABORTED_COMMAND, 3126 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */ 3127 .ascq = 0x03, 3128 }, 3129 [TCM_INCORRECT_AMOUNT_OF_DATA] = { 3130 .key = ABORTED_COMMAND, 3131 .asc = 0x0c, /* WRITE ERROR */ 3132 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */ 3133 }, 3134 [TCM_INVALID_CDB_FIELD] = { 3135 .key = ILLEGAL_REQUEST, 3136 .asc = 0x24, /* INVALID FIELD IN CDB */ 3137 }, 3138 [TCM_INVALID_PARAMETER_LIST] = { 3139 .key = ILLEGAL_REQUEST, 3140 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ 3141 }, 3142 [TCM_TOO_MANY_TARGET_DESCS] = { 3143 .key = ILLEGAL_REQUEST, 3144 .asc = 
0x26, 3145 .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */ 3146 }, 3147 [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = { 3148 .key = ILLEGAL_REQUEST, 3149 .asc = 0x26, 3150 .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */ 3151 }, 3152 [TCM_TOO_MANY_SEGMENT_DESCS] = { 3153 .key = ILLEGAL_REQUEST, 3154 .asc = 0x26, 3155 .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */ 3156 }, 3157 [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = { 3158 .key = ILLEGAL_REQUEST, 3159 .asc = 0x26, 3160 .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */ 3161 }, 3162 [TCM_PARAMETER_LIST_LENGTH_ERROR] = { 3163 .key = ILLEGAL_REQUEST, 3164 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ 3165 }, 3166 [TCM_UNEXPECTED_UNSOLICITED_DATA] = { 3167 .key = ILLEGAL_REQUEST, 3168 .asc = 0x0c, /* WRITE ERROR */ 3169 .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */ 3170 }, 3171 [TCM_SERVICE_CRC_ERROR] = { 3172 .key = ABORTED_COMMAND, 3173 .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */ 3174 .ascq = 0x05, /* N/A */ 3175 }, 3176 [TCM_SNACK_REJECTED] = { 3177 .key = ABORTED_COMMAND, 3178 .asc = 0x11, /* READ ERROR */ 3179 .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */ 3180 }, 3181 [TCM_WRITE_PROTECTED] = { 3182 .key = DATA_PROTECT, 3183 .asc = 0x27, /* WRITE PROTECTED */ 3184 }, 3185 [TCM_ADDRESS_OUT_OF_RANGE] = { 3186 .key = ILLEGAL_REQUEST, 3187 .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ 3188 }, 3189 [TCM_CHECK_CONDITION_UNIT_ATTENTION] = { 3190 .key = UNIT_ATTENTION, 3191 }, 3192 [TCM_CHECK_CONDITION_NOT_READY] = { 3193 .key = NOT_READY, 3194 }, 3195 [TCM_MISCOMPARE_VERIFY] = { 3196 .key = MISCOMPARE, 3197 .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */ 3198 .ascq = 0x00, 3199 }, 3200 [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = { 3201 .key = ABORTED_COMMAND, 3202 .asc = 0x10, 3203 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */ 3204 .add_sector_info = true, 3205 }, 3206 [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = { 3207 .key = ABORTED_COMMAND, 3208 .asc = 0x10, 3209 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ 3210 .add_sector_info = true, 3211 }, 3212 [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = { 3213 .key = ABORTED_COMMAND, 3214 .asc = 0x10, 3215 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ 3216 .add_sector_info = true, 3217 }, 3218 [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = { 3219 .key = COPY_ABORTED, 3220 .asc = 0x0d, 3221 .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */ 3222 3223 }, 3224 [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = { 3225 /* 3226 * Returning ILLEGAL REQUEST would cause immediate IO errors on 3227 * Solaris initiators. Returning NOT READY instead means the 3228 * operations will be retried a finite number of times and we 3229 * can survive intermittent errors. 3230 */ 3231 .key = NOT_READY, 3232 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */ 3233 }, 3234 [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = { 3235 /* 3236 * From spc4r22, sections 5.7.7 and 5.7.8: 3237 * If a PERSISTENT RESERVE OUT command with a REGISTER service action 3238 * or a REGISTER AND IGNORE EXISTING KEY service action or 3239 * REGISTER AND MOVE service action is attempted, 3240 * but there are insufficient device server resources to complete the 3241 * operation, then the command shall be terminated with CHECK CONDITION 3242 * status, with the sense key set to ILLEGAL REQUEST, and the additional 3243 * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
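 *
 * For this entry translate_sense_reason() below therefore builds a
 * CHECK CONDITION with sense key ILLEGAL REQUEST (0x05), ASC 0x55 and
 * ASCQ 0x04, the INSUFFICIENT REGISTRATION RESOURCES additional sense
 * code defined by SPC-4.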
3244 */ 3245 .key = ILLEGAL_REQUEST, 3246 .asc = 0x55, 3247 .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */ 3248 }, 3249 }; 3250 3251 /** 3252 * translate_sense_reason - translate a sense reason into T10 key, asc and ascq 3253 * @cmd: SCSI command in which the resulting sense buffer or SCSI status will 3254 * be stored. 3255 * @reason: LIO sense reason code. If this argument has the value 3256 * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If 3257 * dequeuing a unit attention fails due to multiple commands being processed 3258 * concurrently, set the command status to BUSY. 3259 * 3260 * Return: 0 upon success or -EINVAL if the sense buffer is too small. 3261 */ 3262 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) 3263 { 3264 const struct sense_info *si; 3265 u8 *buffer = cmd->sense_buffer; 3266 int r = (__force int)reason; 3267 u8 key, asc, ascq; 3268 bool desc_format = target_sense_desc_format(cmd->se_dev); 3269 3270 if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key) 3271 si = &sense_info_table[r]; 3272 else 3273 si = &sense_info_table[(__force int) 3274 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE]; 3275 3276 key = si->key; 3277 if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) { 3278 if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc, 3279 &ascq)) { 3280 cmd->scsi_status = SAM_STAT_BUSY; 3281 return; 3282 } 3283 } else if (si->asc == 0) { 3284 WARN_ON_ONCE(cmd->scsi_asc == 0); 3285 asc = cmd->scsi_asc; 3286 ascq = cmd->scsi_ascq; 3287 } else { 3288 asc = si->asc; 3289 ascq = si->ascq; 3290 } 3291 3292 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 3293 cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 3294 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 3295 scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq); 3296 if (si->add_sector_info) 3297 WARN_ON_ONCE(scsi_set_sense_information(buffer, 3298 cmd->scsi_sense_length, 3299 cmd->bad_sector) < 0); 3300 } 3301 3302 int 3303 transport_send_check_condition_and_sense(struct se_cmd *cmd, 3304 sense_reason_t reason, int from_transport) 3305 { 3306 unsigned long flags; 3307 3308 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); 3309 3310 spin_lock_irqsave(&cmd->t_state_lock, flags); 3311 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 3312 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3313 return 0; 3314 } 3315 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 3316 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3317 3318 if (!from_transport) 3319 translate_sense_reason(cmd, reason); 3320 3321 trace_target_cmd_complete(cmd); 3322 return cmd->se_tfo->queue_status(cmd); 3323 } 3324 EXPORT_SYMBOL(transport_send_check_condition_and_sense); 3325 3326 /** 3327 * target_send_busy - Send SCSI BUSY status back to the initiator 3328 * @cmd: SCSI command for which to send a BUSY reply. 3329 * 3330 * Note: Only call this function if target_submit_cmd*() failed. 
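 *
 * One possible (sketch-only) error path mirroring that note, where cdb,
 * sense_buf, lun and len are placeholder names:
 *
 *	rc = target_submit_cmd(se_cmd, se_sess, cdb, sense_buf, lun, len,
 *			       TCM_SIMPLE_TAG, DMA_NONE, 0);
 *	if (rc)
 *		target_send_busy(se_cmd);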
3331 */ 3332 int target_send_busy(struct se_cmd *cmd) 3333 { 3334 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); 3335 3336 cmd->scsi_status = SAM_STAT_BUSY; 3337 trace_target_cmd_complete(cmd); 3338 return cmd->se_tfo->queue_status(cmd); 3339 } 3340 EXPORT_SYMBOL(target_send_busy); 3341 3342 static void target_tmr_work(struct work_struct *work) 3343 { 3344 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 3345 struct se_device *dev = cmd->se_dev; 3346 struct se_tmr_req *tmr = cmd->se_tmr_req; 3347 int ret; 3348 3349 if (cmd->transport_state & CMD_T_ABORTED) 3350 goto aborted; 3351 3352 switch (tmr->function) { 3353 case TMR_ABORT_TASK: 3354 core_tmr_abort_task(dev, tmr, cmd->se_sess); 3355 break; 3356 case TMR_ABORT_TASK_SET: 3357 case TMR_CLEAR_ACA: 3358 case TMR_CLEAR_TASK_SET: 3359 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; 3360 break; 3361 case TMR_LUN_RESET: 3362 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); 3363 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : 3364 TMR_FUNCTION_REJECTED; 3365 if (tmr->response == TMR_FUNCTION_COMPLETE) { 3366 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 3367 cmd->orig_fe_lun, 0x29, 3368 ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED); 3369 } 3370 break; 3371 case TMR_TARGET_WARM_RESET: 3372 tmr->response = TMR_FUNCTION_REJECTED; 3373 break; 3374 case TMR_TARGET_COLD_RESET: 3375 tmr->response = TMR_FUNCTION_REJECTED; 3376 break; 3377 default: 3378 pr_err("Unknown TMR function: 0x%02x.\n", 3379 tmr->function); 3380 tmr->response = TMR_FUNCTION_REJECTED; 3381 break; 3382 } 3383 3384 if (cmd->transport_state & CMD_T_ABORTED) 3385 goto aborted; 3386 3387 cmd->se_tfo->queue_tm_rsp(cmd); 3388 3389 transport_lun_remove_cmd(cmd); 3390 transport_cmd_check_stop_to_fabric(cmd); 3391 return; 3392 3393 aborted: 3394 target_handle_abort(cmd); 3395 } 3396 3397 int transport_generic_handle_tmr( 3398 struct se_cmd *cmd) 3399 { 3400 unsigned long flags; 3401 bool aborted = false; 3402 3403 spin_lock_irqsave(&cmd->t_state_lock, flags); 3404 if (cmd->transport_state & CMD_T_ABORTED) { 3405 aborted = true; 3406 } else { 3407 cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 3408 cmd->transport_state |= CMD_T_ACTIVE; 3409 } 3410 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3411 3412 if (aborted) { 3413 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n", 3414 cmd->se_tmr_req->function, 3415 cmd->se_tmr_req->ref_task_tag, cmd->tag); 3416 target_handle_abort(cmd); 3417 return 0; 3418 } 3419 3420 INIT_WORK(&cmd->work, target_tmr_work); 3421 schedule_work(&cmd->work); 3422 return 0; 3423 } 3424 EXPORT_SYMBOL(transport_generic_handle_tmr); 3425 3426 bool 3427 target_check_wce(struct se_device *dev) 3428 { 3429 bool wce = false; 3430 3431 if (dev->transport->get_write_cache) 3432 wce = dev->transport->get_write_cache(dev); 3433 else if (dev->dev_attrib.emulate_write_cache > 0) 3434 wce = true; 3435 3436 return wce; 3437 } 3438 3439 bool 3440 target_check_fua(struct se_device *dev) 3441 { 3442 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0; 3443 } 3444
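/*
 * Illustrative, non-authoritative sketch of how a backend or CDB
 * emulation layer might consult the two helpers above; "dev" is assumed
 * to be a configured struct se_device and "fua_requested" a FUA bit
 * already decoded from the incoming CDB:
 *
 *	bool fua = fua_requested && target_check_fua(dev);
 *	bool volatile_wc = target_check_wce(dev);
 *
 * Here "fua" would force the individual WRITE to stable media, while
 * "volatile_wc" indicates that a write cache is advertised to initiators,
 * so a SYNCHRONIZE CACHE emulation must actually flush the backing store.
 */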