// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename: target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev, int err, bool write_pending);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
			WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}

void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}

static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
{
	struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);

	wake_up(&sess->cmd_count_wq);
}

/**
 * transport_init_session - initialize a session object
 * @se_sess: Session object pointer.
 *
 * The caller must have zero-initialized @se_sess before calling this function.
 */
int transport_init_session(struct se_session *se_sess)
{
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	init_waitqueue_head(&se_sess->cmd_count_wq);
	init_completion(&se_sess->stop_done);
	atomic_set(&se_sess->stopped, 0);
	return percpu_ref_init(&se_sess->cmd_count,
			target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
}
EXPORT_SYMBOL(transport_init_session);

void transport_uninit_session(struct se_session *se_sess)
{
	/*
	 * Drivers like iscsi and loop do not call target_stop_session
	 * during session shutdown so we have to drop the ref taken at init
	 * time here.
	 */
	if (!atomic_read(&se_sess->stopped))
		percpu_ref_put(&se_sess->cmd_count);

	percpu_ref_exit(&se_sess->cmd_count);
}

/**
 * transport_alloc_session - allocate a session object and initialize it
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int ret;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	ret = transport_init_session(se_sess);
	if (ret < 0) {
		kmem_cache_free(se_sess_cache, se_sess);
		return ERR_PTR(ret);
	}
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_alloc_session);
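
/*
 * Illustrative sketch (not part of the driver API contract): a fabric driver
 * that wants per-command private data pre-allocated along with the session
 * normally goes through target_setup_session() further below rather than
 * calling the tag helpers directly, roughly:
 *
 *	sess = target_setup_session(tpg, 128, sizeof(struct my_cmd_priv),
 *				TARGET_PROT_NORMAL, initiator_name,
 *				fabric_private, NULL);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 *
 * Here 'struct my_cmd_priv', 'initiator_name' and 'fabric_private' are
 * hypothetical fabric-driver names, and 128 is an arbitrary queue depth.
 */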

/**
 * transport_alloc_session_tags - allocate target driver private data
 * @se_sess: Session pointer.
 * @tag_num: Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	      each command.
 */
int transport_alloc_session_tags(struct se_session *se_sess,
		unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
			GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!se_sess->sess_cmd_map) {
		pr_err("Unable to allocate se_sess->sess_cmd_map\n");
		return -ENOMEM;
	}

	rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
			false, GFP_KERNEL, NUMA_NO_NODE);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool, tag_num: %u\n",
				tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);

/**
 * transport_init_session_tags - allocate a session and target driver private data
 * @tag_num: Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	      each command.
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
static struct se_session *
transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
		enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	if (tag_num != 0 && !tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_num: %u, but zero tag_size\n",
				tag_num);
		return ERR_PTR(-EINVAL);
	}
	if (!tag_num && tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_size: %u, but zero tag_num\n",
				tag_size);
		return ERR_PTR(-EINVAL);
	}

	se_sess = transport_alloc_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}

/*
 * Called with se_portal_group->session_lock held (spin_lock_irqsave).
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];
	unsigned long flags;

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate the active
	 * struct se_session.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * Determine if fabric allows for T10-PI feature bits exposed to
		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl node
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}

		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
				&se_nacl->acl_sess_list);
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
			se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

struct se_session *
target_setup_session(struct se_portal_group *tpg,
		unsigned int tag_num, unsigned int tag_size,
		enum target_prot_op prot_op,
		const char *initiatorname, void *private,
		int (*callback)(struct se_portal_group *,
				struct se_session *, void *))
{
	struct se_session *sess;

	/*
	 * If the fabric driver is using percpu-ida based pre allocation
	 * of I/O descriptor tags, go ahead and perform that setup now..
	 */
	if (tag_num != 0)
		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
	else
		sess = transport_alloc_session(prot_op);

	if (IS_ERR(sess))
		return sess;

	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
			(unsigned char *)initiatorname);
	if (!sess->se_node_acl) {
		transport_free_session(sess);
		return ERR_PTR(-EACCES);
	}
	/*
	 * Go ahead and perform any remaining fabric setup that is
	 * required before transport_register_session().
	 */
	if (callback != NULL) {
		int rc = callback(tpg, sess, private);
		if (rc) {
			transport_free_session(sess);
			return ERR_PTR(rc);
		}
	}

	transport_register_session(tpg, sess->se_node_acl, sess, private);
	return sess;
}
EXPORT_SYMBOL(target_setup_session);

ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
	struct se_session *se_sess;
	ssize_t len = 0;

	spin_lock_bh(&se_tpg->session_lock);
	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
		if (!se_sess->se_node_acl)
			continue;
		if (!se_sess->se_node_acl->dynamic_node_acl)
			continue;
		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
			break;

		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
				se_sess->se_node_acl->initiatorname);
		len += 1; /* Include NULL terminator */
	}
	spin_unlock_bh(&se_tpg->session_lock);

	return len;
}
EXPORT_SYMBOL(target_show_dynamic_sessions);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
			struct se_node_acl, acl_kref);
	struct se_portal_group *se_tpg = nacl->se_tpg;

	if (!nacl->dynamic_stop) {
		complete(&nacl->acl_free_comp);
		return;
	}

	mutex_lock(&se_tpg->acl_node_mutex);
	list_del_init(&nacl->acl_list);
	mutex_unlock(&se_tpg->acl_node_mutex);

	core_tpg_wait_for_nacl_pr_ref(nacl);
	core_free_device_list_for_node(nacl, se_tpg);
	kfree(nacl);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}
EXPORT_SYMBOL(target_put_nacl);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (!list_empty(&se_sess->sess_acl_list))
			list_del_init(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl = se_sess->se_node_acl;

	/*
	 * Drop the se_node_acl->nacl_kref obtained from within
	 * core_tpg_get_initiator_node_acl().
	 */
	if (se_nacl) {
		struct se_portal_group *se_tpg = se_nacl->se_tpg;
		const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
		unsigned long flags;

		se_sess->se_node_acl = NULL;

		/*
		 * Also determine if we need to drop the extra ->cmd_kref if
		 * it had been previously dynamically generated, and
		 * the endpoint is not caching dynamic ACLs.
		 */
		mutex_lock(&se_tpg->acl_node_mutex);
		if (se_nacl->dynamic_node_acl &&
		    !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
			if (list_empty(&se_nacl->acl_sess_list))
				se_nacl->dynamic_stop = true;
			spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);

			if (se_nacl->dynamic_stop)
				list_del_init(&se_nacl->acl_list);
		}
		mutex_unlock(&se_tpg->acl_node_mutex);

		if (se_nacl->dynamic_stop)
			target_put_nacl(se_nacl);

		target_put_nacl(se_nacl);
	}
	if (se_sess->sess_cmd_map) {
		sbitmap_queue_free(&se_sess->sess_tag_pool);
		kvfree(se_sess->sess_cmd_map);
	}
	transport_uninit_session(se_sess);
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

static int target_release_res(struct se_device *dev, void *data)
{
	struct se_session *sess = data;

	if (dev->reservation_holder == sess)
		target_release_reservation(dev);
	return 0;
}

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	unsigned long flags;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Since the session is being removed, release SPC-2
	 * reservations held by the session that is disappearing.
	 */
	target_for_each_device(target_release_res, se_sess);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
			se_tpg->se_tpg_tfo->fabric_name);
	/*
	 * If last kref is dropping now for an explicit NodeACL, awake sleeping
	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
	 * removal context from within transport_free_session() code.
	 *
	 * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
	 * to release all remaining generate_node_acl=1 created ACL resources.
	 */

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

void target_remove_session(struct se_session *se_sess)
{
	transport_deregister_session_configfs(se_sess);
	transport_deregister_session(se_sess);
}
EXPORT_SYMBOL(target_remove_session);

static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
}

/*
 * This function is called by the target core after the target core has
 * finished processing a SCSI command or SCSI TMF. Both the regular command
 * processing code and the code for aborting commands can call this
 * function. CMD_T_STOP is set if and only if another thread is waiting
 * inside transport_wait_for_tasks() for t_transport_stop_comp.
 */
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	unsigned long flags;

	target_remove_from_state_list(cmd);

	/*
	 * Clear struct se_cmd->se_lun before the handoff to FE.
	 */
	cmd->se_lun = NULL;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			__func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 1;
	}
	cmd->transport_state &= ~CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Some fabric modules like tcm_loop can release their internally
	 * allocated I/O reference and struct se_cmd now.
	 *
	 * Fabric modules are expected to return '1' here if the se_cmd being
	 * passed is released at this point, or zero if not being released.
	 */
	return cmd->se_tfo->check_stop_free(cmd);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;

	if (!lun)
		return;

	if (cmpxchg(&cmd->lun_ref_active, true, false))
		percpu_ref_put(&lun->lun_ref);
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd,
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}

/*
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd
 */
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return NULL;

	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;

	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
			dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return cmd->sense_buffer;
}

void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
{
	unsigned char *cmd_sense_buf;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd_sense_buf = transport_get_sense_buffer(cmd);
	if (!cmd_sense_buf) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
	memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
EXPORT_SYMBOL(transport_copy_sense_to_cmd);

static void target_handle_abort(struct se_cmd *cmd)
{
	bool tas = cmd->transport_state & CMD_T_TAS;
	bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
	int ret;

	pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);

	if (tas) {
		if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
			pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
					cmd->t_task_cdb[0], cmd->tag);
			trace_target_cmd_complete(cmd);
			ret = cmd->se_tfo->queue_status(cmd);
			if (ret) {
				transport_handle_queue_full(cmd, cmd->se_dev,
						ret, false);
				return;
			}
		} else {
			cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
			cmd->se_tfo->queue_tm_rsp(cmd);
		}
	} else {
		/*
		 * Allow the fabric driver to unmap any resources before
		 * releasing the descriptor via TFO->release_cmd().
		 */
		cmd->se_tfo->aborted_task(cmd);
		if (ack_kref)
			WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
		/*
		 * To do: establish a unit attention condition on the I_T
		 * nexus associated with cmd. See also the paragraph "Aborting
		 * commands" in SAM.
		 */
	}

	WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);

	transport_lun_remove_cmd(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
}

static void target_abort_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	target_handle_abort(cmd);
}

static bool target_cmd_interrupted(struct se_cmd *cmd)
{
	int post_ret;

	if (cmd->transport_state & CMD_T_ABORTED) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		INIT_WORK(&cmd->work, target_abort_work);
		queue_work(target_completion_wq, &cmd->work);
		return true;
	} else if (cmd->transport_state & CMD_T_STOP) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		complete_all(&cmd->t_transport_stop_comp);
		return true;
	}

	return false;
}

/* May be called from interrupt context so must not sleep. */
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	int success;
	unsigned long flags;

	if (target_cmd_interrupted(cmd))
		return;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	switch (cmd->scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
			success = 1;
		else
			success = 0;
		break;
	default:
		success = 1;
		break;
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	INIT_WORK(&cmd->work, success ? target_complete_ok_work :
			target_complete_failure_work);
	queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
	if ((scsi_status == SAM_STAT_GOOD ||
	     cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
	    length < cmd->data_length) {
		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = cmd->data_length - length;
		}

		cmd->data_length = length;
	}

	target_complete_cmd(cmd, scsi_status);
}
EXPORT_SYMBOL(target_complete_cmd_with_length);

static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list,
				&dev->queues[cmd->cpuid].state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);

void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
			qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec_mb(&dev->dev_qf_count);

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue context: %s\n",
			cmd->se_tfo->fabric_name, cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
								   : "UNKNOWN");

		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
			transport_write_pending_qf(cmd);
		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
			 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
			transport_complete_qf(cmd);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	if (dev->export_count)
		*bl += sprintf(b + *bl, "ACTIVATED");
	else
		*bl += sprintf(b + *bl, "DEACTIVATED");

	*bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
	*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
			dev->dev_attrib.block_size,
			dev->dev_attrib.hw_max_sectors);
	*bl += sprintf(b + *bl, " ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		snprintf(buf, sizeof(buf),
				"T10 VPD Binary Device Identifier: %s\n",
				&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		snprintf(buf, sizeof(buf),
				"T10 VPD ASCII Device Identifier: %s\n",
				&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		snprintf(buf, sizeof(buf),
				"T10 VPD UTF-8 Device Identifier: %s\n",
				&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported: 0x%02x",
				vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
					hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
					hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);

static sense_reason_t
target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
		unsigned int size)
{
	u32 mtl;

	if (!cmd->se_tfo->max_data_sg_nents)
		return TCM_NO_SENSE;
	/*
	 * Check if fabric enforced maximum SGL entries per I/O descriptor
	 * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT +
	 * residual_count and reduce original cmd->data_length to maximum
	 * length based on single PAGE_SIZE entry scatter-lists.
	 */
	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
	if (cmd->data_length > mtl) {
		/*
		 * If an existing CDB overflow is present, calculate new residual
		 * based on CDB size minus fabric maximum transfer length.
		 *
		 * If an existing CDB underflow is present, calculate new residual
		 * based on original cmd->data_length minus fabric maximum transfer
		 * length.
		 *
		 * Otherwise, set the underflow residual based on cmd->data_length
		 * minus fabric maximum transfer length.
		 */
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			cmd->residual_count = (size - mtl);
		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			u32 orig_dl = size + cmd->residual_count;

			cmd->residual_count = (orig_dl - mtl);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - mtl);
		}
		cmd->data_length = mtl;
		/*
		 * Reset sbc_check_prot() calculated protection payload
		 * length based upon the new smaller MTL.
		 */
		if (cmd->prot_length) {
			u32 sectors = (mtl / dev->dev_attrib.block_size);

			cmd->prot_length = dev->prot_length * sectors;
		}
	}
	return TCM_NO_SENSE;
}

/**
 * target_cmd_size_check - Check whether there will be a residual.
 * @cmd: SCSI command.
 * @size: Data buffer size derived from CDB. The data buffer size provided by
 *   the SCSI transport driver is available in @cmd->data_length.
 *
 * Compare the data buffer size from the CDB with the data buffer limit from the transport
 * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
 *
 * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd().
 *
 * Return: TCM_NO_SENSE
 */
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length: %u does not match SCSI CDB Length: %u for SAM Opcode: 0x%02x\n",
				cmd->se_tfo->fabric_name,
				cmd->data_length, size, cmd->t_task_cdb[0]);
		/*
		 * For READ command for the overflow case keep the existing
		 * fabric provided ->data_length. Otherwise for the underflow
		 * case, reset ->data_length to the smaller SCSI expected data
		 * transfer length.
		 */
		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
			/*
			 * Do not truncate ->data_length for WRITE command to
			 * dump all payload
			 */
			if (cmd->data_direction == DMA_FROM_DEVICE) {
				cmd->data_length = size;
			}
		}

		if (cmd->data_direction == DMA_TO_DEVICE) {
			if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
				pr_err_ratelimited("Rejecting underflow/overflow for WRITE data CDB\n");
				return TCM_INVALID_FIELD_IN_COMMAND_IU;
			}
			/*
			 * Some fabric drivers like iscsi-target still expect to
			 * always reject overflow writes. Reject this case until
			 * full fabric driver level support for overflow writes
			 * is introduced tree-wide.
			 */
			if (size > cmd->data_length) {
				pr_err_ratelimited("Rejecting overflow for WRITE control CDB\n");
				return TCM_INVALID_CDB_FIELD;
			}
		}
	}

	return target_check_max_data_sg_nents(cmd, dev, size);
}
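
/*
 * Worked example (illustrative only): if a READ CDB asks for 8192 bytes but
 * the fabric advertised a 4096 byte transfer, target_cmd_size_check() above
 * sets SCF_OVERFLOW_BIT with residual_count = 4096 and leaves ->data_length
 * at 4096. Conversely, a 2048 byte READ CDB against a 4096 byte fabric length
 * sets SCF_UNDERFLOW_BIT with residual_count = 2048 and shrinks ->data_length
 * down to 2048.
 */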

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 *
 * Preserves the value of @cmd->tag.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	const struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer, u64 unpacked_lun)
{
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->t_transport_stop_comp);
	cmd->free_compl = NULL;
	cmd->abrt_compl = NULL;
	spin_lock_init(&cmd->t_state_lock);
	INIT_WORK(&cmd->work, NULL);
	kref_init(&cmd->cmd_kref);

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;
	cmd->orig_fe_lun = unpacked_lun;

	if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
		cmd->cpuid = smp_processor_id();

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	if (cmd->sam_task_attr == TCM_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA emulation is not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	return 0;
}

sense_reason_t
target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
	sense_reason_t ret;

	cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
				scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		ret = TCM_INVALID_CDB_FIELD;
		goto err;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now.. Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
					scsi_command_size(cdb),
					(unsigned long)sizeof(cmd->__t_task_cdb));
			ret = TCM_OUT_OF_RESOURCES;
			goto err;
		}
	}
	/*
	 * Copy the original CDB into cmd->
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	trace_target_sequencer_start(cmd);
	return 0;

err:
	/*
	 * Copy the CDB here to allow trace_target_cmd_complete() to
	 * print the cdb to the trace buffers.
	 */
	memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb),
				(unsigned int)TCM_MAX_COMMAND_SIZE));
	return ret;
}
EXPORT_SYMBOL(target_cmd_init_cdb);

sense_reason_t
target_cmd_parse_cdb(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;

	ret = dev->transport->parse_cdb(cmd);
	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
				cmd->se_tfo->fabric_name,
				cmd->se_sess->se_node_acl->initiatorname,
				cmd->t_task_cdb[0]);
	if (ret)
		return ret;

	ret = transport_check_alloc_task_attr(cmd);
	if (ret)
		return ret;

	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
	return 0;
}
EXPORT_SYMBOL(target_cmd_parse_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	sense_reason_t ret;

	might_sleep();

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}

	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
	 * outstanding descriptors are handled correctly during shutdown via
	 * transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret)
		transport_generic_request_failure(cmd, ret);
	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		return TCM_INVALID_CDB_FIELD;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;
	cmd->t_bidi_data_sg = sgl_bidi;
	cmd->t_bidi_data_nents = sgl_bidi_count;

	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}

/**
 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
 *			se_cmd + use pre-allocated SGL memory.
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 * @sgl: struct scatterlist memory for unidirectional mapping
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
 * @sgl_prot: struct scatterlist memory protection information
 * @sgl_prot_count: scatterlist count for protection information
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure. All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 */
int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags,
		struct scatterlist *sgl, u32 sgl_count,
		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
		struct scatterlist *sgl_prot, u32 sgl_prot_count)
{
	struct se_portal_group *se_tpg;
	sense_reason_t rc;
	int ret;

	might_sleep();

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);

	if (flags & TARGET_SCF_USE_CPUID)
		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
	/*
	 * Initialize se_cmd for target operation. From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			data_length, data_dir, task_attr, sense,
			unpacked_lun);

	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
	 * necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret)
		return ret;
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;

	rc = target_cmd_init_cdb(se_cmd, cdb);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
		target_put_sess_cmd(se_cmd);
		return 0;
	}

	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	rc = transport_lookup_cmd_lun(se_cmd);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
		target_put_sess_cmd(se_cmd);
		return 0;
	}

	rc = target_cmd_parse_cdb(se_cmd);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd, rc);
		return 0;
	}

	/*
	 * Save pointers for SGLs containing protection information,
	 * if present.
	 */
	if (sgl_prot_count) {
		se_cmd->t_prot_sg = sgl_prot;
		se_cmd->t_prot_nents = sgl_prot_count;
		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
	}

	/*
	 * When a non zero sgl_count has been passed perform SGL passthrough
	 * mapping for pre-allocated fabric memory instead of having target
	 * core perform an internal SGL allocation..
	 */
	if (sgl_count != 0) {
		BUG_ON(!sgl);

		/*
		 * A work-around for tcm_loop as some userspace code via
		 * scsi-generic do not memset their associated read buffers,
		 * so go ahead and do that here for type non-data CDBs. Also
		 * note that this is currently guaranteed to be a single SGL
		 * for this case by target core in target_setup_cmd_from_cdb()
		 * -> transport_generic_cmd_sequencer().
		 */
		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
		    se_cmd->data_direction == DMA_FROM_DEVICE) {
			unsigned char *buf = NULL;

			if (sgl)
				buf = kmap(sg_page(sgl)) + sgl->offset;

			if (buf) {
				memset(buf, 0, sgl->length);
				kunmap(sg_page(sgl));
			}
		}

		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
				sgl_bidi, sgl_bidi_count);
		if (rc != 0) {
			transport_generic_request_failure(se_cmd, rc);
			return 0;
		}
	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);
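
/*
 * Illustrative sketch (hypothetical fabric code, not to be copied verbatim):
 * a fabric driver without its own pre-allocated SGLs typically uses the
 * simpler target_submit_cmd() wrapper below from its command-receive path:
 *
 *	target_submit_cmd(&ioreq->se_cmd, sess, cdb, ioreq->sense_buf,
 *			unpacked_lun, data_len, TCM_SIMPLE_TAG,
 *			DMA_TO_DEVICE, TARGET_SCF_ACK_KREF);
 *
 * 'ioreq' is a hypothetical per-command fabric structure embedding the
 * struct se_cmd; a non zero return only signals active I/O shutdown.
 */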

/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure. All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 *
 * It also assumes internal target core SGL memory allocation.
 */
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
			unpacked_lun, data_length, task_attr, data_dir,
			flags, NULL, 0, NULL, 0, NULL, 0);
}
EXPORT_SYMBOL(target_submit_cmd);

static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);

	transport_lun_remove_cmd(se_cmd);
	transport_cmd_check_stop_to_fabric(se_cmd);
}

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *                     for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 **/

int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, u64 tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

	/* See target_submit_cmd for commentary */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret) {
		core_tmr_release_req(se_cmd->se_tmr_req);
		return ret;
	}

	ret = transport_lookup_tmr_lun(se_cmd);
	if (ret)
		goto failure;

	transport_generic_handle_tmr(se_cmd);
	return 0;

	/*
	 * For callback during failure handling, push this work off
	 * to process context with TMR_LUN_DOES_NOT_EXIST status.
	 */
failure:
	INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
	schedule_work(&se_cmd->work);
	return 0;
}
EXPORT_SYMBOL(target_submit_tmr);
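
/*
 * Illustrative sketch (hypothetical fabric code): aborting a single task via
 * the interface above, where 'tmr_req' is a fabric-private per-TMR structure:
 *
 *	ret = target_submit_tmr(&tmr_req->se_cmd, sess, tmr_req->sense_buf,
 *			unpacked_lun, tmr_req, TMR_ABORT_TASK,
 *			GFP_KERNEL, ref_task_tag, TARGET_SCF_ACK_KREF);
 */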

/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
void transport_generic_request_failure(struct se_cmd *cmd,
		sense_reason_t sense_reason)
{
	int ret = 0, post_ret;

	pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
			sense_reason);
	target_show_cmd("-----[ ", cmd);

	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	transport_complete_task_attr(cmd);

	if (cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd, false, &post_ret);

	if (cmd->transport_state & CMD_T_ABORTED) {
		INIT_WORK(&cmd->work, target_abort_work);
		queue_work(target_completion_wq, &cmd->work);
		return;
	}

	switch (sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_PARAMETER_LIST_LENGTH_ERROR:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_ADDRESS_OUT_OF_RANGE:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
	case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
	case TCM_TOO_MANY_TARGET_DESCS:
	case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
	case TCM_TOO_MANY_SEGMENT_DESCS:
	case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
	case TCM_INVALID_FIELD_IN_COMMAND_IU:
		break;
	case TCM_OUT_OF_RESOURCES:
		cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
		goto queue_status;
	case TCM_LUN_BUSY:
		cmd->scsi_status = SAM_STAT_BUSY;
		goto queue_status;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1900 * 1901 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1902 */ 1903 if (cmd->se_sess && 1904 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl 1905 == TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) { 1906 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 1907 cmd->orig_fe_lun, 0x2C, 1908 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1909 } 1910 1911 goto queue_status; 1912 default: 1913 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 1914 cmd->t_task_cdb[0], sense_reason); 1915 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1916 break; 1917 } 1918 1919 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); 1920 if (ret) 1921 goto queue_full; 1922 1923 check_stop: 1924 transport_lun_remove_cmd(cmd); 1925 transport_cmd_check_stop_to_fabric(cmd); 1926 return; 1927 1928 queue_status: 1929 trace_target_cmd_complete(cmd); 1930 ret = cmd->se_tfo->queue_status(cmd); 1931 if (!ret) 1932 goto check_stop; 1933 queue_full: 1934 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 1935 } 1936 EXPORT_SYMBOL(transport_generic_request_failure); 1937 1938 void __target_execute_cmd(struct se_cmd *cmd, bool do_checks) 1939 { 1940 sense_reason_t ret; 1941 1942 if (!cmd->execute_cmd) { 1943 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1944 goto err; 1945 } 1946 if (do_checks) { 1947 /* 1948 * Check for an existing UNIT ATTENTION condition after 1949 * target_handle_task_attr() has done SAM task attr 1950 * checking, and has possibly already deferred execution 1951 * out to the target_restart_delayed_cmds() context. 1952 */ 1953 ret = target_scsi3_ua_check(cmd); 1954 if (ret) 1955 goto err; 1956 1957 ret = target_alua_state_check(cmd); 1958 if (ret) 1959 goto err; 1960 1961 ret = target_check_reservation(cmd); 1962 if (ret) { 1963 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1964 goto err; 1965 } 1966 } 1967 1968 ret = cmd->execute_cmd(cmd); 1969 if (!ret) 1970 return; 1971 err: 1972 spin_lock_irq(&cmd->t_state_lock); 1973 cmd->transport_state &= ~CMD_T_SENT; 1974 spin_unlock_irq(&cmd->t_state_lock); 1975 1976 transport_generic_request_failure(cmd, ret); 1977 } 1978 1979 static int target_write_prot_action(struct se_cmd *cmd) 1980 { 1981 u32 sectors; 1982 /* 1983 * Perform WRITE_INSERT of PI using software emulation when the backend 1984 * device has PI enabled, if the transport has not already generated 1985 * PI using hardware WRITE_INSERT offload.
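 * As a worked example (illustrative only): with a 4096-byte block_size,
 * ilog2(block_size) is 12, so a 1 MiB WRITE yields sectors = data_length >> 12
 * = 256 for the sbc_dif_generate() / sbc_dif_verify() calls below.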
1986 */ 1987 switch (cmd->prot_op) { 1988 case TARGET_PROT_DOUT_INSERT: 1989 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) 1990 sbc_dif_generate(cmd); 1991 break; 1992 case TARGET_PROT_DOUT_STRIP: 1993 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP) 1994 break; 1995 1996 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); 1997 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 1998 sectors, 0, cmd->t_prot_sg, 0); 1999 if (unlikely(cmd->pi_err)) { 2000 spin_lock_irq(&cmd->t_state_lock); 2001 cmd->transport_state &= ~CMD_T_SENT; 2002 spin_unlock_irq(&cmd->t_state_lock); 2003 transport_generic_request_failure(cmd, cmd->pi_err); 2004 return -1; 2005 } 2006 break; 2007 default: 2008 break; 2009 } 2010 2011 return 0; 2012 } 2013 2014 static bool target_handle_task_attr(struct se_cmd *cmd) 2015 { 2016 struct se_device *dev = cmd->se_dev; 2017 2018 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 2019 return false; 2020 2021 cmd->se_cmd_flags |= SCF_TASK_ATTR_SET; 2022 2023 /* 2024 * Check for a HEAD_OF_QUEUE task attribute, and if present return false 2025 * so the passed struct se_cmd is executed immediately instead of being queued. 2026 */ 2027 switch (cmd->sam_task_attr) { 2028 case TCM_HEAD_TAG: 2029 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n", 2030 cmd->t_task_cdb[0]); 2031 return false; 2032 case TCM_ORDERED_TAG: 2033 atomic_inc_mb(&dev->dev_ordered_sync); 2034 2035 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n", 2036 cmd->t_task_cdb[0]); 2037 2038 /* 2039 * Execute an ORDERED command if no other older commands 2040 * exist that need to be completed first. 2041 */ 2042 if (!atomic_read(&dev->simple_cmds)) 2043 return false; 2044 break; 2045 default: 2046 /* 2047 * For SIMPLE and UNTAGGED Task Attribute commands 2048 */ 2049 atomic_inc_mb(&dev->simple_cmds); 2050 break; 2051 } 2052 2053 if (atomic_read(&dev->dev_ordered_sync) == 0) 2054 return false; 2055 2056 spin_lock(&dev->delayed_cmd_lock); 2057 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); 2058 spin_unlock(&dev->delayed_cmd_lock); 2059 2060 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n", 2061 cmd->t_task_cdb[0], cmd->sam_task_attr); 2062 return true; 2063 } 2064 2065 void target_execute_cmd(struct se_cmd *cmd) 2066 { 2067 /* 2068 * Determine if frontend context caller is requesting the stopping of 2069 * this command for frontend exceptions. 2070 * 2071 * If the received CDB has already been aborted, stop processing it here.
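 *
 * As an illustration of the task attribute handling above: while at least one
 * SIMPLE command is outstanding (dev->simple_cmds != 0), a newly received
 * ORDERED command is parked on dev->delayed_cmd_list by
 * target_handle_task_attr() and re-issued from target_restart_delayed_cmds()
 * when a prior command completes.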
2072 */ 2073 if (target_cmd_interrupted(cmd)) 2074 return; 2075 2076 spin_lock_irq(&cmd->t_state_lock); 2077 cmd->t_state = TRANSPORT_PROCESSING; 2078 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; 2079 spin_unlock_irq(&cmd->t_state_lock); 2080 2081 if (target_write_prot_action(cmd)) 2082 return; 2083 2084 if (target_handle_task_attr(cmd)) { 2085 spin_lock_irq(&cmd->t_state_lock); 2086 cmd->transport_state &= ~CMD_T_SENT; 2087 spin_unlock_irq(&cmd->t_state_lock); 2088 return; 2089 } 2090 2091 __target_execute_cmd(cmd, true); 2092 } 2093 EXPORT_SYMBOL(target_execute_cmd); 2094 2095 /* 2096 * Process all commands up to the last received ORDERED task attribute, which 2097 * requires another blocking boundary. 2098 */ 2099 static void target_restart_delayed_cmds(struct se_device *dev) 2100 { 2101 for (;;) { 2102 struct se_cmd *cmd; 2103 2104 spin_lock(&dev->delayed_cmd_lock); 2105 if (list_empty(&dev->delayed_cmd_list)) { 2106 spin_unlock(&dev->delayed_cmd_lock); 2107 break; 2108 } 2109 2110 cmd = list_entry(dev->delayed_cmd_list.next, 2111 struct se_cmd, se_delayed_node); 2112 list_del(&cmd->se_delayed_node); 2113 spin_unlock(&dev->delayed_cmd_lock); 2114 2115 cmd->transport_state |= CMD_T_SENT; 2116 2117 __target_execute_cmd(cmd, true); 2118 2119 if (cmd->sam_task_attr == TCM_ORDERED_TAG) 2120 break; 2121 } 2122 } 2123 2124 /* 2125 * Called from I/O completion to determine which dormant/delayed 2126 * and ordered cmds need to have their tasks added to the execution queue. 2127 */ 2128 static void transport_complete_task_attr(struct se_cmd *cmd) 2129 { 2130 struct se_device *dev = cmd->se_dev; 2131 2132 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 2133 return; 2134 2135 if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET)) 2136 goto restart; 2137 2138 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { 2139 atomic_dec_mb(&dev->simple_cmds); 2140 dev->dev_cur_ordered_id++; 2141 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { 2142 dev->dev_cur_ordered_id++; 2143 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", 2144 dev->dev_cur_ordered_id); 2145 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { 2146 atomic_dec_mb(&dev->dev_ordered_sync); 2147 2148 dev->dev_cur_ordered_id++; 2149 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", 2150 dev->dev_cur_ordered_id); 2151 } 2152 cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; 2153 2154 restart: 2155 target_restart_delayed_cmds(dev); 2156 } 2157 2158 static void transport_complete_qf(struct se_cmd *cmd) 2159 { 2160 int ret = 0; 2161 2162 transport_complete_task_attr(cmd); 2163 /* 2164 * If a fabric driver ->write_pending() or ->queue_data_in() callback 2165 * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and 2166 * the same callbacks should not be retried. Return CHECK_CONDITION 2167 * if a scsi_status is not already set. 2168 * 2169 * If a fabric driver ->queue_status() has returned non-zero, always 2170 * keep retrying no matter what. 2171 */ 2172 if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) { 2173 if (cmd->scsi_status) 2174 goto queue_status; 2175 2176 translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 2177 goto queue_status; 2178 } 2179 2180 /* 2181 * Check if we need to send a sense buffer from 2182 * the struct se_cmd in question. We do NOT want 2183 * to take this path if the IO has been marked as 2184 * needing to be treated like a "normal read". This 2185 * is the case if it's a tape read, and either the 2186 * FM, EOM, or ILI bits are set, but there is no 2187 * sense data.
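 * That situation is flagged with SCF_TREAT_READ_AS_NORMAL and is re-checked
 * in the DMA_FROM_DEVICE case below.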
2188 */ 2189 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && 2190 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 2191 goto queue_status; 2192 2193 switch (cmd->data_direction) { 2194 case DMA_FROM_DEVICE: 2195 /* queue status if not treating this as a normal read */ 2196 if (cmd->scsi_status && 2197 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) 2198 goto queue_status; 2199 2200 trace_target_cmd_complete(cmd); 2201 ret = cmd->se_tfo->queue_data_in(cmd); 2202 break; 2203 case DMA_TO_DEVICE: 2204 if (cmd->se_cmd_flags & SCF_BIDI) { 2205 ret = cmd->se_tfo->queue_data_in(cmd); 2206 break; 2207 } 2208 fallthrough; 2209 case DMA_NONE: 2210 queue_status: 2211 trace_target_cmd_complete(cmd); 2212 ret = cmd->se_tfo->queue_status(cmd); 2213 break; 2214 default: 2215 break; 2216 } 2217 2218 if (ret < 0) { 2219 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 2220 return; 2221 } 2222 transport_lun_remove_cmd(cmd); 2223 transport_cmd_check_stop_to_fabric(cmd); 2224 } 2225 2226 static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev, 2227 int err, bool write_pending) 2228 { 2229 /* 2230 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or 2231 * ->queue_data_in() callbacks from new process context. 2232 * 2233 * Otherwise for other errors, transport_complete_qf() will send 2234 * CHECK_CONDITION via ->queue_status() instead of attempting to 2235 * retry associated fabric driver data-transfer callbacks. 2236 */ 2237 if (err == -EAGAIN || err == -ENOMEM) { 2238 cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP : 2239 TRANSPORT_COMPLETE_QF_OK; 2240 } else { 2241 pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err); 2242 cmd->t_state = TRANSPORT_COMPLETE_QF_ERR; 2243 } 2244 2245 spin_lock_irq(&dev->qf_cmd_lock); 2246 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 2247 atomic_inc_mb(&dev->dev_qf_count); 2248 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 2249 2250 schedule_work(&cmd->se_dev->qf_work_queue); 2251 } 2252 2253 static bool target_read_prot_action(struct se_cmd *cmd) 2254 { 2255 switch (cmd->prot_op) { 2256 case TARGET_PROT_DIN_STRIP: 2257 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { 2258 u32 sectors = cmd->data_length >> 2259 ilog2(cmd->se_dev->dev_attrib.block_size); 2260 2261 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 2262 sectors, 0, cmd->t_prot_sg, 2263 0); 2264 if (cmd->pi_err) 2265 return true; 2266 } 2267 break; 2268 case TARGET_PROT_DIN_INSERT: 2269 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) 2270 break; 2271 2272 sbc_dif_generate(cmd); 2273 break; 2274 default: 2275 break; 2276 } 2277 2278 return false; 2279 } 2280 2281 static void target_complete_ok_work(struct work_struct *work) 2282 { 2283 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2284 int ret; 2285 2286 /* 2287 * Check if we need to move delayed/dormant tasks from cmds on the 2288 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 2289 * Attribute. 2290 */ 2291 transport_complete_task_attr(cmd); 2292 2293 /* 2294 * Check to schedule QUEUE_FULL work, or execute an existing 2295 * cmd->transport_qf_callback() 2296 */ 2297 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 2298 schedule_work(&cmd->se_dev->qf_work_queue); 2299 2300 /* 2301 * Check if we need to send a sense buffer from 2302 * the struct se_cmd in question. We do NOT want 2303 * to take this path of the IO has been marked as 2304 * needing to be treated like a "normal read". 
This 2305 * is the case if it's a tape read, and either the 2306 * FM, EOM, or ILI bits are set, but there is no 2307 * sense data. 2308 */ 2309 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && 2310 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 2311 WARN_ON(!cmd->scsi_status); 2312 ret = transport_send_check_condition_and_sense( 2313 cmd, 0, 1); 2314 if (ret) 2315 goto queue_full; 2316 2317 transport_lun_remove_cmd(cmd); 2318 transport_cmd_check_stop_to_fabric(cmd); 2319 return; 2320 } 2321 /* 2322 * Check for a callback, used by amongst other things 2323 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. 2324 */ 2325 if (cmd->transport_complete_callback) { 2326 sense_reason_t rc; 2327 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); 2328 bool zero_dl = !(cmd->data_length); 2329 int post_ret = 0; 2330 2331 rc = cmd->transport_complete_callback(cmd, true, &post_ret); 2332 if (!rc && !post_ret) { 2333 if (caw && zero_dl) 2334 goto queue_rsp; 2335 2336 return; 2337 } else if (rc) { 2338 ret = transport_send_check_condition_and_sense(cmd, 2339 rc, 0); 2340 if (ret) 2341 goto queue_full; 2342 2343 transport_lun_remove_cmd(cmd); 2344 transport_cmd_check_stop_to_fabric(cmd); 2345 return; 2346 } 2347 } 2348 2349 queue_rsp: 2350 switch (cmd->data_direction) { 2351 case DMA_FROM_DEVICE: 2352 /* 2353 * if this is a READ-type IO, but SCSI status 2354 * is set, then skip returning data and just 2355 * return the status -- unless this IO is marked 2356 * as needing to be treated as a normal read, 2357 * in which case we want to go ahead and return 2358 * the data. This happens, for example, for tape 2359 * reads with the FM, EOM, or ILI bits set, with 2360 * no sense data. 2361 */ 2362 if (cmd->scsi_status && 2363 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) 2364 goto queue_status; 2365 2366 atomic_long_add(cmd->data_length, 2367 &cmd->se_lun->lun_stats.tx_data_octets); 2368 /* 2369 * Perform READ_STRIP of PI using software emulation when 2370 * backend had PI enabled, if the transport will not be 2371 * performing hardware READ_STRIP offload. 
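 * (target_read_prot_action() mirrors target_write_prot_action(): if the
 * fabric does not support TARGET_PROT_DIN_STRIP, the protection tags are
 * verified with sbc_dif_verify() before the payload is queued to the
 * initiator.)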
2372 */ 2373 if (target_read_prot_action(cmd)) { 2374 ret = transport_send_check_condition_and_sense(cmd, 2375 cmd->pi_err, 0); 2376 if (ret) 2377 goto queue_full; 2378 2379 transport_lun_remove_cmd(cmd); 2380 transport_cmd_check_stop_to_fabric(cmd); 2381 return; 2382 } 2383 2384 trace_target_cmd_complete(cmd); 2385 ret = cmd->se_tfo->queue_data_in(cmd); 2386 if (ret) 2387 goto queue_full; 2388 break; 2389 case DMA_TO_DEVICE: 2390 atomic_long_add(cmd->data_length, 2391 &cmd->se_lun->lun_stats.rx_data_octets); 2392 /* 2393 * Check if we need to send READ payload for BIDI-COMMAND 2394 */ 2395 if (cmd->se_cmd_flags & SCF_BIDI) { 2396 atomic_long_add(cmd->data_length, 2397 &cmd->se_lun->lun_stats.tx_data_octets); 2398 ret = cmd->se_tfo->queue_data_in(cmd); 2399 if (ret) 2400 goto queue_full; 2401 break; 2402 } 2403 fallthrough; 2404 case DMA_NONE: 2405 queue_status: 2406 trace_target_cmd_complete(cmd); 2407 ret = cmd->se_tfo->queue_status(cmd); 2408 if (ret) 2409 goto queue_full; 2410 break; 2411 default: 2412 break; 2413 } 2414 2415 transport_lun_remove_cmd(cmd); 2416 transport_cmd_check_stop_to_fabric(cmd); 2417 return; 2418 2419 queue_full: 2420 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 2421 " data_direction: %d\n", cmd, cmd->data_direction); 2422 2423 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 2424 } 2425 2426 void target_free_sgl(struct scatterlist *sgl, int nents) 2427 { 2428 sgl_free_n_order(sgl, nents, 0); 2429 } 2430 EXPORT_SYMBOL(target_free_sgl); 2431 2432 static inline void transport_reset_sgl_orig(struct se_cmd *cmd) 2433 { 2434 /* 2435 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE 2436 * emulation, and free + reset pointers if necessary.. 2437 */ 2438 if (!cmd->t_data_sg_orig) 2439 return; 2440 2441 kfree(cmd->t_data_sg); 2442 cmd->t_data_sg = cmd->t_data_sg_orig; 2443 cmd->t_data_sg_orig = NULL; 2444 cmd->t_data_nents = cmd->t_data_nents_orig; 2445 cmd->t_data_nents_orig = 0; 2446 } 2447 2448 static inline void transport_free_pages(struct se_cmd *cmd) 2449 { 2450 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2451 target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); 2452 cmd->t_prot_sg = NULL; 2453 cmd->t_prot_nents = 0; 2454 } 2455 2456 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { 2457 /* 2458 * Release special case READ buffer payload required for 2459 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE 2460 */ 2461 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { 2462 target_free_sgl(cmd->t_bidi_data_sg, 2463 cmd->t_bidi_data_nents); 2464 cmd->t_bidi_data_sg = NULL; 2465 cmd->t_bidi_data_nents = 0; 2466 } 2467 transport_reset_sgl_orig(cmd); 2468 return; 2469 } 2470 transport_reset_sgl_orig(cmd); 2471 2472 target_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2473 cmd->t_data_sg = NULL; 2474 cmd->t_data_nents = 0; 2475 2476 target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 2477 cmd->t_bidi_data_sg = NULL; 2478 cmd->t_bidi_data_nents = 0; 2479 } 2480 2481 void *transport_kmap_data_sg(struct se_cmd *cmd) 2482 { 2483 struct scatterlist *sg = cmd->t_data_sg; 2484 struct page **pages; 2485 int i; 2486 2487 /* 2488 * We need to take into account a possible offset here for fabrics like 2489 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 2490 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 2491 */ 2492 if (!cmd->t_data_nents) 2493 return NULL; 2494 2495 BUG_ON(!sg); 2496 if (cmd->t_data_nents == 1) 2497 return kmap(sg_page(sg)) + sg->offset; 
2498 2499 /* >1 page. use vmap */ 2500 pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL); 2501 if (!pages) 2502 return NULL; 2503 2504 /* convert sg[] to pages[] */ 2505 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 2506 pages[i] = sg_page(sg); 2507 } 2508 2509 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 2510 kfree(pages); 2511 if (!cmd->t_data_vmap) 2512 return NULL; 2513 2514 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 2515 } 2516 EXPORT_SYMBOL(transport_kmap_data_sg); 2517 2518 void transport_kunmap_data_sg(struct se_cmd *cmd) 2519 { 2520 if (!cmd->t_data_nents) { 2521 return; 2522 } else if (cmd->t_data_nents == 1) { 2523 kunmap(sg_page(cmd->t_data_sg)); 2524 return; 2525 } 2526 2527 vunmap(cmd->t_data_vmap); 2528 cmd->t_data_vmap = NULL; 2529 } 2530 EXPORT_SYMBOL(transport_kunmap_data_sg); 2531 2532 int 2533 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, 2534 bool zero_page, bool chainable) 2535 { 2536 gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0); 2537 2538 *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents); 2539 return *sgl ? 0 : -ENOMEM; 2540 } 2541 EXPORT_SYMBOL(target_alloc_sgl); 2542 2543 /* 2544 * Allocate any required resources to execute the command. For writes we 2545 * might not have the payload yet, so notify the fabric via a call to 2546 * ->write_pending instead. Otherwise place it on the execution queue. 2547 */ 2548 sense_reason_t 2549 transport_generic_new_cmd(struct se_cmd *cmd) 2550 { 2551 unsigned long flags; 2552 int ret = 0; 2553 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); 2554 2555 if (cmd->prot_op != TARGET_PROT_NORMAL && 2556 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2557 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, 2558 cmd->prot_length, true, false); 2559 if (ret < 0) 2560 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2561 } 2562 2563 /* 2564 * Determine if the TCM fabric module has already allocated physical 2565 * memory, and is directly calling transport_generic_map_mem_to_cmd() 2566 * beforehand. 2567 */ 2568 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 2569 cmd->data_length) { 2570 2571 if ((cmd->se_cmd_flags & SCF_BIDI) || 2572 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { 2573 u32 bidi_length; 2574 2575 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) 2576 bidi_length = cmd->t_task_nolb * 2577 cmd->se_dev->dev_attrib.block_size; 2578 else 2579 bidi_length = cmd->data_length; 2580 2581 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2582 &cmd->t_bidi_data_nents, 2583 bidi_length, zero_flag, false); 2584 if (ret < 0) 2585 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2586 } 2587 2588 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2589 cmd->data_length, zero_flag, false); 2590 if (ret < 0) 2591 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2592 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 2593 cmd->data_length) { 2594 /* 2595 * Special case for COMPARE_AND_WRITE with fabrics 2596 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. 
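 * For example (illustrative only), a COMPARE AND WRITE of 8 logical blocks on
 * a device with a 512-byte block_size allocates an 8 * 512 = 4096 byte
 * t_bidi_data_sg below for the read half of the operation.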
2597 */ 2598 u32 caw_length = cmd->t_task_nolb * 2599 cmd->se_dev->dev_attrib.block_size; 2600 2601 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2602 &cmd->t_bidi_data_nents, 2603 caw_length, zero_flag, false); 2604 if (ret < 0) 2605 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2606 } 2607 /* 2608 * If this command is not a write we can execute it right here, 2609 * for write buffers we need to notify the fabric driver first 2610 * and let it call back once the write buffers are ready. 2611 */ 2612 target_add_to_state_list(cmd); 2613 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { 2614 target_execute_cmd(cmd); 2615 return 0; 2616 } 2617 2618 spin_lock_irqsave(&cmd->t_state_lock, flags); 2619 cmd->t_state = TRANSPORT_WRITE_PENDING; 2620 /* 2621 * Determine if frontend context caller is requesting the stopping of 2622 * this command for frontend exceptions. 2623 */ 2624 if (cmd->transport_state & CMD_T_STOP && 2625 !cmd->se_tfo->write_pending_must_be_called) { 2626 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 2627 __func__, __LINE__, cmd->tag); 2628 2629 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2630 2631 complete_all(&cmd->t_transport_stop_comp); 2632 return 0; 2633 } 2634 cmd->transport_state &= ~CMD_T_ACTIVE; 2635 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2636 2637 ret = cmd->se_tfo->write_pending(cmd); 2638 if (ret) 2639 goto queue_full; 2640 2641 return 0; 2642 2643 queue_full: 2644 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 2645 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); 2646 return 0; 2647 } 2648 EXPORT_SYMBOL(transport_generic_new_cmd); 2649 2650 static void transport_write_pending_qf(struct se_cmd *cmd) 2651 { 2652 unsigned long flags; 2653 int ret; 2654 bool stop; 2655 2656 spin_lock_irqsave(&cmd->t_state_lock, flags); 2657 stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED)); 2658 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2659 2660 if (stop) { 2661 pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n", 2662 __func__, __LINE__, cmd->tag); 2663 complete_all(&cmd->t_transport_stop_comp); 2664 return; 2665 } 2666 2667 ret = cmd->se_tfo->write_pending(cmd); 2668 if (ret) { 2669 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", 2670 cmd); 2671 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); 2672 } 2673 } 2674 2675 static bool 2676 __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, 2677 unsigned long *flags); 2678 2679 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) 2680 { 2681 unsigned long flags; 2682 2683 spin_lock_irqsave(&cmd->t_state_lock, flags); 2684 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); 2685 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2686 } 2687 2688 /* 2689 * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has 2690 * finished. 2691 */ 2692 void target_put_cmd_and_wait(struct se_cmd *cmd) 2693 { 2694 DECLARE_COMPLETION_ONSTACK(compl); 2695 2696 WARN_ON_ONCE(cmd->abrt_compl); 2697 cmd->abrt_compl = &compl; 2698 target_put_sess_cmd(cmd); 2699 wait_for_completion(&compl); 2700 } 2701 2702 /* 2703 * This function is called by frontend drivers after processing of a command 2704 * has finished. 
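 * A hypothetical fabric's normal response path might therefore end with
 * nothing more than:
 *
 *	transport_generic_free_cmd(se_cmd, 0);
 *
 * passing a non-zero @wait_for_tasks only on exception or teardown paths that
 * must first wait for in-flight processing to stop.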
2705 * 2706 * The protocol for ensuring that either the regular frontend command 2707 * processing flow or target_handle_abort() code drops one reference is as 2708 * follows: 2709 * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause 2710 * the frontend driver to call this function synchronously or asynchronously. 2711 * That will cause one reference to be dropped. 2712 * - During regular command processing the target core sets CMD_T_COMPLETE 2713 * before invoking one of the .queue_*() functions. 2714 * - The code that aborts commands skips commands and TMFs for which 2715 * CMD_T_COMPLETE has been set. 2716 * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for 2717 * commands that will be aborted. 2718 * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set 2719 * transport_generic_free_cmd() skips its call to target_put_sess_cmd(). 2720 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will 2721 * be called and will drop a reference. 2722 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task() 2723 * will be called. target_handle_abort() will drop the final reference. 2724 */ 2725 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2726 { 2727 DECLARE_COMPLETION_ONSTACK(compl); 2728 int ret = 0; 2729 bool aborted = false, tas = false; 2730 2731 if (wait_for_tasks) 2732 target_wait_free_cmd(cmd, &aborted, &tas); 2733 2734 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) { 2735 /* 2736 * Handle WRITE failure case where transport_generic_new_cmd() 2737 * has already added se_cmd to state_list, but fabric has 2738 * failed command before I/O submission. 2739 */ 2740 if (cmd->state_active) 2741 target_remove_from_state_list(cmd); 2742 2743 if (cmd->se_lun) 2744 transport_lun_remove_cmd(cmd); 2745 } 2746 if (aborted) 2747 cmd->free_compl = &compl; 2748 ret = target_put_sess_cmd(cmd); 2749 if (aborted) { 2750 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); 2751 wait_for_completion(&compl); 2752 ret = 1; 2753 } 2754 return ret; 2755 } 2756 EXPORT_SYMBOL(transport_generic_free_cmd); 2757 2758 /** 2759 * target_get_sess_cmd - Verify the session is accepting cmds and take ref 2760 * @se_cmd: command descriptor to add 2761 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() 2762 */ 2763 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) 2764 { 2765 struct se_session *se_sess = se_cmd->se_sess; 2766 int ret = 0; 2767 2768 /* 2769 * Add a second kref if the fabric caller is expecting to handle 2770 * fabric acknowledgement that requires two target_put_sess_cmd() 2771 * invocations before se_cmd descriptor release. 
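 * Hypothetical usage sketch (fabrics normally reach this through
 * target_submit_cmd*() or target_submit_tmr() with TARGET_SCF_ACK_KREF set):
 *
 *	target_get_sess_cmd(se_cmd, true);	- two puts are now required
 *	... fabric sends the response to the initiator ...
 *	target_put_sess_cmd(se_cmd);		- fabric acknowledgement
 *	transport_generic_free_cmd(se_cmd, 0);	- drops the final reference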
2772 */ 2773 if (ack_kref) { 2774 if (!kref_get_unless_zero(&se_cmd->cmd_kref)) 2775 return -EINVAL; 2776 2777 se_cmd->se_cmd_flags |= SCF_ACK_KREF; 2778 } 2779 2780 if (!percpu_ref_tryget_live(&se_sess->cmd_count)) 2781 ret = -ESHUTDOWN; 2782 2783 if (ret && ack_kref) 2784 target_put_sess_cmd(se_cmd); 2785 2786 return ret; 2787 } 2788 EXPORT_SYMBOL(target_get_sess_cmd); 2789 2790 static void target_free_cmd_mem(struct se_cmd *cmd) 2791 { 2792 transport_free_pages(cmd); 2793 2794 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 2795 core_tmr_release_req(cmd->se_tmr_req); 2796 if (cmd->t_task_cdb != cmd->__t_task_cdb) 2797 kfree(cmd->t_task_cdb); 2798 } 2799 2800 static void target_release_cmd_kref(struct kref *kref) 2801 { 2802 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2803 struct se_session *se_sess = se_cmd->se_sess; 2804 struct completion *free_compl = se_cmd->free_compl; 2805 struct completion *abrt_compl = se_cmd->abrt_compl; 2806 2807 target_free_cmd_mem(se_cmd); 2808 se_cmd->se_tfo->release_cmd(se_cmd); 2809 if (free_compl) 2810 complete(free_compl); 2811 if (abrt_compl) 2812 complete(abrt_compl); 2813 2814 percpu_ref_put(&se_sess->cmd_count); 2815 } 2816 2817 /** 2818 * target_put_sess_cmd - decrease the command reference count 2819 * @se_cmd: command to drop a reference from 2820 * 2821 * Returns 1 if and only if this target_put_sess_cmd() call caused the 2822 * refcount to drop to zero. Returns zero otherwise. 2823 */ 2824 int target_put_sess_cmd(struct se_cmd *se_cmd) 2825 { 2826 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); 2827 } 2828 EXPORT_SYMBOL(target_put_sess_cmd); 2829 2830 static const char *data_dir_name(enum dma_data_direction d) 2831 { 2832 switch (d) { 2833 case DMA_BIDIRECTIONAL: return "BIDI"; 2834 case DMA_TO_DEVICE: return "WRITE"; 2835 case DMA_FROM_DEVICE: return "READ"; 2836 case DMA_NONE: return "NONE"; 2837 } 2838 2839 return "(?)"; 2840 } 2841 2842 static const char *cmd_state_name(enum transport_state_table t) 2843 { 2844 switch (t) { 2845 case TRANSPORT_NO_STATE: return "NO_STATE"; 2846 case TRANSPORT_NEW_CMD: return "NEW_CMD"; 2847 case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING"; 2848 case TRANSPORT_PROCESSING: return "PROCESSING"; 2849 case TRANSPORT_COMPLETE: return "COMPLETE"; 2850 case TRANSPORT_ISTATE_PROCESSING: 2851 return "ISTATE_PROCESSING"; 2852 case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP"; 2853 case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK"; 2854 case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR"; 2855 } 2856 2857 return "(?)"; 2858 } 2859 2860 static void target_append_str(char **str, const char *txt) 2861 { 2862 char *prev = *str; 2863 2864 *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) : 2865 kstrdup(txt, GFP_ATOMIC); 2866 kfree(prev); 2867 } 2868 2869 /* 2870 * Convert a transport state bitmask into a string. The caller is 2871 * responsible for freeing the returned pointer. 
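 * For example, a transport_state of (CMD_T_ACTIVE | CMD_T_SENT) is rendered
 * as "active,sent".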
2872 */ 2873 static char *target_ts_to_str(u32 ts) 2874 { 2875 char *str = NULL; 2876 2877 if (ts & CMD_T_ABORTED) 2878 target_append_str(&str, "aborted"); 2879 if (ts & CMD_T_ACTIVE) 2880 target_append_str(&str, "active"); 2881 if (ts & CMD_T_COMPLETE) 2882 target_append_str(&str, "complete"); 2883 if (ts & CMD_T_SENT) 2884 target_append_str(&str, "sent"); 2885 if (ts & CMD_T_STOP) 2886 target_append_str(&str, "stop"); 2887 if (ts & CMD_T_FABRIC_STOP) 2888 target_append_str(&str, "fabric_stop"); 2889 2890 return str; 2891 } 2892 2893 static const char *target_tmf_name(enum tcm_tmreq_table tmf) 2894 { 2895 switch (tmf) { 2896 case TMR_ABORT_TASK: return "ABORT_TASK"; 2897 case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET"; 2898 case TMR_CLEAR_ACA: return "CLEAR_ACA"; 2899 case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET"; 2900 case TMR_LUN_RESET: return "LUN_RESET"; 2901 case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET"; 2902 case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET"; 2903 case TMR_LUN_RESET_PRO: return "LUN_RESET_PRO"; 2904 case TMR_UNKNOWN: break; 2905 } 2906 return "(?)"; 2907 } 2908 2909 void target_show_cmd(const char *pfx, struct se_cmd *cmd) 2910 { 2911 char *ts_str = target_ts_to_str(cmd->transport_state); 2912 const u8 *cdb = cmd->t_task_cdb; 2913 struct se_tmr_req *tmf = cmd->se_tmr_req; 2914 2915 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2916 pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n", 2917 pfx, cdb[0], cdb[1], cmd->tag, 2918 data_dir_name(cmd->data_direction), 2919 cmd->se_tfo->get_cmd_state(cmd), 2920 cmd_state_name(cmd->t_state), cmd->data_length, 2921 kref_read(&cmd->cmd_kref), ts_str); 2922 } else { 2923 pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n", 2924 pfx, target_tmf_name(tmf->function), cmd->tag, 2925 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd), 2926 cmd_state_name(cmd->t_state), 2927 kref_read(&cmd->cmd_kref), ts_str); 2928 } 2929 kfree(ts_str); 2930 } 2931 EXPORT_SYMBOL(target_show_cmd); 2932 2933 static void target_stop_session_confirm(struct percpu_ref *ref) 2934 { 2935 struct se_session *se_sess = container_of(ref, struct se_session, 2936 cmd_count); 2937 complete_all(&se_sess->stop_done); 2938 } 2939 2940 /** 2941 * target_stop_session - Stop new IO from being queued on the session. 2942 * @se_sess: session to stop 2943 */ 2944 void target_stop_session(struct se_session *se_sess) 2945 { 2946 pr_debug("Stopping session queue.\n"); 2947 if (atomic_cmpxchg(&se_sess->stopped, 0, 1) == 0) 2948 percpu_ref_kill_and_confirm(&se_sess->cmd_count, 2949 target_stop_session_confirm); 2950 } 2951 EXPORT_SYMBOL(target_stop_session); 2952 2953 /** 2954 * target_wait_for_sess_cmds - Wait for outstanding commands 2955 * @se_sess: session to wait for active I/O 2956 */ 2957 void target_wait_for_sess_cmds(struct se_session *se_sess) 2958 { 2959 int ret; 2960 2961 WARN_ON_ONCE(!atomic_read(&se_sess->stopped)); 2962 2963 do { 2964 pr_debug("Waiting for running cmds to complete.\n"); 2965 ret = wait_event_timeout(se_sess->cmd_count_wq, 2966 percpu_ref_is_zero(&se_sess->cmd_count), 2967 180 * HZ); 2968 } while (ret <= 0); 2969 2970 wait_for_completion(&se_sess->stop_done); 2971 pr_debug("Waiting for cmds done.\n"); 2972 } 2973 EXPORT_SYMBOL(target_wait_for_sess_cmds); 2974 2975 /* 2976 * Prevent that new percpu_ref_tryget_live() calls succeed and wait until 2977 * all references to the LUN have been released. 
Called during LUN shutdown. 2978 */ 2979 void transport_clear_lun_ref(struct se_lun *lun) 2980 { 2981 percpu_ref_kill(&lun->lun_ref); 2982 wait_for_completion(&lun->lun_shutdown_comp); 2983 } 2984 2985 static bool 2986 __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, 2987 bool *aborted, bool *tas, unsigned long *flags) 2988 __releases(&cmd->t_state_lock) 2989 __acquires(&cmd->t_state_lock) 2990 { 2991 2992 assert_spin_locked(&cmd->t_state_lock); 2993 WARN_ON_ONCE(!irqs_disabled()); 2994 2995 if (fabric_stop) 2996 cmd->transport_state |= CMD_T_FABRIC_STOP; 2997 2998 if (cmd->transport_state & CMD_T_ABORTED) 2999 *aborted = true; 3000 3001 if (cmd->transport_state & CMD_T_TAS) 3002 *tas = true; 3003 3004 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 3005 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 3006 return false; 3007 3008 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 3009 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 3010 return false; 3011 3012 if (!(cmd->transport_state & CMD_T_ACTIVE)) 3013 return false; 3014 3015 if (fabric_stop && *aborted) 3016 return false; 3017 3018 cmd->transport_state |= CMD_T_STOP; 3019 3020 target_show_cmd("wait_for_tasks: Stopping ", cmd); 3021 3022 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 3023 3024 while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp, 3025 180 * HZ)) 3026 target_show_cmd("wait for tasks: ", cmd); 3027 3028 spin_lock_irqsave(&cmd->t_state_lock, *flags); 3029 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 3030 3031 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->" 3032 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); 3033 3034 return true; 3035 } 3036 3037 /** 3038 * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp 3039 * @cmd: command to wait on 3040 */ 3041 bool transport_wait_for_tasks(struct se_cmd *cmd) 3042 { 3043 unsigned long flags; 3044 bool ret, aborted = false, tas = false; 3045 3046 spin_lock_irqsave(&cmd->t_state_lock, flags); 3047 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); 3048 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3049 3050 return ret; 3051 } 3052 EXPORT_SYMBOL(transport_wait_for_tasks); 3053 3054 struct sense_detail { 3055 u8 key; 3056 u8 asc; 3057 u8 ascq; 3058 bool add_sense_info; 3059 }; 3060 3061 static const struct sense_detail sense_detail_table[] = { 3062 [TCM_NO_SENSE] = { 3063 .key = NOT_READY 3064 }, 3065 [TCM_NON_EXISTENT_LUN] = { 3066 .key = ILLEGAL_REQUEST, 3067 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */ 3068 }, 3069 [TCM_UNSUPPORTED_SCSI_OPCODE] = { 3070 .key = ILLEGAL_REQUEST, 3071 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 3072 }, 3073 [TCM_SECTOR_COUNT_TOO_MANY] = { 3074 .key = ILLEGAL_REQUEST, 3075 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 3076 }, 3077 [TCM_UNKNOWN_MODE_PAGE] = { 3078 .key = ILLEGAL_REQUEST, 3079 .asc = 0x24, /* INVALID FIELD IN CDB */ 3080 }, 3081 [TCM_CHECK_CONDITION_ABORT_CMD] = { 3082 .key = ABORTED_COMMAND, 3083 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */ 3084 .ascq = 0x03, 3085 }, 3086 [TCM_INCORRECT_AMOUNT_OF_DATA] = { 3087 .key = ABORTED_COMMAND, 3088 .asc = 0x0c, /* WRITE ERROR */ 3089 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */ 3090 }, 3091 [TCM_INVALID_CDB_FIELD] = { 3092 .key = ILLEGAL_REQUEST, 3093 .asc = 0x24, /* INVALID FIELD IN CDB */ 3094 }, 3095 [TCM_INVALID_PARAMETER_LIST] = { 3096 .key = ILLEGAL_REQUEST, 3097 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ 3098 }, 3099 [TCM_TOO_MANY_TARGET_DESCS] = { 3100 
.key = ILLEGAL_REQUEST, 3101 .asc = 0x26, 3102 .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */ 3103 }, 3104 [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = { 3105 .key = ILLEGAL_REQUEST, 3106 .asc = 0x26, 3107 .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */ 3108 }, 3109 [TCM_TOO_MANY_SEGMENT_DESCS] = { 3110 .key = ILLEGAL_REQUEST, 3111 .asc = 0x26, 3112 .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */ 3113 }, 3114 [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = { 3115 .key = ILLEGAL_REQUEST, 3116 .asc = 0x26, 3117 .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */ 3118 }, 3119 [TCM_PARAMETER_LIST_LENGTH_ERROR] = { 3120 .key = ILLEGAL_REQUEST, 3121 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ 3122 }, 3123 [TCM_UNEXPECTED_UNSOLICITED_DATA] = { 3124 .key = ILLEGAL_REQUEST, 3125 .asc = 0x0c, /* WRITE ERROR */ 3126 .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */ 3127 }, 3128 [TCM_SERVICE_CRC_ERROR] = { 3129 .key = ABORTED_COMMAND, 3130 .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */ 3131 .ascq = 0x05, /* N/A */ 3132 }, 3133 [TCM_SNACK_REJECTED] = { 3134 .key = ABORTED_COMMAND, 3135 .asc = 0x11, /* READ ERROR */ 3136 .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */ 3137 }, 3138 [TCM_WRITE_PROTECTED] = { 3139 .key = DATA_PROTECT, 3140 .asc = 0x27, /* WRITE PROTECTED */ 3141 }, 3142 [TCM_ADDRESS_OUT_OF_RANGE] = { 3143 .key = ILLEGAL_REQUEST, 3144 .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ 3145 }, 3146 [TCM_CHECK_CONDITION_UNIT_ATTENTION] = { 3147 .key = UNIT_ATTENTION, 3148 }, 3149 [TCM_CHECK_CONDITION_NOT_READY] = { 3150 .key = NOT_READY, 3151 }, 3152 [TCM_MISCOMPARE_VERIFY] = { 3153 .key = MISCOMPARE, 3154 .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */ 3155 .ascq = 0x00, 3156 .add_sense_info = true, 3157 }, 3158 [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = { 3159 .key = ABORTED_COMMAND, 3160 .asc = 0x10, 3161 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */ 3162 .add_sense_info = true, 3163 }, 3164 [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = { 3165 .key = ABORTED_COMMAND, 3166 .asc = 0x10, 3167 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ 3168 .add_sense_info = true, 3169 }, 3170 [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = { 3171 .key = ABORTED_COMMAND, 3172 .asc = 0x10, 3173 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ 3174 .add_sense_info = true, 3175 }, 3176 [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = { 3177 .key = COPY_ABORTED, 3178 .asc = 0x0d, 3179 .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */ 3180 3181 }, 3182 [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = { 3183 /* 3184 * Returning ILLEGAL REQUEST would cause immediate IO errors on 3185 * Solaris initiators. Returning NOT READY instead means the 3186 * operations will be retried a finite number of times and we 3187 * can survive intermittent errors. 3188 */ 3189 .key = NOT_READY, 3190 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */ 3191 }, 3192 [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = { 3193 /* 3194 * From spc4r22, sections 5.7.7 and 5.7.8: 3195 * If a PERSISTENT RESERVE OUT command with a REGISTER service action 3196 * or a REGISTER AND IGNORE EXISTING KEY service action or 3197 * REGISTER AND MOVE service action is attempted, 3198 * but there are insufficient device server resources to complete the 3199 * operation, then the command shall be terminated with CHECK CONDITION 3200 * status, with the sense key set to ILLEGAL REQUEST, and the additional 3201 * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
3202 */ 3203 .key = ILLEGAL_REQUEST, 3204 .asc = 0x55, 3205 .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */ 3206 }, 3207 [TCM_INVALID_FIELD_IN_COMMAND_IU] = { 3208 .key = ILLEGAL_REQUEST, 3209 .asc = 0x0e, 3210 .ascq = 0x03, /* INVALID FIELD IN COMMAND INFORMATION UNIT */ 3211 }, 3212 }; 3213 3214 /** 3215 * translate_sense_reason - translate a sense reason into T10 key, asc and ascq 3216 * @cmd: SCSI command in which the resulting sense buffer or SCSI status will 3217 * be stored. 3218 * @reason: LIO sense reason code. If this argument has the value 3219 * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If 3220 * dequeuing a unit attention fails due to multiple commands being processed 3221 * concurrently, set the command status to BUSY. 3222 * 3223 * Return: 0 upon success or -EINVAL if the sense buffer is too small. 3224 */ 3225 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) 3226 { 3227 const struct sense_detail *sd; 3228 u8 *buffer = cmd->sense_buffer; 3229 int r = (__force int)reason; 3230 u8 key, asc, ascq; 3231 bool desc_format = target_sense_desc_format(cmd->se_dev); 3232 3233 if (r < ARRAY_SIZE(sense_detail_table) && sense_detail_table[r].key) 3234 sd = &sense_detail_table[r]; 3235 else 3236 sd = &sense_detail_table[(__force int) 3237 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE]; 3238 3239 key = sd->key; 3240 if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) { 3241 if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc, 3242 &ascq)) { 3243 cmd->scsi_status = SAM_STAT_BUSY; 3244 return; 3245 } 3246 } else if (sd->asc == 0) { 3247 WARN_ON_ONCE(cmd->scsi_asc == 0); 3248 asc = cmd->scsi_asc; 3249 ascq = cmd->scsi_ascq; 3250 } else { 3251 asc = sd->asc; 3252 ascq = sd->ascq; 3253 } 3254 3255 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 3256 cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 3257 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 3258 scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq); 3259 if (sd->add_sense_info) 3260 WARN_ON_ONCE(scsi_set_sense_information(buffer, 3261 cmd->scsi_sense_length, 3262 cmd->sense_info) < 0); 3263 } 3264 3265 int 3266 transport_send_check_condition_and_sense(struct se_cmd *cmd, 3267 sense_reason_t reason, int from_transport) 3268 { 3269 unsigned long flags; 3270 3271 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); 3272 3273 spin_lock_irqsave(&cmd->t_state_lock, flags); 3274 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 3275 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3276 return 0; 3277 } 3278 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 3279 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3280 3281 if (!from_transport) 3282 translate_sense_reason(cmd, reason); 3283 3284 trace_target_cmd_complete(cmd); 3285 return cmd->se_tfo->queue_status(cmd); 3286 } 3287 EXPORT_SYMBOL(transport_send_check_condition_and_sense); 3288 3289 /** 3290 * target_send_busy - Send SCSI BUSY status back to the initiator 3291 * @cmd: SCSI command for which to send a BUSY reply. 3292 * 3293 * Note: Only call this function if target_submit_cmd*() failed. 
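 *
 * Illustrative use in a hypothetical fabric driver:
 *
 *	rc = target_submit_cmd(se_cmd, se_sess, cdb, sense, unpacked_lun,
 *			       data_length, task_attr, data_dir, flags);
 *	if (rc < 0)
 *		target_send_busy(se_cmd);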
3294 */ 3295 int target_send_busy(struct se_cmd *cmd) 3296 { 3297 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); 3298 3299 cmd->scsi_status = SAM_STAT_BUSY; 3300 trace_target_cmd_complete(cmd); 3301 return cmd->se_tfo->queue_status(cmd); 3302 } 3303 EXPORT_SYMBOL(target_send_busy); 3304 3305 static void target_tmr_work(struct work_struct *work) 3306 { 3307 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 3308 struct se_device *dev = cmd->se_dev; 3309 struct se_tmr_req *tmr = cmd->se_tmr_req; 3310 int ret; 3311 3312 if (cmd->transport_state & CMD_T_ABORTED) 3313 goto aborted; 3314 3315 switch (tmr->function) { 3316 case TMR_ABORT_TASK: 3317 core_tmr_abort_task(dev, tmr, cmd->se_sess); 3318 break; 3319 case TMR_ABORT_TASK_SET: 3320 case TMR_CLEAR_ACA: 3321 case TMR_CLEAR_TASK_SET: 3322 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; 3323 break; 3324 case TMR_LUN_RESET: 3325 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); 3326 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : 3327 TMR_FUNCTION_REJECTED; 3328 if (tmr->response == TMR_FUNCTION_COMPLETE) { 3329 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 3330 cmd->orig_fe_lun, 0x29, 3331 ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED); 3332 } 3333 break; 3334 case TMR_TARGET_WARM_RESET: 3335 tmr->response = TMR_FUNCTION_REJECTED; 3336 break; 3337 case TMR_TARGET_COLD_RESET: 3338 tmr->response = TMR_FUNCTION_REJECTED; 3339 break; 3340 default: 3341 pr_err("Unknown TMR function: 0x%02x.\n", 3342 tmr->function); 3343 tmr->response = TMR_FUNCTION_REJECTED; 3344 break; 3345 } 3346 3347 if (cmd->transport_state & CMD_T_ABORTED) 3348 goto aborted; 3349 3350 cmd->se_tfo->queue_tm_rsp(cmd); 3351 3352 transport_lun_remove_cmd(cmd); 3353 transport_cmd_check_stop_to_fabric(cmd); 3354 return; 3355 3356 aborted: 3357 target_handle_abort(cmd); 3358 } 3359 3360 int transport_generic_handle_tmr( 3361 struct se_cmd *cmd) 3362 { 3363 unsigned long flags; 3364 bool aborted = false; 3365 3366 spin_lock_irqsave(&cmd->t_state_lock, flags); 3367 if (cmd->transport_state & CMD_T_ABORTED) { 3368 aborted = true; 3369 } else { 3370 cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 3371 cmd->transport_state |= CMD_T_ACTIVE; 3372 } 3373 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3374 3375 if (aborted) { 3376 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n", 3377 cmd->se_tmr_req->function, 3378 cmd->se_tmr_req->ref_task_tag, cmd->tag); 3379 target_handle_abort(cmd); 3380 return 0; 3381 } 3382 3383 INIT_WORK(&cmd->work, target_tmr_work); 3384 schedule_work(&cmd->work); 3385 return 0; 3386 } 3387 EXPORT_SYMBOL(transport_generic_handle_tmr); 3388 3389 bool 3390 target_check_wce(struct se_device *dev) 3391 { 3392 bool wce = false; 3393 3394 if (dev->transport->get_write_cache) 3395 wce = dev->transport->get_write_cache(dev); 3396 else if (dev->dev_attrib.emulate_write_cache > 0) 3397 wce = true; 3398 3399 return wce; 3400 } 3401 3402 bool 3403 target_check_fua(struct se_device *dev) 3404 { 3405 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0; 3406 } 3407
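/*
 * Usage sketch (illustrative only, not part of the target core): a
 * hypothetical backend write path would honour forced unit access only when a
 * volatile write cache is in effect, e.g.:
 *
 *	if ((cmd->se_cmd_flags & SCF_FUA) && target_check_fua(dev))
 *		issue the WRITE with FUA or follow it with a cache flush;
 *	else
 *		issue a plain WRITE;
 */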