1 /******************************************************************************* 2 * Filename: target_core_transport.c 3 * 4 * This file contains the Generic Target Engine Core. 5 * 6 * (c) Copyright 2002-2013 Datera, Inc. 7 * 8 * Nicholas A. Bellinger <nab@kernel.org> 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2 of the License, or 13 * (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; if not, write to the Free Software 22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 23 * 24 ******************************************************************************/ 25 26 #include <linux/net.h> 27 #include <linux/delay.h> 28 #include <linux/string.h> 29 #include <linux/timer.h> 30 #include <linux/slab.h> 31 #include <linux/spinlock.h> 32 #include <linux/kthread.h> 33 #include <linux/in.h> 34 #include <linux/cdrom.h> 35 #include <linux/module.h> 36 #include <linux/ratelimit.h> 37 #include <linux/vmalloc.h> 38 #include <asm/unaligned.h> 39 #include <net/sock.h> 40 #include <net/tcp.h> 41 #include <scsi/scsi_proto.h> 42 #include <scsi/scsi_common.h> 43 44 #include <target/target_core_base.h> 45 #include <target/target_core_backend.h> 46 #include <target/target_core_fabric.h> 47 48 #include "target_core_internal.h" 49 #include "target_core_alua.h" 50 #include "target_core_pr.h" 51 #include "target_core_ua.h" 52 53 #define CREATE_TRACE_POINTS 54 #include <trace/events/target.h> 55 56 static struct workqueue_struct *target_completion_wq; 57 static struct kmem_cache *se_sess_cache; 58 struct kmem_cache *se_ua_cache; 59 struct kmem_cache *t10_pr_reg_cache; 60 struct kmem_cache *t10_alua_lu_gp_cache; 61 struct kmem_cache *t10_alua_lu_gp_mem_cache; 62 struct kmem_cache *t10_alua_tg_pt_gp_cache; 63 struct kmem_cache *t10_alua_lba_map_cache; 64 struct kmem_cache *t10_alua_lba_map_mem_cache; 65 66 static void transport_complete_task_attr(struct se_cmd *cmd); 67 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason); 68 static void transport_handle_queue_full(struct se_cmd *cmd, 69 struct se_device *dev, int err, bool write_pending); 70 static void target_complete_ok_work(struct work_struct *work); 71 72 int init_se_kmem_caches(void) 73 { 74 se_sess_cache = kmem_cache_create("se_sess_cache", 75 sizeof(struct se_session), __alignof__(struct se_session), 76 0, NULL); 77 if (!se_sess_cache) { 78 pr_err("kmem_cache_create() for struct se_session" 79 " failed\n"); 80 goto out; 81 } 82 se_ua_cache = kmem_cache_create("se_ua_cache", 83 sizeof(struct se_ua), __alignof__(struct se_ua), 84 0, NULL); 85 if (!se_ua_cache) { 86 pr_err("kmem_cache_create() for struct se_ua failed\n"); 87 goto out_free_sess_cache; 88 } 89 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", 90 sizeof(struct t10_pr_registration), 91 __alignof__(struct t10_pr_registration), 0, NULL); 92 if (!t10_pr_reg_cache) { 93 pr_err("kmem_cache_create() for struct t10_pr_registration" 94 " failed\n"); 95 goto out_free_ua_cache; 96 } 97 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", 98 
sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), 99 0, NULL); 100 if (!t10_alua_lu_gp_cache) { 101 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" 102 " failed\n"); 103 goto out_free_pr_reg_cache; 104 } 105 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", 106 sizeof(struct t10_alua_lu_gp_member), 107 __alignof__(struct t10_alua_lu_gp_member), 0, NULL); 108 if (!t10_alua_lu_gp_mem_cache) { 109 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" 110 "cache failed\n"); 111 goto out_free_lu_gp_cache; 112 } 113 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", 114 sizeof(struct t10_alua_tg_pt_gp), 115 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); 116 if (!t10_alua_tg_pt_gp_cache) { 117 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 118 "cache failed\n"); 119 goto out_free_lu_gp_mem_cache; 120 } 121 t10_alua_lba_map_cache = kmem_cache_create( 122 "t10_alua_lba_map_cache", 123 sizeof(struct t10_alua_lba_map), 124 __alignof__(struct t10_alua_lba_map), 0, NULL); 125 if (!t10_alua_lba_map_cache) { 126 pr_err("kmem_cache_create() for t10_alua_lba_map_" 127 "cache failed\n"); 128 goto out_free_tg_pt_gp_cache; 129 } 130 t10_alua_lba_map_mem_cache = kmem_cache_create( 131 "t10_alua_lba_map_mem_cache", 132 sizeof(struct t10_alua_lba_map_member), 133 __alignof__(struct t10_alua_lba_map_member), 0, NULL); 134 if (!t10_alua_lba_map_mem_cache) { 135 pr_err("kmem_cache_create() for t10_alua_lba_map_mem_" 136 "cache failed\n"); 137 goto out_free_lba_map_cache; 138 } 139 140 target_completion_wq = alloc_workqueue("target_completion", 141 WQ_MEM_RECLAIM, 0); 142 if (!target_completion_wq) 143 goto out_free_lba_map_mem_cache; 144 145 return 0; 146 147 out_free_lba_map_mem_cache: 148 kmem_cache_destroy(t10_alua_lba_map_mem_cache); 149 out_free_lba_map_cache: 150 kmem_cache_destroy(t10_alua_lba_map_cache); 151 out_free_tg_pt_gp_cache: 152 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 153 out_free_lu_gp_mem_cache: 154 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 155 out_free_lu_gp_cache: 156 kmem_cache_destroy(t10_alua_lu_gp_cache); 157 out_free_pr_reg_cache: 158 kmem_cache_destroy(t10_pr_reg_cache); 159 out_free_ua_cache: 160 kmem_cache_destroy(se_ua_cache); 161 out_free_sess_cache: 162 kmem_cache_destroy(se_sess_cache); 163 out: 164 return -ENOMEM; 165 } 166 167 void release_se_kmem_caches(void) 168 { 169 destroy_workqueue(target_completion_wq); 170 kmem_cache_destroy(se_sess_cache); 171 kmem_cache_destroy(se_ua_cache); 172 kmem_cache_destroy(t10_pr_reg_cache); 173 kmem_cache_destroy(t10_alua_lu_gp_cache); 174 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 175 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 176 kmem_cache_destroy(t10_alua_lba_map_cache); 177 kmem_cache_destroy(t10_alua_lba_map_mem_cache); 178 } 179 180 /* This code ensures unique mib indexes are handed out. 
*/ 181 static DEFINE_SPINLOCK(scsi_mib_index_lock); 182 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; 183 184 /* 185 * Allocate a new row index for the entry type specified 186 */ 187 u32 scsi_get_new_index(scsi_index_t type) 188 { 189 u32 new_index; 190 191 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); 192 193 spin_lock(&scsi_mib_index_lock); 194 new_index = ++scsi_mib_index[type]; 195 spin_unlock(&scsi_mib_index_lock); 196 197 return new_index; 198 } 199 200 void transport_subsystem_check_init(void) 201 { 202 int ret; 203 static int sub_api_initialized; 204 205 if (sub_api_initialized) 206 return; 207 208 ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock"); 209 if (ret != 0) 210 pr_err("Unable to load target_core_iblock\n"); 211 212 ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file"); 213 if (ret != 0) 214 pr_err("Unable to load target_core_file\n"); 215 216 ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi"); 217 if (ret != 0) 218 pr_err("Unable to load target_core_pscsi\n"); 219 220 ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user"); 221 if (ret != 0) 222 pr_err("Unable to load target_core_user\n"); 223 224 sub_api_initialized = 1; 225 } 226 227 static void target_release_sess_cmd_refcnt(struct percpu_ref *ref) 228 { 229 struct se_session *sess = container_of(ref, typeof(*sess), cmd_count); 230 231 wake_up(&sess->cmd_list_wq); 232 } 233 234 /** 235 * transport_init_session - initialize a session object 236 * @se_sess: Session object pointer. 237 * 238 * The caller must have zero-initialized @se_sess before calling this function. 239 */ 240 int transport_init_session(struct se_session *se_sess) 241 { 242 INIT_LIST_HEAD(&se_sess->sess_list); 243 INIT_LIST_HEAD(&se_sess->sess_acl_list); 244 INIT_LIST_HEAD(&se_sess->sess_cmd_list); 245 spin_lock_init(&se_sess->sess_cmd_lock); 246 init_waitqueue_head(&se_sess->cmd_list_wq); 247 return percpu_ref_init(&se_sess->cmd_count, 248 target_release_sess_cmd_refcnt, 0, GFP_KERNEL); 249 } 250 EXPORT_SYMBOL(transport_init_session); 251 252 /** 253 * transport_alloc_session - allocate a session object and initialize it 254 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported. 255 */ 256 struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops) 257 { 258 struct se_session *se_sess; 259 int ret; 260 261 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 262 if (!se_sess) { 263 pr_err("Unable to allocate struct se_session from" 264 " se_sess_cache\n"); 265 return ERR_PTR(-ENOMEM); 266 } 267 ret = transport_init_session(se_sess); 268 if (ret < 0) { 269 kmem_cache_free(se_sess_cache, se_sess); 270 return ERR_PTR(ret); 271 } 272 se_sess->sup_prot_ops = sup_prot_ops; 273 274 return se_sess; 275 } 276 EXPORT_SYMBOL(transport_alloc_session); 277 278 /** 279 * transport_alloc_session_tags - allocate target driver private data 280 * @se_sess: Session pointer. 281 * @tag_num: Maximum number of in-flight commands between initiator and target. 282 * @tag_size: Size in bytes of the private data a target driver associates with 283 * each command. 
284 */ 285 int transport_alloc_session_tags(struct se_session *se_sess, 286 unsigned int tag_num, unsigned int tag_size) 287 { 288 int rc; 289 290 se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num, 291 GFP_KERNEL | __GFP_RETRY_MAYFAIL); 292 if (!se_sess->sess_cmd_map) { 293 pr_err("Unable to allocate se_sess->sess_cmd_map\n"); 294 return -ENOMEM; 295 } 296 297 rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1, 298 false, GFP_KERNEL, NUMA_NO_NODE); 299 if (rc < 0) { 300 pr_err("Unable to init se_sess->sess_tag_pool," 301 " tag_num: %u\n", tag_num); 302 kvfree(se_sess->sess_cmd_map); 303 se_sess->sess_cmd_map = NULL; 304 return -ENOMEM; 305 } 306 307 return 0; 308 } 309 EXPORT_SYMBOL(transport_alloc_session_tags); 310 311 /** 312 * transport_init_session_tags - allocate a session and target driver private data 313 * @tag_num: Maximum number of in-flight commands between initiator and target. 314 * @tag_size: Size in bytes of the private data a target driver associates with 315 * each command. 316 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported. 317 */ 318 static struct se_session * 319 transport_init_session_tags(unsigned int tag_num, unsigned int tag_size, 320 enum target_prot_op sup_prot_ops) 321 { 322 struct se_session *se_sess; 323 int rc; 324 325 if (tag_num != 0 && !tag_size) { 326 pr_err("init_session_tags called with percpu-ida tag_num:" 327 " %u, but zero tag_size\n", tag_num); 328 return ERR_PTR(-EINVAL); 329 } 330 if (!tag_num && tag_size) { 331 pr_err("init_session_tags called with percpu-ida tag_size:" 332 " %u, but zero tag_num\n", tag_size); 333 return ERR_PTR(-EINVAL); 334 } 335 336 se_sess = transport_alloc_session(sup_prot_ops); 337 if (IS_ERR(se_sess)) 338 return se_sess; 339 340 rc = transport_alloc_session_tags(se_sess, tag_num, tag_size); 341 if (rc < 0) { 342 transport_free_session(se_sess); 343 return ERR_PTR(-ENOMEM); 344 } 345 346 return se_sess; 347 } 348 349 /* 350 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called. 351 */ 352 void __transport_register_session( 353 struct se_portal_group *se_tpg, 354 struct se_node_acl *se_nacl, 355 struct se_session *se_sess, 356 void *fabric_sess_ptr) 357 { 358 const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo; 359 unsigned char buf[PR_REG_ISID_LEN]; 360 unsigned long flags; 361 362 se_sess->se_tpg = se_tpg; 363 se_sess->fabric_sess_ptr = fabric_sess_ptr; 364 /* 365 * Used by struct se_node_acl's under ConfigFS to locate active se_session-t 366 * 367 * Only set for struct se_session's that will actually be moving I/O. 368 * eg: *NOT* discovery sessions. 369 */ 370 if (se_nacl) { 371 /* 372 * 373 * Determine if fabric allows for T10-PI feature bits exposed to 374 * initiators for device backends with !dev->dev_attrib.pi_prot_type. 375 * 376 * If so, then always save prot_type on a per se_node_acl node 377 * basis and re-instate the previous sess_prot_type to avoid 378 * disabling PI from below any previously initiator side 379 * registered LUNs. 380 */ 381 if (se_nacl->saved_prot_type) 382 se_sess->sess_prot_type = se_nacl->saved_prot_type; 383 else if (tfo->tpg_check_prot_fabric_only) 384 se_sess->sess_prot_type = se_nacl->saved_prot_type = 385 tfo->tpg_check_prot_fabric_only(se_tpg); 386 /* 387 * If the fabric module supports an ISID based TransportID, 388 * save this value in binary from the fabric I_T Nexus now. 
389 */ 390 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { 391 memset(&buf[0], 0, PR_REG_ISID_LEN); 392 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, 393 &buf[0], PR_REG_ISID_LEN); 394 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); 395 } 396 397 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 398 /* 399 * The se_nacl->nacl_sess pointer will be set to the 400 * last active I_T Nexus for each struct se_node_acl. 401 */ 402 se_nacl->nacl_sess = se_sess; 403 404 list_add_tail(&se_sess->sess_acl_list, 405 &se_nacl->acl_sess_list); 406 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 407 } 408 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 409 410 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 411 se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr); 412 } 413 EXPORT_SYMBOL(__transport_register_session); 414 415 void transport_register_session( 416 struct se_portal_group *se_tpg, 417 struct se_node_acl *se_nacl, 418 struct se_session *se_sess, 419 void *fabric_sess_ptr) 420 { 421 unsigned long flags; 422 423 spin_lock_irqsave(&se_tpg->session_lock, flags); 424 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); 425 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 426 } 427 EXPORT_SYMBOL(transport_register_session); 428 429 struct se_session * 430 target_setup_session(struct se_portal_group *tpg, 431 unsigned int tag_num, unsigned int tag_size, 432 enum target_prot_op prot_op, 433 const char *initiatorname, void *private, 434 int (*callback)(struct se_portal_group *, 435 struct se_session *, void *)) 436 { 437 struct se_session *sess; 438 439 /* 440 * If the fabric driver is using percpu-ida based pre allocation 441 * of I/O descriptor tags, go ahead and perform that setup now.. 442 */ 443 if (tag_num != 0) 444 sess = transport_init_session_tags(tag_num, tag_size, prot_op); 445 else 446 sess = transport_alloc_session(prot_op); 447 448 if (IS_ERR(sess)) 449 return sess; 450 451 sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg, 452 (unsigned char *)initiatorname); 453 if (!sess->se_node_acl) { 454 transport_free_session(sess); 455 return ERR_PTR(-EACCES); 456 } 457 /* 458 * Go ahead and perform any remaining fabric setup that is 459 * required before transport_register_session(). 
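	 *
	 * (The callback is typically used by fabric drivers to tie their own
	 * per-connection/nexus state to the freshly allocated se_session, and
	 * to do any hardware specific setup, before the session is published
	 * via transport_register_session() below.)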
460 */ 461 if (callback != NULL) { 462 int rc = callback(tpg, sess, private); 463 if (rc) { 464 transport_free_session(sess); 465 return ERR_PTR(rc); 466 } 467 } 468 469 transport_register_session(tpg, sess->se_node_acl, sess, private); 470 return sess; 471 } 472 EXPORT_SYMBOL(target_setup_session); 473 474 ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) 475 { 476 struct se_session *se_sess; 477 ssize_t len = 0; 478 479 spin_lock_bh(&se_tpg->session_lock); 480 list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { 481 if (!se_sess->se_node_acl) 482 continue; 483 if (!se_sess->se_node_acl->dynamic_node_acl) 484 continue; 485 if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE) 486 break; 487 488 len += snprintf(page + len, PAGE_SIZE - len, "%s\n", 489 se_sess->se_node_acl->initiatorname); 490 len += 1; /* Include NULL terminator */ 491 } 492 spin_unlock_bh(&se_tpg->session_lock); 493 494 return len; 495 } 496 EXPORT_SYMBOL(target_show_dynamic_sessions); 497 498 static void target_complete_nacl(struct kref *kref) 499 { 500 struct se_node_acl *nacl = container_of(kref, 501 struct se_node_acl, acl_kref); 502 struct se_portal_group *se_tpg = nacl->se_tpg; 503 504 if (!nacl->dynamic_stop) { 505 complete(&nacl->acl_free_comp); 506 return; 507 } 508 509 mutex_lock(&se_tpg->acl_node_mutex); 510 list_del_init(&nacl->acl_list); 511 mutex_unlock(&se_tpg->acl_node_mutex); 512 513 core_tpg_wait_for_nacl_pr_ref(nacl); 514 core_free_device_list_for_node(nacl, se_tpg); 515 kfree(nacl); 516 } 517 518 void target_put_nacl(struct se_node_acl *nacl) 519 { 520 kref_put(&nacl->acl_kref, target_complete_nacl); 521 } 522 EXPORT_SYMBOL(target_put_nacl); 523 524 void transport_deregister_session_configfs(struct se_session *se_sess) 525 { 526 struct se_node_acl *se_nacl; 527 unsigned long flags; 528 /* 529 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 530 */ 531 se_nacl = se_sess->se_node_acl; 532 if (se_nacl) { 533 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 534 if (!list_empty(&se_sess->sess_acl_list)) 535 list_del_init(&se_sess->sess_acl_list); 536 /* 537 * If the session list is empty, then clear the pointer. 538 * Otherwise, set the struct se_session pointer from the tail 539 * element of the per struct se_node_acl active session list. 540 */ 541 if (list_empty(&se_nacl->acl_sess_list)) 542 se_nacl->nacl_sess = NULL; 543 else { 544 se_nacl->nacl_sess = container_of( 545 se_nacl->acl_sess_list.prev, 546 struct se_session, sess_acl_list); 547 } 548 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 549 } 550 } 551 EXPORT_SYMBOL(transport_deregister_session_configfs); 552 553 void transport_free_session(struct se_session *se_sess) 554 { 555 struct se_node_acl *se_nacl = se_sess->se_node_acl; 556 557 /* 558 * Drop the se_node_acl->nacl_kref obtained from within 559 * core_tpg_get_initiator_node_acl(). 560 */ 561 if (se_nacl) { 562 struct se_portal_group *se_tpg = se_nacl->se_tpg; 563 const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo; 564 unsigned long flags; 565 566 se_sess->se_node_acl = NULL; 567 568 /* 569 * Also determine if we need to drop the extra ->cmd_kref if 570 * it had been previously dynamically generated, and 571 * the endpoint is not caching dynamic ACLs. 
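	 *
	 * (Illustrative sketch with hypothetical fabric driver names, not
	 * taken from any in-tree driver: the usual lifecycle around the
	 * session helpers above is
	 *
	 *	sess = target_setup_session(tpg, 128, sizeof(struct my_cmd),
	 *				    TARGET_PROT_NORMAL, initiator_name,
	 *				    my_conn, my_session_cb);
	 *	...
	 *	target_remove_session(sess);
	 *
	 * where my_cmd, my_conn and my_session_cb are made-up names.
	 * target_remove_session() tears the nexus back down through
	 * transport_deregister_session(), which ends up in this function.)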
572 */ 573 mutex_lock(&se_tpg->acl_node_mutex); 574 if (se_nacl->dynamic_node_acl && 575 !se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 576 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 577 if (list_empty(&se_nacl->acl_sess_list)) 578 se_nacl->dynamic_stop = true; 579 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 580 581 if (se_nacl->dynamic_stop) 582 list_del_init(&se_nacl->acl_list); 583 } 584 mutex_unlock(&se_tpg->acl_node_mutex); 585 586 if (se_nacl->dynamic_stop) 587 target_put_nacl(se_nacl); 588 589 target_put_nacl(se_nacl); 590 } 591 if (se_sess->sess_cmd_map) { 592 sbitmap_queue_free(&se_sess->sess_tag_pool); 593 kvfree(se_sess->sess_cmd_map); 594 } 595 percpu_ref_exit(&se_sess->cmd_count); 596 kmem_cache_free(se_sess_cache, se_sess); 597 } 598 EXPORT_SYMBOL(transport_free_session); 599 600 void transport_deregister_session(struct se_session *se_sess) 601 { 602 struct se_portal_group *se_tpg = se_sess->se_tpg; 603 unsigned long flags; 604 605 if (!se_tpg) { 606 transport_free_session(se_sess); 607 return; 608 } 609 610 spin_lock_irqsave(&se_tpg->session_lock, flags); 611 list_del(&se_sess->sess_list); 612 se_sess->se_tpg = NULL; 613 se_sess->fabric_sess_ptr = NULL; 614 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 615 616 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 617 se_tpg->se_tpg_tfo->fabric_name); 618 /* 619 * If last kref is dropping now for an explicit NodeACL, awake sleeping 620 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 621 * removal context from within transport_free_session() code. 622 * 623 * For dynamic ACL, target_put_nacl() uses target_complete_nacl() 624 * to release all remaining generate_node_acl=1 created ACL resources. 625 */ 626 627 transport_free_session(se_sess); 628 } 629 EXPORT_SYMBOL(transport_deregister_session); 630 631 void target_remove_session(struct se_session *se_sess) 632 { 633 transport_deregister_session_configfs(se_sess); 634 transport_deregister_session(se_sess); 635 } 636 EXPORT_SYMBOL(target_remove_session); 637 638 static void target_remove_from_state_list(struct se_cmd *cmd) 639 { 640 struct se_device *dev = cmd->se_dev; 641 unsigned long flags; 642 643 if (!dev) 644 return; 645 646 spin_lock_irqsave(&dev->execute_task_lock, flags); 647 if (cmd->state_active) { 648 list_del(&cmd->state_list); 649 cmd->state_active = false; 650 } 651 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 652 } 653 654 /* 655 * This function is called by the target core after the target core has 656 * finished processing a SCSI command or SCSI TMF. Both the regular command 657 * processing code and the code for aborting commands can call this 658 * function. CMD_T_STOP is set if and only if another thread is waiting 659 * inside transport_wait_for_tasks() for t_transport_stop_comp. 660 */ 661 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) 662 { 663 unsigned long flags; 664 665 target_remove_from_state_list(cmd); 666 667 spin_lock_irqsave(&cmd->t_state_lock, flags); 668 /* 669 * Determine if frontend context caller is requesting the stopping of 670 * this command for frontend exceptions. 
671 */ 672 if (cmd->transport_state & CMD_T_STOP) { 673 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 674 __func__, __LINE__, cmd->tag); 675 676 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 677 678 complete_all(&cmd->t_transport_stop_comp); 679 return 1; 680 } 681 cmd->transport_state &= ~CMD_T_ACTIVE; 682 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 683 684 /* 685 * Some fabric modules like tcm_loop can release their internally 686 * allocated I/O reference and struct se_cmd now. 687 * 688 * Fabric modules are expected to return '1' here if the se_cmd being 689 * passed is released at this point, or zero if not being released. 690 */ 691 return cmd->se_tfo->check_stop_free(cmd); 692 } 693 694 static void target_complete_failure_work(struct work_struct *work) 695 { 696 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 697 698 transport_generic_request_failure(cmd, 699 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 700 } 701 702 /* 703 * Used when asking transport to copy Sense Data from the underlying 704 * Linux/SCSI struct scsi_cmnd 705 */ 706 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) 707 { 708 struct se_device *dev = cmd->se_dev; 709 710 WARN_ON(!cmd->se_lun); 711 712 if (!dev) 713 return NULL; 714 715 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) 716 return NULL; 717 718 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 719 720 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", 721 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); 722 return cmd->sense_buffer; 723 } 724 725 void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense) 726 { 727 unsigned char *cmd_sense_buf; 728 unsigned long flags; 729 730 spin_lock_irqsave(&cmd->t_state_lock, flags); 731 cmd_sense_buf = transport_get_sense_buffer(cmd); 732 if (!cmd_sense_buf) { 733 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 734 return; 735 } 736 737 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; 738 memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length); 739 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 740 } 741 EXPORT_SYMBOL(transport_copy_sense_to_cmd); 742 743 static void target_handle_abort(struct se_cmd *cmd) 744 { 745 bool tas = cmd->transport_state & CMD_T_TAS; 746 bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF; 747 int ret; 748 749 pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas); 750 751 if (tas) { 752 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 753 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 754 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n", 755 cmd->t_task_cdb[0], cmd->tag); 756 trace_target_cmd_complete(cmd); 757 ret = cmd->se_tfo->queue_status(cmd); 758 if (ret) { 759 transport_handle_queue_full(cmd, cmd->se_dev, 760 ret, false); 761 return; 762 } 763 } else { 764 cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED; 765 cmd->se_tfo->queue_tm_rsp(cmd); 766 } 767 } else { 768 /* 769 * Allow the fabric driver to unmap any resources before 770 * releasing the descriptor via TFO->release_cmd(). 771 */ 772 cmd->se_tfo->aborted_task(cmd); 773 if (ack_kref) 774 WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0); 775 /* 776 * To do: establish a unit attention condition on the I_T 777 * nexus associated with cmd. See also the paragraph "Aborting 778 * commands" in SAM. 
779 */ 780 } 781 782 WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0); 783 784 transport_cmd_check_stop_to_fabric(cmd); 785 } 786 787 static void target_abort_work(struct work_struct *work) 788 { 789 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 790 791 target_handle_abort(cmd); 792 } 793 794 static bool target_cmd_interrupted(struct se_cmd *cmd) 795 { 796 int post_ret; 797 798 if (cmd->transport_state & CMD_T_ABORTED) { 799 if (cmd->transport_complete_callback) 800 cmd->transport_complete_callback(cmd, false, &post_ret); 801 INIT_WORK(&cmd->work, target_abort_work); 802 queue_work(target_completion_wq, &cmd->work); 803 return true; 804 } else if (cmd->transport_state & CMD_T_STOP) { 805 if (cmd->transport_complete_callback) 806 cmd->transport_complete_callback(cmd, false, &post_ret); 807 complete_all(&cmd->t_transport_stop_comp); 808 return true; 809 } 810 811 return false; 812 } 813 814 /* May be called from interrupt context so must not sleep. */ 815 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 816 { 817 int success; 818 unsigned long flags; 819 820 if (target_cmd_interrupted(cmd)) 821 return; 822 823 cmd->scsi_status = scsi_status; 824 825 spin_lock_irqsave(&cmd->t_state_lock, flags); 826 switch (cmd->scsi_status) { 827 case SAM_STAT_CHECK_CONDITION: 828 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 829 success = 1; 830 else 831 success = 0; 832 break; 833 default: 834 success = 1; 835 break; 836 } 837 838 cmd->t_state = TRANSPORT_COMPLETE; 839 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 840 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 841 842 INIT_WORK(&cmd->work, success ? target_complete_ok_work : 843 target_complete_failure_work); 844 if (cmd->se_cmd_flags & SCF_USE_CPUID) 845 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); 846 else 847 queue_work(target_completion_wq, &cmd->work); 848 } 849 EXPORT_SYMBOL(target_complete_cmd); 850 851 void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) 852 { 853 if ((scsi_status == SAM_STAT_GOOD || 854 cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && 855 length < cmd->data_length) { 856 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 857 cmd->residual_count += cmd->data_length - length; 858 } else { 859 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 860 cmd->residual_count = cmd->data_length - length; 861 } 862 863 cmd->data_length = length; 864 } 865 866 target_complete_cmd(cmd, scsi_status); 867 } 868 EXPORT_SYMBOL(target_complete_cmd_with_length); 869 870 static void target_add_to_state_list(struct se_cmd *cmd) 871 { 872 struct se_device *dev = cmd->se_dev; 873 unsigned long flags; 874 875 spin_lock_irqsave(&dev->execute_task_lock, flags); 876 if (!cmd->state_active) { 877 list_add_tail(&cmd->state_list, &dev->state_list); 878 cmd->state_active = true; 879 } 880 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 881 } 882 883 /* 884 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status 885 */ 886 static void transport_write_pending_qf(struct se_cmd *cmd); 887 static void transport_complete_qf(struct se_cmd *cmd); 888 889 void target_qf_do_work(struct work_struct *work) 890 { 891 struct se_device *dev = container_of(work, struct se_device, 892 qf_work_queue); 893 LIST_HEAD(qf_cmd_list); 894 struct se_cmd *cmd, *cmd_tmp; 895 896 spin_lock_irq(&dev->qf_cmd_lock); 897 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); 898 spin_unlock_irq(&dev->qf_cmd_lock); 899 900 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 901 list_del(&cmd->se_qf_node); 
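		/*
		 * transport_handle_queue_full() bumped dev_qf_count when this
		 * command was parked on the queue-full list; drop it now that
		 * the command is being retried.
		 */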
902 atomic_dec_mb(&dev->dev_qf_count); 903 904 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 905 " context: %s\n", cmd->se_tfo->fabric_name, cmd, 906 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : 907 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" 908 : "UNKNOWN"); 909 910 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 911 transport_write_pending_qf(cmd); 912 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK || 913 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) 914 transport_complete_qf(cmd); 915 } 916 } 917 918 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 919 { 920 switch (cmd->data_direction) { 921 case DMA_NONE: 922 return "NONE"; 923 case DMA_FROM_DEVICE: 924 return "READ"; 925 case DMA_TO_DEVICE: 926 return "WRITE"; 927 case DMA_BIDIRECTIONAL: 928 return "BIDI"; 929 default: 930 break; 931 } 932 933 return "UNKNOWN"; 934 } 935 936 void transport_dump_dev_state( 937 struct se_device *dev, 938 char *b, 939 int *bl) 940 { 941 *bl += sprintf(b + *bl, "Status: "); 942 if (dev->export_count) 943 *bl += sprintf(b + *bl, "ACTIVATED"); 944 else 945 *bl += sprintf(b + *bl, "DEACTIVATED"); 946 947 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); 948 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", 949 dev->dev_attrib.block_size, 950 dev->dev_attrib.hw_max_sectors); 951 *bl += sprintf(b + *bl, " "); 952 } 953 954 void transport_dump_vpd_proto_id( 955 struct t10_vpd *vpd, 956 unsigned char *p_buf, 957 int p_buf_len) 958 { 959 unsigned char buf[VPD_TMP_BUF_SIZE]; 960 int len; 961 962 memset(buf, 0, VPD_TMP_BUF_SIZE); 963 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 964 965 switch (vpd->protocol_identifier) { 966 case 0x00: 967 sprintf(buf+len, "Fibre Channel\n"); 968 break; 969 case 0x10: 970 sprintf(buf+len, "Parallel SCSI\n"); 971 break; 972 case 0x20: 973 sprintf(buf+len, "SSA\n"); 974 break; 975 case 0x30: 976 sprintf(buf+len, "IEEE 1394\n"); 977 break; 978 case 0x40: 979 sprintf(buf+len, "SCSI Remote Direct Memory Access" 980 " Protocol\n"); 981 break; 982 case 0x50: 983 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 984 break; 985 case 0x60: 986 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 987 break; 988 case 0x70: 989 sprintf(buf+len, "Automation/Drive Interface Transport" 990 " Protocol\n"); 991 break; 992 case 0x80: 993 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 994 break; 995 default: 996 sprintf(buf+len, "Unknown 0x%02x\n", 997 vpd->protocol_identifier); 998 break; 999 } 1000 1001 if (p_buf) 1002 strncpy(p_buf, buf, p_buf_len); 1003 else 1004 pr_debug("%s", buf); 1005 } 1006 1007 void 1008 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 1009 { 1010 /* 1011 * Check if the Protocol Identifier Valid (PIV) bit is set.. 
1012 * 1013 * from spc3r23.pdf section 7.5.1 1014 */ 1015 if (page_83[1] & 0x80) { 1016 vpd->protocol_identifier = (page_83[0] & 0xf0); 1017 vpd->protocol_identifier_set = 1; 1018 transport_dump_vpd_proto_id(vpd, NULL, 0); 1019 } 1020 } 1021 EXPORT_SYMBOL(transport_set_vpd_proto_id); 1022 1023 int transport_dump_vpd_assoc( 1024 struct t10_vpd *vpd, 1025 unsigned char *p_buf, 1026 int p_buf_len) 1027 { 1028 unsigned char buf[VPD_TMP_BUF_SIZE]; 1029 int ret = 0; 1030 int len; 1031 1032 memset(buf, 0, VPD_TMP_BUF_SIZE); 1033 len = sprintf(buf, "T10 VPD Identifier Association: "); 1034 1035 switch (vpd->association) { 1036 case 0x00: 1037 sprintf(buf+len, "addressed logical unit\n"); 1038 break; 1039 case 0x10: 1040 sprintf(buf+len, "target port\n"); 1041 break; 1042 case 0x20: 1043 sprintf(buf+len, "SCSI target device\n"); 1044 break; 1045 default: 1046 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); 1047 ret = -EINVAL; 1048 break; 1049 } 1050 1051 if (p_buf) 1052 strncpy(p_buf, buf, p_buf_len); 1053 else 1054 pr_debug("%s", buf); 1055 1056 return ret; 1057 } 1058 1059 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) 1060 { 1061 /* 1062 * The VPD identification association.. 1063 * 1064 * from spc3r23.pdf Section 7.6.3.1 Table 297 1065 */ 1066 vpd->association = (page_83[1] & 0x30); 1067 return transport_dump_vpd_assoc(vpd, NULL, 0); 1068 } 1069 EXPORT_SYMBOL(transport_set_vpd_assoc); 1070 1071 int transport_dump_vpd_ident_type( 1072 struct t10_vpd *vpd, 1073 unsigned char *p_buf, 1074 int p_buf_len) 1075 { 1076 unsigned char buf[VPD_TMP_BUF_SIZE]; 1077 int ret = 0; 1078 int len; 1079 1080 memset(buf, 0, VPD_TMP_BUF_SIZE); 1081 len = sprintf(buf, "T10 VPD Identifier Type: "); 1082 1083 switch (vpd->device_identifier_type) { 1084 case 0x00: 1085 sprintf(buf+len, "Vendor specific\n"); 1086 break; 1087 case 0x01: 1088 sprintf(buf+len, "T10 Vendor ID based\n"); 1089 break; 1090 case 0x02: 1091 sprintf(buf+len, "EUI-64 based\n"); 1092 break; 1093 case 0x03: 1094 sprintf(buf+len, "NAA\n"); 1095 break; 1096 case 0x04: 1097 sprintf(buf+len, "Relative target port identifier\n"); 1098 break; 1099 case 0x08: 1100 sprintf(buf+len, "SCSI name string\n"); 1101 break; 1102 default: 1103 sprintf(buf+len, "Unsupported: 0x%02x\n", 1104 vpd->device_identifier_type); 1105 ret = -EINVAL; 1106 break; 1107 } 1108 1109 if (p_buf) { 1110 if (p_buf_len < strlen(buf)+1) 1111 return -EINVAL; 1112 strncpy(p_buf, buf, p_buf_len); 1113 } else { 1114 pr_debug("%s", buf); 1115 } 1116 1117 return ret; 1118 } 1119 1120 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) 1121 { 1122 /* 1123 * The VPD identifier type.. 
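	 *
	 * Worked example (assumed values): for a designation descriptor
	 * starting with page_83[0] = 0x53 and page_83[1] = 0x93, the helpers
	 * above and below decode
	 *
	 *	page_83[0] & 0xf0 = 0x50  -> protocol identifier: iSCSI
	 *	page_83[0] & 0x0f = 0x03  -> code set: UTF-8
	 *	page_83[1] & 0x80 = 0x80  -> PIV set, protocol id is valid
	 *	page_83[1] & 0x30 = 0x10  -> association: target port
	 *	page_83[1] & 0x0f = 0x03  -> identifier (designator) type: NAA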
1124 * 1125 * from spc3r23.pdf Section 7.6.3.1 Table 298 1126 */ 1127 vpd->device_identifier_type = (page_83[1] & 0x0f); 1128 return transport_dump_vpd_ident_type(vpd, NULL, 0); 1129 } 1130 EXPORT_SYMBOL(transport_set_vpd_ident_type); 1131 1132 int transport_dump_vpd_ident( 1133 struct t10_vpd *vpd, 1134 unsigned char *p_buf, 1135 int p_buf_len) 1136 { 1137 unsigned char buf[VPD_TMP_BUF_SIZE]; 1138 int ret = 0; 1139 1140 memset(buf, 0, VPD_TMP_BUF_SIZE); 1141 1142 switch (vpd->device_identifier_code_set) { 1143 case 0x01: /* Binary */ 1144 snprintf(buf, sizeof(buf), 1145 "T10 VPD Binary Device Identifier: %s\n", 1146 &vpd->device_identifier[0]); 1147 break; 1148 case 0x02: /* ASCII */ 1149 snprintf(buf, sizeof(buf), 1150 "T10 VPD ASCII Device Identifier: %s\n", 1151 &vpd->device_identifier[0]); 1152 break; 1153 case 0x03: /* UTF-8 */ 1154 snprintf(buf, sizeof(buf), 1155 "T10 VPD UTF-8 Device Identifier: %s\n", 1156 &vpd->device_identifier[0]); 1157 break; 1158 default: 1159 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" 1160 " 0x%02x", vpd->device_identifier_code_set); 1161 ret = -EINVAL; 1162 break; 1163 } 1164 1165 if (p_buf) 1166 strncpy(p_buf, buf, p_buf_len); 1167 else 1168 pr_debug("%s", buf); 1169 1170 return ret; 1171 } 1172 1173 int 1174 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) 1175 { 1176 static const char hex_str[] = "0123456789abcdef"; 1177 int j = 0, i = 4; /* offset to start of the identifier */ 1178 1179 /* 1180 * The VPD Code Set (encoding) 1181 * 1182 * from spc3r23.pdf Section 7.6.3.1 Table 296 1183 */ 1184 vpd->device_identifier_code_set = (page_83[0] & 0x0f); 1185 switch (vpd->device_identifier_code_set) { 1186 case 0x01: /* Binary */ 1187 vpd->device_identifier[j++] = 1188 hex_str[vpd->device_identifier_type]; 1189 while (i < (4 + page_83[3])) { 1190 vpd->device_identifier[j++] = 1191 hex_str[(page_83[i] & 0xf0) >> 4]; 1192 vpd->device_identifier[j++] = 1193 hex_str[page_83[i] & 0x0f]; 1194 i++; 1195 } 1196 break; 1197 case 0x02: /* ASCII */ 1198 case 0x03: /* UTF-8 */ 1199 while (i < (4 + page_83[3])) 1200 vpd->device_identifier[j++] = page_83[i++]; 1201 break; 1202 default: 1203 break; 1204 } 1205 1206 return transport_dump_vpd_ident(vpd, NULL, 0); 1207 } 1208 EXPORT_SYMBOL(transport_set_vpd_ident); 1209 1210 static sense_reason_t 1211 target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev, 1212 unsigned int size) 1213 { 1214 u32 mtl; 1215 1216 if (!cmd->se_tfo->max_data_sg_nents) 1217 return TCM_NO_SENSE; 1218 /* 1219 * Check if fabric enforced maximum SGL entries per I/O descriptor 1220 * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT + 1221 * residual_count and reduce original cmd->data_length to maximum 1222 * length based on single PAGE_SIZE entry scatter-lists. 1223 */ 1224 mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE); 1225 if (cmd->data_length > mtl) { 1226 /* 1227 * If an existing CDB overflow is present, calculate new residual 1228 * based on CDB size minus fabric maximum transfer length. 1229 * 1230 * If an existing CDB underflow is present, calculate new residual 1231 * based on original cmd->data_length minus fabric maximum transfer 1232 * length. 1233 * 1234 * Otherwise, set the underflow residual based on cmd->data_length 1235 * minus fabric maximum transfer length. 
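		 *
		 * Illustrative numbers (assumed, not from any particular
		 * fabric): with max_data_sg_nents = 32 and PAGE_SIZE = 4K,
		 * mtl is 128K.  A 256K request that had neither OVERFLOW nor
		 * UNDERFLOW set ends up with SCF_UNDERFLOW_BIT, a
		 * residual_count of 128K, and data_length truncated to 128K.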
1236 */ 1237 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1238 cmd->residual_count = (size - mtl); 1239 } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 1240 u32 orig_dl = size + cmd->residual_count; 1241 cmd->residual_count = (orig_dl - mtl); 1242 } else { 1243 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1244 cmd->residual_count = (cmd->data_length - mtl); 1245 } 1246 cmd->data_length = mtl; 1247 /* 1248 * Reset sbc_check_prot() calculated protection payload 1249 * length based upon the new smaller MTL. 1250 */ 1251 if (cmd->prot_length) { 1252 u32 sectors = (mtl / dev->dev_attrib.block_size); 1253 cmd->prot_length = dev->prot_length * sectors; 1254 } 1255 } 1256 return TCM_NO_SENSE; 1257 } 1258 1259 sense_reason_t 1260 target_cmd_size_check(struct se_cmd *cmd, unsigned int size) 1261 { 1262 struct se_device *dev = cmd->se_dev; 1263 1264 if (cmd->unknown_data_length) { 1265 cmd->data_length = size; 1266 } else if (size != cmd->data_length) { 1267 pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:" 1268 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 1269 " 0x%02x\n", cmd->se_tfo->fabric_name, 1270 cmd->data_length, size, cmd->t_task_cdb[0]); 1271 1272 if (cmd->data_direction == DMA_TO_DEVICE) { 1273 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1274 pr_err_ratelimited("Rejecting underflow/overflow" 1275 " for WRITE data CDB\n"); 1276 return TCM_INVALID_CDB_FIELD; 1277 } 1278 /* 1279 * Some fabric drivers like iscsi-target still expect to 1280 * always reject overflow writes. Reject this case until 1281 * full fabric driver level support for overflow writes 1282 * is introduced tree-wide. 1283 */ 1284 if (size > cmd->data_length) { 1285 pr_err_ratelimited("Rejecting overflow for" 1286 " WRITE control CDB\n"); 1287 return TCM_INVALID_CDB_FIELD; 1288 } 1289 } 1290 /* 1291 * Reject READ_* or WRITE_* with overflow/underflow for 1292 * type SCF_SCSI_DATA_CDB. 1293 */ 1294 if (dev->dev_attrib.block_size != 512) { 1295 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 1296 " CDB on non 512-byte sector setup subsystem" 1297 " plugin: %s\n", dev->transport->name); 1298 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ 1299 return TCM_INVALID_CDB_FIELD; 1300 } 1301 /* 1302 * For the overflow case keep the existing fabric provided 1303 * ->data_length. Otherwise for the underflow case, reset 1304 * ->data_length to the smaller SCSI expected data transfer 1305 * length. 1306 */ 1307 if (size > cmd->data_length) { 1308 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; 1309 cmd->residual_count = (size - cmd->data_length); 1310 } else { 1311 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1312 cmd->residual_count = (cmd->data_length - size); 1313 cmd->data_length = size; 1314 } 1315 } 1316 1317 return target_check_max_data_sg_nents(cmd, dev, size); 1318 1319 } 1320 1321 /* 1322 * Used by fabric modules containing a local struct se_cmd within their 1323 * fabric dependent per I/O descriptor. 1324 * 1325 * Preserves the value of @cmd->tag. 
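 *
 * Illustrative sketch with made-up fabric driver names: the usual embedding
 * pattern is
 *
 *	struct my_fabric_cmd {
 *		struct se_cmd	se_cmd;
 *		unsigned char	sense_buf[TRANSPORT_SENSE_BUFFER];
 *	};
 *
 *	fc->se_cmd.tag = wire_tag;
 *	transport_init_se_cmd(&fc->se_cmd, &my_fabric_ops, se_sess,
 *			      data_length, DMA_FROM_DEVICE, TCM_SIMPLE_TAG,
 *			      fc->sense_buf);
 *
 * where my_fabric_cmd, my_fabric_ops and wire_tag are hypothetical; the
 * embedded se_cmd is later recovered with
 * container_of(se_cmd, struct my_fabric_cmd, se_cmd).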
1326 */ 1327 void transport_init_se_cmd( 1328 struct se_cmd *cmd, 1329 const struct target_core_fabric_ops *tfo, 1330 struct se_session *se_sess, 1331 u32 data_length, 1332 int data_direction, 1333 int task_attr, 1334 unsigned char *sense_buffer) 1335 { 1336 INIT_LIST_HEAD(&cmd->se_delayed_node); 1337 INIT_LIST_HEAD(&cmd->se_qf_node); 1338 INIT_LIST_HEAD(&cmd->se_cmd_list); 1339 INIT_LIST_HEAD(&cmd->state_list); 1340 init_completion(&cmd->t_transport_stop_comp); 1341 cmd->free_compl = NULL; 1342 cmd->abrt_compl = NULL; 1343 spin_lock_init(&cmd->t_state_lock); 1344 INIT_WORK(&cmd->work, NULL); 1345 kref_init(&cmd->cmd_kref); 1346 1347 cmd->se_tfo = tfo; 1348 cmd->se_sess = se_sess; 1349 cmd->data_length = data_length; 1350 cmd->data_direction = data_direction; 1351 cmd->sam_task_attr = task_attr; 1352 cmd->sense_buffer = sense_buffer; 1353 1354 cmd->state_active = false; 1355 } 1356 EXPORT_SYMBOL(transport_init_se_cmd); 1357 1358 static sense_reason_t 1359 transport_check_alloc_task_attr(struct se_cmd *cmd) 1360 { 1361 struct se_device *dev = cmd->se_dev; 1362 1363 /* 1364 * Check if SAM Task Attribute emulation is enabled for this 1365 * struct se_device storage object 1366 */ 1367 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1368 return 0; 1369 1370 if (cmd->sam_task_attr == TCM_ACA_TAG) { 1371 pr_debug("SAM Task Attribute ACA" 1372 " emulation is not supported\n"); 1373 return TCM_INVALID_CDB_FIELD; 1374 } 1375 1376 return 0; 1377 } 1378 1379 sense_reason_t 1380 target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) 1381 { 1382 struct se_device *dev = cmd->se_dev; 1383 sense_reason_t ret; 1384 1385 /* 1386 * Ensure that the received CDB is less than the max (252 + 8) bytes 1387 * for VARIABLE_LENGTH_CMD 1388 */ 1389 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { 1390 pr_err("Received SCSI CDB with command_size: %d that" 1391 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1392 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1393 return TCM_INVALID_CDB_FIELD; 1394 } 1395 /* 1396 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, 1397 * allocate the additional extended CDB buffer now.. Otherwise 1398 * setup the pointer from __t_task_cdb to t_task_cdb. 1399 */ 1400 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { 1401 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), 1402 GFP_KERNEL); 1403 if (!cmd->t_task_cdb) { 1404 pr_err("Unable to allocate cmd->t_task_cdb" 1405 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1406 scsi_command_size(cdb), 1407 (unsigned long)sizeof(cmd->__t_task_cdb)); 1408 return TCM_OUT_OF_RESOURCES; 1409 } 1410 } else 1411 cmd->t_task_cdb = &cmd->__t_task_cdb[0]; 1412 /* 1413 * Copy the original CDB into cmd-> 1414 */ 1415 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); 1416 1417 trace_target_sequencer_start(cmd); 1418 1419 ret = dev->transport->parse_cdb(cmd); 1420 if (ret == TCM_UNSUPPORTED_SCSI_OPCODE) 1421 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", 1422 cmd->se_tfo->fabric_name, 1423 cmd->se_sess->se_node_acl->initiatorname, 1424 cmd->t_task_cdb[0]); 1425 if (ret) 1426 return ret; 1427 1428 ret = transport_check_alloc_task_attr(cmd); 1429 if (ret) 1430 return ret; 1431 1432 cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 1433 atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus); 1434 return 0; 1435 } 1436 EXPORT_SYMBOL(target_setup_cmd_from_cdb); 1437 1438 /* 1439 * Used by fabric module frontends to queue tasks directly. 
1440 * May only be used from process context. 1441 */ 1442 int transport_handle_cdb_direct( 1443 struct se_cmd *cmd) 1444 { 1445 sense_reason_t ret; 1446 1447 if (!cmd->se_lun) { 1448 dump_stack(); 1449 pr_err("cmd->se_lun is NULL\n"); 1450 return -EINVAL; 1451 } 1452 if (in_interrupt()) { 1453 dump_stack(); 1454 pr_err("transport_generic_handle_cdb cannot be called" 1455 " from interrupt context\n"); 1456 return -EINVAL; 1457 } 1458 /* 1459 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that 1460 * outstanding descriptors are handled correctly during shutdown via 1461 * transport_wait_for_tasks() 1462 * 1463 * Also, we don't take cmd->t_state_lock here as we only expect 1464 * this to be called for initial descriptor submission. 1465 */ 1466 cmd->t_state = TRANSPORT_NEW_CMD; 1467 cmd->transport_state |= CMD_T_ACTIVE; 1468 1469 /* 1470 * transport_generic_new_cmd() is already handling QUEUE_FULL, 1471 * so follow TRANSPORT_NEW_CMD processing thread context usage 1472 * and call transport_generic_request_failure() if necessary.. 1473 */ 1474 ret = transport_generic_new_cmd(cmd); 1475 if (ret) 1476 transport_generic_request_failure(cmd, ret); 1477 return 0; 1478 } 1479 EXPORT_SYMBOL(transport_handle_cdb_direct); 1480 1481 sense_reason_t 1482 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 1483 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1484 { 1485 if (!sgl || !sgl_count) 1486 return 0; 1487 1488 /* 1489 * Reject SCSI data overflow with map_mem_to_cmd() as incoming 1490 * scatterlists already have been set to follow what the fabric 1491 * passes for the original expected data transfer length. 1492 */ 1493 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1494 pr_warn("Rejecting SCSI DATA overflow for fabric using" 1495 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); 1496 return TCM_INVALID_CDB_FIELD; 1497 } 1498 1499 cmd->t_data_sg = sgl; 1500 cmd->t_data_nents = sgl_count; 1501 cmd->t_bidi_data_sg = sgl_bidi; 1502 cmd->t_bidi_data_nents = sgl_bidi_count; 1503 1504 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1505 return 0; 1506 } 1507 1508 /** 1509 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized 1510 * se_cmd + use pre-allocated SGL memory. 1511 * 1512 * @se_cmd: command descriptor to submit 1513 * @se_sess: associated se_sess for endpoint 1514 * @cdb: pointer to SCSI CDB 1515 * @sense: pointer to SCSI sense buffer 1516 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1517 * @data_length: fabric expected data transfer length 1518 * @task_attr: SAM task attribute 1519 * @data_dir: DMA data direction 1520 * @flags: flags for command submission from target_sc_flags_tables 1521 * @sgl: struct scatterlist memory for unidirectional mapping 1522 * @sgl_count: scatterlist count for unidirectional mapping 1523 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping 1524 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping 1525 * @sgl_prot: struct scatterlist memory protection information 1526 * @sgl_prot_count: scatterlist count for protection information 1527 * 1528 * Task tags are supported if the caller has set @se_cmd->tag. 1529 * 1530 * Returns non zero to signal active I/O shutdown failure. All other 1531 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1532 * but still return zero here. 1533 * 1534 * This may only be called from process context, and also currently 1535 * assumes internal allocation of fabric payload buffer by target-core. 
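 *
 * A minimal calling sketch (hypothetical fabric driver, names and error
 * handling are illustrative only):
 *
 *	se_cmd->tag = wire_tag;
 *	target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense_buf,
 *				   unpacked_lun, data_len, TCM_SIMPLE_TAG,
 *				   DMA_TO_DEVICE, TARGET_SCF_ACK_KREF,
 *				   sgl, sgl_count, NULL, 0, NULL, 0);
 *
 * passing NULL/0 for the BIDI and protection scatterlists when they are not
 * used, as tcm_loop and vhost-scsi style drivers typically do.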
1536 */ 1537 int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess, 1538 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, 1539 u32 data_length, int task_attr, int data_dir, int flags, 1540 struct scatterlist *sgl, u32 sgl_count, 1541 struct scatterlist *sgl_bidi, u32 sgl_bidi_count, 1542 struct scatterlist *sgl_prot, u32 sgl_prot_count) 1543 { 1544 struct se_portal_group *se_tpg; 1545 sense_reason_t rc; 1546 int ret; 1547 1548 se_tpg = se_sess->se_tpg; 1549 BUG_ON(!se_tpg); 1550 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); 1551 BUG_ON(in_interrupt()); 1552 /* 1553 * Initialize se_cmd for target operation. From this point 1554 * exceptions are handled by sending exception status via 1555 * target_core_fabric_ops->queue_status() callback 1556 */ 1557 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1558 data_length, data_dir, task_attr, sense); 1559 1560 if (flags & TARGET_SCF_USE_CPUID) 1561 se_cmd->se_cmd_flags |= SCF_USE_CPUID; 1562 else 1563 se_cmd->cpuid = WORK_CPU_UNBOUND; 1564 1565 if (flags & TARGET_SCF_UNKNOWN_SIZE) 1566 se_cmd->unknown_data_length = 1; 1567 /* 1568 * Obtain struct se_cmd->cmd_kref reference and add new cmd to 1569 * se_sess->sess_cmd_list. A second kref_get here is necessary 1570 * for fabrics using TARGET_SCF_ACK_KREF that expect a second 1571 * kref_put() to happen during fabric packet acknowledgement. 1572 */ 1573 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); 1574 if (ret) 1575 return ret; 1576 /* 1577 * Signal bidirectional data payloads to target-core 1578 */ 1579 if (flags & TARGET_SCF_BIDI_OP) 1580 se_cmd->se_cmd_flags |= SCF_BIDI; 1581 /* 1582 * Locate se_lun pointer and attach it to struct se_cmd 1583 */ 1584 rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun); 1585 if (rc) { 1586 transport_send_check_condition_and_sense(se_cmd, rc, 0); 1587 target_put_sess_cmd(se_cmd); 1588 return 0; 1589 } 1590 1591 rc = target_setup_cmd_from_cdb(se_cmd, cdb); 1592 if (rc != 0) { 1593 transport_generic_request_failure(se_cmd, rc); 1594 return 0; 1595 } 1596 1597 /* 1598 * Save pointers for SGLs containing protection information, 1599 * if present. 1600 */ 1601 if (sgl_prot_count) { 1602 se_cmd->t_prot_sg = sgl_prot; 1603 se_cmd->t_prot_nents = sgl_prot_count; 1604 se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC; 1605 } 1606 1607 /* 1608 * When a non zero sgl_count has been passed perform SGL passthrough 1609 * mapping for pre-allocated fabric memory instead of having target 1610 * core perform an internal SGL allocation.. 1611 */ 1612 if (sgl_count != 0) { 1613 BUG_ON(!sgl); 1614 1615 /* 1616 * A work-around for tcm_loop as some userspace code via 1617 * scsi-generic do not memset their associated read buffers, 1618 * so go ahead and do that here for type non-data CDBs. Also 1619 * note that this is currently guaranteed to be a single SGL 1620 * for this case by target core in target_setup_cmd_from_cdb() 1621 * -> transport_generic_cmd_sequencer(). 
		 */
		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
		     se_cmd->data_direction == DMA_FROM_DEVICE) {
			unsigned char *buf = NULL;

			if (sgl)
				buf = kmap(sg_page(sgl)) + sgl->offset;

			if (buf) {
				memset(buf, 0, sgl->length);
				kunmap(sg_page(sgl));
			}
		}

		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
				sgl_bidi, sgl_bidi_count);
		if (rc != 0) {
			transport_generic_request_failure(se_cmd, rc);
			return 0;
		}
	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);

/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 *
 * It also assumes internal target core SGL memory allocation.
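 *
 * A minimal calling sketch (hypothetical names, error handling elided):
 *
 *	se_cmd->tag = wire_tag;
 *	ret = target_submit_cmd(se_cmd, se_sess, cdb, sense_buf, unpacked_lun,
 *				data_len, TCM_SIMPLE_TAG, DMA_FROM_DEVICE,
 *				TARGET_SCF_ACK_KREF);
 *	if (ret)
 *		...	(active I/O shutdown, drop the fabric descriptor)
 *
 * Any CDB level failure is not reported through the return value but as a
 * CHECK_CONDITION response via the fabric queue_status() callback.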
1678 */ 1679 int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, 1680 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, 1681 u32 data_length, int task_attr, int data_dir, int flags) 1682 { 1683 return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, 1684 unpacked_lun, data_length, task_attr, data_dir, 1685 flags, NULL, 0, NULL, 0, NULL, 0); 1686 } 1687 EXPORT_SYMBOL(target_submit_cmd); 1688 1689 static void target_complete_tmr_failure(struct work_struct *work) 1690 { 1691 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); 1692 1693 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; 1694 se_cmd->se_tfo->queue_tm_rsp(se_cmd); 1695 1696 transport_cmd_check_stop_to_fabric(se_cmd); 1697 } 1698 1699 static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag, 1700 u64 *unpacked_lun) 1701 { 1702 struct se_cmd *se_cmd; 1703 unsigned long flags; 1704 bool ret = false; 1705 1706 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 1707 list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) { 1708 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 1709 continue; 1710 1711 if (se_cmd->tag == tag) { 1712 *unpacked_lun = se_cmd->orig_fe_lun; 1713 ret = true; 1714 break; 1715 } 1716 } 1717 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 1718 1719 return ret; 1720 } 1721 1722 /** 1723 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd 1724 * for TMR CDBs 1725 * 1726 * @se_cmd: command descriptor to submit 1727 * @se_sess: associated se_sess for endpoint 1728 * @sense: pointer to SCSI sense buffer 1729 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1730 * @fabric_tmr_ptr: fabric context for TMR req 1731 * @tm_type: Type of TM request 1732 * @gfp: gfp type for caller 1733 * @tag: referenced task tag for TMR_ABORT_TASK 1734 * @flags: submit cmd flags 1735 * 1736 * Callable from all contexts. 1737 **/ 1738 1739 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 1740 unsigned char *sense, u64 unpacked_lun, 1741 void *fabric_tmr_ptr, unsigned char tm_type, 1742 gfp_t gfp, u64 tag, int flags) 1743 { 1744 struct se_portal_group *se_tpg; 1745 int ret; 1746 1747 se_tpg = se_sess->se_tpg; 1748 BUG_ON(!se_tpg); 1749 1750 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1751 0, DMA_NONE, TCM_SIMPLE_TAG, sense); 1752 /* 1753 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req 1754 * allocation failure. 1755 */ 1756 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); 1757 if (ret < 0) 1758 return -ENOMEM; 1759 1760 if (tm_type == TMR_ABORT_TASK) 1761 se_cmd->se_tmr_req->ref_task_tag = tag; 1762 1763 /* See target_submit_cmd for commentary */ 1764 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); 1765 if (ret) { 1766 core_tmr_release_req(se_cmd->se_tmr_req); 1767 return ret; 1768 } 1769 /* 1770 * If this is ABORT_TASK with no explicit fabric provided LUN, 1771 * go ahead and search active session tags for a match to figure 1772 * out unpacked_lun for the original se_cmd. 
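	 *
	 * (Illustrative call from a hypothetical fabric driver:
	 *
	 *	target_submit_tmr(&tmr_cmd->se_cmd, se_sess, tmr_cmd->sense_buf,
	 *			  0, fabric_tmr, TMR_ABORT_TASK, GFP_KERNEL,
	 *			  aborted_tag, TARGET_SCF_ACK_KREF |
	 *			  TARGET_SCF_LOOKUP_LUN_FROM_TAG);
	 *
	 * with unpacked_lun passed as 0, because the LUN is recovered from
	 * the referenced task tag right below.)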
1773 */ 1774 if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) { 1775 if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun)) 1776 goto failure; 1777 } 1778 1779 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); 1780 if (ret) 1781 goto failure; 1782 1783 transport_generic_handle_tmr(se_cmd); 1784 return 0; 1785 1786 /* 1787 * For callback during failure handling, push this work off 1788 * to process context with TMR_LUN_DOES_NOT_EXIST status. 1789 */ 1790 failure: 1791 INIT_WORK(&se_cmd->work, target_complete_tmr_failure); 1792 schedule_work(&se_cmd->work); 1793 return 0; 1794 } 1795 EXPORT_SYMBOL(target_submit_tmr); 1796 1797 /* 1798 * Handle SAM-esque emulation for generic transport request failures. 1799 */ 1800 void transport_generic_request_failure(struct se_cmd *cmd, 1801 sense_reason_t sense_reason) 1802 { 1803 int ret = 0, post_ret; 1804 1805 pr_debug("-----[ Storage Engine Exception; sense_reason %d\n", 1806 sense_reason); 1807 target_show_cmd("-----[ ", cmd); 1808 1809 /* 1810 * For SAM Task Attribute emulation for failed struct se_cmd 1811 */ 1812 transport_complete_task_attr(cmd); 1813 1814 if (cmd->transport_complete_callback) 1815 cmd->transport_complete_callback(cmd, false, &post_ret); 1816 1817 if (cmd->transport_state & CMD_T_ABORTED) { 1818 INIT_WORK(&cmd->work, target_abort_work); 1819 queue_work(target_completion_wq, &cmd->work); 1820 return; 1821 } 1822 1823 switch (sense_reason) { 1824 case TCM_NON_EXISTENT_LUN: 1825 case TCM_UNSUPPORTED_SCSI_OPCODE: 1826 case TCM_INVALID_CDB_FIELD: 1827 case TCM_INVALID_PARAMETER_LIST: 1828 case TCM_PARAMETER_LIST_LENGTH_ERROR: 1829 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 1830 case TCM_UNKNOWN_MODE_PAGE: 1831 case TCM_WRITE_PROTECTED: 1832 case TCM_ADDRESS_OUT_OF_RANGE: 1833 case TCM_CHECK_CONDITION_ABORT_CMD: 1834 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1835 case TCM_CHECK_CONDITION_NOT_READY: 1836 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: 1837 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: 1838 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: 1839 case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: 1840 case TCM_TOO_MANY_TARGET_DESCS: 1841 case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE: 1842 case TCM_TOO_MANY_SEGMENT_DESCS: 1843 case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE: 1844 break; 1845 case TCM_OUT_OF_RESOURCES: 1846 cmd->scsi_status = SAM_STAT_TASK_SET_FULL; 1847 goto queue_status; 1848 case TCM_LUN_BUSY: 1849 cmd->scsi_status = SAM_STAT_BUSY; 1850 goto queue_status; 1851 case TCM_RESERVATION_CONFLICT: 1852 /* 1853 * No SENSE Data payload for this case, set SCSI Status 1854 * and queue the response to $FABRIC_MOD. 1855 * 1856 * Uses linux/include/scsi/scsi.h SAM status codes defs 1857 */ 1858 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1859 /* 1860 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 1861 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 1862 * CONFLICT STATUS. 
1863 * 1864 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1865 */ 1866 if (cmd->se_sess && 1867 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) { 1868 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 1869 cmd->orig_fe_lun, 0x2C, 1870 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1871 } 1872 1873 goto queue_status; 1874 default: 1875 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 1876 cmd->t_task_cdb[0], sense_reason); 1877 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1878 break; 1879 } 1880 1881 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); 1882 if (ret) 1883 goto queue_full; 1884 1885 check_stop: 1886 transport_cmd_check_stop_to_fabric(cmd); 1887 return; 1888 1889 queue_status: 1890 trace_target_cmd_complete(cmd); 1891 ret = cmd->se_tfo->queue_status(cmd); 1892 if (!ret) 1893 goto check_stop; 1894 queue_full: 1895 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 1896 } 1897 EXPORT_SYMBOL(transport_generic_request_failure); 1898 1899 void __target_execute_cmd(struct se_cmd *cmd, bool do_checks) 1900 { 1901 sense_reason_t ret; 1902 1903 if (!cmd->execute_cmd) { 1904 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1905 goto err; 1906 } 1907 if (do_checks) { 1908 /* 1909 * Check for an existing UNIT ATTENTION condition after 1910 * target_handle_task_attr() has done SAM task attr 1911 * checking, and possibly have already defered execution 1912 * out to target_restart_delayed_cmds() context. 1913 */ 1914 ret = target_scsi3_ua_check(cmd); 1915 if (ret) 1916 goto err; 1917 1918 ret = target_alua_state_check(cmd); 1919 if (ret) 1920 goto err; 1921 1922 ret = target_check_reservation(cmd); 1923 if (ret) { 1924 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1925 goto err; 1926 } 1927 } 1928 1929 ret = cmd->execute_cmd(cmd); 1930 if (!ret) 1931 return; 1932 err: 1933 spin_lock_irq(&cmd->t_state_lock); 1934 cmd->transport_state &= ~CMD_T_SENT; 1935 spin_unlock_irq(&cmd->t_state_lock); 1936 1937 transport_generic_request_failure(cmd, ret); 1938 } 1939 1940 static int target_write_prot_action(struct se_cmd *cmd) 1941 { 1942 u32 sectors; 1943 /* 1944 * Perform WRITE_INSERT of PI using software emulation when backend 1945 * device has PI enabled, if the transport has not already generated 1946 * PI using hardware WRITE_INSERT offload. 1947 */ 1948 switch (cmd->prot_op) { 1949 case TARGET_PROT_DOUT_INSERT: 1950 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) 1951 sbc_dif_generate(cmd); 1952 break; 1953 case TARGET_PROT_DOUT_STRIP: 1954 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP) 1955 break; 1956 1957 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); 1958 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 1959 sectors, 0, cmd->t_prot_sg, 0); 1960 if (unlikely(cmd->pi_err)) { 1961 spin_lock_irq(&cmd->t_state_lock); 1962 cmd->transport_state &= ~CMD_T_SENT; 1963 spin_unlock_irq(&cmd->t_state_lock); 1964 transport_generic_request_failure(cmd, cmd->pi_err); 1965 return -1; 1966 } 1967 break; 1968 default: 1969 break; 1970 } 1971 1972 return 0; 1973 } 1974 1975 static bool target_handle_task_attr(struct se_cmd *cmd) 1976 { 1977 struct se_device *dev = cmd->se_dev; 1978 1979 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1980 return false; 1981 1982 cmd->se_cmd_flags |= SCF_TASK_ATTR_SET; 1983 1984 /* 1985 * Check for the existence of HEAD_OF_QUEUE, and if true return 1 1986 * to allow the passed struct se_cmd list of tasks to the front of the list. 
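	 *
	 * Summary of the gating below (descriptive only): TCM_HEAD_TAG commands
	 * always run immediately; a TCM_ORDERED_TAG command bumps
	 * dev->dev_ordered_sync and runs immediately only if no simple commands
	 * are outstanding; everything else bumps dev->simple_cmds and runs
	 * immediately unless an ORDERED command is still in flight. Commands
	 * that cannot run yet are parked on dev->delayed_cmd_list and replayed
	 * later by target_restart_delayed_cmds().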
1987 */ 1988 switch (cmd->sam_task_attr) { 1989 case TCM_HEAD_TAG: 1990 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n", 1991 cmd->t_task_cdb[0]); 1992 return false; 1993 case TCM_ORDERED_TAG: 1994 atomic_inc_mb(&dev->dev_ordered_sync); 1995 1996 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n", 1997 cmd->t_task_cdb[0]); 1998 1999 /* 2000 * Execute an ORDERED command if no other older commands 2001 * exist that need to be completed first. 2002 */ 2003 if (!atomic_read(&dev->simple_cmds)) 2004 return false; 2005 break; 2006 default: 2007 /* 2008 * For SIMPLE and UNTAGGED Task Attribute commands 2009 */ 2010 atomic_inc_mb(&dev->simple_cmds); 2011 break; 2012 } 2013 2014 if (atomic_read(&dev->dev_ordered_sync) == 0) 2015 return false; 2016 2017 spin_lock(&dev->delayed_cmd_lock); 2018 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); 2019 spin_unlock(&dev->delayed_cmd_lock); 2020 2021 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn", 2022 cmd->t_task_cdb[0], cmd->sam_task_attr); 2023 return true; 2024 } 2025 2026 void target_execute_cmd(struct se_cmd *cmd) 2027 { 2028 /* 2029 * Determine if frontend context caller is requesting the stopping of 2030 * this command for frontend exceptions. 2031 * 2032 * If the received CDB has already been aborted stop processing it here. 2033 */ 2034 if (target_cmd_interrupted(cmd)) 2035 return; 2036 2037 spin_lock_irq(&cmd->t_state_lock); 2038 cmd->t_state = TRANSPORT_PROCESSING; 2039 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; 2040 spin_unlock_irq(&cmd->t_state_lock); 2041 2042 if (target_write_prot_action(cmd)) 2043 return; 2044 2045 if (target_handle_task_attr(cmd)) { 2046 spin_lock_irq(&cmd->t_state_lock); 2047 cmd->transport_state &= ~CMD_T_SENT; 2048 spin_unlock_irq(&cmd->t_state_lock); 2049 return; 2050 } 2051 2052 __target_execute_cmd(cmd, true); 2053 } 2054 EXPORT_SYMBOL(target_execute_cmd); 2055 2056 /* 2057 * Process all commands up to the last received ORDERED task attribute which 2058 * requires another blocking boundary 2059 */ 2060 static void target_restart_delayed_cmds(struct se_device *dev) 2061 { 2062 for (;;) { 2063 struct se_cmd *cmd; 2064 2065 spin_lock(&dev->delayed_cmd_lock); 2066 if (list_empty(&dev->delayed_cmd_list)) { 2067 spin_unlock(&dev->delayed_cmd_lock); 2068 break; 2069 } 2070 2071 cmd = list_entry(dev->delayed_cmd_list.next, 2072 struct se_cmd, se_delayed_node); 2073 list_del(&cmd->se_delayed_node); 2074 spin_unlock(&dev->delayed_cmd_lock); 2075 2076 cmd->transport_state |= CMD_T_SENT; 2077 2078 __target_execute_cmd(cmd, true); 2079 2080 if (cmd->sam_task_attr == TCM_ORDERED_TAG) 2081 break; 2082 } 2083 } 2084 2085 /* 2086 * Called from I/O completion to determine which dormant/delayed 2087 * and ordered cmds need to have their tasks added to the execution queue. 
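 *
 * Descriptive note: completion of a SIMPLE command drops dev->simple_cmds,
 * completion of an ORDERED command drops dev->dev_ordered_sync, and in both
 * cases dev_cur_ordered_id advances before target_restart_delayed_cmds()
 * re-runs anything parked on dev->delayed_cmd_list, up to and including the
 * next ORDERED command.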
2088 */ 2089 static void transport_complete_task_attr(struct se_cmd *cmd) 2090 { 2091 struct se_device *dev = cmd->se_dev; 2092 2093 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 2094 return; 2095 2096 if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET)) 2097 goto restart; 2098 2099 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { 2100 atomic_dec_mb(&dev->simple_cmds); 2101 dev->dev_cur_ordered_id++; 2102 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { 2103 dev->dev_cur_ordered_id++; 2104 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", 2105 dev->dev_cur_ordered_id); 2106 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { 2107 atomic_dec_mb(&dev->dev_ordered_sync); 2108 2109 dev->dev_cur_ordered_id++; 2110 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", 2111 dev->dev_cur_ordered_id); 2112 } 2113 cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; 2114 2115 restart: 2116 target_restart_delayed_cmds(dev); 2117 } 2118 2119 static void transport_complete_qf(struct se_cmd *cmd) 2120 { 2121 int ret = 0; 2122 2123 transport_complete_task_attr(cmd); 2124 /* 2125 * If a fabric driver ->write_pending() or ->queue_data_in() callback 2126 * has returned neither -ENOMEM or -EAGAIN, assume it's fatal and 2127 * the same callbacks should not be retried. Return CHECK_CONDITION 2128 * if a scsi_status is not already set. 2129 * 2130 * If a fabric driver ->queue_status() has returned non zero, always 2131 * keep retrying no matter what.. 2132 */ 2133 if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) { 2134 if (cmd->scsi_status) 2135 goto queue_status; 2136 2137 translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 2138 goto queue_status; 2139 } 2140 2141 /* 2142 * Check if we need to send a sense buffer from 2143 * the struct se_cmd in question. We do NOT want 2144 * to take this path of the IO has been marked as 2145 * needing to be treated like a "normal read". This 2146 * is the case if it's a tape read, and either the 2147 * FM, EOM, or ILI bits are set, but there is no 2148 * sense data. 2149 */ 2150 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && 2151 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 2152 goto queue_status; 2153 2154 switch (cmd->data_direction) { 2155 case DMA_FROM_DEVICE: 2156 /* queue status if not treating this as a normal read */ 2157 if (cmd->scsi_status && 2158 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) 2159 goto queue_status; 2160 2161 trace_target_cmd_complete(cmd); 2162 ret = cmd->se_tfo->queue_data_in(cmd); 2163 break; 2164 case DMA_TO_DEVICE: 2165 if (cmd->se_cmd_flags & SCF_BIDI) { 2166 ret = cmd->se_tfo->queue_data_in(cmd); 2167 break; 2168 } 2169 /* fall through */ 2170 case DMA_NONE: 2171 queue_status: 2172 trace_target_cmd_complete(cmd); 2173 ret = cmd->se_tfo->queue_status(cmd); 2174 break; 2175 default: 2176 break; 2177 } 2178 2179 if (ret < 0) { 2180 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 2181 return; 2182 } 2183 transport_cmd_check_stop_to_fabric(cmd); 2184 } 2185 2186 static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev, 2187 int err, bool write_pending) 2188 { 2189 /* 2190 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or 2191 * ->queue_data_in() callbacks from new process context. 2192 * 2193 * Otherwise for other errors, transport_complete_qf() will send 2194 * CHECK_CONDITION via ->queue_status() instead of attempting to 2195 * retry associated fabric driver data-transfer callbacks. 
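	 *
	 * Illustrative sketch only: a fabric data-transfer callback that hits
	 * transient backpressure should return -EAGAIN (or -ENOMEM) so the
	 * command is parked here and later replayed by transport_complete_qf().
	 * my_ring_full() and my_send_data_in() below are hypothetical.
	 *
	 *	static int my_queue_data_in(struct se_cmd *cmd)
	 *	{
	 *		if (my_ring_full(cmd->se_sess->fabric_sess_ptr))
	 *			return -EAGAIN;
	 *		return my_send_data_in(cmd);
	 *	}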
2196 */ 2197 if (err == -EAGAIN || err == -ENOMEM) { 2198 cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP : 2199 TRANSPORT_COMPLETE_QF_OK; 2200 } else { 2201 pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err); 2202 cmd->t_state = TRANSPORT_COMPLETE_QF_ERR; 2203 } 2204 2205 spin_lock_irq(&dev->qf_cmd_lock); 2206 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 2207 atomic_inc_mb(&dev->dev_qf_count); 2208 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 2209 2210 schedule_work(&cmd->se_dev->qf_work_queue); 2211 } 2212 2213 static bool target_read_prot_action(struct se_cmd *cmd) 2214 { 2215 switch (cmd->prot_op) { 2216 case TARGET_PROT_DIN_STRIP: 2217 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { 2218 u32 sectors = cmd->data_length >> 2219 ilog2(cmd->se_dev->dev_attrib.block_size); 2220 2221 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 2222 sectors, 0, cmd->t_prot_sg, 2223 0); 2224 if (cmd->pi_err) 2225 return true; 2226 } 2227 break; 2228 case TARGET_PROT_DIN_INSERT: 2229 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) 2230 break; 2231 2232 sbc_dif_generate(cmd); 2233 break; 2234 default: 2235 break; 2236 } 2237 2238 return false; 2239 } 2240 2241 static void target_complete_ok_work(struct work_struct *work) 2242 { 2243 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2244 int ret; 2245 2246 /* 2247 * Check if we need to move delayed/dormant tasks from cmds on the 2248 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 2249 * Attribute. 2250 */ 2251 transport_complete_task_attr(cmd); 2252 2253 /* 2254 * Check to schedule QUEUE_FULL work, or execute an existing 2255 * cmd->transport_qf_callback() 2256 */ 2257 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 2258 schedule_work(&cmd->se_dev->qf_work_queue); 2259 2260 /* 2261 * Check if we need to send a sense buffer from 2262 * the struct se_cmd in question. We do NOT want 2263 * to take this path of the IO has been marked as 2264 * needing to be treated like a "normal read". This 2265 * is the case if it's a tape read, and either the 2266 * FM, EOM, or ILI bits are set, but there is no 2267 * sense data. 2268 */ 2269 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && 2270 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 2271 WARN_ON(!cmd->scsi_status); 2272 ret = transport_send_check_condition_and_sense( 2273 cmd, 0, 1); 2274 if (ret) 2275 goto queue_full; 2276 2277 transport_cmd_check_stop_to_fabric(cmd); 2278 return; 2279 } 2280 /* 2281 * Check for a callback, used by amongst other things 2282 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. 
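	 *
	 * The callback has the shape
	 *	sense_reason_t (*cb)(struct se_cmd *cmd, bool success, int *post_ret);
	 * and, as handled below: a nonzero return turns the completion into a
	 * CHECK_CONDITION; a zero return with *post_ret left at 0 means the
	 * callback has taken over completion (e.g. COMPARE_AND_WRITE issuing
	 * its WRITE phase); a zero return with *post_ret set falls through and
	 * queues the response.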
2283 */ 2284 if (cmd->transport_complete_callback) { 2285 sense_reason_t rc; 2286 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); 2287 bool zero_dl = !(cmd->data_length); 2288 int post_ret = 0; 2289 2290 rc = cmd->transport_complete_callback(cmd, true, &post_ret); 2291 if (!rc && !post_ret) { 2292 if (caw && zero_dl) 2293 goto queue_rsp; 2294 2295 return; 2296 } else if (rc) { 2297 ret = transport_send_check_condition_and_sense(cmd, 2298 rc, 0); 2299 if (ret) 2300 goto queue_full; 2301 2302 transport_cmd_check_stop_to_fabric(cmd); 2303 return; 2304 } 2305 } 2306 2307 queue_rsp: 2308 switch (cmd->data_direction) { 2309 case DMA_FROM_DEVICE: 2310 /* 2311 * if this is a READ-type IO, but SCSI status 2312 * is set, then skip returning data and just 2313 * return the status -- unless this IO is marked 2314 * as needing to be treated as a normal read, 2315 * in which case we want to go ahead and return 2316 * the data. This happens, for example, for tape 2317 * reads with the FM, EOM, or ILI bits set, with 2318 * no sense data. 2319 */ 2320 if (cmd->scsi_status && 2321 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) 2322 goto queue_status; 2323 2324 atomic_long_add(cmd->data_length, 2325 &cmd->se_lun->lun_stats.tx_data_octets); 2326 /* 2327 * Perform READ_STRIP of PI using software emulation when 2328 * backend had PI enabled, if the transport will not be 2329 * performing hardware READ_STRIP offload. 2330 */ 2331 if (target_read_prot_action(cmd)) { 2332 ret = transport_send_check_condition_and_sense(cmd, 2333 cmd->pi_err, 0); 2334 if (ret) 2335 goto queue_full; 2336 2337 transport_cmd_check_stop_to_fabric(cmd); 2338 return; 2339 } 2340 2341 trace_target_cmd_complete(cmd); 2342 ret = cmd->se_tfo->queue_data_in(cmd); 2343 if (ret) 2344 goto queue_full; 2345 break; 2346 case DMA_TO_DEVICE: 2347 atomic_long_add(cmd->data_length, 2348 &cmd->se_lun->lun_stats.rx_data_octets); 2349 /* 2350 * Check if we need to send READ payload for BIDI-COMMAND 2351 */ 2352 if (cmd->se_cmd_flags & SCF_BIDI) { 2353 atomic_long_add(cmd->data_length, 2354 &cmd->se_lun->lun_stats.tx_data_octets); 2355 ret = cmd->se_tfo->queue_data_in(cmd); 2356 if (ret) 2357 goto queue_full; 2358 break; 2359 } 2360 /* fall through */ 2361 case DMA_NONE: 2362 queue_status: 2363 trace_target_cmd_complete(cmd); 2364 ret = cmd->se_tfo->queue_status(cmd); 2365 if (ret) 2366 goto queue_full; 2367 break; 2368 default: 2369 break; 2370 } 2371 2372 transport_cmd_check_stop_to_fabric(cmd); 2373 return; 2374 2375 queue_full: 2376 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 2377 " data_direction: %d\n", cmd, cmd->data_direction); 2378 2379 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 2380 } 2381 2382 void target_free_sgl(struct scatterlist *sgl, int nents) 2383 { 2384 sgl_free_n_order(sgl, nents, 0); 2385 } 2386 EXPORT_SYMBOL(target_free_sgl); 2387 2388 static inline void transport_reset_sgl_orig(struct se_cmd *cmd) 2389 { 2390 /* 2391 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE 2392 * emulation, and free + reset pointers if necessary.. 
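	 *
	 * Descriptive note: the COMPARE_AND_WRITE emulation swaps a scratch
	 * write SGL into cmd->t_data_sg and parks the original descriptor in
	 * cmd->t_data_sg_orig; the swap is undone here (and the scratch
	 * scatterlist freed) before transport_free_pages() releases the data
	 * pages, so nothing is freed twice.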
2393 */ 2394 if (!cmd->t_data_sg_orig) 2395 return; 2396 2397 kfree(cmd->t_data_sg); 2398 cmd->t_data_sg = cmd->t_data_sg_orig; 2399 cmd->t_data_sg_orig = NULL; 2400 cmd->t_data_nents = cmd->t_data_nents_orig; 2401 cmd->t_data_nents_orig = 0; 2402 } 2403 2404 static inline void transport_free_pages(struct se_cmd *cmd) 2405 { 2406 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2407 target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); 2408 cmd->t_prot_sg = NULL; 2409 cmd->t_prot_nents = 0; 2410 } 2411 2412 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { 2413 /* 2414 * Release special case READ buffer payload required for 2415 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE 2416 */ 2417 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { 2418 target_free_sgl(cmd->t_bidi_data_sg, 2419 cmd->t_bidi_data_nents); 2420 cmd->t_bidi_data_sg = NULL; 2421 cmd->t_bidi_data_nents = 0; 2422 } 2423 transport_reset_sgl_orig(cmd); 2424 return; 2425 } 2426 transport_reset_sgl_orig(cmd); 2427 2428 target_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2429 cmd->t_data_sg = NULL; 2430 cmd->t_data_nents = 0; 2431 2432 target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 2433 cmd->t_bidi_data_sg = NULL; 2434 cmd->t_bidi_data_nents = 0; 2435 } 2436 2437 void *transport_kmap_data_sg(struct se_cmd *cmd) 2438 { 2439 struct scatterlist *sg = cmd->t_data_sg; 2440 struct page **pages; 2441 int i; 2442 2443 /* 2444 * We need to take into account a possible offset here for fabrics like 2445 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 2446 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 2447 */ 2448 if (!cmd->t_data_nents) 2449 return NULL; 2450 2451 BUG_ON(!sg); 2452 if (cmd->t_data_nents == 1) 2453 return kmap(sg_page(sg)) + sg->offset; 2454 2455 /* >1 page. use vmap */ 2456 pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL); 2457 if (!pages) 2458 return NULL; 2459 2460 /* convert sg[] to pages[] */ 2461 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 2462 pages[i] = sg_page(sg); 2463 } 2464 2465 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 2466 kfree(pages); 2467 if (!cmd->t_data_vmap) 2468 return NULL; 2469 2470 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 2471 } 2472 EXPORT_SYMBOL(transport_kmap_data_sg); 2473 2474 void transport_kunmap_data_sg(struct se_cmd *cmd) 2475 { 2476 if (!cmd->t_data_nents) { 2477 return; 2478 } else if (cmd->t_data_nents == 1) { 2479 kunmap(sg_page(cmd->t_data_sg)); 2480 return; 2481 } 2482 2483 vunmap(cmd->t_data_vmap); 2484 cmd->t_data_vmap = NULL; 2485 } 2486 EXPORT_SYMBOL(transport_kunmap_data_sg); 2487 2488 int 2489 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, 2490 bool zero_page, bool chainable) 2491 { 2492 gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0); 2493 2494 *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents); 2495 return *sgl ? 0 : -ENOMEM; 2496 } 2497 EXPORT_SYMBOL(target_alloc_sgl); 2498 2499 /* 2500 * Allocate any required resources to execute the command. For writes we 2501 * might not have the payload yet, so notify the fabric via a call to 2502 * ->write_pending instead. Otherwise place it on the execution queue. 
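 *
 * Illustrative sketch only: on the fabric side, ->write_pending() typically
 * solicits the data-out payload from the initiator and, once it has landed in
 * cmd->t_data_sg, hands the command back to the core with
 * target_execute_cmd(). my_request_data_out() and my_data_out_done() below
 * are hypothetical.
 *
 *	static int my_write_pending(struct se_cmd *cmd)
 *	{
 *		return my_request_data_out(cmd);
 *	}
 *
 *	static void my_data_out_done(struct se_cmd *cmd)
 *	{
 *		target_execute_cmd(cmd);
 *	}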
2503 */ 2504 sense_reason_t 2505 transport_generic_new_cmd(struct se_cmd *cmd) 2506 { 2507 unsigned long flags; 2508 int ret = 0; 2509 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); 2510 2511 if (cmd->prot_op != TARGET_PROT_NORMAL && 2512 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2513 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, 2514 cmd->prot_length, true, false); 2515 if (ret < 0) 2516 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2517 } 2518 2519 /* 2520 * Determine if the TCM fabric module has already allocated physical 2521 * memory, and is directly calling transport_generic_map_mem_to_cmd() 2522 * beforehand. 2523 */ 2524 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 2525 cmd->data_length) { 2526 2527 if ((cmd->se_cmd_flags & SCF_BIDI) || 2528 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { 2529 u32 bidi_length; 2530 2531 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) 2532 bidi_length = cmd->t_task_nolb * 2533 cmd->se_dev->dev_attrib.block_size; 2534 else 2535 bidi_length = cmd->data_length; 2536 2537 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2538 &cmd->t_bidi_data_nents, 2539 bidi_length, zero_flag, false); 2540 if (ret < 0) 2541 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2542 } 2543 2544 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2545 cmd->data_length, zero_flag, false); 2546 if (ret < 0) 2547 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2548 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 2549 cmd->data_length) { 2550 /* 2551 * Special case for COMPARE_AND_WRITE with fabrics 2552 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. 2553 */ 2554 u32 caw_length = cmd->t_task_nolb * 2555 cmd->se_dev->dev_attrib.block_size; 2556 2557 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2558 &cmd->t_bidi_data_nents, 2559 caw_length, zero_flag, false); 2560 if (ret < 0) 2561 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2562 } 2563 /* 2564 * If this command is not a write we can execute it right here, 2565 * for write buffers we need to notify the fabric driver first 2566 * and let it call back once the write buffers are ready. 2567 */ 2568 target_add_to_state_list(cmd); 2569 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { 2570 target_execute_cmd(cmd); 2571 return 0; 2572 } 2573 2574 spin_lock_irqsave(&cmd->t_state_lock, flags); 2575 cmd->t_state = TRANSPORT_WRITE_PENDING; 2576 /* 2577 * Determine if frontend context caller is requesting the stopping of 2578 * this command for frontend exceptions. 
2579 */ 2580 if (cmd->transport_state & CMD_T_STOP && 2581 !cmd->se_tfo->write_pending_must_be_called) { 2582 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 2583 __func__, __LINE__, cmd->tag); 2584 2585 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2586 2587 complete_all(&cmd->t_transport_stop_comp); 2588 return 0; 2589 } 2590 cmd->transport_state &= ~CMD_T_ACTIVE; 2591 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2592 2593 ret = cmd->se_tfo->write_pending(cmd); 2594 if (ret) 2595 goto queue_full; 2596 2597 return 0; 2598 2599 queue_full: 2600 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 2601 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); 2602 return 0; 2603 } 2604 EXPORT_SYMBOL(transport_generic_new_cmd); 2605 2606 static void transport_write_pending_qf(struct se_cmd *cmd) 2607 { 2608 unsigned long flags; 2609 int ret; 2610 bool stop; 2611 2612 spin_lock_irqsave(&cmd->t_state_lock, flags); 2613 stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED)); 2614 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2615 2616 if (stop) { 2617 pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n", 2618 __func__, __LINE__, cmd->tag); 2619 complete_all(&cmd->t_transport_stop_comp); 2620 return; 2621 } 2622 2623 ret = cmd->se_tfo->write_pending(cmd); 2624 if (ret) { 2625 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", 2626 cmd); 2627 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); 2628 } 2629 } 2630 2631 static bool 2632 __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, 2633 unsigned long *flags); 2634 2635 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) 2636 { 2637 unsigned long flags; 2638 2639 spin_lock_irqsave(&cmd->t_state_lock, flags); 2640 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); 2641 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2642 } 2643 2644 /* 2645 * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has 2646 * finished. 2647 */ 2648 void target_put_cmd_and_wait(struct se_cmd *cmd) 2649 { 2650 DECLARE_COMPLETION_ONSTACK(compl); 2651 2652 WARN_ON_ONCE(cmd->abrt_compl); 2653 cmd->abrt_compl = &compl; 2654 target_put_sess_cmd(cmd); 2655 wait_for_completion(&compl); 2656 } 2657 2658 /* 2659 * This function is called by frontend drivers after processing of a command 2660 * has finished. 2661 * 2662 * The protocol for ensuring that either the regular frontend command 2663 * processing flow or target_handle_abort() code drops one reference is as 2664 * follows: 2665 * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause 2666 * the frontend driver to call this function synchronously or asynchronously. 2667 * That will cause one reference to be dropped. 2668 * - During regular command processing the target core sets CMD_T_COMPLETE 2669 * before invoking one of the .queue_*() functions. 2670 * - The code that aborts commands skips commands and TMFs for which 2671 * CMD_T_COMPLETE has been set. 2672 * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for 2673 * commands that will be aborted. 2674 * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set 2675 * transport_generic_free_cmd() skips its call to target_put_sess_cmd(). 2676 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will 2677 * be called and will drop a reference. 2678 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task() 2679 * will be called. 
target_handle_abort() will drop the final reference. 2680 */ 2681 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2682 { 2683 DECLARE_COMPLETION_ONSTACK(compl); 2684 int ret = 0; 2685 bool aborted = false, tas = false; 2686 2687 if (wait_for_tasks) 2688 target_wait_free_cmd(cmd, &aborted, &tas); 2689 2690 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) { 2691 /* 2692 * Handle WRITE failure case where transport_generic_new_cmd() 2693 * has already added se_cmd to state_list, but fabric has 2694 * failed command before I/O submission. 2695 */ 2696 if (cmd->state_active) 2697 target_remove_from_state_list(cmd); 2698 } 2699 if (aborted) 2700 cmd->free_compl = &compl; 2701 ret = target_put_sess_cmd(cmd); 2702 if (aborted) { 2703 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); 2704 wait_for_completion(&compl); 2705 ret = 1; 2706 } 2707 return ret; 2708 } 2709 EXPORT_SYMBOL(transport_generic_free_cmd); 2710 2711 /** 2712 * target_get_sess_cmd - Add command to active ->sess_cmd_list 2713 * @se_cmd: command descriptor to add 2714 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() 2715 */ 2716 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) 2717 { 2718 struct se_session *se_sess = se_cmd->se_sess; 2719 unsigned long flags; 2720 int ret = 0; 2721 2722 /* 2723 * Add a second kref if the fabric caller is expecting to handle 2724 * fabric acknowledgement that requires two target_put_sess_cmd() 2725 * invocations before se_cmd descriptor release. 2726 */ 2727 if (ack_kref) { 2728 if (!kref_get_unless_zero(&se_cmd->cmd_kref)) 2729 return -EINVAL; 2730 2731 se_cmd->se_cmd_flags |= SCF_ACK_KREF; 2732 } 2733 2734 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2735 if (se_sess->sess_tearing_down) { 2736 ret = -ESHUTDOWN; 2737 goto out; 2738 } 2739 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2740 percpu_ref_get(&se_sess->cmd_count); 2741 out: 2742 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2743 2744 if (ret && ack_kref) 2745 target_put_sess_cmd(se_cmd); 2746 2747 return ret; 2748 } 2749 EXPORT_SYMBOL(target_get_sess_cmd); 2750 2751 static void target_free_cmd_mem(struct se_cmd *cmd) 2752 { 2753 transport_free_pages(cmd); 2754 2755 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 2756 core_tmr_release_req(cmd->se_tmr_req); 2757 if (cmd->t_task_cdb != cmd->__t_task_cdb) 2758 kfree(cmd->t_task_cdb); 2759 } 2760 2761 static void target_release_cmd_kref(struct kref *kref) 2762 { 2763 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2764 struct se_session *se_sess = se_cmd->se_sess; 2765 struct completion *free_compl = se_cmd->free_compl; 2766 struct completion *abrt_compl = se_cmd->abrt_compl; 2767 unsigned long flags; 2768 2769 if (se_cmd->lun_ref_active) 2770 percpu_ref_put(&se_cmd->se_lun->lun_ref); 2771 2772 if (se_sess) { 2773 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2774 list_del_init(&se_cmd->se_cmd_list); 2775 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2776 } 2777 2778 target_free_cmd_mem(se_cmd); 2779 se_cmd->se_tfo->release_cmd(se_cmd); 2780 if (free_compl) 2781 complete(free_compl); 2782 if (abrt_compl) 2783 complete(abrt_compl); 2784 2785 percpu_ref_put(&se_sess->cmd_count); 2786 } 2787 2788 /** 2789 * target_put_sess_cmd - decrease the command reference count 2790 * @se_cmd: command to drop a reference from 2791 * 2792 * Returns 1 if and only if this target_put_sess_cmd() call caused the 2793 * refcount to drop to zero. Returns zero otherwise. 
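 *
 * Illustrative pairing (names hypothetical): a fabric that obtained an extra
 * reference with TARGET_SCF_ACK_KREF / target_get_sess_cmd(..., true) drops
 * that reference from the path that sees the initiator acknowledge the
 * response, e.g.:
 *
 *	static void my_response_acked(struct my_cmd *mc)
 *	{
 *		target_put_sess_cmd(&mc->se_cmd);
 *	}
 *
 * while the other reference is dropped via transport_generic_free_cmd() once
 * local processing of the command has finished.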
2794 */ 2795 int target_put_sess_cmd(struct se_cmd *se_cmd) 2796 { 2797 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); 2798 } 2799 EXPORT_SYMBOL(target_put_sess_cmd); 2800 2801 static const char *data_dir_name(enum dma_data_direction d) 2802 { 2803 switch (d) { 2804 case DMA_BIDIRECTIONAL: return "BIDI"; 2805 case DMA_TO_DEVICE: return "WRITE"; 2806 case DMA_FROM_DEVICE: return "READ"; 2807 case DMA_NONE: return "NONE"; 2808 } 2809 2810 return "(?)"; 2811 } 2812 2813 static const char *cmd_state_name(enum transport_state_table t) 2814 { 2815 switch (t) { 2816 case TRANSPORT_NO_STATE: return "NO_STATE"; 2817 case TRANSPORT_NEW_CMD: return "NEW_CMD"; 2818 case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING"; 2819 case TRANSPORT_PROCESSING: return "PROCESSING"; 2820 case TRANSPORT_COMPLETE: return "COMPLETE"; 2821 case TRANSPORT_ISTATE_PROCESSING: 2822 return "ISTATE_PROCESSING"; 2823 case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP"; 2824 case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK"; 2825 case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR"; 2826 } 2827 2828 return "(?)"; 2829 } 2830 2831 static void target_append_str(char **str, const char *txt) 2832 { 2833 char *prev = *str; 2834 2835 *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) : 2836 kstrdup(txt, GFP_ATOMIC); 2837 kfree(prev); 2838 } 2839 2840 /* 2841 * Convert a transport state bitmask into a string. The caller is 2842 * responsible for freeing the returned pointer. 2843 */ 2844 static char *target_ts_to_str(u32 ts) 2845 { 2846 char *str = NULL; 2847 2848 if (ts & CMD_T_ABORTED) 2849 target_append_str(&str, "aborted"); 2850 if (ts & CMD_T_ACTIVE) 2851 target_append_str(&str, "active"); 2852 if (ts & CMD_T_COMPLETE) 2853 target_append_str(&str, "complete"); 2854 if (ts & CMD_T_SENT) 2855 target_append_str(&str, "sent"); 2856 if (ts & CMD_T_STOP) 2857 target_append_str(&str, "stop"); 2858 if (ts & CMD_T_FABRIC_STOP) 2859 target_append_str(&str, "fabric_stop"); 2860 2861 return str; 2862 } 2863 2864 static const char *target_tmf_name(enum tcm_tmreq_table tmf) 2865 { 2866 switch (tmf) { 2867 case TMR_ABORT_TASK: return "ABORT_TASK"; 2868 case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET"; 2869 case TMR_CLEAR_ACA: return "CLEAR_ACA"; 2870 case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET"; 2871 case TMR_LUN_RESET: return "LUN_RESET"; 2872 case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET"; 2873 case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET"; 2874 case TMR_UNKNOWN: break; 2875 } 2876 return "(?)"; 2877 } 2878 2879 void target_show_cmd(const char *pfx, struct se_cmd *cmd) 2880 { 2881 char *ts_str = target_ts_to_str(cmd->transport_state); 2882 const u8 *cdb = cmd->t_task_cdb; 2883 struct se_tmr_req *tmf = cmd->se_tmr_req; 2884 2885 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2886 pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n", 2887 pfx, cdb[0], cdb[1], cmd->tag, 2888 data_dir_name(cmd->data_direction), 2889 cmd->se_tfo->get_cmd_state(cmd), 2890 cmd_state_name(cmd->t_state), cmd->data_length, 2891 kref_read(&cmd->cmd_kref), ts_str); 2892 } else { 2893 pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n", 2894 pfx, target_tmf_name(tmf->function), cmd->tag, 2895 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd), 2896 cmd_state_name(cmd->t_state), 2897 kref_read(&cmd->cmd_kref), ts_str); 2898 } 2899 kfree(ts_str); 2900 } 2901 EXPORT_SYMBOL(target_show_cmd); 2902 2903 /** 2904 * 
target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued. 2905 * @se_sess: session to flag 2906 */ 2907 void target_sess_cmd_list_set_waiting(struct se_session *se_sess) 2908 { 2909 unsigned long flags; 2910 2911 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2912 se_sess->sess_tearing_down = 1; 2913 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2914 2915 percpu_ref_kill(&se_sess->cmd_count); 2916 } 2917 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); 2918 2919 /** 2920 * target_wait_for_sess_cmds - Wait for outstanding commands 2921 * @se_sess: session to wait for active I/O 2922 */ 2923 void target_wait_for_sess_cmds(struct se_session *se_sess) 2924 { 2925 struct se_cmd *cmd; 2926 int ret; 2927 2928 WARN_ON_ONCE(!se_sess->sess_tearing_down); 2929 2930 do { 2931 ret = wait_event_timeout(se_sess->cmd_list_wq, 2932 percpu_ref_is_zero(&se_sess->cmd_count), 2933 180 * HZ); 2934 list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list) 2935 target_show_cmd("session shutdown: still waiting for ", 2936 cmd); 2937 } while (ret <= 0); 2938 } 2939 EXPORT_SYMBOL(target_wait_for_sess_cmds); 2940 2941 /* 2942 * Prevent that new percpu_ref_tryget_live() calls succeed and wait until 2943 * all references to the LUN have been released. Called during LUN shutdown. 2944 */ 2945 void transport_clear_lun_ref(struct se_lun *lun) 2946 { 2947 percpu_ref_kill(&lun->lun_ref); 2948 wait_for_completion(&lun->lun_shutdown_comp); 2949 } 2950 2951 static bool 2952 __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, 2953 bool *aborted, bool *tas, unsigned long *flags) 2954 __releases(&cmd->t_state_lock) 2955 __acquires(&cmd->t_state_lock) 2956 { 2957 2958 assert_spin_locked(&cmd->t_state_lock); 2959 WARN_ON_ONCE(!irqs_disabled()); 2960 2961 if (fabric_stop) 2962 cmd->transport_state |= CMD_T_FABRIC_STOP; 2963 2964 if (cmd->transport_state & CMD_T_ABORTED) 2965 *aborted = true; 2966 2967 if (cmd->transport_state & CMD_T_TAS) 2968 *tas = true; 2969 2970 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 2971 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2972 return false; 2973 2974 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 2975 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2976 return false; 2977 2978 if (!(cmd->transport_state & CMD_T_ACTIVE)) 2979 return false; 2980 2981 if (fabric_stop && *aborted) 2982 return false; 2983 2984 cmd->transport_state |= CMD_T_STOP; 2985 2986 target_show_cmd("wait_for_tasks: Stopping ", cmd); 2987 2988 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 2989 2990 while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp, 2991 180 * HZ)) 2992 target_show_cmd("wait for tasks: ", cmd); 2993 2994 spin_lock_irqsave(&cmd->t_state_lock, *flags); 2995 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 2996 2997 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->" 2998 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); 2999 3000 return true; 3001 } 3002 3003 /** 3004 * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp 3005 * @cmd: command to wait on 3006 */ 3007 bool transport_wait_for_tasks(struct se_cmd *cmd) 3008 { 3009 unsigned long flags; 3010 bool ret, aborted = false, tas = false; 3011 3012 spin_lock_irqsave(&cmd->t_state_lock, flags); 3013 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); 3014 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3015 3016 return ret; 3017 } 3018 EXPORT_SYMBOL(transport_wait_for_tasks); 3019 3020 struct sense_info { 3021 u8 key; 
3022 u8 asc; 3023 u8 ascq; 3024 bool add_sector_info; 3025 }; 3026 3027 static const struct sense_info sense_info_table[] = { 3028 [TCM_NO_SENSE] = { 3029 .key = NOT_READY 3030 }, 3031 [TCM_NON_EXISTENT_LUN] = { 3032 .key = ILLEGAL_REQUEST, 3033 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */ 3034 }, 3035 [TCM_UNSUPPORTED_SCSI_OPCODE] = { 3036 .key = ILLEGAL_REQUEST, 3037 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 3038 }, 3039 [TCM_SECTOR_COUNT_TOO_MANY] = { 3040 .key = ILLEGAL_REQUEST, 3041 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 3042 }, 3043 [TCM_UNKNOWN_MODE_PAGE] = { 3044 .key = ILLEGAL_REQUEST, 3045 .asc = 0x24, /* INVALID FIELD IN CDB */ 3046 }, 3047 [TCM_CHECK_CONDITION_ABORT_CMD] = { 3048 .key = ABORTED_COMMAND, 3049 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */ 3050 .ascq = 0x03, 3051 }, 3052 [TCM_INCORRECT_AMOUNT_OF_DATA] = { 3053 .key = ABORTED_COMMAND, 3054 .asc = 0x0c, /* WRITE ERROR */ 3055 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */ 3056 }, 3057 [TCM_INVALID_CDB_FIELD] = { 3058 .key = ILLEGAL_REQUEST, 3059 .asc = 0x24, /* INVALID FIELD IN CDB */ 3060 }, 3061 [TCM_INVALID_PARAMETER_LIST] = { 3062 .key = ILLEGAL_REQUEST, 3063 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ 3064 }, 3065 [TCM_TOO_MANY_TARGET_DESCS] = { 3066 .key = ILLEGAL_REQUEST, 3067 .asc = 0x26, 3068 .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */ 3069 }, 3070 [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = { 3071 .key = ILLEGAL_REQUEST, 3072 .asc = 0x26, 3073 .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */ 3074 }, 3075 [TCM_TOO_MANY_SEGMENT_DESCS] = { 3076 .key = ILLEGAL_REQUEST, 3077 .asc = 0x26, 3078 .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */ 3079 }, 3080 [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = { 3081 .key = ILLEGAL_REQUEST, 3082 .asc = 0x26, 3083 .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */ 3084 }, 3085 [TCM_PARAMETER_LIST_LENGTH_ERROR] = { 3086 .key = ILLEGAL_REQUEST, 3087 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ 3088 }, 3089 [TCM_UNEXPECTED_UNSOLICITED_DATA] = { 3090 .key = ILLEGAL_REQUEST, 3091 .asc = 0x0c, /* WRITE ERROR */ 3092 .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */ 3093 }, 3094 [TCM_SERVICE_CRC_ERROR] = { 3095 .key = ABORTED_COMMAND, 3096 .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */ 3097 .ascq = 0x05, /* N/A */ 3098 }, 3099 [TCM_SNACK_REJECTED] = { 3100 .key = ABORTED_COMMAND, 3101 .asc = 0x11, /* READ ERROR */ 3102 .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */ 3103 }, 3104 [TCM_WRITE_PROTECTED] = { 3105 .key = DATA_PROTECT, 3106 .asc = 0x27, /* WRITE PROTECTED */ 3107 }, 3108 [TCM_ADDRESS_OUT_OF_RANGE] = { 3109 .key = ILLEGAL_REQUEST, 3110 .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ 3111 }, 3112 [TCM_CHECK_CONDITION_UNIT_ATTENTION] = { 3113 .key = UNIT_ATTENTION, 3114 }, 3115 [TCM_CHECK_CONDITION_NOT_READY] = { 3116 .key = NOT_READY, 3117 }, 3118 [TCM_MISCOMPARE_VERIFY] = { 3119 .key = MISCOMPARE, 3120 .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */ 3121 .ascq = 0x00, 3122 }, 3123 [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = { 3124 .key = ABORTED_COMMAND, 3125 .asc = 0x10, 3126 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */ 3127 .add_sector_info = true, 3128 }, 3129 [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = { 3130 .key = ABORTED_COMMAND, 3131 .asc = 0x10, 3132 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ 3133 .add_sector_info = true, 3134 }, 3135 [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = { 3136 .key = ABORTED_COMMAND, 3137 .asc = 0x10, 3138 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE 
TAG CHECK FAILED */
		.add_sector_info = true,
	},
	[TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
		.key = COPY_ABORTED,
		.asc = 0x0d,
		.ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
	},
	[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
		/*
		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
		 * Solaris initiators. Returning NOT READY instead means the
		 * operations will be retried a finite number of times and we
		 * can survive intermittent errors.
		 */
		.key = NOT_READY,
		.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
	},
	[TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
		/*
		 * From spc4r22, sections 5.7.7 and 5.7.8:
		 * If a PERSISTENT RESERVE OUT command with a REGISTER service
		 * action, a REGISTER AND IGNORE EXISTING KEY service action,
		 * or a REGISTER AND MOVE service action is attempted, but
		 * there are insufficient device server resources to complete
		 * the operation, then the command shall be terminated with
		 * CHECK CONDITION status, with the sense key set to ILLEGAL
		 * REQUEST, and the additional sense code set to INSUFFICIENT
		 * REGISTRATION RESOURCES.
		 */
		.key = ILLEGAL_REQUEST,
		.asc = 0x55,
		.ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
	},
};

/**
 * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
 * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
 *   be stored.
 * @reason: LIO sense reason code. If this argument has the value
 *   TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
 *   dequeuing a unit attention fails due to multiple commands being processed
 *   concurrently, set the command status to BUSY.
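 *
 * For example, a backend failing a command with TCM_ADDRESS_OUT_OF_RANGE ends
 * up with cmd->scsi_status == SAM_STAT_CHECK_CONDITION and a sense buffer
 * built by scsi_build_sense_buffer() carrying ILLEGAL REQUEST / ASC 0x21 /
 * ASCQ 0x00 (LOGICAL BLOCK ADDRESS OUT OF RANGE), per sense_info_table[]
 * above.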
3184 */ 3185 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) 3186 { 3187 const struct sense_info *si; 3188 u8 *buffer = cmd->sense_buffer; 3189 int r = (__force int)reason; 3190 u8 key, asc, ascq; 3191 bool desc_format = target_sense_desc_format(cmd->se_dev); 3192 3193 if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key) 3194 si = &sense_info_table[r]; 3195 else 3196 si = &sense_info_table[(__force int) 3197 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE]; 3198 3199 key = si->key; 3200 if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) { 3201 if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc, 3202 &ascq)) { 3203 cmd->scsi_status = SAM_STAT_BUSY; 3204 return; 3205 } 3206 } else if (si->asc == 0) { 3207 WARN_ON_ONCE(cmd->scsi_asc == 0); 3208 asc = cmd->scsi_asc; 3209 ascq = cmd->scsi_ascq; 3210 } else { 3211 asc = si->asc; 3212 ascq = si->ascq; 3213 } 3214 3215 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 3216 cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 3217 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 3218 scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq); 3219 if (si->add_sector_info) 3220 WARN_ON_ONCE(scsi_set_sense_information(buffer, 3221 cmd->scsi_sense_length, 3222 cmd->bad_sector) < 0); 3223 } 3224 3225 int 3226 transport_send_check_condition_and_sense(struct se_cmd *cmd, 3227 sense_reason_t reason, int from_transport) 3228 { 3229 unsigned long flags; 3230 3231 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); 3232 3233 spin_lock_irqsave(&cmd->t_state_lock, flags); 3234 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 3235 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3236 return 0; 3237 } 3238 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 3239 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3240 3241 if (!from_transport) 3242 translate_sense_reason(cmd, reason); 3243 3244 trace_target_cmd_complete(cmd); 3245 return cmd->se_tfo->queue_status(cmd); 3246 } 3247 EXPORT_SYMBOL(transport_send_check_condition_and_sense); 3248 3249 /** 3250 * target_send_busy - Send SCSI BUSY status back to the initiator 3251 * @cmd: SCSI command for which to send a BUSY reply. 3252 * 3253 * Note: Only call this function if target_submit_cmd*() failed. 3254 */ 3255 int target_send_busy(struct se_cmd *cmd) 3256 { 3257 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); 3258 3259 cmd->scsi_status = SAM_STAT_BUSY; 3260 trace_target_cmd_complete(cmd); 3261 return cmd->se_tfo->queue_status(cmd); 3262 } 3263 EXPORT_SYMBOL(target_send_busy); 3264 3265 static void target_tmr_work(struct work_struct *work) 3266 { 3267 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 3268 struct se_device *dev = cmd->se_dev; 3269 struct se_tmr_req *tmr = cmd->se_tmr_req; 3270 int ret; 3271 3272 if (cmd->transport_state & CMD_T_ABORTED) 3273 goto aborted; 3274 3275 switch (tmr->function) { 3276 case TMR_ABORT_TASK: 3277 core_tmr_abort_task(dev, tmr, cmd->se_sess); 3278 break; 3279 case TMR_ABORT_TASK_SET: 3280 case TMR_CLEAR_ACA: 3281 case TMR_CLEAR_TASK_SET: 3282 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; 3283 break; 3284 case TMR_LUN_RESET: 3285 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); 3286 tmr->response = (!ret) ? 
TMR_FUNCTION_COMPLETE : 3287 TMR_FUNCTION_REJECTED; 3288 if (tmr->response == TMR_FUNCTION_COMPLETE) { 3289 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 3290 cmd->orig_fe_lun, 0x29, 3291 ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED); 3292 } 3293 break; 3294 case TMR_TARGET_WARM_RESET: 3295 tmr->response = TMR_FUNCTION_REJECTED; 3296 break; 3297 case TMR_TARGET_COLD_RESET: 3298 tmr->response = TMR_FUNCTION_REJECTED; 3299 break; 3300 default: 3301 pr_err("Unknown TMR function: 0x%02x.\n", 3302 tmr->function); 3303 tmr->response = TMR_FUNCTION_REJECTED; 3304 break; 3305 } 3306 3307 if (cmd->transport_state & CMD_T_ABORTED) 3308 goto aborted; 3309 3310 cmd->se_tfo->queue_tm_rsp(cmd); 3311 3312 transport_cmd_check_stop_to_fabric(cmd); 3313 return; 3314 3315 aborted: 3316 target_handle_abort(cmd); 3317 } 3318 3319 int transport_generic_handle_tmr( 3320 struct se_cmd *cmd) 3321 { 3322 unsigned long flags; 3323 bool aborted = false; 3324 3325 spin_lock_irqsave(&cmd->t_state_lock, flags); 3326 if (cmd->transport_state & CMD_T_ABORTED) { 3327 aborted = true; 3328 } else { 3329 cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 3330 cmd->transport_state |= CMD_T_ACTIVE; 3331 } 3332 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3333 3334 if (aborted) { 3335 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n", 3336 cmd->se_tmr_req->function, 3337 cmd->se_tmr_req->ref_task_tag, cmd->tag); 3338 target_handle_abort(cmd); 3339 return 0; 3340 } 3341 3342 INIT_WORK(&cmd->work, target_tmr_work); 3343 schedule_work(&cmd->work); 3344 return 0; 3345 } 3346 EXPORT_SYMBOL(transport_generic_handle_tmr); 3347 3348 bool 3349 target_check_wce(struct se_device *dev) 3350 { 3351 bool wce = false; 3352 3353 if (dev->transport->get_write_cache) 3354 wce = dev->transport->get_write_cache(dev); 3355 else if (dev->dev_attrib.emulate_write_cache > 0) 3356 wce = true; 3357 3358 return wce; 3359 } 3360 3361 bool 3362 target_check_fua(struct se_device *dev) 3363 { 3364 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0; 3365 } 3366
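/*
 * Illustrative sketch only (not part of this driver): a backend deciding
 * whether a WRITE must bypass the volatile write cache can combine the FUA
 * bit parsed into se_cmd with the helper above. my_backend_write() below is
 * hypothetical.
 *
 *	static sense_reason_t my_execute_write(struct se_cmd *cmd)
 *	{
 *		bool stable = target_check_fua(cmd->se_dev) &&
 *			      (cmd->se_cmd_flags & SCF_FUA);
 *
 *		return my_backend_write(cmd, stable);
 *	}
 */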