1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /******************************************************************************* 3 * Filename: target_core_transport.c 4 * 5 * This file contains the Generic Target Engine Core. 6 * 7 * (c) Copyright 2002-2013 Datera, Inc. 8 * 9 * Nicholas A. Bellinger <nab@kernel.org> 10 * 11 ******************************************************************************/ 12 13 #include <linux/net.h> 14 #include <linux/delay.h> 15 #include <linux/string.h> 16 #include <linux/timer.h> 17 #include <linux/slab.h> 18 #include <linux/spinlock.h> 19 #include <linux/kthread.h> 20 #include <linux/in.h> 21 #include <linux/cdrom.h> 22 #include <linux/module.h> 23 #include <linux/ratelimit.h> 24 #include <linux/vmalloc.h> 25 #include <asm/unaligned.h> 26 #include <net/sock.h> 27 #include <net/tcp.h> 28 #include <scsi/scsi_proto.h> 29 #include <scsi/scsi_common.h> 30 31 #include <target/target_core_base.h> 32 #include <target/target_core_backend.h> 33 #include <target/target_core_fabric.h> 34 35 #include "target_core_internal.h" 36 #include "target_core_alua.h" 37 #include "target_core_pr.h" 38 #include "target_core_ua.h" 39 40 #define CREATE_TRACE_POINTS 41 #include <trace/events/target.h> 42 43 static struct workqueue_struct *target_completion_wq; 44 static struct kmem_cache *se_sess_cache; 45 struct kmem_cache *se_ua_cache; 46 struct kmem_cache *t10_pr_reg_cache; 47 struct kmem_cache *t10_alua_lu_gp_cache; 48 struct kmem_cache *t10_alua_lu_gp_mem_cache; 49 struct kmem_cache *t10_alua_tg_pt_gp_cache; 50 struct kmem_cache *t10_alua_lba_map_cache; 51 struct kmem_cache *t10_alua_lba_map_mem_cache; 52 53 static void transport_complete_task_attr(struct se_cmd *cmd); 54 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason); 55 static void transport_handle_queue_full(struct se_cmd *cmd, 56 struct se_device *dev, int err, bool write_pending); 57 static void target_complete_ok_work(struct work_struct *work); 58 59 int init_se_kmem_caches(void) 60 { 61 se_sess_cache = kmem_cache_create("se_sess_cache", 62 sizeof(struct se_session), __alignof__(struct se_session), 63 0, NULL); 64 if (!se_sess_cache) { 65 pr_err("kmem_cache_create() for struct se_session" 66 " failed\n"); 67 goto out; 68 } 69 se_ua_cache = kmem_cache_create("se_ua_cache", 70 sizeof(struct se_ua), __alignof__(struct se_ua), 71 0, NULL); 72 if (!se_ua_cache) { 73 pr_err("kmem_cache_create() for struct se_ua failed\n"); 74 goto out_free_sess_cache; 75 } 76 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", 77 sizeof(struct t10_pr_registration), 78 __alignof__(struct t10_pr_registration), 0, NULL); 79 if (!t10_pr_reg_cache) { 80 pr_err("kmem_cache_create() for struct t10_pr_registration" 81 " failed\n"); 82 goto out_free_ua_cache; 83 } 84 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", 85 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), 86 0, NULL); 87 if (!t10_alua_lu_gp_cache) { 88 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" 89 " failed\n"); 90 goto out_free_pr_reg_cache; 91 } 92 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", 93 sizeof(struct t10_alua_lu_gp_member), 94 __alignof__(struct t10_alua_lu_gp_member), 0, NULL); 95 if (!t10_alua_lu_gp_mem_cache) { 96 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" 97 "cache failed\n"); 98 goto out_free_lu_gp_cache; 99 } 100 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", 101 sizeof(struct t10_alua_tg_pt_gp), 102 __alignof__(struct 
t10_alua_tg_pt_gp), 0, NULL); 103 if (!t10_alua_tg_pt_gp_cache) { 104 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 105 "cache failed\n"); 106 goto out_free_lu_gp_mem_cache; 107 } 108 t10_alua_lba_map_cache = kmem_cache_create( 109 "t10_alua_lba_map_cache", 110 sizeof(struct t10_alua_lba_map), 111 __alignof__(struct t10_alua_lba_map), 0, NULL); 112 if (!t10_alua_lba_map_cache) { 113 pr_err("kmem_cache_create() for t10_alua_lba_map_" 114 "cache failed\n"); 115 goto out_free_tg_pt_gp_cache; 116 } 117 t10_alua_lba_map_mem_cache = kmem_cache_create( 118 "t10_alua_lba_map_mem_cache", 119 sizeof(struct t10_alua_lba_map_member), 120 __alignof__(struct t10_alua_lba_map_member), 0, NULL); 121 if (!t10_alua_lba_map_mem_cache) { 122 pr_err("kmem_cache_create() for t10_alua_lba_map_mem_" 123 "cache failed\n"); 124 goto out_free_lba_map_cache; 125 } 126 127 target_completion_wq = alloc_workqueue("target_completion", 128 WQ_MEM_RECLAIM, 0); 129 if (!target_completion_wq) 130 goto out_free_lba_map_mem_cache; 131 132 return 0; 133 134 out_free_lba_map_mem_cache: 135 kmem_cache_destroy(t10_alua_lba_map_mem_cache); 136 out_free_lba_map_cache: 137 kmem_cache_destroy(t10_alua_lba_map_cache); 138 out_free_tg_pt_gp_cache: 139 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 140 out_free_lu_gp_mem_cache: 141 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 142 out_free_lu_gp_cache: 143 kmem_cache_destroy(t10_alua_lu_gp_cache); 144 out_free_pr_reg_cache: 145 kmem_cache_destroy(t10_pr_reg_cache); 146 out_free_ua_cache: 147 kmem_cache_destroy(se_ua_cache); 148 out_free_sess_cache: 149 kmem_cache_destroy(se_sess_cache); 150 out: 151 return -ENOMEM; 152 } 153 154 void release_se_kmem_caches(void) 155 { 156 destroy_workqueue(target_completion_wq); 157 kmem_cache_destroy(se_sess_cache); 158 kmem_cache_destroy(se_ua_cache); 159 kmem_cache_destroy(t10_pr_reg_cache); 160 kmem_cache_destroy(t10_alua_lu_gp_cache); 161 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 162 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 163 kmem_cache_destroy(t10_alua_lba_map_cache); 164 kmem_cache_destroy(t10_alua_lba_map_mem_cache); 165 } 166 167 /* This code ensures unique mib indexes are handed out. 
*/ 168 static DEFINE_SPINLOCK(scsi_mib_index_lock); 169 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; 170 171 /* 172 * Allocate a new row index for the entry type specified 173 */ 174 u32 scsi_get_new_index(scsi_index_t type) 175 { 176 u32 new_index; 177 178 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); 179 180 spin_lock(&scsi_mib_index_lock); 181 new_index = ++scsi_mib_index[type]; 182 spin_unlock(&scsi_mib_index_lock); 183 184 return new_index; 185 } 186 187 void transport_subsystem_check_init(void) 188 { 189 int ret; 190 static int sub_api_initialized; 191 192 if (sub_api_initialized) 193 return; 194 195 ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock"); 196 if (ret != 0) 197 pr_err("Unable to load target_core_iblock\n"); 198 199 ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file"); 200 if (ret != 0) 201 pr_err("Unable to load target_core_file\n"); 202 203 ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi"); 204 if (ret != 0) 205 pr_err("Unable to load target_core_pscsi\n"); 206 207 ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user"); 208 if (ret != 0) 209 pr_err("Unable to load target_core_user\n"); 210 211 sub_api_initialized = 1; 212 } 213 214 static void target_release_sess_cmd_refcnt(struct percpu_ref *ref) 215 { 216 struct se_session *sess = container_of(ref, typeof(*sess), cmd_count); 217 218 wake_up(&sess->cmd_list_wq); 219 } 220 221 /** 222 * transport_init_session - initialize a session object 223 * @se_sess: Session object pointer. 224 * 225 * The caller must have zero-initialized @se_sess before calling this function. 226 */ 227 int transport_init_session(struct se_session *se_sess) 228 { 229 INIT_LIST_HEAD(&se_sess->sess_list); 230 INIT_LIST_HEAD(&se_sess->sess_acl_list); 231 INIT_LIST_HEAD(&se_sess->sess_cmd_list); 232 spin_lock_init(&se_sess->sess_cmd_lock); 233 init_waitqueue_head(&se_sess->cmd_list_wq); 234 return percpu_ref_init(&se_sess->cmd_count, 235 target_release_sess_cmd_refcnt, 0, GFP_KERNEL); 236 } 237 EXPORT_SYMBOL(transport_init_session); 238 239 /** 240 * transport_alloc_session - allocate a session object and initialize it 241 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported. 242 */ 243 struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops) 244 { 245 struct se_session *se_sess; 246 int ret; 247 248 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 249 if (!se_sess) { 250 pr_err("Unable to allocate struct se_session from" 251 " se_sess_cache\n"); 252 return ERR_PTR(-ENOMEM); 253 } 254 ret = transport_init_session(se_sess); 255 if (ret < 0) { 256 kmem_cache_free(se_sess_cache, se_sess); 257 return ERR_PTR(ret); 258 } 259 se_sess->sup_prot_ops = sup_prot_ops; 260 261 return se_sess; 262 } 263 EXPORT_SYMBOL(transport_alloc_session); 264 265 /** 266 * transport_alloc_session_tags - allocate target driver private data 267 * @se_sess: Session pointer. 268 * @tag_num: Maximum number of in-flight commands between initiator and target. 269 * @tag_size: Size in bytes of the private data a target driver associates with 270 * each command. 
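 *
 * Illustrative use (a hedged sketch, not taken from an in-tree fabric; the
 * struct my_fabric_cmd name below is a hypothetical per-command private
 * type):
 *
 *	rc = transport_alloc_session_tags(se_sess, 128,
 *					  sizeof(struct my_fabric_cmd));
 *
 * transport_init_session_tags() below does exactly this, and releases the
 * session with transport_free_session() if tag allocation fails.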
 */
int transport_alloc_session_tags(struct se_session *se_sess,
				 unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
					 GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!se_sess->sess_cmd_map) {
		pr_err("Unable to allocate se_sess->sess_cmd_map\n");
		return -ENOMEM;
	}

	rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
			false, GFP_KERNEL, NUMA_NO_NODE);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool,"
			" tag_num: %u\n", tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);

/**
 * transport_init_session_tags - allocate a session and target driver private data
 * @tag_num: Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	each command.
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
static struct se_session *
transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
			    enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	if (tag_num != 0 && !tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_num:"
		       " %u, but zero tag_size\n", tag_num);
		return ERR_PTR(-EINVAL);
	}
	if (!tag_num && tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_size:"
		       " %u, but zero tag_num\n", tag_size);
		return ERR_PTR(-EINVAL);
	}

	se_sess = transport_alloc_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}

/*
 * Called with spin_lock_irqsave(&se_portal_group->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];
	unsigned long flags;

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate an active
	 * struct se_session.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * Determine if fabric allows for T10-PI feature bits exposed to
		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl node
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
376 */ 377 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { 378 memset(&buf[0], 0, PR_REG_ISID_LEN); 379 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, 380 &buf[0], PR_REG_ISID_LEN); 381 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); 382 } 383 384 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 385 /* 386 * The se_nacl->nacl_sess pointer will be set to the 387 * last active I_T Nexus for each struct se_node_acl. 388 */ 389 se_nacl->nacl_sess = se_sess; 390 391 list_add_tail(&se_sess->sess_acl_list, 392 &se_nacl->acl_sess_list); 393 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 394 } 395 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 396 397 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 398 se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr); 399 } 400 EXPORT_SYMBOL(__transport_register_session); 401 402 void transport_register_session( 403 struct se_portal_group *se_tpg, 404 struct se_node_acl *se_nacl, 405 struct se_session *se_sess, 406 void *fabric_sess_ptr) 407 { 408 unsigned long flags; 409 410 spin_lock_irqsave(&se_tpg->session_lock, flags); 411 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); 412 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 413 } 414 EXPORT_SYMBOL(transport_register_session); 415 416 struct se_session * 417 target_setup_session(struct se_portal_group *tpg, 418 unsigned int tag_num, unsigned int tag_size, 419 enum target_prot_op prot_op, 420 const char *initiatorname, void *private, 421 int (*callback)(struct se_portal_group *, 422 struct se_session *, void *)) 423 { 424 struct se_session *sess; 425 426 /* 427 * If the fabric driver is using percpu-ida based pre allocation 428 * of I/O descriptor tags, go ahead and perform that setup now.. 429 */ 430 if (tag_num != 0) 431 sess = transport_init_session_tags(tag_num, tag_size, prot_op); 432 else 433 sess = transport_alloc_session(prot_op); 434 435 if (IS_ERR(sess)) 436 return sess; 437 438 sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg, 439 (unsigned char *)initiatorname); 440 if (!sess->se_node_acl) { 441 transport_free_session(sess); 442 return ERR_PTR(-EACCES); 443 } 444 /* 445 * Go ahead and perform any remaining fabric setup that is 446 * required before transport_register_session(). 
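 *
 * (A fabric driver typically reaches this point via a call such as the
 * following hedged sketch, where the my_* names are purely illustrative:
 *
 *	sess = target_setup_session(tpg, 128, sizeof(struct my_fabric_cmd),
 *				    TARGET_PROT_NORMAL, initiator_name,
 *				    my_nexus, my_fabric_callback);
 *
 * and uses the callback to finish wiring up its private nexus state.)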
447 */ 448 if (callback != NULL) { 449 int rc = callback(tpg, sess, private); 450 if (rc) { 451 transport_free_session(sess); 452 return ERR_PTR(rc); 453 } 454 } 455 456 transport_register_session(tpg, sess->se_node_acl, sess, private); 457 return sess; 458 } 459 EXPORT_SYMBOL(target_setup_session); 460 461 ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) 462 { 463 struct se_session *se_sess; 464 ssize_t len = 0; 465 466 spin_lock_bh(&se_tpg->session_lock); 467 list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { 468 if (!se_sess->se_node_acl) 469 continue; 470 if (!se_sess->se_node_acl->dynamic_node_acl) 471 continue; 472 if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE) 473 break; 474 475 len += snprintf(page + len, PAGE_SIZE - len, "%s\n", 476 se_sess->se_node_acl->initiatorname); 477 len += 1; /* Include NULL terminator */ 478 } 479 spin_unlock_bh(&se_tpg->session_lock); 480 481 return len; 482 } 483 EXPORT_SYMBOL(target_show_dynamic_sessions); 484 485 static void target_complete_nacl(struct kref *kref) 486 { 487 struct se_node_acl *nacl = container_of(kref, 488 struct se_node_acl, acl_kref); 489 struct se_portal_group *se_tpg = nacl->se_tpg; 490 491 if (!nacl->dynamic_stop) { 492 complete(&nacl->acl_free_comp); 493 return; 494 } 495 496 mutex_lock(&se_tpg->acl_node_mutex); 497 list_del_init(&nacl->acl_list); 498 mutex_unlock(&se_tpg->acl_node_mutex); 499 500 core_tpg_wait_for_nacl_pr_ref(nacl); 501 core_free_device_list_for_node(nacl, se_tpg); 502 kfree(nacl); 503 } 504 505 void target_put_nacl(struct se_node_acl *nacl) 506 { 507 kref_put(&nacl->acl_kref, target_complete_nacl); 508 } 509 EXPORT_SYMBOL(target_put_nacl); 510 511 void transport_deregister_session_configfs(struct se_session *se_sess) 512 { 513 struct se_node_acl *se_nacl; 514 unsigned long flags; 515 /* 516 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 517 */ 518 se_nacl = se_sess->se_node_acl; 519 if (se_nacl) { 520 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 521 if (!list_empty(&se_sess->sess_acl_list)) 522 list_del_init(&se_sess->sess_acl_list); 523 /* 524 * If the session list is empty, then clear the pointer. 525 * Otherwise, set the struct se_session pointer from the tail 526 * element of the per struct se_node_acl active session list. 527 */ 528 if (list_empty(&se_nacl->acl_sess_list)) 529 se_nacl->nacl_sess = NULL; 530 else { 531 se_nacl->nacl_sess = container_of( 532 se_nacl->acl_sess_list.prev, 533 struct se_session, sess_acl_list); 534 } 535 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 536 } 537 } 538 EXPORT_SYMBOL(transport_deregister_session_configfs); 539 540 void transport_free_session(struct se_session *se_sess) 541 { 542 struct se_node_acl *se_nacl = se_sess->se_node_acl; 543 544 /* 545 * Drop the se_node_acl->nacl_kref obtained from within 546 * core_tpg_get_initiator_node_acl(). 547 */ 548 if (se_nacl) { 549 struct se_portal_group *se_tpg = se_nacl->se_tpg; 550 const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo; 551 unsigned long flags; 552 553 se_sess->se_node_acl = NULL; 554 555 /* 556 * Also determine if we need to drop the extra ->cmd_kref if 557 * it had been previously dynamically generated, and 558 * the endpoint is not caching dynamic ACLs. 
559 */ 560 mutex_lock(&se_tpg->acl_node_mutex); 561 if (se_nacl->dynamic_node_acl && 562 !se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 563 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 564 if (list_empty(&se_nacl->acl_sess_list)) 565 se_nacl->dynamic_stop = true; 566 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 567 568 if (se_nacl->dynamic_stop) 569 list_del_init(&se_nacl->acl_list); 570 } 571 mutex_unlock(&se_tpg->acl_node_mutex); 572 573 if (se_nacl->dynamic_stop) 574 target_put_nacl(se_nacl); 575 576 target_put_nacl(se_nacl); 577 } 578 if (se_sess->sess_cmd_map) { 579 sbitmap_queue_free(&se_sess->sess_tag_pool); 580 kvfree(se_sess->sess_cmd_map); 581 } 582 percpu_ref_exit(&se_sess->cmd_count); 583 kmem_cache_free(se_sess_cache, se_sess); 584 } 585 EXPORT_SYMBOL(transport_free_session); 586 587 void transport_deregister_session(struct se_session *se_sess) 588 { 589 struct se_portal_group *se_tpg = se_sess->se_tpg; 590 unsigned long flags; 591 592 if (!se_tpg) { 593 transport_free_session(se_sess); 594 return; 595 } 596 597 spin_lock_irqsave(&se_tpg->session_lock, flags); 598 list_del(&se_sess->sess_list); 599 se_sess->se_tpg = NULL; 600 se_sess->fabric_sess_ptr = NULL; 601 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 602 603 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 604 se_tpg->se_tpg_tfo->fabric_name); 605 /* 606 * If last kref is dropping now for an explicit NodeACL, awake sleeping 607 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 608 * removal context from within transport_free_session() code. 609 * 610 * For dynamic ACL, target_put_nacl() uses target_complete_nacl() 611 * to release all remaining generate_node_acl=1 created ACL resources. 612 */ 613 614 transport_free_session(se_sess); 615 } 616 EXPORT_SYMBOL(transport_deregister_session); 617 618 void target_remove_session(struct se_session *se_sess) 619 { 620 transport_deregister_session_configfs(se_sess); 621 transport_deregister_session(se_sess); 622 } 623 EXPORT_SYMBOL(target_remove_session); 624 625 static void target_remove_from_state_list(struct se_cmd *cmd) 626 { 627 struct se_device *dev = cmd->se_dev; 628 unsigned long flags; 629 630 if (!dev) 631 return; 632 633 spin_lock_irqsave(&dev->execute_task_lock, flags); 634 if (cmd->state_active) { 635 list_del(&cmd->state_list); 636 cmd->state_active = false; 637 } 638 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 639 } 640 641 /* 642 * This function is called by the target core after the target core has 643 * finished processing a SCSI command or SCSI TMF. Both the regular command 644 * processing code and the code for aborting commands can call this 645 * function. CMD_T_STOP is set if and only if another thread is waiting 646 * inside transport_wait_for_tasks() for t_transport_stop_comp. 647 */ 648 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) 649 { 650 unsigned long flags; 651 652 target_remove_from_state_list(cmd); 653 654 spin_lock_irqsave(&cmd->t_state_lock, flags); 655 /* 656 * Determine if frontend context caller is requesting the stopping of 657 * this command for frontend exceptions. 
658 */ 659 if (cmd->transport_state & CMD_T_STOP) { 660 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 661 __func__, __LINE__, cmd->tag); 662 663 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 664 665 complete_all(&cmd->t_transport_stop_comp); 666 return 1; 667 } 668 cmd->transport_state &= ~CMD_T_ACTIVE; 669 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 670 671 /* 672 * Some fabric modules like tcm_loop can release their internally 673 * allocated I/O reference and struct se_cmd now. 674 * 675 * Fabric modules are expected to return '1' here if the se_cmd being 676 * passed is released at this point, or zero if not being released. 677 */ 678 return cmd->se_tfo->check_stop_free(cmd); 679 } 680 681 static void target_complete_failure_work(struct work_struct *work) 682 { 683 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 684 685 transport_generic_request_failure(cmd, 686 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 687 } 688 689 /* 690 * Used when asking transport to copy Sense Data from the underlying 691 * Linux/SCSI struct scsi_cmnd 692 */ 693 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) 694 { 695 struct se_device *dev = cmd->se_dev; 696 697 WARN_ON(!cmd->se_lun); 698 699 if (!dev) 700 return NULL; 701 702 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) 703 return NULL; 704 705 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 706 707 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", 708 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); 709 return cmd->sense_buffer; 710 } 711 712 void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense) 713 { 714 unsigned char *cmd_sense_buf; 715 unsigned long flags; 716 717 spin_lock_irqsave(&cmd->t_state_lock, flags); 718 cmd_sense_buf = transport_get_sense_buffer(cmd); 719 if (!cmd_sense_buf) { 720 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 721 return; 722 } 723 724 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; 725 memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length); 726 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 727 } 728 EXPORT_SYMBOL(transport_copy_sense_to_cmd); 729 730 static void target_handle_abort(struct se_cmd *cmd) 731 { 732 bool tas = cmd->transport_state & CMD_T_TAS; 733 bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF; 734 int ret; 735 736 pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas); 737 738 if (tas) { 739 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 740 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 741 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n", 742 cmd->t_task_cdb[0], cmd->tag); 743 trace_target_cmd_complete(cmd); 744 ret = cmd->se_tfo->queue_status(cmd); 745 if (ret) { 746 transport_handle_queue_full(cmd, cmd->se_dev, 747 ret, false); 748 return; 749 } 750 } else { 751 cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED; 752 cmd->se_tfo->queue_tm_rsp(cmd); 753 } 754 } else { 755 /* 756 * Allow the fabric driver to unmap any resources before 757 * releasing the descriptor via TFO->release_cmd(). 758 */ 759 cmd->se_tfo->aborted_task(cmd); 760 if (ack_kref) 761 WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0); 762 /* 763 * To do: establish a unit attention condition on the I_T 764 * nexus associated with cmd. See also the paragraph "Aborting 765 * commands" in SAM. 
766 */ 767 } 768 769 WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0); 770 771 transport_cmd_check_stop_to_fabric(cmd); 772 } 773 774 static void target_abort_work(struct work_struct *work) 775 { 776 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 777 778 target_handle_abort(cmd); 779 } 780 781 static bool target_cmd_interrupted(struct se_cmd *cmd) 782 { 783 int post_ret; 784 785 if (cmd->transport_state & CMD_T_ABORTED) { 786 if (cmd->transport_complete_callback) 787 cmd->transport_complete_callback(cmd, false, &post_ret); 788 INIT_WORK(&cmd->work, target_abort_work); 789 queue_work(target_completion_wq, &cmd->work); 790 return true; 791 } else if (cmd->transport_state & CMD_T_STOP) { 792 if (cmd->transport_complete_callback) 793 cmd->transport_complete_callback(cmd, false, &post_ret); 794 complete_all(&cmd->t_transport_stop_comp); 795 return true; 796 } 797 798 return false; 799 } 800 801 /* May be called from interrupt context so must not sleep. */ 802 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 803 { 804 int success; 805 unsigned long flags; 806 807 if (target_cmd_interrupted(cmd)) 808 return; 809 810 cmd->scsi_status = scsi_status; 811 812 spin_lock_irqsave(&cmd->t_state_lock, flags); 813 switch (cmd->scsi_status) { 814 case SAM_STAT_CHECK_CONDITION: 815 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 816 success = 1; 817 else 818 success = 0; 819 break; 820 default: 821 success = 1; 822 break; 823 } 824 825 cmd->t_state = TRANSPORT_COMPLETE; 826 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 827 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 828 829 INIT_WORK(&cmd->work, success ? target_complete_ok_work : 830 target_complete_failure_work); 831 if (cmd->se_cmd_flags & SCF_USE_CPUID) 832 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); 833 else 834 queue_work(target_completion_wq, &cmd->work); 835 } 836 EXPORT_SYMBOL(target_complete_cmd); 837 838 void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) 839 { 840 if ((scsi_status == SAM_STAT_GOOD || 841 cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && 842 length < cmd->data_length) { 843 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 844 cmd->residual_count += cmd->data_length - length; 845 } else { 846 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 847 cmd->residual_count = cmd->data_length - length; 848 } 849 850 cmd->data_length = length; 851 } 852 853 target_complete_cmd(cmd, scsi_status); 854 } 855 EXPORT_SYMBOL(target_complete_cmd_with_length); 856 857 static void target_add_to_state_list(struct se_cmd *cmd) 858 { 859 struct se_device *dev = cmd->se_dev; 860 unsigned long flags; 861 862 spin_lock_irqsave(&dev->execute_task_lock, flags); 863 if (!cmd->state_active) { 864 list_add_tail(&cmd->state_list, &dev->state_list); 865 cmd->state_active = true; 866 } 867 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 868 } 869 870 /* 871 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status 872 */ 873 static void transport_write_pending_qf(struct se_cmd *cmd); 874 static void transport_complete_qf(struct se_cmd *cmd); 875 876 void target_qf_do_work(struct work_struct *work) 877 { 878 struct se_device *dev = container_of(work, struct se_device, 879 qf_work_queue); 880 LIST_HEAD(qf_cmd_list); 881 struct se_cmd *cmd, *cmd_tmp; 882 883 spin_lock_irq(&dev->qf_cmd_lock); 884 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); 885 spin_unlock_irq(&dev->qf_cmd_lock); 886 887 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 888 list_del(&cmd->se_qf_node); 
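		/*
		 * Drops the dev_qf_count reference taken when this command
		 * was parked on dev->qf_cmd_list after a QUEUE_FULL
		 * condition.
		 */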
889 atomic_dec_mb(&dev->dev_qf_count); 890 891 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 892 " context: %s\n", cmd->se_tfo->fabric_name, cmd, 893 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : 894 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" 895 : "UNKNOWN"); 896 897 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 898 transport_write_pending_qf(cmd); 899 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK || 900 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) 901 transport_complete_qf(cmd); 902 } 903 } 904 905 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 906 { 907 switch (cmd->data_direction) { 908 case DMA_NONE: 909 return "NONE"; 910 case DMA_FROM_DEVICE: 911 return "READ"; 912 case DMA_TO_DEVICE: 913 return "WRITE"; 914 case DMA_BIDIRECTIONAL: 915 return "BIDI"; 916 default: 917 break; 918 } 919 920 return "UNKNOWN"; 921 } 922 923 void transport_dump_dev_state( 924 struct se_device *dev, 925 char *b, 926 int *bl) 927 { 928 *bl += sprintf(b + *bl, "Status: "); 929 if (dev->export_count) 930 *bl += sprintf(b + *bl, "ACTIVATED"); 931 else 932 *bl += sprintf(b + *bl, "DEACTIVATED"); 933 934 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); 935 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", 936 dev->dev_attrib.block_size, 937 dev->dev_attrib.hw_max_sectors); 938 *bl += sprintf(b + *bl, " "); 939 } 940 941 void transport_dump_vpd_proto_id( 942 struct t10_vpd *vpd, 943 unsigned char *p_buf, 944 int p_buf_len) 945 { 946 unsigned char buf[VPD_TMP_BUF_SIZE]; 947 int len; 948 949 memset(buf, 0, VPD_TMP_BUF_SIZE); 950 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 951 952 switch (vpd->protocol_identifier) { 953 case 0x00: 954 sprintf(buf+len, "Fibre Channel\n"); 955 break; 956 case 0x10: 957 sprintf(buf+len, "Parallel SCSI\n"); 958 break; 959 case 0x20: 960 sprintf(buf+len, "SSA\n"); 961 break; 962 case 0x30: 963 sprintf(buf+len, "IEEE 1394\n"); 964 break; 965 case 0x40: 966 sprintf(buf+len, "SCSI Remote Direct Memory Access" 967 " Protocol\n"); 968 break; 969 case 0x50: 970 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 971 break; 972 case 0x60: 973 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 974 break; 975 case 0x70: 976 sprintf(buf+len, "Automation/Drive Interface Transport" 977 " Protocol\n"); 978 break; 979 case 0x80: 980 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 981 break; 982 default: 983 sprintf(buf+len, "Unknown 0x%02x\n", 984 vpd->protocol_identifier); 985 break; 986 } 987 988 if (p_buf) 989 strncpy(p_buf, buf, p_buf_len); 990 else 991 pr_debug("%s", buf); 992 } 993 994 void 995 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 996 { 997 /* 998 * Check if the Protocol Identifier Valid (PIV) bit is set.. 
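 * (bit 7 of byte 1 of the INQUIRY VPD page 0x83 designation descriptor;
 * the protocol identifier itself is carried in the upper nibble of byte 0)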
999 * 1000 * from spc3r23.pdf section 7.5.1 1001 */ 1002 if (page_83[1] & 0x80) { 1003 vpd->protocol_identifier = (page_83[0] & 0xf0); 1004 vpd->protocol_identifier_set = 1; 1005 transport_dump_vpd_proto_id(vpd, NULL, 0); 1006 } 1007 } 1008 EXPORT_SYMBOL(transport_set_vpd_proto_id); 1009 1010 int transport_dump_vpd_assoc( 1011 struct t10_vpd *vpd, 1012 unsigned char *p_buf, 1013 int p_buf_len) 1014 { 1015 unsigned char buf[VPD_TMP_BUF_SIZE]; 1016 int ret = 0; 1017 int len; 1018 1019 memset(buf, 0, VPD_TMP_BUF_SIZE); 1020 len = sprintf(buf, "T10 VPD Identifier Association: "); 1021 1022 switch (vpd->association) { 1023 case 0x00: 1024 sprintf(buf+len, "addressed logical unit\n"); 1025 break; 1026 case 0x10: 1027 sprintf(buf+len, "target port\n"); 1028 break; 1029 case 0x20: 1030 sprintf(buf+len, "SCSI target device\n"); 1031 break; 1032 default: 1033 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); 1034 ret = -EINVAL; 1035 break; 1036 } 1037 1038 if (p_buf) 1039 strncpy(p_buf, buf, p_buf_len); 1040 else 1041 pr_debug("%s", buf); 1042 1043 return ret; 1044 } 1045 1046 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) 1047 { 1048 /* 1049 * The VPD identification association.. 1050 * 1051 * from spc3r23.pdf Section 7.6.3.1 Table 297 1052 */ 1053 vpd->association = (page_83[1] & 0x30); 1054 return transport_dump_vpd_assoc(vpd, NULL, 0); 1055 } 1056 EXPORT_SYMBOL(transport_set_vpd_assoc); 1057 1058 int transport_dump_vpd_ident_type( 1059 struct t10_vpd *vpd, 1060 unsigned char *p_buf, 1061 int p_buf_len) 1062 { 1063 unsigned char buf[VPD_TMP_BUF_SIZE]; 1064 int ret = 0; 1065 int len; 1066 1067 memset(buf, 0, VPD_TMP_BUF_SIZE); 1068 len = sprintf(buf, "T10 VPD Identifier Type: "); 1069 1070 switch (vpd->device_identifier_type) { 1071 case 0x00: 1072 sprintf(buf+len, "Vendor specific\n"); 1073 break; 1074 case 0x01: 1075 sprintf(buf+len, "T10 Vendor ID based\n"); 1076 break; 1077 case 0x02: 1078 sprintf(buf+len, "EUI-64 based\n"); 1079 break; 1080 case 0x03: 1081 sprintf(buf+len, "NAA\n"); 1082 break; 1083 case 0x04: 1084 sprintf(buf+len, "Relative target port identifier\n"); 1085 break; 1086 case 0x08: 1087 sprintf(buf+len, "SCSI name string\n"); 1088 break; 1089 default: 1090 sprintf(buf+len, "Unsupported: 0x%02x\n", 1091 vpd->device_identifier_type); 1092 ret = -EINVAL; 1093 break; 1094 } 1095 1096 if (p_buf) { 1097 if (p_buf_len < strlen(buf)+1) 1098 return -EINVAL; 1099 strncpy(p_buf, buf, p_buf_len); 1100 } else { 1101 pr_debug("%s", buf); 1102 } 1103 1104 return ret; 1105 } 1106 1107 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) 1108 { 1109 /* 1110 * The VPD identifier type.. 
1111 * 1112 * from spc3r23.pdf Section 7.6.3.1 Table 298 1113 */ 1114 vpd->device_identifier_type = (page_83[1] & 0x0f); 1115 return transport_dump_vpd_ident_type(vpd, NULL, 0); 1116 } 1117 EXPORT_SYMBOL(transport_set_vpd_ident_type); 1118 1119 int transport_dump_vpd_ident( 1120 struct t10_vpd *vpd, 1121 unsigned char *p_buf, 1122 int p_buf_len) 1123 { 1124 unsigned char buf[VPD_TMP_BUF_SIZE]; 1125 int ret = 0; 1126 1127 memset(buf, 0, VPD_TMP_BUF_SIZE); 1128 1129 switch (vpd->device_identifier_code_set) { 1130 case 0x01: /* Binary */ 1131 snprintf(buf, sizeof(buf), 1132 "T10 VPD Binary Device Identifier: %s\n", 1133 &vpd->device_identifier[0]); 1134 break; 1135 case 0x02: /* ASCII */ 1136 snprintf(buf, sizeof(buf), 1137 "T10 VPD ASCII Device Identifier: %s\n", 1138 &vpd->device_identifier[0]); 1139 break; 1140 case 0x03: /* UTF-8 */ 1141 snprintf(buf, sizeof(buf), 1142 "T10 VPD UTF-8 Device Identifier: %s\n", 1143 &vpd->device_identifier[0]); 1144 break; 1145 default: 1146 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" 1147 " 0x%02x", vpd->device_identifier_code_set); 1148 ret = -EINVAL; 1149 break; 1150 } 1151 1152 if (p_buf) 1153 strncpy(p_buf, buf, p_buf_len); 1154 else 1155 pr_debug("%s", buf); 1156 1157 return ret; 1158 } 1159 1160 int 1161 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) 1162 { 1163 static const char hex_str[] = "0123456789abcdef"; 1164 int j = 0, i = 4; /* offset to start of the identifier */ 1165 1166 /* 1167 * The VPD Code Set (encoding) 1168 * 1169 * from spc3r23.pdf Section 7.6.3.1 Table 296 1170 */ 1171 vpd->device_identifier_code_set = (page_83[0] & 0x0f); 1172 switch (vpd->device_identifier_code_set) { 1173 case 0x01: /* Binary */ 1174 vpd->device_identifier[j++] = 1175 hex_str[vpd->device_identifier_type]; 1176 while (i < (4 + page_83[3])) { 1177 vpd->device_identifier[j++] = 1178 hex_str[(page_83[i] & 0xf0) >> 4]; 1179 vpd->device_identifier[j++] = 1180 hex_str[page_83[i] & 0x0f]; 1181 i++; 1182 } 1183 break; 1184 case 0x02: /* ASCII */ 1185 case 0x03: /* UTF-8 */ 1186 while (i < (4 + page_83[3])) 1187 vpd->device_identifier[j++] = page_83[i++]; 1188 break; 1189 default: 1190 break; 1191 } 1192 1193 return transport_dump_vpd_ident(vpd, NULL, 0); 1194 } 1195 EXPORT_SYMBOL(transport_set_vpd_ident); 1196 1197 static sense_reason_t 1198 target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev, 1199 unsigned int size) 1200 { 1201 u32 mtl; 1202 1203 if (!cmd->se_tfo->max_data_sg_nents) 1204 return TCM_NO_SENSE; 1205 /* 1206 * Check if fabric enforced maximum SGL entries per I/O descriptor 1207 * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT + 1208 * residual_count and reduce original cmd->data_length to maximum 1209 * length based on single PAGE_SIZE entry scatter-lists. 1210 */ 1211 mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE); 1212 if (cmd->data_length > mtl) { 1213 /* 1214 * If an existing CDB overflow is present, calculate new residual 1215 * based on CDB size minus fabric maximum transfer length. 1216 * 1217 * If an existing CDB underflow is present, calculate new residual 1218 * based on original cmd->data_length minus fabric maximum transfer 1219 * length. 1220 * 1221 * Otherwise, set the underflow residual based on cmd->data_length 1222 * minus fabric maximum transfer length. 
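 *
 * For example, assuming PAGE_SIZE is 4K and ->max_data_sg_nents is 32,
 * mtl is 128K: a plain 256K READ takes the final branch below and ends up
 * with SCF_UNDERFLOW_BIT set, residual_count = 128K and data_length = 128K.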
1223 */ 1224 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1225 cmd->residual_count = (size - mtl); 1226 } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 1227 u32 orig_dl = size + cmd->residual_count; 1228 cmd->residual_count = (orig_dl - mtl); 1229 } else { 1230 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1231 cmd->residual_count = (cmd->data_length - mtl); 1232 } 1233 cmd->data_length = mtl; 1234 /* 1235 * Reset sbc_check_prot() calculated protection payload 1236 * length based upon the new smaller MTL. 1237 */ 1238 if (cmd->prot_length) { 1239 u32 sectors = (mtl / dev->dev_attrib.block_size); 1240 cmd->prot_length = dev->prot_length * sectors; 1241 } 1242 } 1243 return TCM_NO_SENSE; 1244 } 1245 1246 sense_reason_t 1247 target_cmd_size_check(struct se_cmd *cmd, unsigned int size) 1248 { 1249 struct se_device *dev = cmd->se_dev; 1250 1251 if (cmd->unknown_data_length) { 1252 cmd->data_length = size; 1253 } else if (size != cmd->data_length) { 1254 pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:" 1255 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 1256 " 0x%02x\n", cmd->se_tfo->fabric_name, 1257 cmd->data_length, size, cmd->t_task_cdb[0]); 1258 1259 if (cmd->data_direction == DMA_TO_DEVICE) { 1260 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1261 pr_err_ratelimited("Rejecting underflow/overflow" 1262 " for WRITE data CDB\n"); 1263 return TCM_INVALID_CDB_FIELD; 1264 } 1265 /* 1266 * Some fabric drivers like iscsi-target still expect to 1267 * always reject overflow writes. Reject this case until 1268 * full fabric driver level support for overflow writes 1269 * is introduced tree-wide. 1270 */ 1271 if (size > cmd->data_length) { 1272 pr_err_ratelimited("Rejecting overflow for" 1273 " WRITE control CDB\n"); 1274 return TCM_INVALID_CDB_FIELD; 1275 } 1276 } 1277 /* 1278 * Reject READ_* or WRITE_* with overflow/underflow for 1279 * type SCF_SCSI_DATA_CDB. 1280 */ 1281 if (dev->dev_attrib.block_size != 512) { 1282 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 1283 " CDB on non 512-byte sector setup subsystem" 1284 " plugin: %s\n", dev->transport->name); 1285 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ 1286 return TCM_INVALID_CDB_FIELD; 1287 } 1288 /* 1289 * For the overflow case keep the existing fabric provided 1290 * ->data_length. Otherwise for the underflow case, reset 1291 * ->data_length to the smaller SCSI expected data transfer 1292 * length. 1293 */ 1294 if (size > cmd->data_length) { 1295 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; 1296 cmd->residual_count = (size - cmd->data_length); 1297 } else { 1298 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1299 cmd->residual_count = (cmd->data_length - size); 1300 cmd->data_length = size; 1301 } 1302 } 1303 1304 return target_check_max_data_sg_nents(cmd, dev, size); 1305 1306 } 1307 1308 /* 1309 * Used by fabric modules containing a local struct se_cmd within their 1310 * fabric dependent per I/O descriptor. 1311 * 1312 * Preserves the value of @cmd->tag. 
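 *
 * Illustrative embedding (a hedged sketch; struct my_fabric_cmd is a
 * hypothetical fabric driver type):
 *
 *	struct my_fabric_cmd {
 *		struct se_cmd se_cmd;
 *		...fabric private per-I/O state...
 *	};
 *
 * The fabric then calls transport_init_se_cmd(&my_cmd->se_cmd, ...) before
 * submitting my_cmd.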
1313 */ 1314 void transport_init_se_cmd( 1315 struct se_cmd *cmd, 1316 const struct target_core_fabric_ops *tfo, 1317 struct se_session *se_sess, 1318 u32 data_length, 1319 int data_direction, 1320 int task_attr, 1321 unsigned char *sense_buffer) 1322 { 1323 INIT_LIST_HEAD(&cmd->se_delayed_node); 1324 INIT_LIST_HEAD(&cmd->se_qf_node); 1325 INIT_LIST_HEAD(&cmd->se_cmd_list); 1326 INIT_LIST_HEAD(&cmd->state_list); 1327 init_completion(&cmd->t_transport_stop_comp); 1328 cmd->free_compl = NULL; 1329 cmd->abrt_compl = NULL; 1330 spin_lock_init(&cmd->t_state_lock); 1331 INIT_WORK(&cmd->work, NULL); 1332 kref_init(&cmd->cmd_kref); 1333 1334 cmd->se_tfo = tfo; 1335 cmd->se_sess = se_sess; 1336 cmd->data_length = data_length; 1337 cmd->data_direction = data_direction; 1338 cmd->sam_task_attr = task_attr; 1339 cmd->sense_buffer = sense_buffer; 1340 1341 cmd->state_active = false; 1342 } 1343 EXPORT_SYMBOL(transport_init_se_cmd); 1344 1345 static sense_reason_t 1346 transport_check_alloc_task_attr(struct se_cmd *cmd) 1347 { 1348 struct se_device *dev = cmd->se_dev; 1349 1350 /* 1351 * Check if SAM Task Attribute emulation is enabled for this 1352 * struct se_device storage object 1353 */ 1354 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1355 return 0; 1356 1357 if (cmd->sam_task_attr == TCM_ACA_TAG) { 1358 pr_debug("SAM Task Attribute ACA" 1359 " emulation is not supported\n"); 1360 return TCM_INVALID_CDB_FIELD; 1361 } 1362 1363 return 0; 1364 } 1365 1366 sense_reason_t 1367 target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) 1368 { 1369 struct se_device *dev = cmd->se_dev; 1370 sense_reason_t ret; 1371 1372 /* 1373 * Ensure that the received CDB is less than the max (252 + 8) bytes 1374 * for VARIABLE_LENGTH_CMD 1375 */ 1376 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { 1377 pr_err("Received SCSI CDB with command_size: %d that" 1378 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1379 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1380 return TCM_INVALID_CDB_FIELD; 1381 } 1382 /* 1383 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, 1384 * allocate the additional extended CDB buffer now.. Otherwise 1385 * setup the pointer from __t_task_cdb to t_task_cdb. 1386 */ 1387 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { 1388 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), 1389 GFP_KERNEL); 1390 if (!cmd->t_task_cdb) { 1391 pr_err("Unable to allocate cmd->t_task_cdb" 1392 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1393 scsi_command_size(cdb), 1394 (unsigned long)sizeof(cmd->__t_task_cdb)); 1395 return TCM_OUT_OF_RESOURCES; 1396 } 1397 } else 1398 cmd->t_task_cdb = &cmd->__t_task_cdb[0]; 1399 /* 1400 * Copy the original CDB into cmd-> 1401 */ 1402 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); 1403 1404 trace_target_sequencer_start(cmd); 1405 1406 ret = dev->transport->parse_cdb(cmd); 1407 if (ret == TCM_UNSUPPORTED_SCSI_OPCODE) 1408 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", 1409 cmd->se_tfo->fabric_name, 1410 cmd->se_sess->se_node_acl->initiatorname, 1411 cmd->t_task_cdb[0]); 1412 if (ret) 1413 return ret; 1414 1415 ret = transport_check_alloc_task_attr(cmd); 1416 if (ret) 1417 return ret; 1418 1419 cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 1420 atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus); 1421 return 0; 1422 } 1423 EXPORT_SYMBOL(target_setup_cmd_from_cdb); 1424 1425 /* 1426 * Used by fabric module frontends to queue tasks directly. 
1427 * May only be used from process context. 1428 */ 1429 int transport_handle_cdb_direct( 1430 struct se_cmd *cmd) 1431 { 1432 sense_reason_t ret; 1433 1434 if (!cmd->se_lun) { 1435 dump_stack(); 1436 pr_err("cmd->se_lun is NULL\n"); 1437 return -EINVAL; 1438 } 1439 if (in_interrupt()) { 1440 dump_stack(); 1441 pr_err("transport_generic_handle_cdb cannot be called" 1442 " from interrupt context\n"); 1443 return -EINVAL; 1444 } 1445 /* 1446 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that 1447 * outstanding descriptors are handled correctly during shutdown via 1448 * transport_wait_for_tasks() 1449 * 1450 * Also, we don't take cmd->t_state_lock here as we only expect 1451 * this to be called for initial descriptor submission. 1452 */ 1453 cmd->t_state = TRANSPORT_NEW_CMD; 1454 cmd->transport_state |= CMD_T_ACTIVE; 1455 1456 /* 1457 * transport_generic_new_cmd() is already handling QUEUE_FULL, 1458 * so follow TRANSPORT_NEW_CMD processing thread context usage 1459 * and call transport_generic_request_failure() if necessary.. 1460 */ 1461 ret = transport_generic_new_cmd(cmd); 1462 if (ret) 1463 transport_generic_request_failure(cmd, ret); 1464 return 0; 1465 } 1466 EXPORT_SYMBOL(transport_handle_cdb_direct); 1467 1468 sense_reason_t 1469 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 1470 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1471 { 1472 if (!sgl || !sgl_count) 1473 return 0; 1474 1475 /* 1476 * Reject SCSI data overflow with map_mem_to_cmd() as incoming 1477 * scatterlists already have been set to follow what the fabric 1478 * passes for the original expected data transfer length. 1479 */ 1480 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1481 pr_warn("Rejecting SCSI DATA overflow for fabric using" 1482 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); 1483 return TCM_INVALID_CDB_FIELD; 1484 } 1485 1486 cmd->t_data_sg = sgl; 1487 cmd->t_data_nents = sgl_count; 1488 cmd->t_bidi_data_sg = sgl_bidi; 1489 cmd->t_bidi_data_nents = sgl_bidi_count; 1490 1491 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1492 return 0; 1493 } 1494 1495 /** 1496 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized 1497 * se_cmd + use pre-allocated SGL memory. 1498 * 1499 * @se_cmd: command descriptor to submit 1500 * @se_sess: associated se_sess for endpoint 1501 * @cdb: pointer to SCSI CDB 1502 * @sense: pointer to SCSI sense buffer 1503 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1504 * @data_length: fabric expected data transfer length 1505 * @task_attr: SAM task attribute 1506 * @data_dir: DMA data direction 1507 * @flags: flags for command submission from target_sc_flags_tables 1508 * @sgl: struct scatterlist memory for unidirectional mapping 1509 * @sgl_count: scatterlist count for unidirectional mapping 1510 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping 1511 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping 1512 * @sgl_prot: struct scatterlist memory protection information 1513 * @sgl_prot_count: scatterlist count for protection information 1514 * 1515 * Task tags are supported if the caller has set @se_cmd->tag. 1516 * 1517 * Returns non zero to signal active I/O shutdown failure. All other 1518 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1519 * but still return zero here. 1520 * 1521 * This may only be called from process context, and also currently 1522 * assumes internal allocation of fabric payload buffer by target-core. 
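 *
 * Illustrative call (a hedged sketch; the my_* names are hypothetical
 * fabric driver state, not part of this API):
 *
 *	my_cmd->se_cmd.tag = my_tag;
 *	target_submit_cmd_map_sgls(&my_cmd->se_cmd, se_sess, cdb, sense,
 *				   unpacked_lun, data_len, TCM_SIMPLE_TAG,
 *				   DMA_FROM_DEVICE, TARGET_SCF_ACK_KREF,
 *				   my_sgl, my_sgl_count, NULL, 0, NULL, 0);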
1523 */ 1524 int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess, 1525 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, 1526 u32 data_length, int task_attr, int data_dir, int flags, 1527 struct scatterlist *sgl, u32 sgl_count, 1528 struct scatterlist *sgl_bidi, u32 sgl_bidi_count, 1529 struct scatterlist *sgl_prot, u32 sgl_prot_count) 1530 { 1531 struct se_portal_group *se_tpg; 1532 sense_reason_t rc; 1533 int ret; 1534 1535 se_tpg = se_sess->se_tpg; 1536 BUG_ON(!se_tpg); 1537 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); 1538 BUG_ON(in_interrupt()); 1539 /* 1540 * Initialize se_cmd for target operation. From this point 1541 * exceptions are handled by sending exception status via 1542 * target_core_fabric_ops->queue_status() callback 1543 */ 1544 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1545 data_length, data_dir, task_attr, sense); 1546 1547 if (flags & TARGET_SCF_USE_CPUID) 1548 se_cmd->se_cmd_flags |= SCF_USE_CPUID; 1549 else 1550 se_cmd->cpuid = WORK_CPU_UNBOUND; 1551 1552 if (flags & TARGET_SCF_UNKNOWN_SIZE) 1553 se_cmd->unknown_data_length = 1; 1554 /* 1555 * Obtain struct se_cmd->cmd_kref reference and add new cmd to 1556 * se_sess->sess_cmd_list. A second kref_get here is necessary 1557 * for fabrics using TARGET_SCF_ACK_KREF that expect a second 1558 * kref_put() to happen during fabric packet acknowledgement. 1559 */ 1560 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); 1561 if (ret) 1562 return ret; 1563 /* 1564 * Signal bidirectional data payloads to target-core 1565 */ 1566 if (flags & TARGET_SCF_BIDI_OP) 1567 se_cmd->se_cmd_flags |= SCF_BIDI; 1568 /* 1569 * Locate se_lun pointer and attach it to struct se_cmd 1570 */ 1571 rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun); 1572 if (rc) { 1573 transport_send_check_condition_and_sense(se_cmd, rc, 0); 1574 target_put_sess_cmd(se_cmd); 1575 return 0; 1576 } 1577 1578 rc = target_setup_cmd_from_cdb(se_cmd, cdb); 1579 if (rc != 0) { 1580 transport_generic_request_failure(se_cmd, rc); 1581 return 0; 1582 } 1583 1584 /* 1585 * Save pointers for SGLs containing protection information, 1586 * if present. 1587 */ 1588 if (sgl_prot_count) { 1589 se_cmd->t_prot_sg = sgl_prot; 1590 se_cmd->t_prot_nents = sgl_prot_count; 1591 se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC; 1592 } 1593 1594 /* 1595 * When a non zero sgl_count has been passed perform SGL passthrough 1596 * mapping for pre-allocated fabric memory instead of having target 1597 * core perform an internal SGL allocation.. 1598 */ 1599 if (sgl_count != 0) { 1600 BUG_ON(!sgl); 1601 1602 /* 1603 * A work-around for tcm_loop as some userspace code via 1604 * scsi-generic do not memset their associated read buffers, 1605 * so go ahead and do that here for type non-data CDBs. Also 1606 * note that this is currently guaranteed to be a single SGL 1607 * for this case by target core in target_setup_cmd_from_cdb() 1608 * -> transport_generic_cmd_sequencer(). 
		 */
		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
		     se_cmd->data_direction == DMA_FROM_DEVICE) {
			unsigned char *buf = NULL;

			if (sgl)
				buf = kmap(sg_page(sgl)) + sgl->offset;

			if (buf) {
				memset(buf, 0, sgl->length);
				kunmap(sg_page(sgl));
			}
		}

		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
				sgl_bidi, sgl_bidi_count);
		if (rc != 0) {
			transport_generic_request_failure(se_cmd, rc);
			return 0;
		}
	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);

/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure. All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 *
 * It also assumes internal target core SGL memory allocation.
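 *
 * Illustrative call from a fabric driver's I/O dispatch path (a hedged
 * sketch; the my_* names are hypothetical):
 *
 *	my_cmd->se_cmd.tag = my_tag;
 *	if (target_submit_cmd(&my_cmd->se_cmd, my_nexus->se_sess, cdb, sense,
 *			      unpacked_lun, data_len, TCM_SIMPLE_TAG,
 *			      DMA_TO_DEVICE, TARGET_SCF_ACK_KREF))
 *		...active I/O shutdown in progress; fail the fabric command...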
1665 */ 1666 int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, 1667 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, 1668 u32 data_length, int task_attr, int data_dir, int flags) 1669 { 1670 return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, 1671 unpacked_lun, data_length, task_attr, data_dir, 1672 flags, NULL, 0, NULL, 0, NULL, 0); 1673 } 1674 EXPORT_SYMBOL(target_submit_cmd); 1675 1676 static void target_complete_tmr_failure(struct work_struct *work) 1677 { 1678 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); 1679 1680 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; 1681 se_cmd->se_tfo->queue_tm_rsp(se_cmd); 1682 1683 transport_cmd_check_stop_to_fabric(se_cmd); 1684 } 1685 1686 static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag, 1687 u64 *unpacked_lun) 1688 { 1689 struct se_cmd *se_cmd; 1690 unsigned long flags; 1691 bool ret = false; 1692 1693 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 1694 list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) { 1695 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 1696 continue; 1697 1698 if (se_cmd->tag == tag) { 1699 *unpacked_lun = se_cmd->orig_fe_lun; 1700 ret = true; 1701 break; 1702 } 1703 } 1704 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 1705 1706 return ret; 1707 } 1708 1709 /** 1710 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd 1711 * for TMR CDBs 1712 * 1713 * @se_cmd: command descriptor to submit 1714 * @se_sess: associated se_sess for endpoint 1715 * @sense: pointer to SCSI sense buffer 1716 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1717 * @fabric_tmr_ptr: fabric context for TMR req 1718 * @tm_type: Type of TM request 1719 * @gfp: gfp type for caller 1720 * @tag: referenced task tag for TMR_ABORT_TASK 1721 * @flags: submit cmd flags 1722 * 1723 * Callable from all contexts. 1724 **/ 1725 1726 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 1727 unsigned char *sense, u64 unpacked_lun, 1728 void *fabric_tmr_ptr, unsigned char tm_type, 1729 gfp_t gfp, u64 tag, int flags) 1730 { 1731 struct se_portal_group *se_tpg; 1732 int ret; 1733 1734 se_tpg = se_sess->se_tpg; 1735 BUG_ON(!se_tpg); 1736 1737 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1738 0, DMA_NONE, TCM_SIMPLE_TAG, sense); 1739 /* 1740 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req 1741 * allocation failure. 1742 */ 1743 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); 1744 if (ret < 0) 1745 return -ENOMEM; 1746 1747 if (tm_type == TMR_ABORT_TASK) 1748 se_cmd->se_tmr_req->ref_task_tag = tag; 1749 1750 /* See target_submit_cmd for commentary */ 1751 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); 1752 if (ret) { 1753 core_tmr_release_req(se_cmd->se_tmr_req); 1754 return ret; 1755 } 1756 /* 1757 * If this is ABORT_TASK with no explicit fabric provided LUN, 1758 * go ahead and search active session tags for a match to figure 1759 * out unpacked_lun for the original se_cmd. 
1760 */ 1761 if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) { 1762 if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun)) 1763 goto failure; 1764 } 1765 1766 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); 1767 if (ret) 1768 goto failure; 1769 1770 transport_generic_handle_tmr(se_cmd); 1771 return 0; 1772 1773 /* 1774 * For callback during failure handling, push this work off 1775 * to process context with TMR_LUN_DOES_NOT_EXIST status. 1776 */ 1777 failure: 1778 INIT_WORK(&se_cmd->work, target_complete_tmr_failure); 1779 schedule_work(&se_cmd->work); 1780 return 0; 1781 } 1782 EXPORT_SYMBOL(target_submit_tmr); 1783 1784 /* 1785 * Handle SAM-esque emulation for generic transport request failures. 1786 */ 1787 void transport_generic_request_failure(struct se_cmd *cmd, 1788 sense_reason_t sense_reason) 1789 { 1790 int ret = 0, post_ret; 1791 1792 pr_debug("-----[ Storage Engine Exception; sense_reason %d\n", 1793 sense_reason); 1794 target_show_cmd("-----[ ", cmd); 1795 1796 /* 1797 * For SAM Task Attribute emulation for failed struct se_cmd 1798 */ 1799 transport_complete_task_attr(cmd); 1800 1801 if (cmd->transport_complete_callback) 1802 cmd->transport_complete_callback(cmd, false, &post_ret); 1803 1804 if (cmd->transport_state & CMD_T_ABORTED) { 1805 INIT_WORK(&cmd->work, target_abort_work); 1806 queue_work(target_completion_wq, &cmd->work); 1807 return; 1808 } 1809 1810 switch (sense_reason) { 1811 case TCM_NON_EXISTENT_LUN: 1812 case TCM_UNSUPPORTED_SCSI_OPCODE: 1813 case TCM_INVALID_CDB_FIELD: 1814 case TCM_INVALID_PARAMETER_LIST: 1815 case TCM_PARAMETER_LIST_LENGTH_ERROR: 1816 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 1817 case TCM_UNKNOWN_MODE_PAGE: 1818 case TCM_WRITE_PROTECTED: 1819 case TCM_ADDRESS_OUT_OF_RANGE: 1820 case TCM_CHECK_CONDITION_ABORT_CMD: 1821 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1822 case TCM_CHECK_CONDITION_NOT_READY: 1823 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: 1824 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: 1825 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: 1826 case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: 1827 case TCM_TOO_MANY_TARGET_DESCS: 1828 case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE: 1829 case TCM_TOO_MANY_SEGMENT_DESCS: 1830 case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE: 1831 break; 1832 case TCM_OUT_OF_RESOURCES: 1833 cmd->scsi_status = SAM_STAT_TASK_SET_FULL; 1834 goto queue_status; 1835 case TCM_LUN_BUSY: 1836 cmd->scsi_status = SAM_STAT_BUSY; 1837 goto queue_status; 1838 case TCM_RESERVATION_CONFLICT: 1839 /* 1840 * No SENSE Data payload for this case, set SCSI Status 1841 * and queue the response to $FABRIC_MOD. 1842 * 1843 * Uses linux/include/scsi/scsi.h SAM status codes defs 1844 */ 1845 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1846 /* 1847 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 1848 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 1849 * CONFLICT STATUS. 
1850 *
1851 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1852 */
1853 if (cmd->se_sess &&
1854 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
1855 target_ua_allocate_lun(cmd->se_sess->se_node_acl,
1856 cmd->orig_fe_lun, 0x2C,
1857 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1858 }
1859
1860 goto queue_status;
1861 default:
1862 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1863 cmd->t_task_cdb[0], sense_reason);
1864 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1865 break;
1866 }
1867
1868 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
1869 if (ret)
1870 goto queue_full;
1871
1872 check_stop:
1873 transport_cmd_check_stop_to_fabric(cmd);
1874 return;
1875
1876 queue_status:
1877 trace_target_cmd_complete(cmd);
1878 ret = cmd->se_tfo->queue_status(cmd);
1879 if (!ret)
1880 goto check_stop;
1881 queue_full:
1882 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
1883 }
1884 EXPORT_SYMBOL(transport_generic_request_failure);
1885
1886 void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
1887 {
1888 sense_reason_t ret;
1889
1890 if (!cmd->execute_cmd) {
1891 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1892 goto err;
1893 }
1894 if (do_checks) {
1895 /*
1896 * Check for an existing UNIT ATTENTION condition after
1897 * target_handle_task_attr() has done SAM task attr
1898 * checking, and execution may already have been deferred
1899 * out to target_restart_delayed_cmds() context.
1900 */
1901 ret = target_scsi3_ua_check(cmd);
1902 if (ret)
1903 goto err;
1904
1905 ret = target_alua_state_check(cmd);
1906 if (ret)
1907 goto err;
1908
1909 ret = target_check_reservation(cmd);
1910 if (ret) {
1911 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1912 goto err;
1913 }
1914 }
1915
1916 ret = cmd->execute_cmd(cmd);
1917 if (!ret)
1918 return;
1919 err:
1920 spin_lock_irq(&cmd->t_state_lock);
1921 cmd->transport_state &= ~CMD_T_SENT;
1922 spin_unlock_irq(&cmd->t_state_lock);
1923
1924 transport_generic_request_failure(cmd, ret);
1925 }
1926
1927 static int target_write_prot_action(struct se_cmd *cmd)
1928 {
1929 u32 sectors;
1930 /*
1931 * Perform WRITE_INSERT of PI using software emulation when backend
1932 * device has PI enabled, if the transport has not already generated
1933 * PI using hardware WRITE_INSERT offload.
1934 */
1935 switch (cmd->prot_op) {
1936 case TARGET_PROT_DOUT_INSERT:
1937 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
1938 sbc_dif_generate(cmd);
1939 break;
1940 case TARGET_PROT_DOUT_STRIP:
1941 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
1942 break;
1943
1944 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
1945 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
1946 sectors, 0, cmd->t_prot_sg, 0);
1947 if (unlikely(cmd->pi_err)) {
1948 spin_lock_irq(&cmd->t_state_lock);
1949 cmd->transport_state &= ~CMD_T_SENT;
1950 spin_unlock_irq(&cmd->t_state_lock);
1951 transport_generic_request_failure(cmd, cmd->pi_err);
1952 return -1;
1953 }
1954 break;
1955 default:
1956 break;
1957 }
1958
1959 return 0;
1960 }
1961
1962 static bool target_handle_task_attr(struct se_cmd *cmd)
1963 {
1964 struct se_device *dev = cmd->se_dev;
1965
1966 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1967 return false;
1968
1969 cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
1970
1971 /*
1972 * Check for HEAD_OF_QUEUE, and if set return false so that the passed
1973 * struct se_cmd is executed immediately, ahead of any queued tasks.
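 * ORDERED commands bump dev->dev_ordered_sync and only run immediately when
 * no SIMPLE commands are still in flight; SIMPLE commands bump
 * dev->simple_cmds and only run immediately while dev_ordered_sync is zero.
 * Anything else is parked on dev->delayed_cmd_list until
 * transport_complete_task_attr() restarts it via target_restart_delayed_cmds().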
1974 */
1975 switch (cmd->sam_task_attr) {
1976 case TCM_HEAD_TAG:
1977 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
1978 cmd->t_task_cdb[0]);
1979 return false;
1980 case TCM_ORDERED_TAG:
1981 atomic_inc_mb(&dev->dev_ordered_sync);
1982
1983 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
1984 cmd->t_task_cdb[0]);
1985
1986 /*
1987 * Execute an ORDERED command if no other older commands
1988 * exist that need to be completed first.
1989 */
1990 if (!atomic_read(&dev->simple_cmds))
1991 return false;
1992 break;
1993 default:
1994 /*
1995 * For SIMPLE and UNTAGGED Task Attribute commands
1996 */
1997 atomic_inc_mb(&dev->simple_cmds);
1998 break;
1999 }
2000
2001 if (atomic_read(&dev->dev_ordered_sync) == 0)
2002 return false;
2003
2004 spin_lock(&dev->delayed_cmd_lock);
2005 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
2006 spin_unlock(&dev->delayed_cmd_lock);
2007
2008 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
2009 cmd->t_task_cdb[0], cmd->sam_task_attr);
2010 return true;
2011 }
2012
2013 void target_execute_cmd(struct se_cmd *cmd)
2014 {
2015 /*
2016 * Determine if frontend context caller is requesting the stopping of
2017 * this command for frontend exceptions.
2018 *
2019 * If the received CDB has already been aborted, stop processing it here.
2020 */
2021 if (target_cmd_interrupted(cmd))
2022 return;
2023
2024 spin_lock_irq(&cmd->t_state_lock);
2025 cmd->t_state = TRANSPORT_PROCESSING;
2026 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
2027 spin_unlock_irq(&cmd->t_state_lock);
2028
2029 if (target_write_prot_action(cmd))
2030 return;
2031
2032 if (target_handle_task_attr(cmd)) {
2033 spin_lock_irq(&cmd->t_state_lock);
2034 cmd->transport_state &= ~CMD_T_SENT;
2035 spin_unlock_irq(&cmd->t_state_lock);
2036 return;
2037 }
2038
2039 __target_execute_cmd(cmd, true);
2040 }
2041 EXPORT_SYMBOL(target_execute_cmd);
2042
2043 /*
2044 * Process all commands up to the last received ORDERED task attribute,
2045 * which requires another blocking boundary.
2046 */
2047 static void target_restart_delayed_cmds(struct se_device *dev)
2048 {
2049 for (;;) {
2050 struct se_cmd *cmd;
2051
2052 spin_lock(&dev->delayed_cmd_lock);
2053 if (list_empty(&dev->delayed_cmd_list)) {
2054 spin_unlock(&dev->delayed_cmd_lock);
2055 break;
2056 }
2057
2058 cmd = list_entry(dev->delayed_cmd_list.next,
2059 struct se_cmd, se_delayed_node);
2060 list_del(&cmd->se_delayed_node);
2061 spin_unlock(&dev->delayed_cmd_lock);
2062
2063 cmd->transport_state |= CMD_T_SENT;
2064
2065 __target_execute_cmd(cmd, true);
2066
2067 if (cmd->sam_task_attr == TCM_ORDERED_TAG)
2068 break;
2069 }
2070 }
2071
2072 /*
2073 * Called from I/O completion to determine which dormant/delayed
2074 * and ordered cmds need to have their tasks added to the execution queue.
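 * A SIMPLE completion decrements dev->simple_cmds, an ORDERED completion
 * decrements dev->dev_ordered_sync, and every completion that had
 * SCF_TASK_ATTR_SET advances dev->dev_cur_ordered_id before any delayed
 * commands are restarted.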
2075 */
2076 static void transport_complete_task_attr(struct se_cmd *cmd)
2077 {
2078 struct se_device *dev = cmd->se_dev;
2079
2080 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
2081 return;
2082
2083 if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
2084 goto restart;
2085
2086 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
2087 atomic_dec_mb(&dev->simple_cmds);
2088 dev->dev_cur_ordered_id++;
2089 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
2090 dev->dev_cur_ordered_id++;
2091 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
2092 dev->dev_cur_ordered_id);
2093 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
2094 atomic_dec_mb(&dev->dev_ordered_sync);
2095
2096 dev->dev_cur_ordered_id++;
2097 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
2098 dev->dev_cur_ordered_id);
2099 }
2100 cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
2101
2102 restart:
2103 target_restart_delayed_cmds(dev);
2104 }
2105
2106 static void transport_complete_qf(struct se_cmd *cmd)
2107 {
2108 int ret = 0;
2109
2110 transport_complete_task_attr(cmd);
2111 /*
2112 * If a fabric driver ->write_pending() or ->queue_data_in() callback
2113 * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and
2114 * the same callbacks should not be retried. Return CHECK_CONDITION
2115 * if a scsi_status is not already set.
2116 *
2117 * If a fabric driver ->queue_status() has returned non-zero, always
2118 * keep retrying no matter what.
2119 */
2120 if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
2121 if (cmd->scsi_status)
2122 goto queue_status;
2123
2124 translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
2125 goto queue_status;
2126 }
2127
2128 /*
2129 * Check if we need to send a sense buffer from
2130 * the struct se_cmd in question. We do NOT want
2131 * to take this path if the IO has been marked as
2132 * needing to be treated like a "normal read". This
2133 * is the case if it's a tape read, and either the
2134 * FM, EOM, or ILI bits are set, but there is no
2135 * sense data.
2136 */
2137 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2138 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
2139 goto queue_status;
2140
2141 switch (cmd->data_direction) {
2142 case DMA_FROM_DEVICE:
2143 /* queue status if not treating this as a normal read */
2144 if (cmd->scsi_status &&
2145 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
2146 goto queue_status;
2147
2148 trace_target_cmd_complete(cmd);
2149 ret = cmd->se_tfo->queue_data_in(cmd);
2150 break;
2151 case DMA_TO_DEVICE:
2152 if (cmd->se_cmd_flags & SCF_BIDI) {
2153 ret = cmd->se_tfo->queue_data_in(cmd);
2154 break;
2155 }
2156 /* fall through */
2157 case DMA_NONE:
2158 queue_status:
2159 trace_target_cmd_complete(cmd);
2160 ret = cmd->se_tfo->queue_status(cmd);
2161 break;
2162 default:
2163 break;
2164 }
2165
2166 if (ret < 0) {
2167 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2168 return;
2169 }
2170 transport_cmd_check_stop_to_fabric(cmd);
2171 }
2172
2173 static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
2174 int err, bool write_pending)
2175 {
2176 /*
2177 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
2178 * ->queue_data_in() callbacks from new process context.
2179 *
2180 * Otherwise, for other errors, transport_complete_qf() will send
2181 * CHECK_CONDITION via ->queue_status() instead of attempting to
2182 * retry associated fabric driver data-transfer callbacks.
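 * In either case the command is placed on dev->qf_cmd_list, dev_qf_count is
 * bumped, and dev->qf_work_queue is scheduled to handle it from process
 * context.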
2183 */
2184 if (err == -EAGAIN || err == -ENOMEM) {
2185 cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
2186 TRANSPORT_COMPLETE_QF_OK;
2187 } else {
2188 pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
2189 cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
2190 }
2191
2192 spin_lock_irq(&dev->qf_cmd_lock);
2193 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
2194 atomic_inc_mb(&dev->dev_qf_count);
2195 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
2196
2197 schedule_work(&cmd->se_dev->qf_work_queue);
2198 }
2199
2200 static bool target_read_prot_action(struct se_cmd *cmd)
2201 {
2202 switch (cmd->prot_op) {
2203 case TARGET_PROT_DIN_STRIP:
2204 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
2205 u32 sectors = cmd->data_length >>
2206 ilog2(cmd->se_dev->dev_attrib.block_size);
2207
2208 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
2209 sectors, 0, cmd->t_prot_sg,
2210 0);
2211 if (cmd->pi_err)
2212 return true;
2213 }
2214 break;
2215 case TARGET_PROT_DIN_INSERT:
2216 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
2217 break;
2218
2219 sbc_dif_generate(cmd);
2220 break;
2221 default:
2222 break;
2223 }
2224
2225 return false;
2226 }
2227
2228 static void target_complete_ok_work(struct work_struct *work)
2229 {
2230 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2231 int ret;
2232
2233 /*
2234 * Check if we need to move delayed/dormant tasks from cmds on the
2235 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
2236 * Attribute.
2237 */
2238 transport_complete_task_attr(cmd);
2239
2240 /*
2241 * Check to schedule QUEUE_FULL work, or execute an existing
2242 * cmd->transport_qf_callback()
2243 */
2244 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
2245 schedule_work(&cmd->se_dev->qf_work_queue);
2246
2247 /*
2248 * Check if we need to send a sense buffer from
2249 * the struct se_cmd in question. We do NOT want
2250 * to take this path if the IO has been marked as
2251 * needing to be treated like a "normal read". This
2252 * is the case if it's a tape read, and either the
2253 * FM, EOM, or ILI bits are set, but there is no
2254 * sense data.
2255 */
2256 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2257 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2258 WARN_ON(!cmd->scsi_status);
2259 ret = transport_send_check_condition_and_sense(
2260 cmd, 0, 1);
2261 if (ret)
2262 goto queue_full;
2263
2264 transport_cmd_check_stop_to_fabric(cmd);
2265 return;
2266 }
2267 /*
2268 * Check for a callback, used by, amongst other things,
2269 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
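 * A non-zero sense_reason returned by the callback is turned into a
 * CHECK_CONDITION response; a zero return with post_ret == 0 means the
 * callback has taken over completion, except for a zero-length
 * COMPARE_AND_WRITE which still falls through to queue a response.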
2270 */ 2271 if (cmd->transport_complete_callback) { 2272 sense_reason_t rc; 2273 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); 2274 bool zero_dl = !(cmd->data_length); 2275 int post_ret = 0; 2276 2277 rc = cmd->transport_complete_callback(cmd, true, &post_ret); 2278 if (!rc && !post_ret) { 2279 if (caw && zero_dl) 2280 goto queue_rsp; 2281 2282 return; 2283 } else if (rc) { 2284 ret = transport_send_check_condition_and_sense(cmd, 2285 rc, 0); 2286 if (ret) 2287 goto queue_full; 2288 2289 transport_cmd_check_stop_to_fabric(cmd); 2290 return; 2291 } 2292 } 2293 2294 queue_rsp: 2295 switch (cmd->data_direction) { 2296 case DMA_FROM_DEVICE: 2297 /* 2298 * if this is a READ-type IO, but SCSI status 2299 * is set, then skip returning data and just 2300 * return the status -- unless this IO is marked 2301 * as needing to be treated as a normal read, 2302 * in which case we want to go ahead and return 2303 * the data. This happens, for example, for tape 2304 * reads with the FM, EOM, or ILI bits set, with 2305 * no sense data. 2306 */ 2307 if (cmd->scsi_status && 2308 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) 2309 goto queue_status; 2310 2311 atomic_long_add(cmd->data_length, 2312 &cmd->se_lun->lun_stats.tx_data_octets); 2313 /* 2314 * Perform READ_STRIP of PI using software emulation when 2315 * backend had PI enabled, if the transport will not be 2316 * performing hardware READ_STRIP offload. 2317 */ 2318 if (target_read_prot_action(cmd)) { 2319 ret = transport_send_check_condition_and_sense(cmd, 2320 cmd->pi_err, 0); 2321 if (ret) 2322 goto queue_full; 2323 2324 transport_cmd_check_stop_to_fabric(cmd); 2325 return; 2326 } 2327 2328 trace_target_cmd_complete(cmd); 2329 ret = cmd->se_tfo->queue_data_in(cmd); 2330 if (ret) 2331 goto queue_full; 2332 break; 2333 case DMA_TO_DEVICE: 2334 atomic_long_add(cmd->data_length, 2335 &cmd->se_lun->lun_stats.rx_data_octets); 2336 /* 2337 * Check if we need to send READ payload for BIDI-COMMAND 2338 */ 2339 if (cmd->se_cmd_flags & SCF_BIDI) { 2340 atomic_long_add(cmd->data_length, 2341 &cmd->se_lun->lun_stats.tx_data_octets); 2342 ret = cmd->se_tfo->queue_data_in(cmd); 2343 if (ret) 2344 goto queue_full; 2345 break; 2346 } 2347 /* fall through */ 2348 case DMA_NONE: 2349 queue_status: 2350 trace_target_cmd_complete(cmd); 2351 ret = cmd->se_tfo->queue_status(cmd); 2352 if (ret) 2353 goto queue_full; 2354 break; 2355 default: 2356 break; 2357 } 2358 2359 transport_cmd_check_stop_to_fabric(cmd); 2360 return; 2361 2362 queue_full: 2363 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 2364 " data_direction: %d\n", cmd, cmd->data_direction); 2365 2366 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 2367 } 2368 2369 void target_free_sgl(struct scatterlist *sgl, int nents) 2370 { 2371 sgl_free_n_order(sgl, nents, 0); 2372 } 2373 EXPORT_SYMBOL(target_free_sgl); 2374 2375 static inline void transport_reset_sgl_orig(struct se_cmd *cmd) 2376 { 2377 /* 2378 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE 2379 * emulation, and free + reset pointers if necessary.. 
2380 */ 2381 if (!cmd->t_data_sg_orig) 2382 return; 2383 2384 kfree(cmd->t_data_sg); 2385 cmd->t_data_sg = cmd->t_data_sg_orig; 2386 cmd->t_data_sg_orig = NULL; 2387 cmd->t_data_nents = cmd->t_data_nents_orig; 2388 cmd->t_data_nents_orig = 0; 2389 } 2390 2391 static inline void transport_free_pages(struct se_cmd *cmd) 2392 { 2393 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2394 target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); 2395 cmd->t_prot_sg = NULL; 2396 cmd->t_prot_nents = 0; 2397 } 2398 2399 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { 2400 /* 2401 * Release special case READ buffer payload required for 2402 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE 2403 */ 2404 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { 2405 target_free_sgl(cmd->t_bidi_data_sg, 2406 cmd->t_bidi_data_nents); 2407 cmd->t_bidi_data_sg = NULL; 2408 cmd->t_bidi_data_nents = 0; 2409 } 2410 transport_reset_sgl_orig(cmd); 2411 return; 2412 } 2413 transport_reset_sgl_orig(cmd); 2414 2415 target_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2416 cmd->t_data_sg = NULL; 2417 cmd->t_data_nents = 0; 2418 2419 target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 2420 cmd->t_bidi_data_sg = NULL; 2421 cmd->t_bidi_data_nents = 0; 2422 } 2423 2424 void *transport_kmap_data_sg(struct se_cmd *cmd) 2425 { 2426 struct scatterlist *sg = cmd->t_data_sg; 2427 struct page **pages; 2428 int i; 2429 2430 /* 2431 * We need to take into account a possible offset here for fabrics like 2432 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 2433 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 2434 */ 2435 if (!cmd->t_data_nents) 2436 return NULL; 2437 2438 BUG_ON(!sg); 2439 if (cmd->t_data_nents == 1) 2440 return kmap(sg_page(sg)) + sg->offset; 2441 2442 /* >1 page. use vmap */ 2443 pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL); 2444 if (!pages) 2445 return NULL; 2446 2447 /* convert sg[] to pages[] */ 2448 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 2449 pages[i] = sg_page(sg); 2450 } 2451 2452 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 2453 kfree(pages); 2454 if (!cmd->t_data_vmap) 2455 return NULL; 2456 2457 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 2458 } 2459 EXPORT_SYMBOL(transport_kmap_data_sg); 2460 2461 void transport_kunmap_data_sg(struct se_cmd *cmd) 2462 { 2463 if (!cmd->t_data_nents) { 2464 return; 2465 } else if (cmd->t_data_nents == 1) { 2466 kunmap(sg_page(cmd->t_data_sg)); 2467 return; 2468 } 2469 2470 vunmap(cmd->t_data_vmap); 2471 cmd->t_data_vmap = NULL; 2472 } 2473 EXPORT_SYMBOL(transport_kunmap_data_sg); 2474 2475 int 2476 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, 2477 bool zero_page, bool chainable) 2478 { 2479 gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0); 2480 2481 *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents); 2482 return *sgl ? 0 : -ENOMEM; 2483 } 2484 EXPORT_SYMBOL(target_alloc_sgl); 2485 2486 /* 2487 * Allocate any required resources to execute the command. For writes we 2488 * might not have the payload yet, so notify the fabric via a call to 2489 * ->write_pending instead. Otherwise place it on the execution queue. 
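 * This is also where the protection-information SGL (t_prot_sg) and, for
 * BIDI and COMPARE_AND_WRITE payloads, t_bidi_data_sg are allocated when the
 * fabric has not provided its own memory via the *_SG_TO_MEM_NOALLOC flags
 * (COMPARE_AND_WRITE still gets a t_bidi_data_sg in the NOALLOC case).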
2490 */ 2491 sense_reason_t 2492 transport_generic_new_cmd(struct se_cmd *cmd) 2493 { 2494 unsigned long flags; 2495 int ret = 0; 2496 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); 2497 2498 if (cmd->prot_op != TARGET_PROT_NORMAL && 2499 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2500 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, 2501 cmd->prot_length, true, false); 2502 if (ret < 0) 2503 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2504 } 2505 2506 /* 2507 * Determine if the TCM fabric module has already allocated physical 2508 * memory, and is directly calling transport_generic_map_mem_to_cmd() 2509 * beforehand. 2510 */ 2511 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 2512 cmd->data_length) { 2513 2514 if ((cmd->se_cmd_flags & SCF_BIDI) || 2515 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { 2516 u32 bidi_length; 2517 2518 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) 2519 bidi_length = cmd->t_task_nolb * 2520 cmd->se_dev->dev_attrib.block_size; 2521 else 2522 bidi_length = cmd->data_length; 2523 2524 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2525 &cmd->t_bidi_data_nents, 2526 bidi_length, zero_flag, false); 2527 if (ret < 0) 2528 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2529 } 2530 2531 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2532 cmd->data_length, zero_flag, false); 2533 if (ret < 0) 2534 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2535 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 2536 cmd->data_length) { 2537 /* 2538 * Special case for COMPARE_AND_WRITE with fabrics 2539 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. 2540 */ 2541 u32 caw_length = cmd->t_task_nolb * 2542 cmd->se_dev->dev_attrib.block_size; 2543 2544 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2545 &cmd->t_bidi_data_nents, 2546 caw_length, zero_flag, false); 2547 if (ret < 0) 2548 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2549 } 2550 /* 2551 * If this command is not a write we can execute it right here, 2552 * for write buffers we need to notify the fabric driver first 2553 * and let it call back once the write buffers are ready. 2554 */ 2555 target_add_to_state_list(cmd); 2556 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { 2557 target_execute_cmd(cmd); 2558 return 0; 2559 } 2560 2561 spin_lock_irqsave(&cmd->t_state_lock, flags); 2562 cmd->t_state = TRANSPORT_WRITE_PENDING; 2563 /* 2564 * Determine if frontend context caller is requesting the stopping of 2565 * this command for frontend exceptions. 
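 * If CMD_T_STOP is already set (and the fabric does not insist on
 * ->write_pending() always being called), the command completes
 * t_transport_stop_comp instead of being handed back to the fabric.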
2566 */ 2567 if (cmd->transport_state & CMD_T_STOP && 2568 !cmd->se_tfo->write_pending_must_be_called) { 2569 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 2570 __func__, __LINE__, cmd->tag); 2571 2572 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2573 2574 complete_all(&cmd->t_transport_stop_comp); 2575 return 0; 2576 } 2577 cmd->transport_state &= ~CMD_T_ACTIVE; 2578 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2579 2580 ret = cmd->se_tfo->write_pending(cmd); 2581 if (ret) 2582 goto queue_full; 2583 2584 return 0; 2585 2586 queue_full: 2587 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 2588 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); 2589 return 0; 2590 } 2591 EXPORT_SYMBOL(transport_generic_new_cmd); 2592 2593 static void transport_write_pending_qf(struct se_cmd *cmd) 2594 { 2595 unsigned long flags; 2596 int ret; 2597 bool stop; 2598 2599 spin_lock_irqsave(&cmd->t_state_lock, flags); 2600 stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED)); 2601 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2602 2603 if (stop) { 2604 pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n", 2605 __func__, __LINE__, cmd->tag); 2606 complete_all(&cmd->t_transport_stop_comp); 2607 return; 2608 } 2609 2610 ret = cmd->se_tfo->write_pending(cmd); 2611 if (ret) { 2612 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", 2613 cmd); 2614 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); 2615 } 2616 } 2617 2618 static bool 2619 __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, 2620 unsigned long *flags); 2621 2622 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) 2623 { 2624 unsigned long flags; 2625 2626 spin_lock_irqsave(&cmd->t_state_lock, flags); 2627 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); 2628 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2629 } 2630 2631 /* 2632 * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has 2633 * finished. 2634 */ 2635 void target_put_cmd_and_wait(struct se_cmd *cmd) 2636 { 2637 DECLARE_COMPLETION_ONSTACK(compl); 2638 2639 WARN_ON_ONCE(cmd->abrt_compl); 2640 cmd->abrt_compl = &compl; 2641 target_put_sess_cmd(cmd); 2642 wait_for_completion(&compl); 2643 } 2644 2645 /* 2646 * This function is called by frontend drivers after processing of a command 2647 * has finished. 2648 * 2649 * The protocol for ensuring that either the regular frontend command 2650 * processing flow or target_handle_abort() code drops one reference is as 2651 * follows: 2652 * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause 2653 * the frontend driver to call this function synchronously or asynchronously. 2654 * That will cause one reference to be dropped. 2655 * - During regular command processing the target core sets CMD_T_COMPLETE 2656 * before invoking one of the .queue_*() functions. 2657 * - The code that aborts commands skips commands and TMFs for which 2658 * CMD_T_COMPLETE has been set. 2659 * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for 2660 * commands that will be aborted. 2661 * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set 2662 * transport_generic_free_cmd() skips its call to target_put_sess_cmd(). 2663 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will 2664 * be called and will drop a reference. 2665 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task() 2666 * will be called. 
target_handle_abort() will drop the final reference. 2667 */ 2668 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2669 { 2670 DECLARE_COMPLETION_ONSTACK(compl); 2671 int ret = 0; 2672 bool aborted = false, tas = false; 2673 2674 if (wait_for_tasks) 2675 target_wait_free_cmd(cmd, &aborted, &tas); 2676 2677 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) { 2678 /* 2679 * Handle WRITE failure case where transport_generic_new_cmd() 2680 * has already added se_cmd to state_list, but fabric has 2681 * failed command before I/O submission. 2682 */ 2683 if (cmd->state_active) 2684 target_remove_from_state_list(cmd); 2685 } 2686 if (aborted) 2687 cmd->free_compl = &compl; 2688 ret = target_put_sess_cmd(cmd); 2689 if (aborted) { 2690 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); 2691 wait_for_completion(&compl); 2692 ret = 1; 2693 } 2694 return ret; 2695 } 2696 EXPORT_SYMBOL(transport_generic_free_cmd); 2697 2698 /** 2699 * target_get_sess_cmd - Add command to active ->sess_cmd_list 2700 * @se_cmd: command descriptor to add 2701 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() 2702 */ 2703 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) 2704 { 2705 struct se_session *se_sess = se_cmd->se_sess; 2706 unsigned long flags; 2707 int ret = 0; 2708 2709 /* 2710 * Add a second kref if the fabric caller is expecting to handle 2711 * fabric acknowledgement that requires two target_put_sess_cmd() 2712 * invocations before se_cmd descriptor release. 2713 */ 2714 if (ack_kref) { 2715 if (!kref_get_unless_zero(&se_cmd->cmd_kref)) 2716 return -EINVAL; 2717 2718 se_cmd->se_cmd_flags |= SCF_ACK_KREF; 2719 } 2720 2721 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2722 if (se_sess->sess_tearing_down) { 2723 ret = -ESHUTDOWN; 2724 goto out; 2725 } 2726 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2727 percpu_ref_get(&se_sess->cmd_count); 2728 out: 2729 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2730 2731 if (ret && ack_kref) 2732 target_put_sess_cmd(se_cmd); 2733 2734 return ret; 2735 } 2736 EXPORT_SYMBOL(target_get_sess_cmd); 2737 2738 static void target_free_cmd_mem(struct se_cmd *cmd) 2739 { 2740 transport_free_pages(cmd); 2741 2742 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 2743 core_tmr_release_req(cmd->se_tmr_req); 2744 if (cmd->t_task_cdb != cmd->__t_task_cdb) 2745 kfree(cmd->t_task_cdb); 2746 } 2747 2748 static void target_release_cmd_kref(struct kref *kref) 2749 { 2750 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2751 struct se_session *se_sess = se_cmd->se_sess; 2752 struct completion *free_compl = se_cmd->free_compl; 2753 struct completion *abrt_compl = se_cmd->abrt_compl; 2754 unsigned long flags; 2755 2756 if (se_cmd->lun_ref_active) 2757 percpu_ref_put(&se_cmd->se_lun->lun_ref); 2758 2759 if (se_sess) { 2760 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2761 list_del_init(&se_cmd->se_cmd_list); 2762 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2763 } 2764 2765 target_free_cmd_mem(se_cmd); 2766 se_cmd->se_tfo->release_cmd(se_cmd); 2767 if (free_compl) 2768 complete(free_compl); 2769 if (abrt_compl) 2770 complete(abrt_compl); 2771 2772 percpu_ref_put(&se_sess->cmd_count); 2773 } 2774 2775 /** 2776 * target_put_sess_cmd - decrease the command reference count 2777 * @se_cmd: command to drop a reference from 2778 * 2779 * Returns 1 if and only if this target_put_sess_cmd() call caused the 2780 * refcount to drop to zero. Returns zero otherwise. 
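 * Once the refcount does hit zero, target_release_cmd_kref() drops the LUN
 * reference, unlinks the command from sess_cmd_list, frees its memory via
 * target_free_cmd_mem(), calls the fabric's ->release_cmd() and completes
 * any free_compl/abrt_compl waiters.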
2781 */ 2782 int target_put_sess_cmd(struct se_cmd *se_cmd) 2783 { 2784 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); 2785 } 2786 EXPORT_SYMBOL(target_put_sess_cmd); 2787 2788 static const char *data_dir_name(enum dma_data_direction d) 2789 { 2790 switch (d) { 2791 case DMA_BIDIRECTIONAL: return "BIDI"; 2792 case DMA_TO_DEVICE: return "WRITE"; 2793 case DMA_FROM_DEVICE: return "READ"; 2794 case DMA_NONE: return "NONE"; 2795 } 2796 2797 return "(?)"; 2798 } 2799 2800 static const char *cmd_state_name(enum transport_state_table t) 2801 { 2802 switch (t) { 2803 case TRANSPORT_NO_STATE: return "NO_STATE"; 2804 case TRANSPORT_NEW_CMD: return "NEW_CMD"; 2805 case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING"; 2806 case TRANSPORT_PROCESSING: return "PROCESSING"; 2807 case TRANSPORT_COMPLETE: return "COMPLETE"; 2808 case TRANSPORT_ISTATE_PROCESSING: 2809 return "ISTATE_PROCESSING"; 2810 case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP"; 2811 case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK"; 2812 case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR"; 2813 } 2814 2815 return "(?)"; 2816 } 2817 2818 static void target_append_str(char **str, const char *txt) 2819 { 2820 char *prev = *str; 2821 2822 *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) : 2823 kstrdup(txt, GFP_ATOMIC); 2824 kfree(prev); 2825 } 2826 2827 /* 2828 * Convert a transport state bitmask into a string. The caller is 2829 * responsible for freeing the returned pointer. 2830 */ 2831 static char *target_ts_to_str(u32 ts) 2832 { 2833 char *str = NULL; 2834 2835 if (ts & CMD_T_ABORTED) 2836 target_append_str(&str, "aborted"); 2837 if (ts & CMD_T_ACTIVE) 2838 target_append_str(&str, "active"); 2839 if (ts & CMD_T_COMPLETE) 2840 target_append_str(&str, "complete"); 2841 if (ts & CMD_T_SENT) 2842 target_append_str(&str, "sent"); 2843 if (ts & CMD_T_STOP) 2844 target_append_str(&str, "stop"); 2845 if (ts & CMD_T_FABRIC_STOP) 2846 target_append_str(&str, "fabric_stop"); 2847 2848 return str; 2849 } 2850 2851 static const char *target_tmf_name(enum tcm_tmreq_table tmf) 2852 { 2853 switch (tmf) { 2854 case TMR_ABORT_TASK: return "ABORT_TASK"; 2855 case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET"; 2856 case TMR_CLEAR_ACA: return "CLEAR_ACA"; 2857 case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET"; 2858 case TMR_LUN_RESET: return "LUN_RESET"; 2859 case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET"; 2860 case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET"; 2861 case TMR_UNKNOWN: break; 2862 } 2863 return "(?)"; 2864 } 2865 2866 void target_show_cmd(const char *pfx, struct se_cmd *cmd) 2867 { 2868 char *ts_str = target_ts_to_str(cmd->transport_state); 2869 const u8 *cdb = cmd->t_task_cdb; 2870 struct se_tmr_req *tmf = cmd->se_tmr_req; 2871 2872 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2873 pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n", 2874 pfx, cdb[0], cdb[1], cmd->tag, 2875 data_dir_name(cmd->data_direction), 2876 cmd->se_tfo->get_cmd_state(cmd), 2877 cmd_state_name(cmd->t_state), cmd->data_length, 2878 kref_read(&cmd->cmd_kref), ts_str); 2879 } else { 2880 pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n", 2881 pfx, target_tmf_name(tmf->function), cmd->tag, 2882 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd), 2883 cmd_state_name(cmd->t_state), 2884 kref_read(&cmd->cmd_kref), ts_str); 2885 } 2886 kfree(ts_str); 2887 } 2888 EXPORT_SYMBOL(target_show_cmd); 2889 2890 /** 2891 * 
target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued. 2892 * @se_sess: session to flag 2893 */ 2894 void target_sess_cmd_list_set_waiting(struct se_session *se_sess) 2895 { 2896 unsigned long flags; 2897 2898 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2899 se_sess->sess_tearing_down = 1; 2900 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2901 2902 percpu_ref_kill(&se_sess->cmd_count); 2903 } 2904 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); 2905 2906 /** 2907 * target_wait_for_sess_cmds - Wait for outstanding commands 2908 * @se_sess: session to wait for active I/O 2909 */ 2910 void target_wait_for_sess_cmds(struct se_session *se_sess) 2911 { 2912 struct se_cmd *cmd; 2913 int ret; 2914 2915 WARN_ON_ONCE(!se_sess->sess_tearing_down); 2916 2917 do { 2918 ret = wait_event_timeout(se_sess->cmd_list_wq, 2919 percpu_ref_is_zero(&se_sess->cmd_count), 2920 180 * HZ); 2921 list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list) 2922 target_show_cmd("session shutdown: still waiting for ", 2923 cmd); 2924 } while (ret <= 0); 2925 } 2926 EXPORT_SYMBOL(target_wait_for_sess_cmds); 2927 2928 /* 2929 * Prevent that new percpu_ref_tryget_live() calls succeed and wait until 2930 * all references to the LUN have been released. Called during LUN shutdown. 2931 */ 2932 void transport_clear_lun_ref(struct se_lun *lun) 2933 { 2934 percpu_ref_kill(&lun->lun_ref); 2935 wait_for_completion(&lun->lun_shutdown_comp); 2936 } 2937 2938 static bool 2939 __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, 2940 bool *aborted, bool *tas, unsigned long *flags) 2941 __releases(&cmd->t_state_lock) 2942 __acquires(&cmd->t_state_lock) 2943 { 2944 2945 assert_spin_locked(&cmd->t_state_lock); 2946 WARN_ON_ONCE(!irqs_disabled()); 2947 2948 if (fabric_stop) 2949 cmd->transport_state |= CMD_T_FABRIC_STOP; 2950 2951 if (cmd->transport_state & CMD_T_ABORTED) 2952 *aborted = true; 2953 2954 if (cmd->transport_state & CMD_T_TAS) 2955 *tas = true; 2956 2957 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 2958 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2959 return false; 2960 2961 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 2962 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2963 return false; 2964 2965 if (!(cmd->transport_state & CMD_T_ACTIVE)) 2966 return false; 2967 2968 if (fabric_stop && *aborted) 2969 return false; 2970 2971 cmd->transport_state |= CMD_T_STOP; 2972 2973 target_show_cmd("wait_for_tasks: Stopping ", cmd); 2974 2975 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 2976 2977 while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp, 2978 180 * HZ)) 2979 target_show_cmd("wait for tasks: ", cmd); 2980 2981 spin_lock_irqsave(&cmd->t_state_lock, *flags); 2982 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 2983 2984 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->" 2985 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); 2986 2987 return true; 2988 } 2989 2990 /** 2991 * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp 2992 * @cmd: command to wait on 2993 */ 2994 bool transport_wait_for_tasks(struct se_cmd *cmd) 2995 { 2996 unsigned long flags; 2997 bool ret, aborted = false, tas = false; 2998 2999 spin_lock_irqsave(&cmd->t_state_lock, flags); 3000 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); 3001 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3002 3003 return ret; 3004 } 3005 EXPORT_SYMBOL(transport_wait_for_tasks); 3006 3007 struct sense_info { 3008 u8 key; 
3009 u8 asc; 3010 u8 ascq; 3011 bool add_sector_info; 3012 }; 3013 3014 static const struct sense_info sense_info_table[] = { 3015 [TCM_NO_SENSE] = { 3016 .key = NOT_READY 3017 }, 3018 [TCM_NON_EXISTENT_LUN] = { 3019 .key = ILLEGAL_REQUEST, 3020 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */ 3021 }, 3022 [TCM_UNSUPPORTED_SCSI_OPCODE] = { 3023 .key = ILLEGAL_REQUEST, 3024 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 3025 }, 3026 [TCM_SECTOR_COUNT_TOO_MANY] = { 3027 .key = ILLEGAL_REQUEST, 3028 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 3029 }, 3030 [TCM_UNKNOWN_MODE_PAGE] = { 3031 .key = ILLEGAL_REQUEST, 3032 .asc = 0x24, /* INVALID FIELD IN CDB */ 3033 }, 3034 [TCM_CHECK_CONDITION_ABORT_CMD] = { 3035 .key = ABORTED_COMMAND, 3036 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */ 3037 .ascq = 0x03, 3038 }, 3039 [TCM_INCORRECT_AMOUNT_OF_DATA] = { 3040 .key = ABORTED_COMMAND, 3041 .asc = 0x0c, /* WRITE ERROR */ 3042 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */ 3043 }, 3044 [TCM_INVALID_CDB_FIELD] = { 3045 .key = ILLEGAL_REQUEST, 3046 .asc = 0x24, /* INVALID FIELD IN CDB */ 3047 }, 3048 [TCM_INVALID_PARAMETER_LIST] = { 3049 .key = ILLEGAL_REQUEST, 3050 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ 3051 }, 3052 [TCM_TOO_MANY_TARGET_DESCS] = { 3053 .key = ILLEGAL_REQUEST, 3054 .asc = 0x26, 3055 .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */ 3056 }, 3057 [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = { 3058 .key = ILLEGAL_REQUEST, 3059 .asc = 0x26, 3060 .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */ 3061 }, 3062 [TCM_TOO_MANY_SEGMENT_DESCS] = { 3063 .key = ILLEGAL_REQUEST, 3064 .asc = 0x26, 3065 .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */ 3066 }, 3067 [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = { 3068 .key = ILLEGAL_REQUEST, 3069 .asc = 0x26, 3070 .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */ 3071 }, 3072 [TCM_PARAMETER_LIST_LENGTH_ERROR] = { 3073 .key = ILLEGAL_REQUEST, 3074 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ 3075 }, 3076 [TCM_UNEXPECTED_UNSOLICITED_DATA] = { 3077 .key = ILLEGAL_REQUEST, 3078 .asc = 0x0c, /* WRITE ERROR */ 3079 .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */ 3080 }, 3081 [TCM_SERVICE_CRC_ERROR] = { 3082 .key = ABORTED_COMMAND, 3083 .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */ 3084 .ascq = 0x05, /* N/A */ 3085 }, 3086 [TCM_SNACK_REJECTED] = { 3087 .key = ABORTED_COMMAND, 3088 .asc = 0x11, /* READ ERROR */ 3089 .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */ 3090 }, 3091 [TCM_WRITE_PROTECTED] = { 3092 .key = DATA_PROTECT, 3093 .asc = 0x27, /* WRITE PROTECTED */ 3094 }, 3095 [TCM_ADDRESS_OUT_OF_RANGE] = { 3096 .key = ILLEGAL_REQUEST, 3097 .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ 3098 }, 3099 [TCM_CHECK_CONDITION_UNIT_ATTENTION] = { 3100 .key = UNIT_ATTENTION, 3101 }, 3102 [TCM_CHECK_CONDITION_NOT_READY] = { 3103 .key = NOT_READY, 3104 }, 3105 [TCM_MISCOMPARE_VERIFY] = { 3106 .key = MISCOMPARE, 3107 .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */ 3108 .ascq = 0x00, 3109 }, 3110 [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = { 3111 .key = ABORTED_COMMAND, 3112 .asc = 0x10, 3113 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */ 3114 .add_sector_info = true, 3115 }, 3116 [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = { 3117 .key = ABORTED_COMMAND, 3118 .asc = 0x10, 3119 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ 3120 .add_sector_info = true, 3121 }, 3122 [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = { 3123 .key = ABORTED_COMMAND, 3124 .asc = 0x10, 3125 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE 
TAG CHECK FAILED */
3126 .add_sector_info = true,
3127 },
3128 [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
3129 .key = COPY_ABORTED,
3130 .asc = 0x0d,
3131 .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
3132
3133 },
3134 [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
3135 /*
3136 * Returning ILLEGAL REQUEST would cause immediate IO errors on
3137 * Solaris initiators. Returning NOT READY instead means the
3138 * operations will be retried a finite number of times and we
3139 * can survive intermittent errors.
3140 */
3141 .key = NOT_READY,
3142 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
3143 },
3144 [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
3145 /*
3146 * From spc4r22, sections 5.7.7 and 5.7.8:
3147 * If a PERSISTENT RESERVE OUT command with a REGISTER service action
3148 * or a REGISTER AND IGNORE EXISTING KEY service action or
3149 * REGISTER AND MOVE service action is attempted,
3150 * but there are insufficient device server resources to complete the
3151 * operation, then the command shall be terminated with CHECK CONDITION
3152 * status, with the sense key set to ILLEGAL REQUEST, and the additional
3153 * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
3154 */
3155 .key = ILLEGAL_REQUEST,
3156 .asc = 0x55,
3157 .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
3158 },
3159 };
3160
3161 /**
3162 * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
3163 * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
3164 * be stored.
3165 * @reason: LIO sense reason code. If this argument has the value
3166 * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
3167 * dequeuing a unit attention fails due to multiple commands being processed
3168 * concurrently, set the command status to BUSY.
3171 */ 3172 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) 3173 { 3174 const struct sense_info *si; 3175 u8 *buffer = cmd->sense_buffer; 3176 int r = (__force int)reason; 3177 u8 key, asc, ascq; 3178 bool desc_format = target_sense_desc_format(cmd->se_dev); 3179 3180 if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key) 3181 si = &sense_info_table[r]; 3182 else 3183 si = &sense_info_table[(__force int) 3184 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE]; 3185 3186 key = si->key; 3187 if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) { 3188 if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc, 3189 &ascq)) { 3190 cmd->scsi_status = SAM_STAT_BUSY; 3191 return; 3192 } 3193 } else if (si->asc == 0) { 3194 WARN_ON_ONCE(cmd->scsi_asc == 0); 3195 asc = cmd->scsi_asc; 3196 ascq = cmd->scsi_ascq; 3197 } else { 3198 asc = si->asc; 3199 ascq = si->ascq; 3200 } 3201 3202 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 3203 cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 3204 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 3205 scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq); 3206 if (si->add_sector_info) 3207 WARN_ON_ONCE(scsi_set_sense_information(buffer, 3208 cmd->scsi_sense_length, 3209 cmd->bad_sector) < 0); 3210 } 3211 3212 int 3213 transport_send_check_condition_and_sense(struct se_cmd *cmd, 3214 sense_reason_t reason, int from_transport) 3215 { 3216 unsigned long flags; 3217 3218 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); 3219 3220 spin_lock_irqsave(&cmd->t_state_lock, flags); 3221 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 3222 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3223 return 0; 3224 } 3225 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 3226 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3227 3228 if (!from_transport) 3229 translate_sense_reason(cmd, reason); 3230 3231 trace_target_cmd_complete(cmd); 3232 return cmd->se_tfo->queue_status(cmd); 3233 } 3234 EXPORT_SYMBOL(transport_send_check_condition_and_sense); 3235 3236 /** 3237 * target_send_busy - Send SCSI BUSY status back to the initiator 3238 * @cmd: SCSI command for which to send a BUSY reply. 3239 * 3240 * Note: Only call this function if target_submit_cmd*() failed. 3241 */ 3242 int target_send_busy(struct se_cmd *cmd) 3243 { 3244 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); 3245 3246 cmd->scsi_status = SAM_STAT_BUSY; 3247 trace_target_cmd_complete(cmd); 3248 return cmd->se_tfo->queue_status(cmd); 3249 } 3250 EXPORT_SYMBOL(target_send_busy); 3251 3252 static void target_tmr_work(struct work_struct *work) 3253 { 3254 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 3255 struct se_device *dev = cmd->se_dev; 3256 struct se_tmr_req *tmr = cmd->se_tmr_req; 3257 int ret; 3258 3259 if (cmd->transport_state & CMD_T_ABORTED) 3260 goto aborted; 3261 3262 switch (tmr->function) { 3263 case TMR_ABORT_TASK: 3264 core_tmr_abort_task(dev, tmr, cmd->se_sess); 3265 break; 3266 case TMR_ABORT_TASK_SET: 3267 case TMR_CLEAR_ACA: 3268 case TMR_CLEAR_TASK_SET: 3269 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; 3270 break; 3271 case TMR_LUN_RESET: 3272 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); 3273 tmr->response = (!ret) ? 
TMR_FUNCTION_COMPLETE : 3274 TMR_FUNCTION_REJECTED; 3275 if (tmr->response == TMR_FUNCTION_COMPLETE) { 3276 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 3277 cmd->orig_fe_lun, 0x29, 3278 ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED); 3279 } 3280 break; 3281 case TMR_TARGET_WARM_RESET: 3282 tmr->response = TMR_FUNCTION_REJECTED; 3283 break; 3284 case TMR_TARGET_COLD_RESET: 3285 tmr->response = TMR_FUNCTION_REJECTED; 3286 break; 3287 default: 3288 pr_err("Unknown TMR function: 0x%02x.\n", 3289 tmr->function); 3290 tmr->response = TMR_FUNCTION_REJECTED; 3291 break; 3292 } 3293 3294 if (cmd->transport_state & CMD_T_ABORTED) 3295 goto aborted; 3296 3297 cmd->se_tfo->queue_tm_rsp(cmd); 3298 3299 transport_cmd_check_stop_to_fabric(cmd); 3300 return; 3301 3302 aborted: 3303 target_handle_abort(cmd); 3304 } 3305 3306 int transport_generic_handle_tmr( 3307 struct se_cmd *cmd) 3308 { 3309 unsigned long flags; 3310 bool aborted = false; 3311 3312 spin_lock_irqsave(&cmd->t_state_lock, flags); 3313 if (cmd->transport_state & CMD_T_ABORTED) { 3314 aborted = true; 3315 } else { 3316 cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 3317 cmd->transport_state |= CMD_T_ACTIVE; 3318 } 3319 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3320 3321 if (aborted) { 3322 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n", 3323 cmd->se_tmr_req->function, 3324 cmd->se_tmr_req->ref_task_tag, cmd->tag); 3325 target_handle_abort(cmd); 3326 return 0; 3327 } 3328 3329 INIT_WORK(&cmd->work, target_tmr_work); 3330 schedule_work(&cmd->work); 3331 return 0; 3332 } 3333 EXPORT_SYMBOL(transport_generic_handle_tmr); 3334 3335 bool 3336 target_check_wce(struct se_device *dev) 3337 { 3338 bool wce = false; 3339 3340 if (dev->transport->get_write_cache) 3341 wce = dev->transport->get_write_cache(dev); 3342 else if (dev->dev_attrib.emulate_write_cache > 0) 3343 wce = true; 3344 3345 return wce; 3346 } 3347 3348 bool 3349 target_check_fua(struct se_device *dev) 3350 { 3351 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0; 3352 } 3353
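/*
 * Illustrative sketch of how a fabric driver typically drives the exported
 * submission interfaces above for a received SCSI command and for an
 * ABORT_TASK TMR. Names such as my_cmd, my_tmr, my_sess, sense_buf, cdb and
 * ref_tag are hypothetical placeholders, and return-value handling is
 * omitted for brevity; real drivers must check the return codes.
 *
 *	target_submit_cmd(&my_cmd->se_cmd, my_sess, cdb, sense_buf,
 *			  unpacked_lun, data_length, TCM_SIMPLE_TAG,
 *			  DMA_FROM_DEVICE, TARGET_SCF_ACK_KREF);
 *
 *	target_submit_tmr(&my_tmr->se_cmd, my_sess, NULL, 0, my_tmr,
 *			  TMR_ABORT_TASK, GFP_KERNEL, ref_tag,
 *			  TARGET_SCF_ACK_KREF | TARGET_SCF_LOOKUP_LUN_FROM_TAG);
 *
 * Responses come back through the fabric's ->queue_data_in(),
 * ->queue_status() or ->queue_tm_rsp() callbacks, after which the fabric
 * drops its reference(s) with target_put_sess_cmd() or
 * transport_generic_free_cmd().
 */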