1 /******************************************************************************* 2 * Filename: target_core_transport.c 3 * 4 * This file contains the Generic Target Engine Core. 5 * 6 * (c) Copyright 2002-2013 Datera, Inc. 7 * 8 * Nicholas A. Bellinger <nab@kernel.org> 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2 of the License, or 13 * (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; if not, write to the Free Software 22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 23 * 24 ******************************************************************************/ 25 26 #include <linux/net.h> 27 #include <linux/delay.h> 28 #include <linux/string.h> 29 #include <linux/timer.h> 30 #include <linux/slab.h> 31 #include <linux/spinlock.h> 32 #include <linux/kthread.h> 33 #include <linux/in.h> 34 #include <linux/cdrom.h> 35 #include <linux/module.h> 36 #include <linux/ratelimit.h> 37 #include <linux/vmalloc.h> 38 #include <asm/unaligned.h> 39 #include <net/sock.h> 40 #include <net/tcp.h> 41 #include <scsi/scsi_proto.h> 42 #include <scsi/scsi_common.h> 43 44 #include <target/target_core_base.h> 45 #include <target/target_core_backend.h> 46 #include <target/target_core_fabric.h> 47 48 #include "target_core_internal.h" 49 #include "target_core_alua.h" 50 #include "target_core_pr.h" 51 #include "target_core_ua.h" 52 53 #define CREATE_TRACE_POINTS 54 #include <trace/events/target.h> 55 56 static struct workqueue_struct *target_completion_wq; 57 static struct kmem_cache *se_sess_cache; 58 struct kmem_cache *se_ua_cache; 59 struct kmem_cache *t10_pr_reg_cache; 60 struct kmem_cache *t10_alua_lu_gp_cache; 61 struct kmem_cache *t10_alua_lu_gp_mem_cache; 62 struct kmem_cache *t10_alua_tg_pt_gp_cache; 63 struct kmem_cache *t10_alua_lba_map_cache; 64 struct kmem_cache *t10_alua_lba_map_mem_cache; 65 66 static void transport_complete_task_attr(struct se_cmd *cmd); 67 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason); 68 static void transport_handle_queue_full(struct se_cmd *cmd, 69 struct se_device *dev, int err, bool write_pending); 70 static void target_complete_ok_work(struct work_struct *work); 71 72 int init_se_kmem_caches(void) 73 { 74 se_sess_cache = kmem_cache_create("se_sess_cache", 75 sizeof(struct se_session), __alignof__(struct se_session), 76 0, NULL); 77 if (!se_sess_cache) { 78 pr_err("kmem_cache_create() for struct se_session" 79 " failed\n"); 80 goto out; 81 } 82 se_ua_cache = kmem_cache_create("se_ua_cache", 83 sizeof(struct se_ua), __alignof__(struct se_ua), 84 0, NULL); 85 if (!se_ua_cache) { 86 pr_err("kmem_cache_create() for struct se_ua failed\n"); 87 goto out_free_sess_cache; 88 } 89 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", 90 sizeof(struct t10_pr_registration), 91 __alignof__(struct t10_pr_registration), 0, NULL); 92 if (!t10_pr_reg_cache) { 93 pr_err("kmem_cache_create() for struct t10_pr_registration" 94 " failed\n"); 95 goto out_free_ua_cache; 96 } 97 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", 98 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), 99 0, NULL); 100 if (!t10_alua_lu_gp_cache) { 101 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" 102 " failed\n"); 103 goto out_free_pr_reg_cache; 104 } 105 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", 106 sizeof(struct t10_alua_lu_gp_member), 107 __alignof__(struct t10_alua_lu_gp_member), 0, NULL); 108 if (!t10_alua_lu_gp_mem_cache) { 109 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" 110 "cache failed\n"); 111 goto out_free_lu_gp_cache; 112 } 113 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", 114 sizeof(struct t10_alua_tg_pt_gp), 115 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); 116 if (!t10_alua_tg_pt_gp_cache) { 117 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 118 "cache failed\n"); 119 goto out_free_lu_gp_mem_cache; 120 } 121 t10_alua_lba_map_cache = kmem_cache_create( 122 "t10_alua_lba_map_cache", 123 sizeof(struct t10_alua_lba_map), 124 __alignof__(struct t10_alua_lba_map), 0, NULL); 125 if (!t10_alua_lba_map_cache) { 126 pr_err("kmem_cache_create() for t10_alua_lba_map_" 127 "cache failed\n"); 128 goto out_free_tg_pt_gp_cache; 129 } 130 t10_alua_lba_map_mem_cache = kmem_cache_create( 131 "t10_alua_lba_map_mem_cache", 132 sizeof(struct t10_alua_lba_map_member), 133 __alignof__(struct t10_alua_lba_map_member), 0, NULL); 134 if (!t10_alua_lba_map_mem_cache) { 135 pr_err("kmem_cache_create() for t10_alua_lba_map_mem_" 136 "cache failed\n"); 137 goto out_free_lba_map_cache; 138 } 139 140 target_completion_wq = alloc_workqueue("target_completion", 141 WQ_MEM_RECLAIM, 0); 142 if (!target_completion_wq) 143 goto out_free_lba_map_mem_cache; 144 145 return 0; 146 147 out_free_lba_map_mem_cache: 148 kmem_cache_destroy(t10_alua_lba_map_mem_cache); 149 out_free_lba_map_cache: 150 kmem_cache_destroy(t10_alua_lba_map_cache); 151 out_free_tg_pt_gp_cache: 152 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 153 out_free_lu_gp_mem_cache: 154 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 155 out_free_lu_gp_cache: 156 kmem_cache_destroy(t10_alua_lu_gp_cache); 157 out_free_pr_reg_cache: 158 kmem_cache_destroy(t10_pr_reg_cache); 159 out_free_ua_cache: 160 kmem_cache_destroy(se_ua_cache); 161 out_free_sess_cache: 162 kmem_cache_destroy(se_sess_cache); 163 out: 164 return -ENOMEM; 165 } 166 167 void release_se_kmem_caches(void) 168 { 169 destroy_workqueue(target_completion_wq); 170 kmem_cache_destroy(se_sess_cache); 171 kmem_cache_destroy(se_ua_cache); 172 kmem_cache_destroy(t10_pr_reg_cache); 173 kmem_cache_destroy(t10_alua_lu_gp_cache); 174 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 175 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 176 kmem_cache_destroy(t10_alua_lba_map_cache); 177 kmem_cache_destroy(t10_alua_lba_map_mem_cache); 178 } 179 180 /* This code ensures unique mib indexes are handed out. */ 181 static DEFINE_SPINLOCK(scsi_mib_index_lock); 182 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; 183 184 /* 185 * Allocate a new row index for the entry type specified 186 */ 187 u32 scsi_get_new_index(scsi_index_t type) 188 { 189 u32 new_index; 190 191 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); 192 193 spin_lock(&scsi_mib_index_lock); 194 new_index = ++scsi_mib_index[type]; 195 spin_unlock(&scsi_mib_index_lock); 196 197 return new_index; 198 } 199 200 void transport_subsystem_check_init(void) 201 { 202 int ret; 203 static int sub_api_initialized; 204 205 if (sub_api_initialized) 206 return; 207 208 ret = request_module("target_core_iblock"); 209 if (ret != 0) 210 pr_err("Unable to load target_core_iblock\n"); 211 212 ret = request_module("target_core_file"); 213 if (ret != 0) 214 pr_err("Unable to load target_core_file\n"); 215 216 ret = request_module("target_core_pscsi"); 217 if (ret != 0) 218 pr_err("Unable to load target_core_pscsi\n"); 219 220 ret = request_module("target_core_user"); 221 if (ret != 0) 222 pr_err("Unable to load target_core_user\n"); 223 224 sub_api_initialized = 1; 225 } 226 227 /** 228 * transport_init_session - initialize a session object 229 * @se_sess: Session object pointer. 230 * 231 * The caller must have zero-initialized @se_sess before calling this function. 232 */ 233 void transport_init_session(struct se_session *se_sess) 234 { 235 INIT_LIST_HEAD(&se_sess->sess_list); 236 INIT_LIST_HEAD(&se_sess->sess_acl_list); 237 INIT_LIST_HEAD(&se_sess->sess_cmd_list); 238 spin_lock_init(&se_sess->sess_cmd_lock); 239 init_waitqueue_head(&se_sess->cmd_list_wq); 240 } 241 EXPORT_SYMBOL(transport_init_session); 242 243 /** 244 * transport_alloc_session - allocate a session object and initialize it 245 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported. 246 */ 247 struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops) 248 { 249 struct se_session *se_sess; 250 251 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 252 if (!se_sess) { 253 pr_err("Unable to allocate struct se_session from" 254 " se_sess_cache\n"); 255 return ERR_PTR(-ENOMEM); 256 } 257 transport_init_session(se_sess); 258 se_sess->sup_prot_ops = sup_prot_ops; 259 260 return se_sess; 261 } 262 EXPORT_SYMBOL(transport_alloc_session); 263 264 /** 265 * transport_alloc_session_tags - allocate target driver private data 266 * @se_sess: Session pointer. 267 * @tag_num: Maximum number of in-flight commands between initiator and target. 268 * @tag_size: Size in bytes of the private data a target driver associates with 269 * each command. 270 */ 271 int transport_alloc_session_tags(struct se_session *se_sess, 272 unsigned int tag_num, unsigned int tag_size) 273 { 274 int rc; 275 276 se_sess->sess_cmd_map = kcalloc(tag_size, tag_num, 277 GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL); 278 if (!se_sess->sess_cmd_map) { 279 se_sess->sess_cmd_map = vzalloc(array_size(tag_size, tag_num)); 280 if (!se_sess->sess_cmd_map) { 281 pr_err("Unable to allocate se_sess->sess_cmd_map\n"); 282 return -ENOMEM; 283 } 284 } 285 286 rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1, 287 false, GFP_KERNEL, NUMA_NO_NODE); 288 if (rc < 0) { 289 pr_err("Unable to init se_sess->sess_tag_pool," 290 " tag_num: %u\n", tag_num); 291 kvfree(se_sess->sess_cmd_map); 292 se_sess->sess_cmd_map = NULL; 293 return -ENOMEM; 294 } 295 296 return 0; 297 } 298 EXPORT_SYMBOL(transport_alloc_session_tags); 299 300 /** 301 * transport_init_session_tags - allocate a session and target driver private data 302 * @tag_num: Maximum number of in-flight commands between initiator and target. 303 * @tag_size: Size in bytes of the private data a target driver associates with 304 * each command. 305 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported. 306 */ 307 static struct se_session * 308 transport_init_session_tags(unsigned int tag_num, unsigned int tag_size, 309 enum target_prot_op sup_prot_ops) 310 { 311 struct se_session *se_sess; 312 int rc; 313 314 if (tag_num != 0 && !tag_size) { 315 pr_err("init_session_tags called with percpu-ida tag_num:" 316 " %u, but zero tag_size\n", tag_num); 317 return ERR_PTR(-EINVAL); 318 } 319 if (!tag_num && tag_size) { 320 pr_err("init_session_tags called with percpu-ida tag_size:" 321 " %u, but zero tag_num\n", tag_size); 322 return ERR_PTR(-EINVAL); 323 } 324 325 se_sess = transport_alloc_session(sup_prot_ops); 326 if (IS_ERR(se_sess)) 327 return se_sess; 328 329 rc = transport_alloc_session_tags(se_sess, tag_num, tag_size); 330 if (rc < 0) { 331 transport_free_session(se_sess); 332 return ERR_PTR(-ENOMEM); 333 } 334 335 return se_sess; 336 } 337 338 /* 339 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called. 340 */ 341 void __transport_register_session( 342 struct se_portal_group *se_tpg, 343 struct se_node_acl *se_nacl, 344 struct se_session *se_sess, 345 void *fabric_sess_ptr) 346 { 347 const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo; 348 unsigned char buf[PR_REG_ISID_LEN]; 349 unsigned long flags; 350 351 se_sess->se_tpg = se_tpg; 352 se_sess->fabric_sess_ptr = fabric_sess_ptr; 353 /* 354 * Used by struct se_node_acl's under ConfigFS to locate active se_session-t 355 * 356 * Only set for struct se_session's that will actually be moving I/O. 357 * eg: *NOT* discovery sessions. 358 */ 359 if (se_nacl) { 360 /* 361 * 362 * Determine if fabric allows for T10-PI feature bits exposed to 363 * initiators for device backends with !dev->dev_attrib.pi_prot_type. 364 * 365 * If so, then always save prot_type on a per se_node_acl node 366 * basis and re-instate the previous sess_prot_type to avoid 367 * disabling PI from below any previously initiator side 368 * registered LUNs. 369 */ 370 if (se_nacl->saved_prot_type) 371 se_sess->sess_prot_type = se_nacl->saved_prot_type; 372 else if (tfo->tpg_check_prot_fabric_only) 373 se_sess->sess_prot_type = se_nacl->saved_prot_type = 374 tfo->tpg_check_prot_fabric_only(se_tpg); 375 /* 376 * If the fabric module supports an ISID based TransportID, 377 * save this value in binary from the fabric I_T Nexus now. 378 */ 379 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { 380 memset(&buf[0], 0, PR_REG_ISID_LEN); 381 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, 382 &buf[0], PR_REG_ISID_LEN); 383 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); 384 } 385 386 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 387 /* 388 * The se_nacl->nacl_sess pointer will be set to the 389 * last active I_T Nexus for each struct se_node_acl. 390 */ 391 se_nacl->nacl_sess = se_sess; 392 393 list_add_tail(&se_sess->sess_acl_list, 394 &se_nacl->acl_sess_list); 395 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 396 } 397 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 398 399 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 400 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); 401 } 402 EXPORT_SYMBOL(__transport_register_session); 403 404 void transport_register_session( 405 struct se_portal_group *se_tpg, 406 struct se_node_acl *se_nacl, 407 struct se_session *se_sess, 408 void *fabric_sess_ptr) 409 { 410 unsigned long flags; 411 412 spin_lock_irqsave(&se_tpg->session_lock, flags); 413 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); 414 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 415 } 416 EXPORT_SYMBOL(transport_register_session); 417 418 struct se_session * 419 target_setup_session(struct se_portal_group *tpg, 420 unsigned int tag_num, unsigned int tag_size, 421 enum target_prot_op prot_op, 422 const char *initiatorname, void *private, 423 int (*callback)(struct se_portal_group *, 424 struct se_session *, void *)) 425 { 426 struct se_session *sess; 427 428 /* 429 * If the fabric driver is using percpu-ida based pre allocation 430 * of I/O descriptor tags, go ahead and perform that setup now.. 431 */ 432 if (tag_num != 0) 433 sess = transport_init_session_tags(tag_num, tag_size, prot_op); 434 else 435 sess = transport_alloc_session(prot_op); 436 437 if (IS_ERR(sess)) 438 return sess; 439 440 sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg, 441 (unsigned char *)initiatorname); 442 if (!sess->se_node_acl) { 443 transport_free_session(sess); 444 return ERR_PTR(-EACCES); 445 } 446 /* 447 * Go ahead and perform any remaining fabric setup that is 448 * required before transport_register_session(). 449 */ 450 if (callback != NULL) { 451 int rc = callback(tpg, sess, private); 452 if (rc) { 453 transport_free_session(sess); 454 return ERR_PTR(rc); 455 } 456 } 457 458 transport_register_session(tpg, sess->se_node_acl, sess, private); 459 return sess; 460 } 461 EXPORT_SYMBOL(target_setup_session); 462 463 ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) 464 { 465 struct se_session *se_sess; 466 ssize_t len = 0; 467 468 spin_lock_bh(&se_tpg->session_lock); 469 list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { 470 if (!se_sess->se_node_acl) 471 continue; 472 if (!se_sess->se_node_acl->dynamic_node_acl) 473 continue; 474 if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE) 475 break; 476 477 len += snprintf(page + len, PAGE_SIZE - len, "%s\n", 478 se_sess->se_node_acl->initiatorname); 479 len += 1; /* Include NULL terminator */ 480 } 481 spin_unlock_bh(&se_tpg->session_lock); 482 483 return len; 484 } 485 EXPORT_SYMBOL(target_show_dynamic_sessions); 486 487 static void target_complete_nacl(struct kref *kref) 488 { 489 struct se_node_acl *nacl = container_of(kref, 490 struct se_node_acl, acl_kref); 491 struct se_portal_group *se_tpg = nacl->se_tpg; 492 493 if (!nacl->dynamic_stop) { 494 complete(&nacl->acl_free_comp); 495 return; 496 } 497 498 mutex_lock(&se_tpg->acl_node_mutex); 499 list_del_init(&nacl->acl_list); 500 mutex_unlock(&se_tpg->acl_node_mutex); 501 502 core_tpg_wait_for_nacl_pr_ref(nacl); 503 core_free_device_list_for_node(nacl, se_tpg); 504 kfree(nacl); 505 } 506 507 void target_put_nacl(struct se_node_acl *nacl) 508 { 509 kref_put(&nacl->acl_kref, target_complete_nacl); 510 } 511 EXPORT_SYMBOL(target_put_nacl); 512 513 void transport_deregister_session_configfs(struct se_session *se_sess) 514 { 515 struct se_node_acl *se_nacl; 516 unsigned long flags; 517 /* 518 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 519 */ 520 se_nacl = se_sess->se_node_acl; 521 if (se_nacl) { 522 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 523 if (!list_empty(&se_sess->sess_acl_list)) 524 list_del_init(&se_sess->sess_acl_list); 525 /* 526 * If the session list is empty, then clear the pointer. 527 * Otherwise, set the struct se_session pointer from the tail 528 * element of the per struct se_node_acl active session list. 529 */ 530 if (list_empty(&se_nacl->acl_sess_list)) 531 se_nacl->nacl_sess = NULL; 532 else { 533 se_nacl->nacl_sess = container_of( 534 se_nacl->acl_sess_list.prev, 535 struct se_session, sess_acl_list); 536 } 537 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 538 } 539 } 540 EXPORT_SYMBOL(transport_deregister_session_configfs); 541 542 void transport_free_session(struct se_session *se_sess) 543 { 544 struct se_node_acl *se_nacl = se_sess->se_node_acl; 545 546 /* 547 * Drop the se_node_acl->nacl_kref obtained from within 548 * core_tpg_get_initiator_node_acl(). 549 */ 550 if (se_nacl) { 551 struct se_portal_group *se_tpg = se_nacl->se_tpg; 552 const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo; 553 unsigned long flags; 554 555 se_sess->se_node_acl = NULL; 556 557 /* 558 * Also determine if we need to drop the extra ->cmd_kref if 559 * it had been previously dynamically generated, and 560 * the endpoint is not caching dynamic ACLs. 561 */ 562 mutex_lock(&se_tpg->acl_node_mutex); 563 if (se_nacl->dynamic_node_acl && 564 !se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 565 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 566 if (list_empty(&se_nacl->acl_sess_list)) 567 se_nacl->dynamic_stop = true; 568 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 569 570 if (se_nacl->dynamic_stop) 571 list_del_init(&se_nacl->acl_list); 572 } 573 mutex_unlock(&se_tpg->acl_node_mutex); 574 575 if (se_nacl->dynamic_stop) 576 target_put_nacl(se_nacl); 577 578 target_put_nacl(se_nacl); 579 } 580 if (se_sess->sess_cmd_map) { 581 sbitmap_queue_free(&se_sess->sess_tag_pool); 582 kvfree(se_sess->sess_cmd_map); 583 } 584 kmem_cache_free(se_sess_cache, se_sess); 585 } 586 EXPORT_SYMBOL(transport_free_session); 587 588 void transport_deregister_session(struct se_session *se_sess) 589 { 590 struct se_portal_group *se_tpg = se_sess->se_tpg; 591 unsigned long flags; 592 593 if (!se_tpg) { 594 transport_free_session(se_sess); 595 return; 596 } 597 598 spin_lock_irqsave(&se_tpg->session_lock, flags); 599 list_del(&se_sess->sess_list); 600 se_sess->se_tpg = NULL; 601 se_sess->fabric_sess_ptr = NULL; 602 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 603 604 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 605 se_tpg->se_tpg_tfo->get_fabric_name()); 606 /* 607 * If last kref is dropping now for an explicit NodeACL, awake sleeping 608 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 609 * removal context from within transport_free_session() code. 610 * 611 * For dynamic ACL, target_put_nacl() uses target_complete_nacl() 612 * to release all remaining generate_node_acl=1 created ACL resources. 613 */ 614 615 transport_free_session(se_sess); 616 } 617 EXPORT_SYMBOL(transport_deregister_session); 618 619 void target_remove_session(struct se_session *se_sess) 620 { 621 transport_deregister_session_configfs(se_sess); 622 transport_deregister_session(se_sess); 623 } 624 EXPORT_SYMBOL(target_remove_session); 625 626 static void target_remove_from_state_list(struct se_cmd *cmd) 627 { 628 struct se_device *dev = cmd->se_dev; 629 unsigned long flags; 630 631 if (!dev) 632 return; 633 634 spin_lock_irqsave(&dev->execute_task_lock, flags); 635 if (cmd->state_active) { 636 list_del(&cmd->state_list); 637 cmd->state_active = false; 638 } 639 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 640 } 641 642 /* 643 * This function is called by the target core after the target core has 644 * finished processing a SCSI command or SCSI TMF. Both the regular command 645 * processing code and the code for aborting commands can call this 646 * function. CMD_T_STOP is set if and only if another thread is waiting 647 * inside transport_wait_for_tasks() for t_transport_stop_comp. 648 */ 649 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) 650 { 651 unsigned long flags; 652 653 target_remove_from_state_list(cmd); 654 655 /* 656 * Clear struct se_cmd->se_lun before the handoff to FE. 657 */ 658 cmd->se_lun = NULL; 659 660 spin_lock_irqsave(&cmd->t_state_lock, flags); 661 /* 662 * Determine if frontend context caller is requesting the stopping of 663 * this command for frontend exceptions. 664 */ 665 if (cmd->transport_state & CMD_T_STOP) { 666 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 667 __func__, __LINE__, cmd->tag); 668 669 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 670 671 complete_all(&cmd->t_transport_stop_comp); 672 return 1; 673 } 674 cmd->transport_state &= ~CMD_T_ACTIVE; 675 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 676 677 /* 678 * Some fabric modules like tcm_loop can release their internally 679 * allocated I/O reference and struct se_cmd now. 680 * 681 * Fabric modules are expected to return '1' here if the se_cmd being 682 * passed is released at this point, or zero if not being released. 683 */ 684 return cmd->se_tfo->check_stop_free(cmd); 685 } 686 687 static void transport_lun_remove_cmd(struct se_cmd *cmd) 688 { 689 struct se_lun *lun = cmd->se_lun; 690 691 if (!lun) 692 return; 693 694 if (cmpxchg(&cmd->lun_ref_active, true, false)) 695 percpu_ref_put(&lun->lun_ref); 696 } 697 698 int transport_cmd_finish_abort(struct se_cmd *cmd) 699 { 700 bool send_tas = cmd->transport_state & CMD_T_TAS; 701 bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); 702 int ret = 0; 703 704 if (send_tas) 705 transport_send_task_abort(cmd); 706 707 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) 708 transport_lun_remove_cmd(cmd); 709 /* 710 * Allow the fabric driver to unmap any resources before 711 * releasing the descriptor via TFO->release_cmd() 712 */ 713 if (!send_tas) 714 cmd->se_tfo->aborted_task(cmd); 715 716 if (transport_cmd_check_stop_to_fabric(cmd)) 717 return 1; 718 if (!send_tas && ack_kref) 719 ret = target_put_sess_cmd(cmd); 720 721 return ret; 722 } 723 724 static void target_complete_failure_work(struct work_struct *work) 725 { 726 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 727 728 transport_generic_request_failure(cmd, 729 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 730 } 731 732 /* 733 * Used when asking transport to copy Sense Data from the underlying 734 * Linux/SCSI struct scsi_cmnd 735 */ 736 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) 737 { 738 struct se_device *dev = cmd->se_dev; 739 740 WARN_ON(!cmd->se_lun); 741 742 if (!dev) 743 return NULL; 744 745 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) 746 return NULL; 747 748 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 749 750 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", 751 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); 752 return cmd->sense_buffer; 753 } 754 755 void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense) 756 { 757 unsigned char *cmd_sense_buf; 758 unsigned long flags; 759 760 spin_lock_irqsave(&cmd->t_state_lock, flags); 761 cmd_sense_buf = transport_get_sense_buffer(cmd); 762 if (!cmd_sense_buf) { 763 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 764 return; 765 } 766 767 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; 768 memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length); 769 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 770 } 771 EXPORT_SYMBOL(transport_copy_sense_to_cmd); 772 773 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 774 { 775 struct se_device *dev = cmd->se_dev; 776 int success; 777 unsigned long flags; 778 779 cmd->scsi_status = scsi_status; 780 781 spin_lock_irqsave(&cmd->t_state_lock, flags); 782 switch (cmd->scsi_status) { 783 case SAM_STAT_CHECK_CONDITION: 784 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 785 success = 1; 786 else 787 success = 0; 788 break; 789 default: 790 success = 1; 791 break; 792 } 793 794 /* 795 * Check for case where an explicit ABORT_TASK has been received 796 * and transport_wait_for_tasks() will be waiting for completion.. 797 */ 798 if (cmd->transport_state & CMD_T_ABORTED || 799 cmd->transport_state & CMD_T_STOP) { 800 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 801 /* 802 * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(), 803 * release se_device->caw_sem obtained by sbc_compare_and_write() 804 * since target_complete_ok_work() or target_complete_failure_work() 805 * won't be called to invoke the normal CAW completion callbacks. 806 */ 807 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { 808 up(&dev->caw_sem); 809 } 810 complete_all(&cmd->t_transport_stop_comp); 811 return; 812 } else if (!success) { 813 INIT_WORK(&cmd->work, target_complete_failure_work); 814 } else { 815 INIT_WORK(&cmd->work, target_complete_ok_work); 816 } 817 818 cmd->t_state = TRANSPORT_COMPLETE; 819 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 820 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 821 822 if (cmd->se_cmd_flags & SCF_USE_CPUID) 823 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); 824 else 825 queue_work(target_completion_wq, &cmd->work); 826 } 827 EXPORT_SYMBOL(target_complete_cmd); 828 829 void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) 830 { 831 if ((scsi_status == SAM_STAT_GOOD || 832 cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && 833 length < cmd->data_length) { 834 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 835 cmd->residual_count += cmd->data_length - length; 836 } else { 837 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 838 cmd->residual_count = cmd->data_length - length; 839 } 840 841 cmd->data_length = length; 842 } 843 844 target_complete_cmd(cmd, scsi_status); 845 } 846 EXPORT_SYMBOL(target_complete_cmd_with_length); 847 848 static void target_add_to_state_list(struct se_cmd *cmd) 849 { 850 struct se_device *dev = cmd->se_dev; 851 unsigned long flags; 852 853 spin_lock_irqsave(&dev->execute_task_lock, flags); 854 if (!cmd->state_active) { 855 list_add_tail(&cmd->state_list, &dev->state_list); 856 cmd->state_active = true; 857 } 858 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 859 } 860 861 /* 862 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status 863 */ 864 static void transport_write_pending_qf(struct se_cmd *cmd); 865 static void transport_complete_qf(struct se_cmd *cmd); 866 867 void target_qf_do_work(struct work_struct *work) 868 { 869 struct se_device *dev = container_of(work, struct se_device, 870 qf_work_queue); 871 LIST_HEAD(qf_cmd_list); 872 struct se_cmd *cmd, *cmd_tmp; 873 874 spin_lock_irq(&dev->qf_cmd_lock); 875 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); 876 spin_unlock_irq(&dev->qf_cmd_lock); 877 878 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 879 list_del(&cmd->se_qf_node); 880 atomic_dec_mb(&dev->dev_qf_count); 881 882 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 883 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 884 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : 885 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" 886 : "UNKNOWN"); 887 888 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 889 transport_write_pending_qf(cmd); 890 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK || 891 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) 892 transport_complete_qf(cmd); 893 } 894 } 895 896 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 897 { 898 switch (cmd->data_direction) { 899 case DMA_NONE: 900 return "NONE"; 901 case DMA_FROM_DEVICE: 902 return "READ"; 903 case DMA_TO_DEVICE: 904 return "WRITE"; 905 case DMA_BIDIRECTIONAL: 906 return "BIDI"; 907 default: 908 break; 909 } 910 911 return "UNKNOWN"; 912 } 913 914 void transport_dump_dev_state( 915 struct se_device *dev, 916 char *b, 917 int *bl) 918 { 919 *bl += sprintf(b + *bl, "Status: "); 920 if (dev->export_count) 921 *bl += sprintf(b + *bl, "ACTIVATED"); 922 else 923 *bl += sprintf(b + *bl, "DEACTIVATED"); 924 925 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); 926 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", 927 dev->dev_attrib.block_size, 928 dev->dev_attrib.hw_max_sectors); 929 *bl += sprintf(b + *bl, " "); 930 } 931 932 void transport_dump_vpd_proto_id( 933 struct t10_vpd *vpd, 934 unsigned char *p_buf, 935 int p_buf_len) 936 { 937 unsigned char buf[VPD_TMP_BUF_SIZE]; 938 int len; 939 940 memset(buf, 0, VPD_TMP_BUF_SIZE); 941 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 942 943 switch (vpd->protocol_identifier) { 944 case 0x00: 945 sprintf(buf+len, "Fibre Channel\n"); 946 break; 947 case 0x10: 948 sprintf(buf+len, "Parallel SCSI\n"); 949 break; 950 case 0x20: 951 sprintf(buf+len, "SSA\n"); 952 break; 953 case 0x30: 954 sprintf(buf+len, "IEEE 1394\n"); 955 break; 956 case 0x40: 957 sprintf(buf+len, "SCSI Remote Direct Memory Access" 958 " Protocol\n"); 959 break; 960 case 0x50: 961 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 962 break; 963 case 0x60: 964 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 965 break; 966 case 0x70: 967 sprintf(buf+len, "Automation/Drive Interface Transport" 968 " Protocol\n"); 969 break; 970 case 0x80: 971 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 972 break; 973 default: 974 sprintf(buf+len, "Unknown 0x%02x\n", 975 vpd->protocol_identifier); 976 break; 977 } 978 979 if (p_buf) 980 strncpy(p_buf, buf, p_buf_len); 981 else 982 pr_debug("%s", buf); 983 } 984 985 void 986 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 987 { 988 /* 989 * Check if the Protocol Identifier Valid (PIV) bit is set.. 990 * 991 * from spc3r23.pdf section 7.5.1 992 */ 993 if (page_83[1] & 0x80) { 994 vpd->protocol_identifier = (page_83[0] & 0xf0); 995 vpd->protocol_identifier_set = 1; 996 transport_dump_vpd_proto_id(vpd, NULL, 0); 997 } 998 } 999 EXPORT_SYMBOL(transport_set_vpd_proto_id); 1000 1001 int transport_dump_vpd_assoc( 1002 struct t10_vpd *vpd, 1003 unsigned char *p_buf, 1004 int p_buf_len) 1005 { 1006 unsigned char buf[VPD_TMP_BUF_SIZE]; 1007 int ret = 0; 1008 int len; 1009 1010 memset(buf, 0, VPD_TMP_BUF_SIZE); 1011 len = sprintf(buf, "T10 VPD Identifier Association: "); 1012 1013 switch (vpd->association) { 1014 case 0x00: 1015 sprintf(buf+len, "addressed logical unit\n"); 1016 break; 1017 case 0x10: 1018 sprintf(buf+len, "target port\n"); 1019 break; 1020 case 0x20: 1021 sprintf(buf+len, "SCSI target device\n"); 1022 break; 1023 default: 1024 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); 1025 ret = -EINVAL; 1026 break; 1027 } 1028 1029 if (p_buf) 1030 strncpy(p_buf, buf, p_buf_len); 1031 else 1032 pr_debug("%s", buf); 1033 1034 return ret; 1035 } 1036 1037 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) 1038 { 1039 /* 1040 * The VPD identification association.. 1041 * 1042 * from spc3r23.pdf Section 7.6.3.1 Table 297 1043 */ 1044 vpd->association = (page_83[1] & 0x30); 1045 return transport_dump_vpd_assoc(vpd, NULL, 0); 1046 } 1047 EXPORT_SYMBOL(transport_set_vpd_assoc); 1048 1049 int transport_dump_vpd_ident_type( 1050 struct t10_vpd *vpd, 1051 unsigned char *p_buf, 1052 int p_buf_len) 1053 { 1054 unsigned char buf[VPD_TMP_BUF_SIZE]; 1055 int ret = 0; 1056 int len; 1057 1058 memset(buf, 0, VPD_TMP_BUF_SIZE); 1059 len = sprintf(buf, "T10 VPD Identifier Type: "); 1060 1061 switch (vpd->device_identifier_type) { 1062 case 0x00: 1063 sprintf(buf+len, "Vendor specific\n"); 1064 break; 1065 case 0x01: 1066 sprintf(buf+len, "T10 Vendor ID based\n"); 1067 break; 1068 case 0x02: 1069 sprintf(buf+len, "EUI-64 based\n"); 1070 break; 1071 case 0x03: 1072 sprintf(buf+len, "NAA\n"); 1073 break; 1074 case 0x04: 1075 sprintf(buf+len, "Relative target port identifier\n"); 1076 break; 1077 case 0x08: 1078 sprintf(buf+len, "SCSI name string\n"); 1079 break; 1080 default: 1081 sprintf(buf+len, "Unsupported: 0x%02x\n", 1082 vpd->device_identifier_type); 1083 ret = -EINVAL; 1084 break; 1085 } 1086 1087 if (p_buf) { 1088 if (p_buf_len < strlen(buf)+1) 1089 return -EINVAL; 1090 strncpy(p_buf, buf, p_buf_len); 1091 } else { 1092 pr_debug("%s", buf); 1093 } 1094 1095 return ret; 1096 } 1097 1098 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) 1099 { 1100 /* 1101 * The VPD identifier type.. 1102 * 1103 * from spc3r23.pdf Section 7.6.3.1 Table 298 1104 */ 1105 vpd->device_identifier_type = (page_83[1] & 0x0f); 1106 return transport_dump_vpd_ident_type(vpd, NULL, 0); 1107 } 1108 EXPORT_SYMBOL(transport_set_vpd_ident_type); 1109 1110 int transport_dump_vpd_ident( 1111 struct t10_vpd *vpd, 1112 unsigned char *p_buf, 1113 int p_buf_len) 1114 { 1115 unsigned char buf[VPD_TMP_BUF_SIZE]; 1116 int ret = 0; 1117 1118 memset(buf, 0, VPD_TMP_BUF_SIZE); 1119 1120 switch (vpd->device_identifier_code_set) { 1121 case 0x01: /* Binary */ 1122 snprintf(buf, sizeof(buf), 1123 "T10 VPD Binary Device Identifier: %s\n", 1124 &vpd->device_identifier[0]); 1125 break; 1126 case 0x02: /* ASCII */ 1127 snprintf(buf, sizeof(buf), 1128 "T10 VPD ASCII Device Identifier: %s\n", 1129 &vpd->device_identifier[0]); 1130 break; 1131 case 0x03: /* UTF-8 */ 1132 snprintf(buf, sizeof(buf), 1133 "T10 VPD UTF-8 Device Identifier: %s\n", 1134 &vpd->device_identifier[0]); 1135 break; 1136 default: 1137 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" 1138 " 0x%02x", vpd->device_identifier_code_set); 1139 ret = -EINVAL; 1140 break; 1141 } 1142 1143 if (p_buf) 1144 strncpy(p_buf, buf, p_buf_len); 1145 else 1146 pr_debug("%s", buf); 1147 1148 return ret; 1149 } 1150 1151 int 1152 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) 1153 { 1154 static const char hex_str[] = "0123456789abcdef"; 1155 int j = 0, i = 4; /* offset to start of the identifier */ 1156 1157 /* 1158 * The VPD Code Set (encoding) 1159 * 1160 * from spc3r23.pdf Section 7.6.3.1 Table 296 1161 */ 1162 vpd->device_identifier_code_set = (page_83[0] & 0x0f); 1163 switch (vpd->device_identifier_code_set) { 1164 case 0x01: /* Binary */ 1165 vpd->device_identifier[j++] = 1166 hex_str[vpd->device_identifier_type]; 1167 while (i < (4 + page_83[3])) { 1168 vpd->device_identifier[j++] = 1169 hex_str[(page_83[i] & 0xf0) >> 4]; 1170 vpd->device_identifier[j++] = 1171 hex_str[page_83[i] & 0x0f]; 1172 i++; 1173 } 1174 break; 1175 case 0x02: /* ASCII */ 1176 case 0x03: /* UTF-8 */ 1177 while (i < (4 + page_83[3])) 1178 vpd->device_identifier[j++] = page_83[i++]; 1179 break; 1180 default: 1181 break; 1182 } 1183 1184 return transport_dump_vpd_ident(vpd, NULL, 0); 1185 } 1186 EXPORT_SYMBOL(transport_set_vpd_ident); 1187 1188 static sense_reason_t 1189 target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev, 1190 unsigned int size) 1191 { 1192 u32 mtl; 1193 1194 if (!cmd->se_tfo->max_data_sg_nents) 1195 return TCM_NO_SENSE; 1196 /* 1197 * Check if fabric enforced maximum SGL entries per I/O descriptor 1198 * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT + 1199 * residual_count and reduce original cmd->data_length to maximum 1200 * length based on single PAGE_SIZE entry scatter-lists. 1201 */ 1202 mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE); 1203 if (cmd->data_length > mtl) { 1204 /* 1205 * If an existing CDB overflow is present, calculate new residual 1206 * based on CDB size minus fabric maximum transfer length. 1207 * 1208 * If an existing CDB underflow is present, calculate new residual 1209 * based on original cmd->data_length minus fabric maximum transfer 1210 * length. 1211 * 1212 * Otherwise, set the underflow residual based on cmd->data_length 1213 * minus fabric maximum transfer length. 1214 */ 1215 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1216 cmd->residual_count = (size - mtl); 1217 } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 1218 u32 orig_dl = size + cmd->residual_count; 1219 cmd->residual_count = (orig_dl - mtl); 1220 } else { 1221 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1222 cmd->residual_count = (cmd->data_length - mtl); 1223 } 1224 cmd->data_length = mtl; 1225 /* 1226 * Reset sbc_check_prot() calculated protection payload 1227 * length based upon the new smaller MTL. 1228 */ 1229 if (cmd->prot_length) { 1230 u32 sectors = (mtl / dev->dev_attrib.block_size); 1231 cmd->prot_length = dev->prot_length * sectors; 1232 } 1233 } 1234 return TCM_NO_SENSE; 1235 } 1236 1237 sense_reason_t 1238 target_cmd_size_check(struct se_cmd *cmd, unsigned int size) 1239 { 1240 struct se_device *dev = cmd->se_dev; 1241 1242 if (cmd->unknown_data_length) { 1243 cmd->data_length = size; 1244 } else if (size != cmd->data_length) { 1245 pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:" 1246 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 1247 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 1248 cmd->data_length, size, cmd->t_task_cdb[0]); 1249 1250 if (cmd->data_direction == DMA_TO_DEVICE) { 1251 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1252 pr_err_ratelimited("Rejecting underflow/overflow" 1253 " for WRITE data CDB\n"); 1254 return TCM_INVALID_CDB_FIELD; 1255 } 1256 /* 1257 * Some fabric drivers like iscsi-target still expect to 1258 * always reject overflow writes. Reject this case until 1259 * full fabric driver level support for overflow writes 1260 * is introduced tree-wide. 1261 */ 1262 if (size > cmd->data_length) { 1263 pr_err_ratelimited("Rejecting overflow for" 1264 " WRITE control CDB\n"); 1265 return TCM_INVALID_CDB_FIELD; 1266 } 1267 } 1268 /* 1269 * Reject READ_* or WRITE_* with overflow/underflow for 1270 * type SCF_SCSI_DATA_CDB. 1271 */ 1272 if (dev->dev_attrib.block_size != 512) { 1273 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 1274 " CDB on non 512-byte sector setup subsystem" 1275 " plugin: %s\n", dev->transport->name); 1276 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ 1277 return TCM_INVALID_CDB_FIELD; 1278 } 1279 /* 1280 * For the overflow case keep the existing fabric provided 1281 * ->data_length. Otherwise for the underflow case, reset 1282 * ->data_length to the smaller SCSI expected data transfer 1283 * length. 1284 */ 1285 if (size > cmd->data_length) { 1286 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; 1287 cmd->residual_count = (size - cmd->data_length); 1288 } else { 1289 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1290 cmd->residual_count = (cmd->data_length - size); 1291 cmd->data_length = size; 1292 } 1293 } 1294 1295 return target_check_max_data_sg_nents(cmd, dev, size); 1296 1297 } 1298 1299 /* 1300 * Used by fabric modules containing a local struct se_cmd within their 1301 * fabric dependent per I/O descriptor. 1302 * 1303 * Preserves the value of @cmd->tag. 1304 */ 1305 void transport_init_se_cmd( 1306 struct se_cmd *cmd, 1307 const struct target_core_fabric_ops *tfo, 1308 struct se_session *se_sess, 1309 u32 data_length, 1310 int data_direction, 1311 int task_attr, 1312 unsigned char *sense_buffer) 1313 { 1314 INIT_LIST_HEAD(&cmd->se_delayed_node); 1315 INIT_LIST_HEAD(&cmd->se_qf_node); 1316 INIT_LIST_HEAD(&cmd->se_cmd_list); 1317 INIT_LIST_HEAD(&cmd->state_list); 1318 init_completion(&cmd->t_transport_stop_comp); 1319 cmd->compl = NULL; 1320 spin_lock_init(&cmd->t_state_lock); 1321 INIT_WORK(&cmd->work, NULL); 1322 kref_init(&cmd->cmd_kref); 1323 1324 cmd->se_tfo = tfo; 1325 cmd->se_sess = se_sess; 1326 cmd->data_length = data_length; 1327 cmd->data_direction = data_direction; 1328 cmd->sam_task_attr = task_attr; 1329 cmd->sense_buffer = sense_buffer; 1330 1331 cmd->state_active = false; 1332 } 1333 EXPORT_SYMBOL(transport_init_se_cmd); 1334 1335 static sense_reason_t 1336 transport_check_alloc_task_attr(struct se_cmd *cmd) 1337 { 1338 struct se_device *dev = cmd->se_dev; 1339 1340 /* 1341 * Check if SAM Task Attribute emulation is enabled for this 1342 * struct se_device storage object 1343 */ 1344 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1345 return 0; 1346 1347 if (cmd->sam_task_attr == TCM_ACA_TAG) { 1348 pr_debug("SAM Task Attribute ACA" 1349 " emulation is not supported\n"); 1350 return TCM_INVALID_CDB_FIELD; 1351 } 1352 1353 return 0; 1354 } 1355 1356 sense_reason_t 1357 target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) 1358 { 1359 struct se_device *dev = cmd->se_dev; 1360 sense_reason_t ret; 1361 1362 /* 1363 * Ensure that the received CDB is less than the max (252 + 8) bytes 1364 * for VARIABLE_LENGTH_CMD 1365 */ 1366 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { 1367 pr_err("Received SCSI CDB with command_size: %d that" 1368 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1369 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1370 return TCM_INVALID_CDB_FIELD; 1371 } 1372 /* 1373 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, 1374 * allocate the additional extended CDB buffer now.. Otherwise 1375 * setup the pointer from __t_task_cdb to t_task_cdb. 1376 */ 1377 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { 1378 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), 1379 GFP_KERNEL); 1380 if (!cmd->t_task_cdb) { 1381 pr_err("Unable to allocate cmd->t_task_cdb" 1382 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1383 scsi_command_size(cdb), 1384 (unsigned long)sizeof(cmd->__t_task_cdb)); 1385 return TCM_OUT_OF_RESOURCES; 1386 } 1387 } else 1388 cmd->t_task_cdb = &cmd->__t_task_cdb[0]; 1389 /* 1390 * Copy the original CDB into cmd-> 1391 */ 1392 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); 1393 1394 trace_target_sequencer_start(cmd); 1395 1396 ret = dev->transport->parse_cdb(cmd); 1397 if (ret == TCM_UNSUPPORTED_SCSI_OPCODE) 1398 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", 1399 cmd->se_tfo->get_fabric_name(), 1400 cmd->se_sess->se_node_acl->initiatorname, 1401 cmd->t_task_cdb[0]); 1402 if (ret) 1403 return ret; 1404 1405 ret = transport_check_alloc_task_attr(cmd); 1406 if (ret) 1407 return ret; 1408 1409 cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 1410 atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus); 1411 return 0; 1412 } 1413 EXPORT_SYMBOL(target_setup_cmd_from_cdb); 1414 1415 /* 1416 * Used by fabric module frontends to queue tasks directly. 1417 * May only be used from process context. 1418 */ 1419 int transport_handle_cdb_direct( 1420 struct se_cmd *cmd) 1421 { 1422 sense_reason_t ret; 1423 1424 if (!cmd->se_lun) { 1425 dump_stack(); 1426 pr_err("cmd->se_lun is NULL\n"); 1427 return -EINVAL; 1428 } 1429 if (in_interrupt()) { 1430 dump_stack(); 1431 pr_err("transport_generic_handle_cdb cannot be called" 1432 " from interrupt context\n"); 1433 return -EINVAL; 1434 } 1435 /* 1436 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that 1437 * outstanding descriptors are handled correctly during shutdown via 1438 * transport_wait_for_tasks() 1439 * 1440 * Also, we don't take cmd->t_state_lock here as we only expect 1441 * this to be called for initial descriptor submission. 1442 */ 1443 cmd->t_state = TRANSPORT_NEW_CMD; 1444 cmd->transport_state |= CMD_T_ACTIVE; 1445 1446 /* 1447 * transport_generic_new_cmd() is already handling QUEUE_FULL, 1448 * so follow TRANSPORT_NEW_CMD processing thread context usage 1449 * and call transport_generic_request_failure() if necessary.. 1450 */ 1451 ret = transport_generic_new_cmd(cmd); 1452 if (ret) 1453 transport_generic_request_failure(cmd, ret); 1454 return 0; 1455 } 1456 EXPORT_SYMBOL(transport_handle_cdb_direct); 1457 1458 sense_reason_t 1459 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 1460 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1461 { 1462 if (!sgl || !sgl_count) 1463 return 0; 1464 1465 /* 1466 * Reject SCSI data overflow with map_mem_to_cmd() as incoming 1467 * scatterlists already have been set to follow what the fabric 1468 * passes for the original expected data transfer length. 1469 */ 1470 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1471 pr_warn("Rejecting SCSI DATA overflow for fabric using" 1472 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); 1473 return TCM_INVALID_CDB_FIELD; 1474 } 1475 1476 cmd->t_data_sg = sgl; 1477 cmd->t_data_nents = sgl_count; 1478 cmd->t_bidi_data_sg = sgl_bidi; 1479 cmd->t_bidi_data_nents = sgl_bidi_count; 1480 1481 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1482 return 0; 1483 } 1484 1485 /** 1486 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized 1487 * se_cmd + use pre-allocated SGL memory. 1488 * 1489 * @se_cmd: command descriptor to submit 1490 * @se_sess: associated se_sess for endpoint 1491 * @cdb: pointer to SCSI CDB 1492 * @sense: pointer to SCSI sense buffer 1493 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1494 * @data_length: fabric expected data transfer length 1495 * @task_attr: SAM task attribute 1496 * @data_dir: DMA data direction 1497 * @flags: flags for command submission from target_sc_flags_tables 1498 * @sgl: struct scatterlist memory for unidirectional mapping 1499 * @sgl_count: scatterlist count for unidirectional mapping 1500 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping 1501 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping 1502 * @sgl_prot: struct scatterlist memory protection information 1503 * @sgl_prot_count: scatterlist count for protection information 1504 * 1505 * Task tags are supported if the caller has set @se_cmd->tag. 1506 * 1507 * Returns non zero to signal active I/O shutdown failure. All other 1508 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1509 * but still return zero here. 1510 * 1511 * This may only be called from process context, and also currently 1512 * assumes internal allocation of fabric payload buffer by target-core. 1513 */ 1514 int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess, 1515 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, 1516 u32 data_length, int task_attr, int data_dir, int flags, 1517 struct scatterlist *sgl, u32 sgl_count, 1518 struct scatterlist *sgl_bidi, u32 sgl_bidi_count, 1519 struct scatterlist *sgl_prot, u32 sgl_prot_count) 1520 { 1521 struct se_portal_group *se_tpg; 1522 sense_reason_t rc; 1523 int ret; 1524 1525 se_tpg = se_sess->se_tpg; 1526 BUG_ON(!se_tpg); 1527 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); 1528 BUG_ON(in_interrupt()); 1529 /* 1530 * Initialize se_cmd for target operation. From this point 1531 * exceptions are handled by sending exception status via 1532 * target_core_fabric_ops->queue_status() callback 1533 */ 1534 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1535 data_length, data_dir, task_attr, sense); 1536 1537 if (flags & TARGET_SCF_USE_CPUID) 1538 se_cmd->se_cmd_flags |= SCF_USE_CPUID; 1539 else 1540 se_cmd->cpuid = WORK_CPU_UNBOUND; 1541 1542 if (flags & TARGET_SCF_UNKNOWN_SIZE) 1543 se_cmd->unknown_data_length = 1; 1544 /* 1545 * Obtain struct se_cmd->cmd_kref reference and add new cmd to 1546 * se_sess->sess_cmd_list. A second kref_get here is necessary 1547 * for fabrics using TARGET_SCF_ACK_KREF that expect a second 1548 * kref_put() to happen during fabric packet acknowledgement. 1549 */ 1550 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); 1551 if (ret) 1552 return ret; 1553 /* 1554 * Signal bidirectional data payloads to target-core 1555 */ 1556 if (flags & TARGET_SCF_BIDI_OP) 1557 se_cmd->se_cmd_flags |= SCF_BIDI; 1558 /* 1559 * Locate se_lun pointer and attach it to struct se_cmd 1560 */ 1561 rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun); 1562 if (rc) { 1563 transport_send_check_condition_and_sense(se_cmd, rc, 0); 1564 target_put_sess_cmd(se_cmd); 1565 return 0; 1566 } 1567 1568 rc = target_setup_cmd_from_cdb(se_cmd, cdb); 1569 if (rc != 0) { 1570 transport_generic_request_failure(se_cmd, rc); 1571 return 0; 1572 } 1573 1574 /* 1575 * Save pointers for SGLs containing protection information, 1576 * if present. 1577 */ 1578 if (sgl_prot_count) { 1579 se_cmd->t_prot_sg = sgl_prot; 1580 se_cmd->t_prot_nents = sgl_prot_count; 1581 se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC; 1582 } 1583 1584 /* 1585 * When a non zero sgl_count has been passed perform SGL passthrough 1586 * mapping for pre-allocated fabric memory instead of having target 1587 * core perform an internal SGL allocation.. 1588 */ 1589 if (sgl_count != 0) { 1590 BUG_ON(!sgl); 1591 1592 /* 1593 * A work-around for tcm_loop as some userspace code via 1594 * scsi-generic do not memset their associated read buffers, 1595 * so go ahead and do that here for type non-data CDBs. Also 1596 * note that this is currently guaranteed to be a single SGL 1597 * for this case by target core in target_setup_cmd_from_cdb() 1598 * -> transport_generic_cmd_sequencer(). 1599 */ 1600 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && 1601 se_cmd->data_direction == DMA_FROM_DEVICE) { 1602 unsigned char *buf = NULL; 1603 1604 if (sgl) 1605 buf = kmap(sg_page(sgl)) + sgl->offset; 1606 1607 if (buf) { 1608 memset(buf, 0, sgl->length); 1609 kunmap(sg_page(sgl)); 1610 } 1611 } 1612 1613 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count, 1614 sgl_bidi, sgl_bidi_count); 1615 if (rc != 0) { 1616 transport_generic_request_failure(se_cmd, rc); 1617 return 0; 1618 } 1619 } 1620 1621 /* 1622 * Check if we need to delay processing because of ALUA 1623 * Active/NonOptimized primary access state.. 1624 */ 1625 core_alua_check_nonop_delay(se_cmd); 1626 1627 transport_handle_cdb_direct(se_cmd); 1628 return 0; 1629 } 1630 EXPORT_SYMBOL(target_submit_cmd_map_sgls); 1631 1632 /** 1633 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd 1634 * 1635 * @se_cmd: command descriptor to submit 1636 * @se_sess: associated se_sess for endpoint 1637 * @cdb: pointer to SCSI CDB 1638 * @sense: pointer to SCSI sense buffer 1639 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1640 * @data_length: fabric expected data transfer length 1641 * @task_attr: SAM task attribute 1642 * @data_dir: DMA data direction 1643 * @flags: flags for command submission from target_sc_flags_tables 1644 * 1645 * Task tags are supported if the caller has set @se_cmd->tag. 1646 * 1647 * Returns non zero to signal active I/O shutdown failure. All other 1648 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1649 * but still return zero here. 1650 * 1651 * This may only be called from process context, and also currently 1652 * assumes internal allocation of fabric payload buffer by target-core. 1653 * 1654 * It also assumes interal target core SGL memory allocation. 1655 */ 1656 int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, 1657 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, 1658 u32 data_length, int task_attr, int data_dir, int flags) 1659 { 1660 return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, 1661 unpacked_lun, data_length, task_attr, data_dir, 1662 flags, NULL, 0, NULL, 0, NULL, 0); 1663 } 1664 EXPORT_SYMBOL(target_submit_cmd); 1665 1666 static void target_complete_tmr_failure(struct work_struct *work) 1667 { 1668 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); 1669 1670 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; 1671 se_cmd->se_tfo->queue_tm_rsp(se_cmd); 1672 1673 transport_lun_remove_cmd(se_cmd); 1674 transport_cmd_check_stop_to_fabric(se_cmd); 1675 } 1676 1677 static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag, 1678 u64 *unpacked_lun) 1679 { 1680 struct se_cmd *se_cmd; 1681 unsigned long flags; 1682 bool ret = false; 1683 1684 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 1685 list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) { 1686 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 1687 continue; 1688 1689 if (se_cmd->tag == tag) { 1690 *unpacked_lun = se_cmd->orig_fe_lun; 1691 ret = true; 1692 break; 1693 } 1694 } 1695 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 1696 1697 return ret; 1698 } 1699 1700 /** 1701 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd 1702 * for TMR CDBs 1703 * 1704 * @se_cmd: command descriptor to submit 1705 * @se_sess: associated se_sess for endpoint 1706 * @sense: pointer to SCSI sense buffer 1707 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1708 * @fabric_tmr_ptr: fabric context for TMR req 1709 * @tm_type: Type of TM request 1710 * @gfp: gfp type for caller 1711 * @tag: referenced task tag for TMR_ABORT_TASK 1712 * @flags: submit cmd flags 1713 * 1714 * Callable from all contexts. 1715 **/ 1716 1717 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 1718 unsigned char *sense, u64 unpacked_lun, 1719 void *fabric_tmr_ptr, unsigned char tm_type, 1720 gfp_t gfp, u64 tag, int flags) 1721 { 1722 struct se_portal_group *se_tpg; 1723 int ret; 1724 1725 se_tpg = se_sess->se_tpg; 1726 BUG_ON(!se_tpg); 1727 1728 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1729 0, DMA_NONE, TCM_SIMPLE_TAG, sense); 1730 /* 1731 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req 1732 * allocation failure. 1733 */ 1734 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); 1735 if (ret < 0) 1736 return -ENOMEM; 1737 1738 if (tm_type == TMR_ABORT_TASK) 1739 se_cmd->se_tmr_req->ref_task_tag = tag; 1740 1741 /* See target_submit_cmd for commentary */ 1742 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); 1743 if (ret) { 1744 core_tmr_release_req(se_cmd->se_tmr_req); 1745 return ret; 1746 } 1747 /* 1748 * If this is ABORT_TASK with no explicit fabric provided LUN, 1749 * go ahead and search active session tags for a match to figure 1750 * out unpacked_lun for the original se_cmd. 1751 */ 1752 if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) { 1753 if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun)) 1754 goto failure; 1755 } 1756 1757 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); 1758 if (ret) 1759 goto failure; 1760 1761 transport_generic_handle_tmr(se_cmd); 1762 return 0; 1763 1764 /* 1765 * For callback during failure handling, push this work off 1766 * to process context with TMR_LUN_DOES_NOT_EXIST status. 1767 */ 1768 failure: 1769 INIT_WORK(&se_cmd->work, target_complete_tmr_failure); 1770 schedule_work(&se_cmd->work); 1771 return 0; 1772 } 1773 EXPORT_SYMBOL(target_submit_tmr); 1774 1775 /* 1776 * Handle SAM-esque emulation for generic transport request failures. 1777 */ 1778 void transport_generic_request_failure(struct se_cmd *cmd, 1779 sense_reason_t sense_reason) 1780 { 1781 int ret = 0, post_ret = 0; 1782 1783 pr_debug("-----[ Storage Engine Exception; sense_reason %d\n", 1784 sense_reason); 1785 target_show_cmd("-----[ ", cmd); 1786 1787 /* 1788 * For SAM Task Attribute emulation for failed struct se_cmd 1789 */ 1790 transport_complete_task_attr(cmd); 1791 1792 /* 1793 * Handle special case for COMPARE_AND_WRITE failure, where the 1794 * callback is expected to drop the per device ->caw_sem. 1795 */ 1796 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 1797 cmd->transport_complete_callback) 1798 cmd->transport_complete_callback(cmd, false, &post_ret); 1799 1800 if (transport_check_aborted_status(cmd, 1)) 1801 return; 1802 1803 switch (sense_reason) { 1804 case TCM_NON_EXISTENT_LUN: 1805 case TCM_UNSUPPORTED_SCSI_OPCODE: 1806 case TCM_INVALID_CDB_FIELD: 1807 case TCM_INVALID_PARAMETER_LIST: 1808 case TCM_PARAMETER_LIST_LENGTH_ERROR: 1809 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 1810 case TCM_UNKNOWN_MODE_PAGE: 1811 case TCM_WRITE_PROTECTED: 1812 case TCM_ADDRESS_OUT_OF_RANGE: 1813 case TCM_CHECK_CONDITION_ABORT_CMD: 1814 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1815 case TCM_CHECK_CONDITION_NOT_READY: 1816 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: 1817 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: 1818 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: 1819 case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: 1820 case TCM_TOO_MANY_TARGET_DESCS: 1821 case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE: 1822 case TCM_TOO_MANY_SEGMENT_DESCS: 1823 case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE: 1824 break; 1825 case TCM_OUT_OF_RESOURCES: 1826 cmd->scsi_status = SAM_STAT_TASK_SET_FULL; 1827 goto queue_status; 1828 case TCM_LUN_BUSY: 1829 cmd->scsi_status = SAM_STAT_BUSY; 1830 goto queue_status; 1831 case TCM_RESERVATION_CONFLICT: 1832 /* 1833 * No SENSE Data payload for this case, set SCSI Status 1834 * and queue the response to $FABRIC_MOD. 1835 * 1836 * Uses linux/include/scsi/scsi.h SAM status codes defs 1837 */ 1838 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1839 /* 1840 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 1841 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 1842 * CONFLICT STATUS. 1843 * 1844 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1845 */ 1846 if (cmd->se_sess && 1847 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) { 1848 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 1849 cmd->orig_fe_lun, 0x2C, 1850 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1851 } 1852 1853 goto queue_status; 1854 default: 1855 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 1856 cmd->t_task_cdb[0], sense_reason); 1857 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1858 break; 1859 } 1860 1861 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); 1862 if (ret) 1863 goto queue_full; 1864 1865 check_stop: 1866 transport_lun_remove_cmd(cmd); 1867 transport_cmd_check_stop_to_fabric(cmd); 1868 return; 1869 1870 queue_status: 1871 trace_target_cmd_complete(cmd); 1872 ret = cmd->se_tfo->queue_status(cmd); 1873 if (!ret) 1874 goto check_stop; 1875 queue_full: 1876 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 1877 } 1878 EXPORT_SYMBOL(transport_generic_request_failure); 1879 1880 void __target_execute_cmd(struct se_cmd *cmd, bool do_checks) 1881 { 1882 sense_reason_t ret; 1883 1884 if (!cmd->execute_cmd) { 1885 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1886 goto err; 1887 } 1888 if (do_checks) { 1889 /* 1890 * Check for an existing UNIT ATTENTION condition after 1891 * target_handle_task_attr() has done SAM task attr 1892 * checking, and possibly have already defered execution 1893 * out to target_restart_delayed_cmds() context. 1894 */ 1895 ret = target_scsi3_ua_check(cmd); 1896 if (ret) 1897 goto err; 1898 1899 ret = target_alua_state_check(cmd); 1900 if (ret) 1901 goto err; 1902 1903 ret = target_check_reservation(cmd); 1904 if (ret) { 1905 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1906 goto err; 1907 } 1908 } 1909 1910 ret = cmd->execute_cmd(cmd); 1911 if (!ret) 1912 return; 1913 err: 1914 spin_lock_irq(&cmd->t_state_lock); 1915 cmd->transport_state &= ~CMD_T_SENT; 1916 spin_unlock_irq(&cmd->t_state_lock); 1917 1918 transport_generic_request_failure(cmd, ret); 1919 } 1920 1921 static int target_write_prot_action(struct se_cmd *cmd) 1922 { 1923 u32 sectors; 1924 /* 1925 * Perform WRITE_INSERT of PI using software emulation when backend 1926 * device has PI enabled, if the transport has not already generated 1927 * PI using hardware WRITE_INSERT offload. 1928 */ 1929 switch (cmd->prot_op) { 1930 case TARGET_PROT_DOUT_INSERT: 1931 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) 1932 sbc_dif_generate(cmd); 1933 break; 1934 case TARGET_PROT_DOUT_STRIP: 1935 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP) 1936 break; 1937 1938 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); 1939 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 1940 sectors, 0, cmd->t_prot_sg, 0); 1941 if (unlikely(cmd->pi_err)) { 1942 spin_lock_irq(&cmd->t_state_lock); 1943 cmd->transport_state &= ~CMD_T_SENT; 1944 spin_unlock_irq(&cmd->t_state_lock); 1945 transport_generic_request_failure(cmd, cmd->pi_err); 1946 return -1; 1947 } 1948 break; 1949 default: 1950 break; 1951 } 1952 1953 return 0; 1954 } 1955 1956 static bool target_handle_task_attr(struct se_cmd *cmd) 1957 { 1958 struct se_device *dev = cmd->se_dev; 1959 1960 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1961 return false; 1962 1963 cmd->se_cmd_flags |= SCF_TASK_ATTR_SET; 1964 1965 /* 1966 * Check for the existence of HEAD_OF_QUEUE, and if true return 1 1967 * to allow the passed struct se_cmd list of tasks to the front of the list. 1968 */ 1969 switch (cmd->sam_task_attr) { 1970 case TCM_HEAD_TAG: 1971 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n", 1972 cmd->t_task_cdb[0]); 1973 return false; 1974 case TCM_ORDERED_TAG: 1975 atomic_inc_mb(&dev->dev_ordered_sync); 1976 1977 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n", 1978 cmd->t_task_cdb[0]); 1979 1980 /* 1981 * Execute an ORDERED command if no other older commands 1982 * exist that need to be completed first. 1983 */ 1984 if (!atomic_read(&dev->simple_cmds)) 1985 return false; 1986 break; 1987 default: 1988 /* 1989 * For SIMPLE and UNTAGGED Task Attribute commands 1990 */ 1991 atomic_inc_mb(&dev->simple_cmds); 1992 break; 1993 } 1994 1995 if (atomic_read(&dev->dev_ordered_sync) == 0) 1996 return false; 1997 1998 spin_lock(&dev->delayed_cmd_lock); 1999 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); 2000 spin_unlock(&dev->delayed_cmd_lock); 2001 2002 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn", 2003 cmd->t_task_cdb[0], cmd->sam_task_attr); 2004 return true; 2005 } 2006 2007 static int __transport_check_aborted_status(struct se_cmd *, int); 2008 2009 void target_execute_cmd(struct se_cmd *cmd) 2010 { 2011 /* 2012 * Determine if frontend context caller is requesting the stopping of 2013 * this command for frontend exceptions. 2014 * 2015 * If the received CDB has aleady been aborted stop processing it here. 2016 */ 2017 spin_lock_irq(&cmd->t_state_lock); 2018 if (__transport_check_aborted_status(cmd, 1)) { 2019 spin_unlock_irq(&cmd->t_state_lock); 2020 return; 2021 } 2022 if (cmd->transport_state & CMD_T_STOP) { 2023 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 2024 __func__, __LINE__, cmd->tag); 2025 2026 spin_unlock_irq(&cmd->t_state_lock); 2027 complete_all(&cmd->t_transport_stop_comp); 2028 return; 2029 } 2030 2031 cmd->t_state = TRANSPORT_PROCESSING; 2032 cmd->transport_state &= ~CMD_T_PRE_EXECUTE; 2033 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; 2034 spin_unlock_irq(&cmd->t_state_lock); 2035 2036 if (target_write_prot_action(cmd)) 2037 return; 2038 2039 if (target_handle_task_attr(cmd)) { 2040 spin_lock_irq(&cmd->t_state_lock); 2041 cmd->transport_state &= ~CMD_T_SENT; 2042 spin_unlock_irq(&cmd->t_state_lock); 2043 return; 2044 } 2045 2046 __target_execute_cmd(cmd, true); 2047 } 2048 EXPORT_SYMBOL(target_execute_cmd); 2049 2050 /* 2051 * Process all commands up to the last received ORDERED task attribute which 2052 * requires another blocking boundary 2053 */ 2054 static void target_restart_delayed_cmds(struct se_device *dev) 2055 { 2056 for (;;) { 2057 struct se_cmd *cmd; 2058 2059 spin_lock(&dev->delayed_cmd_lock); 2060 if (list_empty(&dev->delayed_cmd_list)) { 2061 spin_unlock(&dev->delayed_cmd_lock); 2062 break; 2063 } 2064 2065 cmd = list_entry(dev->delayed_cmd_list.next, 2066 struct se_cmd, se_delayed_node); 2067 list_del(&cmd->se_delayed_node); 2068 spin_unlock(&dev->delayed_cmd_lock); 2069 2070 cmd->transport_state |= CMD_T_SENT; 2071 2072 __target_execute_cmd(cmd, true); 2073 2074 if (cmd->sam_task_attr == TCM_ORDERED_TAG) 2075 break; 2076 } 2077 } 2078 2079 /* 2080 * Called from I/O completion to determine which dormant/delayed 2081 * and ordered cmds need to have their tasks added to the execution queue. 2082 */ 2083 static void transport_complete_task_attr(struct se_cmd *cmd) 2084 { 2085 struct se_device *dev = cmd->se_dev; 2086 2087 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 2088 return; 2089 2090 if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET)) 2091 goto restart; 2092 2093 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { 2094 atomic_dec_mb(&dev->simple_cmds); 2095 dev->dev_cur_ordered_id++; 2096 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { 2097 dev->dev_cur_ordered_id++; 2098 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", 2099 dev->dev_cur_ordered_id); 2100 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { 2101 atomic_dec_mb(&dev->dev_ordered_sync); 2102 2103 dev->dev_cur_ordered_id++; 2104 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", 2105 dev->dev_cur_ordered_id); 2106 } 2107 cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; 2108 2109 restart: 2110 target_restart_delayed_cmds(dev); 2111 } 2112 2113 static void transport_complete_qf(struct se_cmd *cmd) 2114 { 2115 int ret = 0; 2116 2117 transport_complete_task_attr(cmd); 2118 /* 2119 * If a fabric driver ->write_pending() or ->queue_data_in() callback 2120 * has returned neither -ENOMEM or -EAGAIN, assume it's fatal and 2121 * the same callbacks should not be retried. Return CHECK_CONDITION 2122 * if a scsi_status is not already set. 2123 * 2124 * If a fabric driver ->queue_status() has returned non zero, always 2125 * keep retrying no matter what.. 2126 */ 2127 if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) { 2128 if (cmd->scsi_status) 2129 goto queue_status; 2130 2131 translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 2132 goto queue_status; 2133 } 2134 2135 /* 2136 * Check if we need to send a sense buffer from 2137 * the struct se_cmd in question. We do NOT want 2138 * to take this path of the IO has been marked as 2139 * needing to be treated like a "normal read". This 2140 * is the case if it's a tape read, and either the 2141 * FM, EOM, or ILI bits are set, but there is no 2142 * sense data. 2143 */ 2144 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && 2145 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 2146 goto queue_status; 2147 2148 switch (cmd->data_direction) { 2149 case DMA_FROM_DEVICE: 2150 /* queue status if not treating this as a normal read */ 2151 if (cmd->scsi_status && 2152 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) 2153 goto queue_status; 2154 2155 trace_target_cmd_complete(cmd); 2156 ret = cmd->se_tfo->queue_data_in(cmd); 2157 break; 2158 case DMA_TO_DEVICE: 2159 if (cmd->se_cmd_flags & SCF_BIDI) { 2160 ret = cmd->se_tfo->queue_data_in(cmd); 2161 break; 2162 } 2163 /* fall through */ 2164 case DMA_NONE: 2165 queue_status: 2166 trace_target_cmd_complete(cmd); 2167 ret = cmd->se_tfo->queue_status(cmd); 2168 break; 2169 default: 2170 break; 2171 } 2172 2173 if (ret < 0) { 2174 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 2175 return; 2176 } 2177 transport_lun_remove_cmd(cmd); 2178 transport_cmd_check_stop_to_fabric(cmd); 2179 } 2180 2181 static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev, 2182 int err, bool write_pending) 2183 { 2184 /* 2185 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or 2186 * ->queue_data_in() callbacks from new process context. 2187 * 2188 * Otherwise for other errors, transport_complete_qf() will send 2189 * CHECK_CONDITION via ->queue_status() instead of attempting to 2190 * retry associated fabric driver data-transfer callbacks. 2191 */ 2192 if (err == -EAGAIN || err == -ENOMEM) { 2193 cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP : 2194 TRANSPORT_COMPLETE_QF_OK; 2195 } else { 2196 pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err); 2197 cmd->t_state = TRANSPORT_COMPLETE_QF_ERR; 2198 } 2199 2200 spin_lock_irq(&dev->qf_cmd_lock); 2201 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 2202 atomic_inc_mb(&dev->dev_qf_count); 2203 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 2204 2205 schedule_work(&cmd->se_dev->qf_work_queue); 2206 } 2207 2208 static bool target_read_prot_action(struct se_cmd *cmd) 2209 { 2210 switch (cmd->prot_op) { 2211 case TARGET_PROT_DIN_STRIP: 2212 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { 2213 u32 sectors = cmd->data_length >> 2214 ilog2(cmd->se_dev->dev_attrib.block_size); 2215 2216 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 2217 sectors, 0, cmd->t_prot_sg, 2218 0); 2219 if (cmd->pi_err) 2220 return true; 2221 } 2222 break; 2223 case TARGET_PROT_DIN_INSERT: 2224 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) 2225 break; 2226 2227 sbc_dif_generate(cmd); 2228 break; 2229 default: 2230 break; 2231 } 2232 2233 return false; 2234 } 2235 2236 static void target_complete_ok_work(struct work_struct *work) 2237 { 2238 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2239 int ret; 2240 2241 /* 2242 * Check if we need to move delayed/dormant tasks from cmds on the 2243 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 2244 * Attribute. 2245 */ 2246 transport_complete_task_attr(cmd); 2247 2248 /* 2249 * Check to schedule QUEUE_FULL work, or execute an existing 2250 * cmd->transport_qf_callback() 2251 */ 2252 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 2253 schedule_work(&cmd->se_dev->qf_work_queue); 2254 2255 /* 2256 * Check if we need to send a sense buffer from 2257 * the struct se_cmd in question. We do NOT want 2258 * to take this path of the IO has been marked as 2259 * needing to be treated like a "normal read". This 2260 * is the case if it's a tape read, and either the 2261 * FM, EOM, or ILI bits are set, but there is no 2262 * sense data. 2263 */ 2264 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && 2265 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 2266 WARN_ON(!cmd->scsi_status); 2267 ret = transport_send_check_condition_and_sense( 2268 cmd, 0, 1); 2269 if (ret) 2270 goto queue_full; 2271 2272 transport_lun_remove_cmd(cmd); 2273 transport_cmd_check_stop_to_fabric(cmd); 2274 return; 2275 } 2276 /* 2277 * Check for a callback, used by amongst other things 2278 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. 2279 */ 2280 if (cmd->transport_complete_callback) { 2281 sense_reason_t rc; 2282 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); 2283 bool zero_dl = !(cmd->data_length); 2284 int post_ret = 0; 2285 2286 rc = cmd->transport_complete_callback(cmd, true, &post_ret); 2287 if (!rc && !post_ret) { 2288 if (caw && zero_dl) 2289 goto queue_rsp; 2290 2291 return; 2292 } else if (rc) { 2293 ret = transport_send_check_condition_and_sense(cmd, 2294 rc, 0); 2295 if (ret) 2296 goto queue_full; 2297 2298 transport_lun_remove_cmd(cmd); 2299 transport_cmd_check_stop_to_fabric(cmd); 2300 return; 2301 } 2302 } 2303 2304 queue_rsp: 2305 switch (cmd->data_direction) { 2306 case DMA_FROM_DEVICE: 2307 /* 2308 * if this is a READ-type IO, but SCSI status 2309 * is set, then skip returning data and just 2310 * return the status -- unless this IO is marked 2311 * as needing to be treated as a normal read, 2312 * in which case we want to go ahead and return 2313 * the data. This happens, for example, for tape 2314 * reads with the FM, EOM, or ILI bits set, with 2315 * no sense data. 2316 */ 2317 if (cmd->scsi_status && 2318 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) 2319 goto queue_status; 2320 2321 atomic_long_add(cmd->data_length, 2322 &cmd->se_lun->lun_stats.tx_data_octets); 2323 /* 2324 * Perform READ_STRIP of PI using software emulation when 2325 * backend had PI enabled, if the transport will not be 2326 * performing hardware READ_STRIP offload. 2327 */ 2328 if (target_read_prot_action(cmd)) { 2329 ret = transport_send_check_condition_and_sense(cmd, 2330 cmd->pi_err, 0); 2331 if (ret) 2332 goto queue_full; 2333 2334 transport_lun_remove_cmd(cmd); 2335 transport_cmd_check_stop_to_fabric(cmd); 2336 return; 2337 } 2338 2339 trace_target_cmd_complete(cmd); 2340 ret = cmd->se_tfo->queue_data_in(cmd); 2341 if (ret) 2342 goto queue_full; 2343 break; 2344 case DMA_TO_DEVICE: 2345 atomic_long_add(cmd->data_length, 2346 &cmd->se_lun->lun_stats.rx_data_octets); 2347 /* 2348 * Check if we need to send READ payload for BIDI-COMMAND 2349 */ 2350 if (cmd->se_cmd_flags & SCF_BIDI) { 2351 atomic_long_add(cmd->data_length, 2352 &cmd->se_lun->lun_stats.tx_data_octets); 2353 ret = cmd->se_tfo->queue_data_in(cmd); 2354 if (ret) 2355 goto queue_full; 2356 break; 2357 } 2358 /* fall through */ 2359 case DMA_NONE: 2360 queue_status: 2361 trace_target_cmd_complete(cmd); 2362 ret = cmd->se_tfo->queue_status(cmd); 2363 if (ret) 2364 goto queue_full; 2365 break; 2366 default: 2367 break; 2368 } 2369 2370 transport_lun_remove_cmd(cmd); 2371 transport_cmd_check_stop_to_fabric(cmd); 2372 return; 2373 2374 queue_full: 2375 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 2376 " data_direction: %d\n", cmd, cmd->data_direction); 2377 2378 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 2379 } 2380 2381 void target_free_sgl(struct scatterlist *sgl, int nents) 2382 { 2383 sgl_free_n_order(sgl, nents, 0); 2384 } 2385 EXPORT_SYMBOL(target_free_sgl); 2386 2387 static inline void transport_reset_sgl_orig(struct se_cmd *cmd) 2388 { 2389 /* 2390 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE 2391 * emulation, and free + reset pointers if necessary.. 2392 */ 2393 if (!cmd->t_data_sg_orig) 2394 return; 2395 2396 kfree(cmd->t_data_sg); 2397 cmd->t_data_sg = cmd->t_data_sg_orig; 2398 cmd->t_data_sg_orig = NULL; 2399 cmd->t_data_nents = cmd->t_data_nents_orig; 2400 cmd->t_data_nents_orig = 0; 2401 } 2402 2403 static inline void transport_free_pages(struct se_cmd *cmd) 2404 { 2405 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2406 target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); 2407 cmd->t_prot_sg = NULL; 2408 cmd->t_prot_nents = 0; 2409 } 2410 2411 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { 2412 /* 2413 * Release special case READ buffer payload required for 2414 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE 2415 */ 2416 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { 2417 target_free_sgl(cmd->t_bidi_data_sg, 2418 cmd->t_bidi_data_nents); 2419 cmd->t_bidi_data_sg = NULL; 2420 cmd->t_bidi_data_nents = 0; 2421 } 2422 transport_reset_sgl_orig(cmd); 2423 return; 2424 } 2425 transport_reset_sgl_orig(cmd); 2426 2427 target_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2428 cmd->t_data_sg = NULL; 2429 cmd->t_data_nents = 0; 2430 2431 target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 2432 cmd->t_bidi_data_sg = NULL; 2433 cmd->t_bidi_data_nents = 0; 2434 } 2435 2436 void *transport_kmap_data_sg(struct se_cmd *cmd) 2437 { 2438 struct scatterlist *sg = cmd->t_data_sg; 2439 struct page **pages; 2440 int i; 2441 2442 /* 2443 * We need to take into account a possible offset here for fabrics like 2444 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 2445 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 2446 */ 2447 if (!cmd->t_data_nents) 2448 return NULL; 2449 2450 BUG_ON(!sg); 2451 if (cmd->t_data_nents == 1) 2452 return kmap(sg_page(sg)) + sg->offset; 2453 2454 /* >1 page. use vmap */ 2455 pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL); 2456 if (!pages) 2457 return NULL; 2458 2459 /* convert sg[] to pages[] */ 2460 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 2461 pages[i] = sg_page(sg); 2462 } 2463 2464 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 2465 kfree(pages); 2466 if (!cmd->t_data_vmap) 2467 return NULL; 2468 2469 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 2470 } 2471 EXPORT_SYMBOL(transport_kmap_data_sg); 2472 2473 void transport_kunmap_data_sg(struct se_cmd *cmd) 2474 { 2475 if (!cmd->t_data_nents) { 2476 return; 2477 } else if (cmd->t_data_nents == 1) { 2478 kunmap(sg_page(cmd->t_data_sg)); 2479 return; 2480 } 2481 2482 vunmap(cmd->t_data_vmap); 2483 cmd->t_data_vmap = NULL; 2484 } 2485 EXPORT_SYMBOL(transport_kunmap_data_sg); 2486 2487 int 2488 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, 2489 bool zero_page, bool chainable) 2490 { 2491 gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0); 2492 2493 *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents); 2494 return *sgl ? 0 : -ENOMEM; 2495 } 2496 EXPORT_SYMBOL(target_alloc_sgl); 2497 2498 /* 2499 * Allocate any required resources to execute the command. For writes we 2500 * might not have the payload yet, so notify the fabric via a call to 2501 * ->write_pending instead. Otherwise place it on the execution queue. 2502 */ 2503 sense_reason_t 2504 transport_generic_new_cmd(struct se_cmd *cmd) 2505 { 2506 unsigned long flags; 2507 int ret = 0; 2508 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); 2509 2510 if (cmd->prot_op != TARGET_PROT_NORMAL && 2511 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2512 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, 2513 cmd->prot_length, true, false); 2514 if (ret < 0) 2515 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2516 } 2517 2518 /* 2519 * Determine is the TCM fabric module has already allocated physical 2520 * memory, and is directly calling transport_generic_map_mem_to_cmd() 2521 * beforehand. 2522 */ 2523 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 2524 cmd->data_length) { 2525 2526 if ((cmd->se_cmd_flags & SCF_BIDI) || 2527 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { 2528 u32 bidi_length; 2529 2530 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) 2531 bidi_length = cmd->t_task_nolb * 2532 cmd->se_dev->dev_attrib.block_size; 2533 else 2534 bidi_length = cmd->data_length; 2535 2536 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2537 &cmd->t_bidi_data_nents, 2538 bidi_length, zero_flag, false); 2539 if (ret < 0) 2540 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2541 } 2542 2543 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2544 cmd->data_length, zero_flag, false); 2545 if (ret < 0) 2546 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2547 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 2548 cmd->data_length) { 2549 /* 2550 * Special case for COMPARE_AND_WRITE with fabrics 2551 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. 2552 */ 2553 u32 caw_length = cmd->t_task_nolb * 2554 cmd->se_dev->dev_attrib.block_size; 2555 2556 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2557 &cmd->t_bidi_data_nents, 2558 caw_length, zero_flag, false); 2559 if (ret < 0) 2560 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2561 } 2562 /* 2563 * If this command is not a write we can execute it right here, 2564 * for write buffers we need to notify the fabric driver first 2565 * and let it call back once the write buffers are ready. 2566 */ 2567 target_add_to_state_list(cmd); 2568 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { 2569 target_execute_cmd(cmd); 2570 return 0; 2571 } 2572 2573 spin_lock_irqsave(&cmd->t_state_lock, flags); 2574 cmd->t_state = TRANSPORT_WRITE_PENDING; 2575 /* 2576 * Determine if frontend context caller is requesting the stopping of 2577 * this command for frontend exceptions. 2578 */ 2579 if (cmd->transport_state & CMD_T_STOP) { 2580 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 2581 __func__, __LINE__, cmd->tag); 2582 2583 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2584 2585 complete_all(&cmd->t_transport_stop_comp); 2586 return 0; 2587 } 2588 cmd->transport_state &= ~CMD_T_ACTIVE; 2589 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2590 2591 ret = cmd->se_tfo->write_pending(cmd); 2592 if (ret) 2593 goto queue_full; 2594 2595 return 0; 2596 2597 queue_full: 2598 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 2599 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); 2600 return 0; 2601 } 2602 EXPORT_SYMBOL(transport_generic_new_cmd); 2603 2604 static void transport_write_pending_qf(struct se_cmd *cmd) 2605 { 2606 unsigned long flags; 2607 int ret; 2608 bool stop; 2609 2610 spin_lock_irqsave(&cmd->t_state_lock, flags); 2611 stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED)); 2612 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2613 2614 if (stop) { 2615 pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n", 2616 __func__, __LINE__, cmd->tag); 2617 complete_all(&cmd->t_transport_stop_comp); 2618 return; 2619 } 2620 2621 ret = cmd->se_tfo->write_pending(cmd); 2622 if (ret) { 2623 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", 2624 cmd); 2625 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); 2626 } 2627 } 2628 2629 static bool 2630 __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, 2631 unsigned long *flags); 2632 2633 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) 2634 { 2635 unsigned long flags; 2636 2637 spin_lock_irqsave(&cmd->t_state_lock, flags); 2638 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); 2639 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2640 } 2641 2642 /* 2643 * This function is called by frontend drivers after processing of a command 2644 * has finished. 2645 * 2646 * The protocol for ensuring that either the regular flow or the TMF 2647 * code drops one reference is as follows: 2648 * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause 2649 * the frontend driver to drop one reference, synchronously or asynchronously. 2650 * - During regular command processing the target core sets CMD_T_COMPLETE 2651 * before invoking one of the .queue_*() functions. 2652 * - The code that aborts commands skips commands and TMFs for which 2653 * CMD_T_COMPLETE has been set. 2654 * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for 2655 * commands that will be aborted. 2656 * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set 2657 * transport_generic_free_cmd() skips its call to target_put_sess_cmd(). 2658 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will 2659 * be called and will drop a reference. 2660 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task() 2661 * will be called. transport_cmd_finish_abort() will drop the final reference. 2662 */ 2663 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2664 { 2665 DECLARE_COMPLETION_ONSTACK(compl); 2666 int ret = 0; 2667 bool aborted = false, tas = false; 2668 2669 if (wait_for_tasks) 2670 target_wait_free_cmd(cmd, &aborted, &tas); 2671 2672 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) { 2673 /* 2674 * Handle WRITE failure case where transport_generic_new_cmd() 2675 * has already added se_cmd to state_list, but fabric has 2676 * failed command before I/O submission. 2677 */ 2678 if (cmd->state_active) 2679 target_remove_from_state_list(cmd); 2680 2681 if (cmd->se_lun) 2682 transport_lun_remove_cmd(cmd); 2683 } 2684 if (aborted) 2685 cmd->compl = &compl; 2686 if (!aborted || tas) 2687 ret = target_put_sess_cmd(cmd); 2688 if (aborted) { 2689 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); 2690 wait_for_completion(&compl); 2691 ret = 1; 2692 } 2693 return ret; 2694 } 2695 EXPORT_SYMBOL(transport_generic_free_cmd); 2696 2697 /** 2698 * target_get_sess_cmd - Add command to active ->sess_cmd_list 2699 * @se_cmd: command descriptor to add 2700 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() 2701 */ 2702 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) 2703 { 2704 struct se_session *se_sess = se_cmd->se_sess; 2705 unsigned long flags; 2706 int ret = 0; 2707 2708 /* 2709 * Add a second kref if the fabric caller is expecting to handle 2710 * fabric acknowledgement that requires two target_put_sess_cmd() 2711 * invocations before se_cmd descriptor release. 2712 */ 2713 if (ack_kref) { 2714 if (!kref_get_unless_zero(&se_cmd->cmd_kref)) 2715 return -EINVAL; 2716 2717 se_cmd->se_cmd_flags |= SCF_ACK_KREF; 2718 } 2719 2720 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2721 if (se_sess->sess_tearing_down) { 2722 ret = -ESHUTDOWN; 2723 goto out; 2724 } 2725 se_cmd->transport_state |= CMD_T_PRE_EXECUTE; 2726 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2727 out: 2728 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2729 2730 if (ret && ack_kref) 2731 target_put_sess_cmd(se_cmd); 2732 2733 return ret; 2734 } 2735 EXPORT_SYMBOL(target_get_sess_cmd); 2736 2737 static void target_free_cmd_mem(struct se_cmd *cmd) 2738 { 2739 transport_free_pages(cmd); 2740 2741 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 2742 core_tmr_release_req(cmd->se_tmr_req); 2743 if (cmd->t_task_cdb != cmd->__t_task_cdb) 2744 kfree(cmd->t_task_cdb); 2745 } 2746 2747 static void target_release_cmd_kref(struct kref *kref) 2748 { 2749 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2750 struct se_session *se_sess = se_cmd->se_sess; 2751 struct completion *compl = se_cmd->compl; 2752 unsigned long flags; 2753 2754 if (se_sess) { 2755 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2756 list_del_init(&se_cmd->se_cmd_list); 2757 if (list_empty(&se_sess->sess_cmd_list)) 2758 wake_up(&se_sess->cmd_list_wq); 2759 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2760 } 2761 2762 target_free_cmd_mem(se_cmd); 2763 se_cmd->se_tfo->release_cmd(se_cmd); 2764 if (compl) 2765 complete(compl); 2766 } 2767 2768 /** 2769 * target_put_sess_cmd - decrease the command reference count 2770 * @se_cmd: command to drop a reference from 2771 * 2772 * Returns 1 if and only if this target_put_sess_cmd() call caused the 2773 * refcount to drop to zero. Returns zero otherwise. 2774 */ 2775 int target_put_sess_cmd(struct se_cmd *se_cmd) 2776 { 2777 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); 2778 } 2779 EXPORT_SYMBOL(target_put_sess_cmd); 2780 2781 static const char *data_dir_name(enum dma_data_direction d) 2782 { 2783 switch (d) { 2784 case DMA_BIDIRECTIONAL: return "BIDI"; 2785 case DMA_TO_DEVICE: return "WRITE"; 2786 case DMA_FROM_DEVICE: return "READ"; 2787 case DMA_NONE: return "NONE"; 2788 } 2789 2790 return "(?)"; 2791 } 2792 2793 static const char *cmd_state_name(enum transport_state_table t) 2794 { 2795 switch (t) { 2796 case TRANSPORT_NO_STATE: return "NO_STATE"; 2797 case TRANSPORT_NEW_CMD: return "NEW_CMD"; 2798 case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING"; 2799 case TRANSPORT_PROCESSING: return "PROCESSING"; 2800 case TRANSPORT_COMPLETE: return "COMPLETE"; 2801 case TRANSPORT_ISTATE_PROCESSING: 2802 return "ISTATE_PROCESSING"; 2803 case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP"; 2804 case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK"; 2805 case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR"; 2806 } 2807 2808 return "(?)"; 2809 } 2810 2811 static void target_append_str(char **str, const char *txt) 2812 { 2813 char *prev = *str; 2814 2815 *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) : 2816 kstrdup(txt, GFP_ATOMIC); 2817 kfree(prev); 2818 } 2819 2820 /* 2821 * Convert a transport state bitmask into a string. The caller is 2822 * responsible for freeing the returned pointer. 2823 */ 2824 static char *target_ts_to_str(u32 ts) 2825 { 2826 char *str = NULL; 2827 2828 if (ts & CMD_T_ABORTED) 2829 target_append_str(&str, "aborted"); 2830 if (ts & CMD_T_ACTIVE) 2831 target_append_str(&str, "active"); 2832 if (ts & CMD_T_COMPLETE) 2833 target_append_str(&str, "complete"); 2834 if (ts & CMD_T_SENT) 2835 target_append_str(&str, "sent"); 2836 if (ts & CMD_T_STOP) 2837 target_append_str(&str, "stop"); 2838 if (ts & CMD_T_FABRIC_STOP) 2839 target_append_str(&str, "fabric_stop"); 2840 2841 return str; 2842 } 2843 2844 static const char *target_tmf_name(enum tcm_tmreq_table tmf) 2845 { 2846 switch (tmf) { 2847 case TMR_ABORT_TASK: return "ABORT_TASK"; 2848 case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET"; 2849 case TMR_CLEAR_ACA: return "CLEAR_ACA"; 2850 case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET"; 2851 case TMR_LUN_RESET: return "LUN_RESET"; 2852 case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET"; 2853 case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET"; 2854 case TMR_UNKNOWN: break; 2855 } 2856 return "(?)"; 2857 } 2858 2859 void target_show_cmd(const char *pfx, struct se_cmd *cmd) 2860 { 2861 char *ts_str = target_ts_to_str(cmd->transport_state); 2862 const u8 *cdb = cmd->t_task_cdb; 2863 struct se_tmr_req *tmf = cmd->se_tmr_req; 2864 2865 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2866 pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n", 2867 pfx, cdb[0], cdb[1], cmd->tag, 2868 data_dir_name(cmd->data_direction), 2869 cmd->se_tfo->get_cmd_state(cmd), 2870 cmd_state_name(cmd->t_state), cmd->data_length, 2871 kref_read(&cmd->cmd_kref), ts_str); 2872 } else { 2873 pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n", 2874 pfx, target_tmf_name(tmf->function), cmd->tag, 2875 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd), 2876 cmd_state_name(cmd->t_state), 2877 kref_read(&cmd->cmd_kref), ts_str); 2878 } 2879 kfree(ts_str); 2880 } 2881 EXPORT_SYMBOL(target_show_cmd); 2882 2883 /** 2884 * target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued. 2885 * @se_sess: session to flag 2886 */ 2887 void target_sess_cmd_list_set_waiting(struct se_session *se_sess) 2888 { 2889 unsigned long flags; 2890 2891 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2892 se_sess->sess_tearing_down = 1; 2893 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2894 } 2895 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); 2896 2897 /** 2898 * target_wait_for_sess_cmds - Wait for outstanding commands 2899 * @se_sess: session to wait for active I/O 2900 */ 2901 void target_wait_for_sess_cmds(struct se_session *se_sess) 2902 { 2903 struct se_cmd *cmd; 2904 int ret; 2905 2906 WARN_ON_ONCE(!se_sess->sess_tearing_down); 2907 2908 spin_lock_irq(&se_sess->sess_cmd_lock); 2909 do { 2910 ret = wait_event_interruptible_lock_irq_timeout( 2911 se_sess->cmd_list_wq, 2912 list_empty(&se_sess->sess_cmd_list), 2913 se_sess->sess_cmd_lock, 180 * HZ); 2914 list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list) 2915 target_show_cmd("session shutdown: still waiting for ", 2916 cmd); 2917 } while (ret <= 0); 2918 spin_unlock_irq(&se_sess->sess_cmd_lock); 2919 } 2920 EXPORT_SYMBOL(target_wait_for_sess_cmds); 2921 2922 static void target_lun_confirm(struct percpu_ref *ref) 2923 { 2924 struct se_lun *lun = container_of(ref, struct se_lun, lun_ref); 2925 2926 complete(&lun->lun_ref_comp); 2927 } 2928 2929 void transport_clear_lun_ref(struct se_lun *lun) 2930 { 2931 /* 2932 * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop 2933 * the initial reference and schedule confirm kill to be 2934 * executed after one full RCU grace period has completed. 2935 */ 2936 percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm); 2937 /* 2938 * The first completion waits for percpu_ref_switch_to_atomic_rcu() 2939 * to call target_lun_confirm after lun->lun_ref has been marked 2940 * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t 2941 * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref 2942 * fails for all new incoming I/O. 2943 */ 2944 wait_for_completion(&lun->lun_ref_comp); 2945 /* 2946 * The second completion waits for percpu_ref_put_many() to 2947 * invoke ->release() after lun->lun_ref has switched to 2948 * atomic_t mode, and lun->lun_ref.count has reached zero. 2949 * 2950 * At this point all target-core lun->lun_ref references have 2951 * been dropped via transport_lun_remove_cmd(), and it's safe 2952 * to proceed with the remaining LUN shutdown. 2953 */ 2954 wait_for_completion(&lun->lun_shutdown_comp); 2955 } 2956 2957 static bool 2958 __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, 2959 bool *aborted, bool *tas, unsigned long *flags) 2960 __releases(&cmd->t_state_lock) 2961 __acquires(&cmd->t_state_lock) 2962 { 2963 2964 assert_spin_locked(&cmd->t_state_lock); 2965 WARN_ON_ONCE(!irqs_disabled()); 2966 2967 if (fabric_stop) 2968 cmd->transport_state |= CMD_T_FABRIC_STOP; 2969 2970 if (cmd->transport_state & CMD_T_ABORTED) 2971 *aborted = true; 2972 2973 if (cmd->transport_state & CMD_T_TAS) 2974 *tas = true; 2975 2976 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 2977 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2978 return false; 2979 2980 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 2981 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2982 return false; 2983 2984 if (!(cmd->transport_state & CMD_T_ACTIVE)) 2985 return false; 2986 2987 if (fabric_stop && *aborted) 2988 return false; 2989 2990 cmd->transport_state |= CMD_T_STOP; 2991 2992 target_show_cmd("wait_for_tasks: Stopping ", cmd); 2993 2994 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 2995 2996 while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp, 2997 180 * HZ)) 2998 target_show_cmd("wait for tasks: ", cmd); 2999 3000 spin_lock_irqsave(&cmd->t_state_lock, *flags); 3001 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 3002 3003 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->" 3004 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); 3005 3006 return true; 3007 } 3008 3009 /** 3010 * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp 3011 * @cmd: command to wait on 3012 */ 3013 bool transport_wait_for_tasks(struct se_cmd *cmd) 3014 { 3015 unsigned long flags; 3016 bool ret, aborted = false, tas = false; 3017 3018 spin_lock_irqsave(&cmd->t_state_lock, flags); 3019 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); 3020 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3021 3022 return ret; 3023 } 3024 EXPORT_SYMBOL(transport_wait_for_tasks); 3025 3026 struct sense_info { 3027 u8 key; 3028 u8 asc; 3029 u8 ascq; 3030 bool add_sector_info; 3031 }; 3032 3033 static const struct sense_info sense_info_table[] = { 3034 [TCM_NO_SENSE] = { 3035 .key = NOT_READY 3036 }, 3037 [TCM_NON_EXISTENT_LUN] = { 3038 .key = ILLEGAL_REQUEST, 3039 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */ 3040 }, 3041 [TCM_UNSUPPORTED_SCSI_OPCODE] = { 3042 .key = ILLEGAL_REQUEST, 3043 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 3044 }, 3045 [TCM_SECTOR_COUNT_TOO_MANY] = { 3046 .key = ILLEGAL_REQUEST, 3047 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 3048 }, 3049 [TCM_UNKNOWN_MODE_PAGE] = { 3050 .key = ILLEGAL_REQUEST, 3051 .asc = 0x24, /* INVALID FIELD IN CDB */ 3052 }, 3053 [TCM_CHECK_CONDITION_ABORT_CMD] = { 3054 .key = ABORTED_COMMAND, 3055 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */ 3056 .ascq = 0x03, 3057 }, 3058 [TCM_INCORRECT_AMOUNT_OF_DATA] = { 3059 .key = ABORTED_COMMAND, 3060 .asc = 0x0c, /* WRITE ERROR */ 3061 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */ 3062 }, 3063 [TCM_INVALID_CDB_FIELD] = { 3064 .key = ILLEGAL_REQUEST, 3065 .asc = 0x24, /* INVALID FIELD IN CDB */ 3066 }, 3067 [TCM_INVALID_PARAMETER_LIST] = { 3068 .key = ILLEGAL_REQUEST, 3069 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ 3070 }, 3071 [TCM_TOO_MANY_TARGET_DESCS] = { 3072 .key = ILLEGAL_REQUEST, 3073 .asc = 0x26, 3074 .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */ 3075 }, 3076 [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = { 3077 .key = ILLEGAL_REQUEST, 3078 .asc = 0x26, 3079 .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */ 3080 }, 3081 [TCM_TOO_MANY_SEGMENT_DESCS] = { 3082 .key = ILLEGAL_REQUEST, 3083 .asc = 0x26, 3084 .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */ 3085 }, 3086 [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = { 3087 .key = ILLEGAL_REQUEST, 3088 .asc = 0x26, 3089 .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */ 3090 }, 3091 [TCM_PARAMETER_LIST_LENGTH_ERROR] = { 3092 .key = ILLEGAL_REQUEST, 3093 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ 3094 }, 3095 [TCM_UNEXPECTED_UNSOLICITED_DATA] = { 3096 .key = ILLEGAL_REQUEST, 3097 .asc = 0x0c, /* WRITE ERROR */ 3098 .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */ 3099 }, 3100 [TCM_SERVICE_CRC_ERROR] = { 3101 .key = ABORTED_COMMAND, 3102 .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */ 3103 .ascq = 0x05, /* N/A */ 3104 }, 3105 [TCM_SNACK_REJECTED] = { 3106 .key = ABORTED_COMMAND, 3107 .asc = 0x11, /* READ ERROR */ 3108 .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */ 3109 }, 3110 [TCM_WRITE_PROTECTED] = { 3111 .key = DATA_PROTECT, 3112 .asc = 0x27, /* WRITE PROTECTED */ 3113 }, 3114 [TCM_ADDRESS_OUT_OF_RANGE] = { 3115 .key = ILLEGAL_REQUEST, 3116 .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ 3117 }, 3118 [TCM_CHECK_CONDITION_UNIT_ATTENTION] = { 3119 .key = UNIT_ATTENTION, 3120 }, 3121 [TCM_CHECK_CONDITION_NOT_READY] = { 3122 .key = NOT_READY, 3123 }, 3124 [TCM_MISCOMPARE_VERIFY] = { 3125 .key = MISCOMPARE, 3126 .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */ 3127 .ascq = 0x00, 3128 }, 3129 [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = { 3130 .key = ABORTED_COMMAND, 3131 .asc = 0x10, 3132 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */ 3133 .add_sector_info = true, 3134 }, 3135 [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = { 3136 .key = ABORTED_COMMAND, 3137 .asc = 0x10, 3138 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ 3139 .add_sector_info = true, 3140 }, 3141 [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = { 3142 .key = ABORTED_COMMAND, 3143 .asc = 0x10, 3144 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ 3145 .add_sector_info = true, 3146 }, 3147 [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = { 3148 .key = COPY_ABORTED, 3149 .asc = 0x0d, 3150 .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */ 3151 3152 }, 3153 [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = { 3154 /* 3155 * Returning ILLEGAL REQUEST would cause immediate IO errors on 3156 * Solaris initiators. Returning NOT READY instead means the 3157 * operations will be retried a finite number of times and we 3158 * can survive intermittent errors. 3159 */ 3160 .key = NOT_READY, 3161 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */ 3162 }, 3163 [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = { 3164 /* 3165 * From spc4r22 section5.7.7,5.7.8 3166 * If a PERSISTENT RESERVE OUT command with a REGISTER service action 3167 * or a REGISTER AND IGNORE EXISTING KEY service action or 3168 * REGISTER AND MOVE service actionis attempted, 3169 * but there are insufficient device server resources to complete the 3170 * operation, then the command shall be terminated with CHECK CONDITION 3171 * status, with the sense key set to ILLEGAL REQUEST,and the additonal 3172 * sense code set to INSUFFICIENT REGISTRATION RESOURCES. 3173 */ 3174 .key = ILLEGAL_REQUEST, 3175 .asc = 0x55, 3176 .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */ 3177 }, 3178 }; 3179 3180 /** 3181 * translate_sense_reason - translate a sense reason into T10 key, asc and ascq 3182 * @cmd: SCSI command in which the resulting sense buffer or SCSI status will 3183 * be stored. 3184 * @reason: LIO sense reason code. If this argument has the value 3185 * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If 3186 * dequeuing a unit attention fails due to multiple commands being processed 3187 * concurrently, set the command status to BUSY. 3188 * 3189 * Return: 0 upon success or -EINVAL if the sense buffer is too small. 3190 */ 3191 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) 3192 { 3193 const struct sense_info *si; 3194 u8 *buffer = cmd->sense_buffer; 3195 int r = (__force int)reason; 3196 u8 key, asc, ascq; 3197 bool desc_format = target_sense_desc_format(cmd->se_dev); 3198 3199 if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key) 3200 si = &sense_info_table[r]; 3201 else 3202 si = &sense_info_table[(__force int) 3203 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE]; 3204 3205 key = si->key; 3206 if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) { 3207 if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc, 3208 &ascq)) { 3209 cmd->scsi_status = SAM_STAT_BUSY; 3210 return; 3211 } 3212 } else if (si->asc == 0) { 3213 WARN_ON_ONCE(cmd->scsi_asc == 0); 3214 asc = cmd->scsi_asc; 3215 ascq = cmd->scsi_ascq; 3216 } else { 3217 asc = si->asc; 3218 ascq = si->ascq; 3219 } 3220 3221 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 3222 cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 3223 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 3224 scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq); 3225 if (si->add_sector_info) 3226 WARN_ON_ONCE(scsi_set_sense_information(buffer, 3227 cmd->scsi_sense_length, 3228 cmd->bad_sector) < 0); 3229 } 3230 3231 int 3232 transport_send_check_condition_and_sense(struct se_cmd *cmd, 3233 sense_reason_t reason, int from_transport) 3234 { 3235 unsigned long flags; 3236 3237 spin_lock_irqsave(&cmd->t_state_lock, flags); 3238 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 3239 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3240 return 0; 3241 } 3242 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 3243 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3244 3245 if (!from_transport) 3246 translate_sense_reason(cmd, reason); 3247 3248 trace_target_cmd_complete(cmd); 3249 return cmd->se_tfo->queue_status(cmd); 3250 } 3251 EXPORT_SYMBOL(transport_send_check_condition_and_sense); 3252 3253 static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status) 3254 __releases(&cmd->t_state_lock) 3255 __acquires(&cmd->t_state_lock) 3256 { 3257 int ret; 3258 3259 assert_spin_locked(&cmd->t_state_lock); 3260 WARN_ON_ONCE(!irqs_disabled()); 3261 3262 if (!(cmd->transport_state & CMD_T_ABORTED)) 3263 return 0; 3264 /* 3265 * If cmd has been aborted but either no status is to be sent or it has 3266 * already been sent, just return 3267 */ 3268 if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) { 3269 if (send_status) 3270 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; 3271 return 1; 3272 } 3273 3274 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:" 3275 " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag); 3276 3277 cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; 3278 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 3279 trace_target_cmd_complete(cmd); 3280 3281 spin_unlock_irq(&cmd->t_state_lock); 3282 ret = cmd->se_tfo->queue_status(cmd); 3283 if (ret) 3284 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 3285 spin_lock_irq(&cmd->t_state_lock); 3286 3287 return 1; 3288 } 3289 3290 int transport_check_aborted_status(struct se_cmd *cmd, int send_status) 3291 { 3292 int ret; 3293 3294 spin_lock_irq(&cmd->t_state_lock); 3295 ret = __transport_check_aborted_status(cmd, send_status); 3296 spin_unlock_irq(&cmd->t_state_lock); 3297 3298 return ret; 3299 } 3300 EXPORT_SYMBOL(transport_check_aborted_status); 3301 3302 void transport_send_task_abort(struct se_cmd *cmd) 3303 { 3304 unsigned long flags; 3305 int ret; 3306 3307 spin_lock_irqsave(&cmd->t_state_lock, flags); 3308 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) { 3309 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3310 return; 3311 } 3312 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3313 3314 /* 3315 * If there are still expected incoming fabric WRITEs, we wait 3316 * until until they have completed before sending a TASK_ABORTED 3317 * response. This response with TASK_ABORTED status will be 3318 * queued back to fabric module by transport_check_aborted_status(). 3319 */ 3320 if (cmd->data_direction == DMA_TO_DEVICE) { 3321 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 3322 spin_lock_irqsave(&cmd->t_state_lock, flags); 3323 if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) { 3324 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3325 goto send_abort; 3326 } 3327 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; 3328 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3329 return; 3330 } 3331 } 3332 send_abort: 3333 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 3334 3335 transport_lun_remove_cmd(cmd); 3336 3337 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n", 3338 cmd->t_task_cdb[0], cmd->tag); 3339 3340 trace_target_cmd_complete(cmd); 3341 ret = cmd->se_tfo->queue_status(cmd); 3342 if (ret) 3343 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 3344 } 3345 3346 static void target_tmr_work(struct work_struct *work) 3347 { 3348 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 3349 struct se_device *dev = cmd->se_dev; 3350 struct se_tmr_req *tmr = cmd->se_tmr_req; 3351 unsigned long flags; 3352 int ret; 3353 3354 spin_lock_irqsave(&cmd->t_state_lock, flags); 3355 if (cmd->transport_state & CMD_T_ABORTED) { 3356 tmr->response = TMR_FUNCTION_REJECTED; 3357 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3358 goto check_stop; 3359 } 3360 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3361 3362 switch (tmr->function) { 3363 case TMR_ABORT_TASK: 3364 core_tmr_abort_task(dev, tmr, cmd->se_sess); 3365 break; 3366 case TMR_ABORT_TASK_SET: 3367 case TMR_CLEAR_ACA: 3368 case TMR_CLEAR_TASK_SET: 3369 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; 3370 break; 3371 case TMR_LUN_RESET: 3372 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); 3373 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : 3374 TMR_FUNCTION_REJECTED; 3375 if (tmr->response == TMR_FUNCTION_COMPLETE) { 3376 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 3377 cmd->orig_fe_lun, 0x29, 3378 ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED); 3379 } 3380 break; 3381 case TMR_TARGET_WARM_RESET: 3382 tmr->response = TMR_FUNCTION_REJECTED; 3383 break; 3384 case TMR_TARGET_COLD_RESET: 3385 tmr->response = TMR_FUNCTION_REJECTED; 3386 break; 3387 default: 3388 pr_err("Unknown TMR function: 0x%02x.\n", 3389 tmr->function); 3390 tmr->response = TMR_FUNCTION_REJECTED; 3391 break; 3392 } 3393 3394 spin_lock_irqsave(&cmd->t_state_lock, flags); 3395 if (cmd->transport_state & CMD_T_ABORTED) { 3396 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3397 goto check_stop; 3398 } 3399 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3400 3401 cmd->se_tfo->queue_tm_rsp(cmd); 3402 3403 check_stop: 3404 transport_lun_remove_cmd(cmd); 3405 transport_cmd_check_stop_to_fabric(cmd); 3406 } 3407 3408 int transport_generic_handle_tmr( 3409 struct se_cmd *cmd) 3410 { 3411 unsigned long flags; 3412 bool aborted = false; 3413 3414 spin_lock_irqsave(&cmd->t_state_lock, flags); 3415 if (cmd->transport_state & CMD_T_ABORTED) { 3416 aborted = true; 3417 } else { 3418 cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 3419 cmd->transport_state |= CMD_T_ACTIVE; 3420 } 3421 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3422 3423 if (aborted) { 3424 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d" 3425 "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function, 3426 cmd->se_tmr_req->ref_task_tag, cmd->tag); 3427 transport_lun_remove_cmd(cmd); 3428 transport_cmd_check_stop_to_fabric(cmd); 3429 return 0; 3430 } 3431 3432 INIT_WORK(&cmd->work, target_tmr_work); 3433 queue_work(cmd->se_dev->tmr_wq, &cmd->work); 3434 return 0; 3435 } 3436 EXPORT_SYMBOL(transport_generic_handle_tmr); 3437 3438 bool 3439 target_check_wce(struct se_device *dev) 3440 { 3441 bool wce = false; 3442 3443 if (dev->transport->get_write_cache) 3444 wce = dev->transport->get_write_cache(dev); 3445 else if (dev->dev_attrib.emulate_write_cache > 0) 3446 wce = true; 3447 3448 return wce; 3449 } 3450 3451 bool 3452 target_check_fua(struct se_device *dev) 3453 { 3454 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0; 3455 } 3456