1 /******************************************************************************* 2 * Filename: target_core_transport.c 3 * 4 * This file contains the Generic Target Engine Core. 5 * 6 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. 7 * Copyright (c) 2005, 2006, 2007 SBE, Inc. 8 * Copyright (c) 2007-2010 Rising Tide Systems 9 * Copyright (c) 2008-2010 Linux-iSCSI.org 10 * 11 * Nicholas A. Bellinger <nab@kernel.org> 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of the GNU General Public License as published by 15 * the Free Software Foundation; either version 2 of the License, or 16 * (at your option) any later version. 17 * 18 * This program is distributed in the hope that it will be useful, 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 * GNU General Public License for more details. 22 * 23 * You should have received a copy of the GNU General Public License 24 * along with this program; if not, write to the Free Software 25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 26 * 27 ******************************************************************************/ 28 29 #include <linux/net.h> 30 #include <linux/delay.h> 31 #include <linux/string.h> 32 #include <linux/timer.h> 33 #include <linux/slab.h> 34 #include <linux/blkdev.h> 35 #include <linux/spinlock.h> 36 #include <linux/kthread.h> 37 #include <linux/in.h> 38 #include <linux/cdrom.h> 39 #include <linux/module.h> 40 #include <linux/ratelimit.h> 41 #include <asm/unaligned.h> 42 #include <net/sock.h> 43 #include <net/tcp.h> 44 #include <scsi/scsi.h> 45 #include <scsi/scsi_cmnd.h> 46 #include <scsi/scsi_tcq.h> 47 48 #include <target/target_core_base.h> 49 #include <target/target_core_backend.h> 50 #include <target/target_core_fabric.h> 51 #include <target/target_core_configfs.h> 52 53 #include "target_core_internal.h" 54 #include "target_core_alua.h" 55 #include "target_core_pr.h" 56 #include "target_core_ua.h" 57 58 static int sub_api_initialized; 59 60 static struct workqueue_struct *target_completion_wq; 61 static struct kmem_cache *se_sess_cache; 62 struct kmem_cache *se_ua_cache; 63 struct kmem_cache *t10_pr_reg_cache; 64 struct kmem_cache *t10_alua_lu_gp_cache; 65 struct kmem_cache *t10_alua_lu_gp_mem_cache; 66 struct kmem_cache *t10_alua_tg_pt_gp_cache; 67 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; 68 69 static int transport_generic_write_pending(struct se_cmd *); 70 static int transport_processing_thread(void *param); 71 static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *); 72 static void transport_complete_task_attr(struct se_cmd *cmd); 73 static void transport_handle_queue_full(struct se_cmd *cmd, 74 struct se_device *dev); 75 static void transport_free_dev_tasks(struct se_cmd *cmd); 76 static int transport_generic_get_mem(struct se_cmd *cmd); 77 static void transport_put_cmd(struct se_cmd *cmd); 78 static void transport_remove_cmd_from_queue(struct se_cmd *cmd); 79 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); 80 static void target_complete_ok_work(struct work_struct *work); 81 82 int init_se_kmem_caches(void) 83 { 84 se_sess_cache = kmem_cache_create("se_sess_cache", 85 sizeof(struct se_session), __alignof__(struct se_session), 86 0, NULL); 87 if (!se_sess_cache) { 88 pr_err("kmem_cache_create() for struct se_session" 89 " failed\n"); 90 goto out; 91 } 92 se_ua_cache = 
kmem_cache_create("se_ua_cache", 93 sizeof(struct se_ua), __alignof__(struct se_ua), 94 0, NULL); 95 if (!se_ua_cache) { 96 pr_err("kmem_cache_create() for struct se_ua failed\n"); 97 goto out_free_sess_cache; 98 } 99 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", 100 sizeof(struct t10_pr_registration), 101 __alignof__(struct t10_pr_registration), 0, NULL); 102 if (!t10_pr_reg_cache) { 103 pr_err("kmem_cache_create() for struct t10_pr_registration" 104 " failed\n"); 105 goto out_free_ua_cache; 106 } 107 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", 108 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), 109 0, NULL); 110 if (!t10_alua_lu_gp_cache) { 111 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" 112 " failed\n"); 113 goto out_free_pr_reg_cache; 114 } 115 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", 116 sizeof(struct t10_alua_lu_gp_member), 117 __alignof__(struct t10_alua_lu_gp_member), 0, NULL); 118 if (!t10_alua_lu_gp_mem_cache) { 119 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" 120 "cache failed\n"); 121 goto out_free_lu_gp_cache; 122 } 123 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", 124 sizeof(struct t10_alua_tg_pt_gp), 125 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); 126 if (!t10_alua_tg_pt_gp_cache) { 127 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 128 "cache failed\n"); 129 goto out_free_lu_gp_mem_cache; 130 } 131 t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( 132 "t10_alua_tg_pt_gp_mem_cache", 133 sizeof(struct t10_alua_tg_pt_gp_member), 134 __alignof__(struct t10_alua_tg_pt_gp_member), 135 0, NULL); 136 if (!t10_alua_tg_pt_gp_mem_cache) { 137 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 138 "mem_t failed\n"); 139 goto out_free_tg_pt_gp_cache; 140 } 141 142 target_completion_wq = alloc_workqueue("target_completion", 143 WQ_MEM_RECLAIM, 0); 144 if (!target_completion_wq) 145 goto out_free_tg_pt_gp_mem_cache; 146 147 return 0; 148 149 out_free_tg_pt_gp_mem_cache: 150 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 151 out_free_tg_pt_gp_cache: 152 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 153 out_free_lu_gp_mem_cache: 154 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 155 out_free_lu_gp_cache: 156 kmem_cache_destroy(t10_alua_lu_gp_cache); 157 out_free_pr_reg_cache: 158 kmem_cache_destroy(t10_pr_reg_cache); 159 out_free_ua_cache: 160 kmem_cache_destroy(se_ua_cache); 161 out_free_sess_cache: 162 kmem_cache_destroy(se_sess_cache); 163 out: 164 return -ENOMEM; 165 } 166 167 void release_se_kmem_caches(void) 168 { 169 destroy_workqueue(target_completion_wq); 170 kmem_cache_destroy(se_sess_cache); 171 kmem_cache_destroy(se_ua_cache); 172 kmem_cache_destroy(t10_pr_reg_cache); 173 kmem_cache_destroy(t10_alua_lu_gp_cache); 174 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 175 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 176 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 177 } 178 179 /* This code ensures unique mib indexes are handed out. 
*/ 180 static DEFINE_SPINLOCK(scsi_mib_index_lock); 181 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; 182 183 /* 184 * Allocate a new row index for the entry type specified 185 */ 186 u32 scsi_get_new_index(scsi_index_t type) 187 { 188 u32 new_index; 189 190 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); 191 192 spin_lock(&scsi_mib_index_lock); 193 new_index = ++scsi_mib_index[type]; 194 spin_unlock(&scsi_mib_index_lock); 195 196 return new_index; 197 } 198 199 static void transport_init_queue_obj(struct se_queue_obj *qobj) 200 { 201 atomic_set(&qobj->queue_cnt, 0); 202 INIT_LIST_HEAD(&qobj->qobj_list); 203 init_waitqueue_head(&qobj->thread_wq); 204 spin_lock_init(&qobj->cmd_queue_lock); 205 } 206 207 void transport_subsystem_check_init(void) 208 { 209 int ret; 210 211 if (sub_api_initialized) 212 return; 213 214 ret = request_module("target_core_iblock"); 215 if (ret != 0) 216 pr_err("Unable to load target_core_iblock\n"); 217 218 ret = request_module("target_core_file"); 219 if (ret != 0) 220 pr_err("Unable to load target_core_file\n"); 221 222 ret = request_module("target_core_pscsi"); 223 if (ret != 0) 224 pr_err("Unable to load target_core_pscsi\n"); 225 226 ret = request_module("target_core_stgt"); 227 if (ret != 0) 228 pr_err("Unable to load target_core_stgt\n"); 229 230 sub_api_initialized = 1; 231 return; 232 } 233 234 struct se_session *transport_init_session(void) 235 { 236 struct se_session *se_sess; 237 238 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 239 if (!se_sess) { 240 pr_err("Unable to allocate struct se_session from" 241 " se_sess_cache\n"); 242 return ERR_PTR(-ENOMEM); 243 } 244 INIT_LIST_HEAD(&se_sess->sess_list); 245 INIT_LIST_HEAD(&se_sess->sess_acl_list); 246 INIT_LIST_HEAD(&se_sess->sess_cmd_list); 247 INIT_LIST_HEAD(&se_sess->sess_wait_list); 248 spin_lock_init(&se_sess->sess_cmd_lock); 249 kref_init(&se_sess->sess_kref); 250 251 return se_sess; 252 } 253 EXPORT_SYMBOL(transport_init_session); 254 255 /* 256 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called. 257 */ 258 void __transport_register_session( 259 struct se_portal_group *se_tpg, 260 struct se_node_acl *se_nacl, 261 struct se_session *se_sess, 262 void *fabric_sess_ptr) 263 { 264 unsigned char buf[PR_REG_ISID_LEN]; 265 266 se_sess->se_tpg = se_tpg; 267 se_sess->fabric_sess_ptr = fabric_sess_ptr; 268 /* 269 * Used by struct se_node_acl's under ConfigFS to locate active se_session-t 270 * 271 * Only set for struct se_session's that will actually be moving I/O. 272 * eg: *NOT* discovery sessions. 273 */ 274 if (se_nacl) { 275 /* 276 * If the fabric module supports an ISID based TransportID, 277 * save this value in binary from the fabric I_T Nexus now. 278 */ 279 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { 280 memset(&buf[0], 0, PR_REG_ISID_LEN); 281 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, 282 &buf[0], PR_REG_ISID_LEN); 283 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); 284 } 285 kref_get(&se_nacl->acl_kref); 286 287 spin_lock_irq(&se_nacl->nacl_sess_lock); 288 /* 289 * The se_nacl->nacl_sess pointer will be set to the 290 * last active I_T Nexus for each struct se_node_acl. 
291 */ 292 se_nacl->nacl_sess = se_sess; 293 294 list_add_tail(&se_sess->sess_acl_list, 295 &se_nacl->acl_sess_list); 296 spin_unlock_irq(&se_nacl->nacl_sess_lock); 297 } 298 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 299 300 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 301 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); 302 } 303 EXPORT_SYMBOL(__transport_register_session); 304 305 void transport_register_session( 306 struct se_portal_group *se_tpg, 307 struct se_node_acl *se_nacl, 308 struct se_session *se_sess, 309 void *fabric_sess_ptr) 310 { 311 unsigned long flags; 312 313 spin_lock_irqsave(&se_tpg->session_lock, flags); 314 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); 315 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 316 } 317 EXPORT_SYMBOL(transport_register_session); 318 319 static void target_release_session(struct kref *kref) 320 { 321 struct se_session *se_sess = container_of(kref, 322 struct se_session, sess_kref); 323 struct se_portal_group *se_tpg = se_sess->se_tpg; 324 325 se_tpg->se_tpg_tfo->close_session(se_sess); 326 } 327 328 void target_get_session(struct se_session *se_sess) 329 { 330 kref_get(&se_sess->sess_kref); 331 } 332 EXPORT_SYMBOL(target_get_session); 333 334 int target_put_session(struct se_session *se_sess) 335 { 336 return kref_put(&se_sess->sess_kref, target_release_session); 337 } 338 EXPORT_SYMBOL(target_put_session); 339 340 static void target_complete_nacl(struct kref *kref) 341 { 342 struct se_node_acl *nacl = container_of(kref, 343 struct se_node_acl, acl_kref); 344 345 complete(&nacl->acl_free_comp); 346 } 347 348 void target_put_nacl(struct se_node_acl *nacl) 349 { 350 kref_put(&nacl->acl_kref, target_complete_nacl); 351 } 352 353 void transport_deregister_session_configfs(struct se_session *se_sess) 354 { 355 struct se_node_acl *se_nacl; 356 unsigned long flags; 357 /* 358 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 359 */ 360 se_nacl = se_sess->se_node_acl; 361 if (se_nacl) { 362 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 363 if (se_nacl->acl_stop == 0) 364 list_del(&se_sess->sess_acl_list); 365 /* 366 * If the session list is empty, then clear the pointer. 367 * Otherwise, set the struct se_session pointer from the tail 368 * element of the per struct se_node_acl active session list. 
369 */ 370 if (list_empty(&se_nacl->acl_sess_list)) 371 se_nacl->nacl_sess = NULL; 372 else { 373 se_nacl->nacl_sess = container_of( 374 se_nacl->acl_sess_list.prev, 375 struct se_session, sess_acl_list); 376 } 377 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 378 } 379 } 380 EXPORT_SYMBOL(transport_deregister_session_configfs); 381 382 void transport_free_session(struct se_session *se_sess) 383 { 384 kmem_cache_free(se_sess_cache, se_sess); 385 } 386 EXPORT_SYMBOL(transport_free_session); 387 388 void transport_deregister_session(struct se_session *se_sess) 389 { 390 struct se_portal_group *se_tpg = se_sess->se_tpg; 391 struct target_core_fabric_ops *se_tfo; 392 struct se_node_acl *se_nacl; 393 unsigned long flags; 394 bool comp_nacl = true; 395 396 if (!se_tpg) { 397 transport_free_session(se_sess); 398 return; 399 } 400 se_tfo = se_tpg->se_tpg_tfo; 401 402 spin_lock_irqsave(&se_tpg->session_lock, flags); 403 list_del(&se_sess->sess_list); 404 se_sess->se_tpg = NULL; 405 se_sess->fabric_sess_ptr = NULL; 406 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 407 408 /* 409 * Determine if we need to do extra work for this initiator node's 410 * struct se_node_acl if it had been previously dynamically generated. 411 */ 412 se_nacl = se_sess->se_node_acl; 413 414 spin_lock_irqsave(&se_tpg->acl_node_lock, flags); 415 if (se_nacl && se_nacl->dynamic_node_acl) { 416 if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 417 list_del(&se_nacl->acl_list); 418 se_tpg->num_node_acls--; 419 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); 420 core_tpg_wait_for_nacl_pr_ref(se_nacl); 421 core_free_device_list_for_node(se_nacl, se_tpg); 422 se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl); 423 424 comp_nacl = false; 425 spin_lock_irqsave(&se_tpg->acl_node_lock, flags); 426 } 427 } 428 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); 429 430 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 431 se_tpg->se_tpg_tfo->get_fabric_name()); 432 /* 433 * If last kref is dropping now for an explict NodeACL, awake sleeping 434 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 435 * removal context. 436 */ 437 if (se_nacl && comp_nacl == true) 438 target_put_nacl(se_nacl); 439 440 transport_free_session(se_sess); 441 } 442 EXPORT_SYMBOL(transport_deregister_session); 443 444 /* 445 * Called with cmd->t_state_lock held. 446 */ 447 static void transport_all_task_dev_remove_state(struct se_cmd *cmd) 448 { 449 struct se_device *dev = cmd->se_dev; 450 struct se_task *task; 451 unsigned long flags; 452 453 if (!dev) 454 return; 455 456 list_for_each_entry(task, &cmd->t_task_list, t_list) { 457 if (task->task_flags & TF_ACTIVE) 458 continue; 459 460 spin_lock_irqsave(&dev->execute_task_lock, flags); 461 if (task->t_state_active) { 462 pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n", 463 cmd->se_tfo->get_task_tag(cmd), dev, task); 464 465 list_del(&task->t_state_list); 466 atomic_dec(&cmd->t_task_cdbs_ex_left); 467 task->t_state_active = false; 468 } 469 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 470 } 471 472 } 473 474 /* transport_cmd_check_stop(): 475 * 476 * 'transport_off = 1' determines if CMD_T_ACTIVE should be cleared. 477 * 'transport_off = 2' determines if task_dev_state should be removed. 478 * 479 * A non-zero u8 t_state sets cmd->t_state. 480 * Returns 1 when command is stopped, else 0. 
*/ 482 static int transport_cmd_check_stop( 483 struct se_cmd *cmd, 484 int transport_off, 485 u8 t_state) 486 { 487 unsigned long flags; 488 489 spin_lock_irqsave(&cmd->t_state_lock, flags); 490 /* 491 * Determine if IOCTL context caller is requesting the stopping of this 492 * command for LUN shutdown purposes. 493 */ 494 if (cmd->transport_state & CMD_T_LUN_STOP) { 495 pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", 496 __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); 497 498 cmd->transport_state &= ~CMD_T_ACTIVE; 499 if (transport_off == 2) 500 transport_all_task_dev_remove_state(cmd); 501 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 502 503 complete(&cmd->transport_lun_stop_comp); 504 return 1; 505 } 506 /* 507 * Determine if frontend context caller is requesting the stopping of 508 * this command for frontend exceptions. 509 */ 510 if (cmd->transport_state & CMD_T_STOP) { 511 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", 512 __func__, __LINE__, 513 cmd->se_tfo->get_task_tag(cmd)); 514 515 if (transport_off == 2) 516 transport_all_task_dev_remove_state(cmd); 517 518 /* 519 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff 520 * to FE. 521 */ 522 if (transport_off == 2) 523 cmd->se_lun = NULL; 524 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 525 526 complete(&cmd->t_transport_stop_comp); 527 return 1; 528 } 529 if (transport_off) { 530 cmd->transport_state &= ~CMD_T_ACTIVE; 531 if (transport_off == 2) { 532 transport_all_task_dev_remove_state(cmd); 533 /* 534 * Clear struct se_cmd->se_lun before the transport_off == 2 535 * handoff to fabric module. 536 */ 537 cmd->se_lun = NULL; 538 /* 539 * Some fabric modules like tcm_loop can release 540 * their internally allocated I/O reference and 541 * struct se_cmd now. 542 * 543 * Fabric modules are expected to return '1' here if the 544 * se_cmd being passed is released at this point, 545 * or zero if not being released.
546 */ 547 if (cmd->se_tfo->check_stop_free != NULL) { 548 spin_unlock_irqrestore( 549 &cmd->t_state_lock, flags); 550 551 return cmd->se_tfo->check_stop_free(cmd); 552 } 553 } 554 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 555 556 return 0; 557 } else if (t_state) 558 cmd->t_state = t_state; 559 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 560 561 return 0; 562 } 563 564 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) 565 { 566 return transport_cmd_check_stop(cmd, 2, 0); 567 } 568 569 static void transport_lun_remove_cmd(struct se_cmd *cmd) 570 { 571 struct se_lun *lun = cmd->se_lun; 572 unsigned long flags; 573 574 if (!lun) 575 return; 576 577 spin_lock_irqsave(&cmd->t_state_lock, flags); 578 if (cmd->transport_state & CMD_T_DEV_ACTIVE) { 579 cmd->transport_state &= ~CMD_T_DEV_ACTIVE; 580 transport_all_task_dev_remove_state(cmd); 581 } 582 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 583 584 spin_lock_irqsave(&lun->lun_cmd_lock, flags); 585 if (!list_empty(&cmd->se_lun_node)) 586 list_del_init(&cmd->se_lun_node); 587 spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); 588 } 589 590 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 591 { 592 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 593 transport_lun_remove_cmd(cmd); 594 595 if (transport_cmd_check_stop_to_fabric(cmd)) 596 return; 597 if (remove) { 598 transport_remove_cmd_from_queue(cmd); 599 transport_put_cmd(cmd); 600 } 601 } 602 603 static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state, 604 bool at_head) 605 { 606 struct se_device *dev = cmd->se_dev; 607 struct se_queue_obj *qobj = &dev->dev_queue_obj; 608 unsigned long flags; 609 610 if (t_state) { 611 spin_lock_irqsave(&cmd->t_state_lock, flags); 612 cmd->t_state = t_state; 613 cmd->transport_state |= CMD_T_ACTIVE; 614 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 615 } 616 617 spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 618 619 /* If the cmd is already on the list, remove it before we add it */ 620 if (!list_empty(&cmd->se_queue_node)) 621 list_del(&cmd->se_queue_node); 622 else 623 atomic_inc(&qobj->queue_cnt); 624 625 if (at_head) 626 list_add(&cmd->se_queue_node, &qobj->qobj_list); 627 else 628 list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); 629 cmd->transport_state |= CMD_T_QUEUED; 630 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 631 632 wake_up_interruptible(&qobj->thread_wq); 633 } 634 635 static struct se_cmd * 636 transport_get_cmd_from_queue(struct se_queue_obj *qobj) 637 { 638 struct se_cmd *cmd; 639 unsigned long flags; 640 641 spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 642 if (list_empty(&qobj->qobj_list)) { 643 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 644 return NULL; 645 } 646 cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); 647 648 cmd->transport_state &= ~CMD_T_QUEUED; 649 list_del_init(&cmd->se_queue_node); 650 atomic_dec(&qobj->queue_cnt); 651 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 652 653 return cmd; 654 } 655 656 static void transport_remove_cmd_from_queue(struct se_cmd *cmd) 657 { 658 struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj; 659 unsigned long flags; 660 661 spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 662 if (!(cmd->transport_state & CMD_T_QUEUED)) { 663 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 664 return; 665 } 666 cmd->transport_state &= ~CMD_T_QUEUED; 667 atomic_dec(&qobj->queue_cnt); 668 list_del_init(&cmd->se_queue_node); 669 spin_unlock_irqrestore(&qobj->cmd_queue_lock, 
flags); 670 } 671 672 /* 673 * Completion function used by TCM subsystem plugins (such as FILEIO) 674 * for queueing up response from struct se_subsystem_api->do_task() 675 */ 676 void transport_complete_sync_cache(struct se_cmd *cmd, int good) 677 { 678 struct se_task *task = list_entry(cmd->t_task_list.next, 679 struct se_task, t_list); 680 681 if (good) { 682 cmd->scsi_status = SAM_STAT_GOOD; 683 task->task_scsi_status = GOOD; 684 } else { 685 task->task_scsi_status = SAM_STAT_CHECK_CONDITION; 686 task->task_se_cmd->scsi_sense_reason = 687 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 688 689 } 690 691 transport_complete_task(task, good); 692 } 693 EXPORT_SYMBOL(transport_complete_sync_cache); 694 695 static void target_complete_failure_work(struct work_struct *work) 696 { 697 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 698 699 transport_generic_request_failure(cmd); 700 } 701 702 /* transport_complete_task(): 703 * 704 * Called from interrupt and non interrupt context depending 705 * on the transport plugin. 706 */ 707 void transport_complete_task(struct se_task *task, int success) 708 { 709 struct se_cmd *cmd = task->task_se_cmd; 710 struct se_device *dev = cmd->se_dev; 711 unsigned long flags; 712 713 spin_lock_irqsave(&cmd->t_state_lock, flags); 714 task->task_flags &= ~TF_ACTIVE; 715 716 /* 717 * See if any sense data exists, if so set the TASK_SENSE flag. 718 * Also check for any other post completion work that needs to be 719 * done by the plugins. 720 */ 721 if (dev && dev->transport->transport_complete) { 722 if (dev->transport->transport_complete(task) != 0) { 723 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; 724 task->task_flags |= TF_HAS_SENSE; 725 success = 1; 726 } 727 } 728 729 /* 730 * See if we are waiting for outstanding struct se_task 731 * to complete for an exception condition 732 */ 733 if (task->task_flags & TF_REQUEST_STOP) { 734 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 735 complete(&task->task_stop_comp); 736 return; 737 } 738 739 if (!success) 740 cmd->transport_state |= CMD_T_FAILED; 741 742 /* 743 * Decrement the outstanding t_task_cdbs_left count. The last 744 * struct se_task from struct se_cmd will complete itself into the 745 * device queue depending upon int success. 746 */ 747 if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { 748 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 749 return; 750 } 751 /* 752 * Check for case where an explict ABORT_TASK has been received 753 * and transport_wait_for_tasks() will be waiting for completion.. 754 */ 755 if (cmd->transport_state & CMD_T_ABORTED && 756 cmd->transport_state & CMD_T_STOP) { 757 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 758 complete(&cmd->t_transport_stop_comp); 759 return; 760 } else if (cmd->transport_state & CMD_T_FAILED) { 761 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 762 INIT_WORK(&cmd->work, target_complete_failure_work); 763 } else { 764 INIT_WORK(&cmd->work, target_complete_ok_work); 765 } 766 767 cmd->t_state = TRANSPORT_COMPLETE; 768 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 769 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 770 771 queue_work(target_completion_wq, &cmd->work); 772 } 773 EXPORT_SYMBOL(transport_complete_task); 774 775 /* 776 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's 777 * struct se_task list are ready to be added to the active execution list 778 * struct se_device 779 780 * Called with se_dev_t->execute_task_lock called. 
*/ 782 static inline int transport_add_task_check_sam_attr( 783 struct se_task *task, 784 struct se_task *task_prev, 785 struct se_device *dev) 786 { 787 /* 788 * No SAM Task attribute emulation enabled, add to tail of 789 * execution queue 790 */ 791 if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) { 792 list_add_tail(&task->t_execute_list, &dev->execute_task_list); 793 return 0; 794 } 795 /* 796 * HEAD_OF_QUEUE attribute for received CDB, which means 797 * the first task that is associated with a struct se_cmd goes to 798 * head of the struct se_device->execute_task_list, and task_prev 799 * after that for each subsequent task 800 */ 801 if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) { 802 list_add(&task->t_execute_list, 803 (task_prev != NULL) ? 804 &task_prev->t_execute_list : 805 &dev->execute_task_list); 806 807 pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x" 808 " in execution queue\n", 809 task->task_se_cmd->t_task_cdb[0]); 810 return 1; 811 } 812 /* 813 * For ORDERED, SIMPLE or UNTAGGED attribute tasks, once they have been 814 * transitioned from Dormant -> Active state they are added to the end 815 * of the struct se_device->execute_task_list 816 */ 817 list_add_tail(&task->t_execute_list, &dev->execute_task_list); 818 return 0; 819 } 820 821 /* __transport_add_task_to_execute_queue(): 822 * 823 * Called with se_dev_t->execute_task_lock held. 824 */ 825 static void __transport_add_task_to_execute_queue( 826 struct se_task *task, 827 struct se_task *task_prev, 828 struct se_device *dev) 829 { 830 int head_of_queue; 831 832 head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev); 833 atomic_inc(&dev->execute_tasks); 834 835 if (task->t_state_active) 836 return; 837 /* 838 * Determine if this task needs to go to HEAD_OF_QUEUE for the 839 * state list as well. Running with SAM Task Attribute emulation 840 * will always return head_of_queue == 0 here 841 */ 842 if (head_of_queue) 843 list_add(&task->t_state_list, (task_prev) ?
844 &task_prev->t_state_list : 845 &dev->state_task_list); 846 else 847 list_add_tail(&task->t_state_list, &dev->state_task_list); 848 849 task->t_state_active = true; 850 851 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", 852 task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), 853 task, dev); 854 } 855 856 static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) 857 { 858 struct se_device *dev = cmd->se_dev; 859 struct se_task *task; 860 unsigned long flags; 861 862 spin_lock_irqsave(&cmd->t_state_lock, flags); 863 list_for_each_entry(task, &cmd->t_task_list, t_list) { 864 spin_lock(&dev->execute_task_lock); 865 if (!task->t_state_active) { 866 list_add_tail(&task->t_state_list, 867 &dev->state_task_list); 868 task->t_state_active = true; 869 870 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", 871 task->task_se_cmd->se_tfo->get_task_tag( 872 task->task_se_cmd), task, dev); 873 } 874 spin_unlock(&dev->execute_task_lock); 875 } 876 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 877 } 878 879 static void __transport_add_tasks_from_cmd(struct se_cmd *cmd) 880 { 881 struct se_device *dev = cmd->se_dev; 882 struct se_task *task, *task_prev = NULL; 883 884 list_for_each_entry(task, &cmd->t_task_list, t_list) { 885 if (!list_empty(&task->t_execute_list)) 886 continue; 887 /* 888 * __transport_add_task_to_execute_queue() handles the 889 * SAM Task Attribute emulation if enabled 890 */ 891 __transport_add_task_to_execute_queue(task, task_prev, dev); 892 task_prev = task; 893 } 894 } 895 896 static void transport_add_tasks_from_cmd(struct se_cmd *cmd) 897 { 898 unsigned long flags; 899 struct se_device *dev = cmd->se_dev; 900 901 spin_lock_irqsave(&dev->execute_task_lock, flags); 902 __transport_add_tasks_from_cmd(cmd); 903 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 904 } 905 906 void __transport_remove_task_from_execute_queue(struct se_task *task, 907 struct se_device *dev) 908 { 909 list_del_init(&task->t_execute_list); 910 atomic_dec(&dev->execute_tasks); 911 } 912 913 static void transport_remove_task_from_execute_queue( 914 struct se_task *task, 915 struct se_device *dev) 916 { 917 unsigned long flags; 918 919 if (WARN_ON(list_empty(&task->t_execute_list))) 920 return; 921 922 spin_lock_irqsave(&dev->execute_task_lock, flags); 923 __transport_remove_task_from_execute_queue(task, dev); 924 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 925 } 926 927 /* 928 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status 929 */ 930 931 static void target_qf_do_work(struct work_struct *work) 932 { 933 struct se_device *dev = container_of(work, struct se_device, 934 qf_work_queue); 935 LIST_HEAD(qf_cmd_list); 936 struct se_cmd *cmd, *cmd_tmp; 937 938 spin_lock_irq(&dev->qf_cmd_lock); 939 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); 940 spin_unlock_irq(&dev->qf_cmd_lock); 941 942 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 943 list_del(&cmd->se_qf_node); 944 atomic_dec(&dev->dev_qf_count); 945 smp_mb__after_atomic_dec(); 946 947 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 948 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 949 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : 950 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? 
"WRITE_PENDING" 951 : "UNKNOWN"); 952 953 transport_add_cmd_to_queue(cmd, cmd->t_state, true); 954 } 955 } 956 957 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 958 { 959 switch (cmd->data_direction) { 960 case DMA_NONE: 961 return "NONE"; 962 case DMA_FROM_DEVICE: 963 return "READ"; 964 case DMA_TO_DEVICE: 965 return "WRITE"; 966 case DMA_BIDIRECTIONAL: 967 return "BIDI"; 968 default: 969 break; 970 } 971 972 return "UNKNOWN"; 973 } 974 975 void transport_dump_dev_state( 976 struct se_device *dev, 977 char *b, 978 int *bl) 979 { 980 *bl += sprintf(b + *bl, "Status: "); 981 switch (dev->dev_status) { 982 case TRANSPORT_DEVICE_ACTIVATED: 983 *bl += sprintf(b + *bl, "ACTIVATED"); 984 break; 985 case TRANSPORT_DEVICE_DEACTIVATED: 986 *bl += sprintf(b + *bl, "DEACTIVATED"); 987 break; 988 case TRANSPORT_DEVICE_SHUTDOWN: 989 *bl += sprintf(b + *bl, "SHUTDOWN"); 990 break; 991 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: 992 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: 993 *bl += sprintf(b + *bl, "OFFLINE"); 994 break; 995 default: 996 *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status); 997 break; 998 } 999 1000 *bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d", 1001 atomic_read(&dev->execute_tasks), dev->queue_depth); 1002 *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", 1003 dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors); 1004 *bl += sprintf(b + *bl, " "); 1005 } 1006 1007 void transport_dump_vpd_proto_id( 1008 struct t10_vpd *vpd, 1009 unsigned char *p_buf, 1010 int p_buf_len) 1011 { 1012 unsigned char buf[VPD_TMP_BUF_SIZE]; 1013 int len; 1014 1015 memset(buf, 0, VPD_TMP_BUF_SIZE); 1016 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 1017 1018 switch (vpd->protocol_identifier) { 1019 case 0x00: 1020 sprintf(buf+len, "Fibre Channel\n"); 1021 break; 1022 case 0x10: 1023 sprintf(buf+len, "Parallel SCSI\n"); 1024 break; 1025 case 0x20: 1026 sprintf(buf+len, "SSA\n"); 1027 break; 1028 case 0x30: 1029 sprintf(buf+len, "IEEE 1394\n"); 1030 break; 1031 case 0x40: 1032 sprintf(buf+len, "SCSI Remote Direct Memory Access" 1033 " Protocol\n"); 1034 break; 1035 case 0x50: 1036 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 1037 break; 1038 case 0x60: 1039 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 1040 break; 1041 case 0x70: 1042 sprintf(buf+len, "Automation/Drive Interface Transport" 1043 " Protocol\n"); 1044 break; 1045 case 0x80: 1046 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 1047 break; 1048 default: 1049 sprintf(buf+len, "Unknown 0x%02x\n", 1050 vpd->protocol_identifier); 1051 break; 1052 } 1053 1054 if (p_buf) 1055 strncpy(p_buf, buf, p_buf_len); 1056 else 1057 pr_debug("%s", buf); 1058 } 1059 1060 void 1061 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 1062 { 1063 /* 1064 * Check if the Protocol Identifier Valid (PIV) bit is set.. 
1065 * 1066 * from spc3r23.pdf section 7.5.1 1067 */ 1068 if (page_83[1] & 0x80) { 1069 vpd->protocol_identifier = (page_83[0] & 0xf0); 1070 vpd->protocol_identifier_set = 1; 1071 transport_dump_vpd_proto_id(vpd, NULL, 0); 1072 } 1073 } 1074 EXPORT_SYMBOL(transport_set_vpd_proto_id); 1075 1076 int transport_dump_vpd_assoc( 1077 struct t10_vpd *vpd, 1078 unsigned char *p_buf, 1079 int p_buf_len) 1080 { 1081 unsigned char buf[VPD_TMP_BUF_SIZE]; 1082 int ret = 0; 1083 int len; 1084 1085 memset(buf, 0, VPD_TMP_BUF_SIZE); 1086 len = sprintf(buf, "T10 VPD Identifier Association: "); 1087 1088 switch (vpd->association) { 1089 case 0x00: 1090 sprintf(buf+len, "addressed logical unit\n"); 1091 break; 1092 case 0x10: 1093 sprintf(buf+len, "target port\n"); 1094 break; 1095 case 0x20: 1096 sprintf(buf+len, "SCSI target device\n"); 1097 break; 1098 default: 1099 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); 1100 ret = -EINVAL; 1101 break; 1102 } 1103 1104 if (p_buf) 1105 strncpy(p_buf, buf, p_buf_len); 1106 else 1107 pr_debug("%s", buf); 1108 1109 return ret; 1110 } 1111 1112 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) 1113 { 1114 /* 1115 * The VPD identification association.. 1116 * 1117 * from spc3r23.pdf Section 7.6.3.1 Table 297 1118 */ 1119 vpd->association = (page_83[1] & 0x30); 1120 return transport_dump_vpd_assoc(vpd, NULL, 0); 1121 } 1122 EXPORT_SYMBOL(transport_set_vpd_assoc); 1123 1124 int transport_dump_vpd_ident_type( 1125 struct t10_vpd *vpd, 1126 unsigned char *p_buf, 1127 int p_buf_len) 1128 { 1129 unsigned char buf[VPD_TMP_BUF_SIZE]; 1130 int ret = 0; 1131 int len; 1132 1133 memset(buf, 0, VPD_TMP_BUF_SIZE); 1134 len = sprintf(buf, "T10 VPD Identifier Type: "); 1135 1136 switch (vpd->device_identifier_type) { 1137 case 0x00: 1138 sprintf(buf+len, "Vendor specific\n"); 1139 break; 1140 case 0x01: 1141 sprintf(buf+len, "T10 Vendor ID based\n"); 1142 break; 1143 case 0x02: 1144 sprintf(buf+len, "EUI-64 based\n"); 1145 break; 1146 case 0x03: 1147 sprintf(buf+len, "NAA\n"); 1148 break; 1149 case 0x04: 1150 sprintf(buf+len, "Relative target port identifier\n"); 1151 break; 1152 case 0x08: 1153 sprintf(buf+len, "SCSI name string\n"); 1154 break; 1155 default: 1156 sprintf(buf+len, "Unsupported: 0x%02x\n", 1157 vpd->device_identifier_type); 1158 ret = -EINVAL; 1159 break; 1160 } 1161 1162 if (p_buf) { 1163 if (p_buf_len < strlen(buf)+1) 1164 return -EINVAL; 1165 strncpy(p_buf, buf, p_buf_len); 1166 } else { 1167 pr_debug("%s", buf); 1168 } 1169 1170 return ret; 1171 } 1172 1173 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) 1174 { 1175 /* 1176 * The VPD identifier type.. 
1177 * 1178 * from spc3r23.pdf Section 7.6.3.1 Table 298 1179 */ 1180 vpd->device_identifier_type = (page_83[1] & 0x0f); 1181 return transport_dump_vpd_ident_type(vpd, NULL, 0); 1182 } 1183 EXPORT_SYMBOL(transport_set_vpd_ident_type); 1184 1185 int transport_dump_vpd_ident( 1186 struct t10_vpd *vpd, 1187 unsigned char *p_buf, 1188 int p_buf_len) 1189 { 1190 unsigned char buf[VPD_TMP_BUF_SIZE]; 1191 int ret = 0; 1192 1193 memset(buf, 0, VPD_TMP_BUF_SIZE); 1194 1195 switch (vpd->device_identifier_code_set) { 1196 case 0x01: /* Binary */ 1197 sprintf(buf, "T10 VPD Binary Device Identifier: %s\n", 1198 &vpd->device_identifier[0]); 1199 break; 1200 case 0x02: /* ASCII */ 1201 sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n", 1202 &vpd->device_identifier[0]); 1203 break; 1204 case 0x03: /* UTF-8 */ 1205 sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n", 1206 &vpd->device_identifier[0]); 1207 break; 1208 default: 1209 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" 1210 " 0x%02x", vpd->device_identifier_code_set); 1211 ret = -EINVAL; 1212 break; 1213 } 1214 1215 if (p_buf) 1216 strncpy(p_buf, buf, p_buf_len); 1217 else 1218 pr_debug("%s", buf); 1219 1220 return ret; 1221 } 1222 1223 int 1224 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) 1225 { 1226 static const char hex_str[] = "0123456789abcdef"; 1227 int j = 0, i = 4; /* offset to start of the identifer */ 1228 1229 /* 1230 * The VPD Code Set (encoding) 1231 * 1232 * from spc3r23.pdf Section 7.6.3.1 Table 296 1233 */ 1234 vpd->device_identifier_code_set = (page_83[0] & 0x0f); 1235 switch (vpd->device_identifier_code_set) { 1236 case 0x01: /* Binary */ 1237 vpd->device_identifier[j++] = 1238 hex_str[vpd->device_identifier_type]; 1239 while (i < (4 + page_83[3])) { 1240 vpd->device_identifier[j++] = 1241 hex_str[(page_83[i] & 0xf0) >> 4]; 1242 vpd->device_identifier[j++] = 1243 hex_str[page_83[i] & 0x0f]; 1244 i++; 1245 } 1246 break; 1247 case 0x02: /* ASCII */ 1248 case 0x03: /* UTF-8 */ 1249 while (i < (4 + page_83[3])) 1250 vpd->device_identifier[j++] = page_83[i++]; 1251 break; 1252 default: 1253 break; 1254 } 1255 1256 return transport_dump_vpd_ident(vpd, NULL, 0); 1257 } 1258 EXPORT_SYMBOL(transport_set_vpd_ident); 1259 1260 static void core_setup_task_attr_emulation(struct se_device *dev) 1261 { 1262 /* 1263 * If this device is from Target_Core_Mod/pSCSI, disable the 1264 * SAM Task Attribute emulation. 1265 * 1266 * This is currently not available in upsream Linux/SCSI Target 1267 * mode code, and is assumed to be disabled while using TCM/pSCSI. 
1268 */ 1269 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1270 dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; 1271 return; 1272 } 1273 1274 dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; 1275 pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" 1276 " device\n", dev->transport->name, 1277 dev->transport->get_device_rev(dev)); 1278 } 1279 1280 static void scsi_dump_inquiry(struct se_device *dev) 1281 { 1282 struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; 1283 char buf[17]; 1284 int i, device_type; 1285 /* 1286 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer 1287 */ 1288 for (i = 0; i < 8; i++) 1289 if (wwn->vendor[i] >= 0x20) 1290 buf[i] = wwn->vendor[i]; 1291 else 1292 buf[i] = ' '; 1293 buf[i] = '\0'; 1294 pr_debug(" Vendor: %s\n", buf); 1295 1296 for (i = 0; i < 16; i++) 1297 if (wwn->model[i] >= 0x20) 1298 buf[i] = wwn->model[i]; 1299 else 1300 buf[i] = ' '; 1301 buf[i] = '\0'; 1302 pr_debug(" Model: %s\n", buf); 1303 1304 for (i = 0; i < 4; i++) 1305 if (wwn->revision[i] >= 0x20) 1306 buf[i] = wwn->revision[i]; 1307 else 1308 buf[i] = ' '; 1309 buf[i] = '\0'; 1310 pr_debug(" Revision: %s\n", buf); 1311 1312 device_type = dev->transport->get_device_type(dev); 1313 pr_debug(" Type: %s ", scsi_device_type(device_type)); 1314 pr_debug(" ANSI SCSI revision: %02x\n", 1315 dev->transport->get_device_rev(dev)); 1316 } 1317 1318 struct se_device *transport_add_device_to_core_hba( 1319 struct se_hba *hba, 1320 struct se_subsystem_api *transport, 1321 struct se_subsystem_dev *se_dev, 1322 u32 device_flags, 1323 void *transport_dev, 1324 struct se_dev_limits *dev_limits, 1325 const char *inquiry_prod, 1326 const char *inquiry_rev) 1327 { 1328 int force_pt; 1329 struct se_device *dev; 1330 1331 dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); 1332 if (!dev) { 1333 pr_err("Unable to allocate memory for se_dev_t\n"); 1334 return NULL; 1335 } 1336 1337 transport_init_queue_obj(&dev->dev_queue_obj); 1338 dev->dev_flags = device_flags; 1339 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; 1340 dev->dev_ptr = transport_dev; 1341 dev->se_hba = hba; 1342 dev->se_sub_dev = se_dev; 1343 dev->transport = transport; 1344 INIT_LIST_HEAD(&dev->dev_list); 1345 INIT_LIST_HEAD(&dev->dev_sep_list); 1346 INIT_LIST_HEAD(&dev->dev_tmr_list); 1347 INIT_LIST_HEAD(&dev->execute_task_list); 1348 INIT_LIST_HEAD(&dev->delayed_cmd_list); 1349 INIT_LIST_HEAD(&dev->state_task_list); 1350 INIT_LIST_HEAD(&dev->qf_cmd_list); 1351 spin_lock_init(&dev->execute_task_lock); 1352 spin_lock_init(&dev->delayed_cmd_lock); 1353 spin_lock_init(&dev->dev_reservation_lock); 1354 spin_lock_init(&dev->dev_status_lock); 1355 spin_lock_init(&dev->se_port_lock); 1356 spin_lock_init(&dev->se_tmr_lock); 1357 spin_lock_init(&dev->qf_cmd_lock); 1358 atomic_set(&dev->dev_ordered_id, 0); 1359 1360 se_dev_set_default_attribs(dev, dev_limits); 1361 1362 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); 1363 dev->creation_time = get_jiffies_64(); 1364 spin_lock_init(&dev->stats_lock); 1365 1366 spin_lock(&hba->device_lock); 1367 list_add_tail(&dev->dev_list, &hba->hba_dev_list); 1368 hba->dev_count++; 1369 spin_unlock(&hba->device_lock); 1370 /* 1371 * Setup the SAM Task Attribute emulation for struct se_device 1372 */ 1373 core_setup_task_attr_emulation(dev); 1374 /* 1375 * Force PR and ALUA passthrough emulation with internal object use. 
1376 */ 1377 force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE); 1378 /* 1379 * Setup the Reservations infrastructure for struct se_device 1380 */ 1381 core_setup_reservations(dev, force_pt); 1382 /* 1383 * Setup the Asymmetric Logical Unit Assignment for struct se_device 1384 */ 1385 if (core_setup_alua(dev, force_pt) < 0) 1386 goto out; 1387 1388 /* 1389 * Startup the struct se_device processing thread 1390 */ 1391 dev->process_thread = kthread_run(transport_processing_thread, dev, 1392 "LIO_%s", dev->transport->name); 1393 if (IS_ERR(dev->process_thread)) { 1394 pr_err("Unable to create kthread: LIO_%s\n", 1395 dev->transport->name); 1396 goto out; 1397 } 1398 /* 1399 * Setup work_queue for QUEUE_FULL 1400 */ 1401 INIT_WORK(&dev->qf_work_queue, target_qf_do_work); 1402 /* 1403 * Preload the initial INQUIRY const values if we are doing 1404 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI 1405 * passthrough because this is being provided by the backend LLD. 1406 * This is required so that transport_get_inquiry() copies these 1407 * originals once back into DEV_T10_WWN(dev) for the virtual device 1408 * setup. 1409 */ 1410 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { 1411 if (!inquiry_prod || !inquiry_rev) { 1412 pr_err("All non TCM/pSCSI plugins require" 1413 " INQUIRY consts\n"); 1414 goto out; 1415 } 1416 1417 strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8); 1418 strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16); 1419 strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4); 1420 } 1421 scsi_dump_inquiry(dev); 1422 1423 return dev; 1424 out: 1425 kthread_stop(dev->process_thread); 1426 1427 spin_lock(&hba->device_lock); 1428 list_del(&dev->dev_list); 1429 hba->dev_count--; 1430 spin_unlock(&hba->device_lock); 1431 1432 se_release_vpd_for_dev(dev); 1433 1434 kfree(dev); 1435 1436 return NULL; 1437 } 1438 EXPORT_SYMBOL(transport_add_device_to_core_hba); 1439 1440 /* transport_generic_prepare_cdb(): 1441 * 1442 * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will 1443 * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2. 1444 * The point of this is since we are mapping iSCSI LUNs to 1445 * SCSI Target IDs having a non-zero LUN in the CDB will throw the 1446 * devices and HBAs for a loop. 
1447 */ 1448 static inline void transport_generic_prepare_cdb( 1449 unsigned char *cdb) 1450 { 1451 switch (cdb[0]) { 1452 case READ_10: /* SBC - RDProtect */ 1453 case READ_12: /* SBC - RDProtect */ 1454 case READ_16: /* SBC - RDProtect */ 1455 case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ 1456 case VERIFY: /* SBC - VRProtect */ 1457 case VERIFY_16: /* SBC - VRProtect */ 1458 case WRITE_VERIFY: /* SBC - VRProtect */ 1459 case WRITE_VERIFY_12: /* SBC - VRProtect */ 1460 break; 1461 default: 1462 cdb[1] &= 0x1f; /* clear logical unit number */ 1463 break; 1464 } 1465 } 1466 1467 static struct se_task * 1468 transport_generic_get_task(struct se_cmd *cmd, 1469 enum dma_data_direction data_direction) 1470 { 1471 struct se_task *task; 1472 struct se_device *dev = cmd->se_dev; 1473 1474 task = dev->transport->alloc_task(cmd->t_task_cdb); 1475 if (!task) { 1476 pr_err("Unable to allocate struct se_task\n"); 1477 return NULL; 1478 } 1479 1480 INIT_LIST_HEAD(&task->t_list); 1481 INIT_LIST_HEAD(&task->t_execute_list); 1482 INIT_LIST_HEAD(&task->t_state_list); 1483 init_completion(&task->task_stop_comp); 1484 task->task_se_cmd = cmd; 1485 task->task_data_direction = data_direction; 1486 1487 return task; 1488 } 1489 1490 static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); 1491 1492 /* 1493 * Used by fabric modules containing a local struct se_cmd within their 1494 * fabric dependent per I/O descriptor. 1495 */ 1496 void transport_init_se_cmd( 1497 struct se_cmd *cmd, 1498 struct target_core_fabric_ops *tfo, 1499 struct se_session *se_sess, 1500 u32 data_length, 1501 int data_direction, 1502 int task_attr, 1503 unsigned char *sense_buffer) 1504 { 1505 INIT_LIST_HEAD(&cmd->se_lun_node); 1506 INIT_LIST_HEAD(&cmd->se_delayed_node); 1507 INIT_LIST_HEAD(&cmd->se_qf_node); 1508 INIT_LIST_HEAD(&cmd->se_queue_node); 1509 INIT_LIST_HEAD(&cmd->se_cmd_list); 1510 INIT_LIST_HEAD(&cmd->t_task_list); 1511 init_completion(&cmd->transport_lun_fe_stop_comp); 1512 init_completion(&cmd->transport_lun_stop_comp); 1513 init_completion(&cmd->t_transport_stop_comp); 1514 init_completion(&cmd->cmd_wait_comp); 1515 spin_lock_init(&cmd->t_state_lock); 1516 cmd->transport_state = CMD_T_DEV_ACTIVE; 1517 1518 cmd->se_tfo = tfo; 1519 cmd->se_sess = se_sess; 1520 cmd->data_length = data_length; 1521 cmd->data_direction = data_direction; 1522 cmd->sam_task_attr = task_attr; 1523 cmd->sense_buffer = sense_buffer; 1524 } 1525 EXPORT_SYMBOL(transport_init_se_cmd); 1526 1527 static int transport_check_alloc_task_attr(struct se_cmd *cmd) 1528 { 1529 /* 1530 * Check if SAM Task Attribute emulation is enabled for this 1531 * struct se_device storage object 1532 */ 1533 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) 1534 return 0; 1535 1536 if (cmd->sam_task_attr == MSG_ACA_TAG) { 1537 pr_debug("SAM Task Attribute ACA" 1538 " emulation is not supported\n"); 1539 return -EINVAL; 1540 } 1541 /* 1542 * Used to determine when ORDERED commands should go from 1543 * Dormant to Active status. 1544 */ 1545 cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); 1546 smp_mb__after_atomic_inc(); 1547 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", 1548 cmd->se_ordered_id, cmd->sam_task_attr, 1549 cmd->se_dev->transport->name); 1550 return 0; 1551 } 1552 1553 /* transport_generic_allocate_tasks(): 1554 * 1555 * Called from fabric RX Thread. 
1556 */ 1557 int transport_generic_allocate_tasks( 1558 struct se_cmd *cmd, 1559 unsigned char *cdb) 1560 { 1561 int ret; 1562 1563 transport_generic_prepare_cdb(cdb); 1564 /* 1565 * Ensure that the received CDB is less than the max (252 + 8) bytes 1566 * for VARIABLE_LENGTH_CMD 1567 */ 1568 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { 1569 pr_err("Received SCSI CDB with command_size: %d that" 1570 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1571 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1572 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1573 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 1574 return -EINVAL; 1575 } 1576 /* 1577 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, 1578 * allocate the additional extended CDB buffer now.. Otherwise 1579 * setup the pointer from __t_task_cdb to t_task_cdb. 1580 */ 1581 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { 1582 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), 1583 GFP_KERNEL); 1584 if (!cmd->t_task_cdb) { 1585 pr_err("Unable to allocate cmd->t_task_cdb" 1586 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1587 scsi_command_size(cdb), 1588 (unsigned long)sizeof(cmd->__t_task_cdb)); 1589 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1590 cmd->scsi_sense_reason = 1591 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1592 return -ENOMEM; 1593 } 1594 } else 1595 cmd->t_task_cdb = &cmd->__t_task_cdb[0]; 1596 /* 1597 * Copy the original CDB into cmd-> 1598 */ 1599 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); 1600 /* 1601 * Setup the received CDB based on SCSI defined opcodes and 1602 * perform unit attention, persistent reservations and ALUA 1603 * checks for virtual device backends. The cmd->t_task_cdb 1604 * pointer is expected to be setup before we reach this point. 1605 */ 1606 ret = transport_generic_cmd_sequencer(cmd, cdb); 1607 if (ret < 0) 1608 return ret; 1609 /* 1610 * Check for SAM Task Attribute Emulation 1611 */ 1612 if (transport_check_alloc_task_attr(cmd) < 0) { 1613 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1614 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 1615 return -EINVAL; 1616 } 1617 spin_lock(&cmd->se_lun->lun_sep_lock); 1618 if (cmd->se_lun->lun_sep) 1619 cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; 1620 spin_unlock(&cmd->se_lun->lun_sep_lock); 1621 return 0; 1622 } 1623 EXPORT_SYMBOL(transport_generic_allocate_tasks); 1624 1625 /* 1626 * Used by fabric module frontends to queue tasks directly. 1627 * Many only be used from process context only 1628 */ 1629 int transport_handle_cdb_direct( 1630 struct se_cmd *cmd) 1631 { 1632 int ret; 1633 1634 if (!cmd->se_lun) { 1635 dump_stack(); 1636 pr_err("cmd->se_lun is NULL\n"); 1637 return -EINVAL; 1638 } 1639 if (in_interrupt()) { 1640 dump_stack(); 1641 pr_err("transport_generic_handle_cdb cannot be called" 1642 " from interrupt context\n"); 1643 return -EINVAL; 1644 } 1645 /* 1646 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following 1647 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue() 1648 * in existing usage to ensure that outstanding descriptors are handled 1649 * correctly during shutdown via transport_wait_for_tasks() 1650 * 1651 * Also, we don't take cmd->t_state_lock here as we only expect 1652 * this to be called for initial descriptor submission. 
1653 */ 1654 cmd->t_state = TRANSPORT_NEW_CMD; 1655 cmd->transport_state |= CMD_T_ACTIVE; 1656 1657 /* 1658 * transport_generic_new_cmd() is already handling QUEUE_FULL, 1659 * so follow TRANSPORT_NEW_CMD processing thread context usage 1660 * and call transport_generic_request_failure() if necessary.. 1661 */ 1662 ret = transport_generic_new_cmd(cmd); 1663 if (ret < 0) 1664 transport_generic_request_failure(cmd); 1665 1666 return 0; 1667 } 1668 EXPORT_SYMBOL(transport_handle_cdb_direct); 1669 1670 /** 1671 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd 1672 * 1673 * @se_cmd: command descriptor to submit 1674 * @se_sess: associated se_sess for endpoint 1675 * @cdb: pointer to SCSI CDB 1676 * @sense: pointer to SCSI sense buffer 1677 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1678 * @data_length: fabric expected data transfer length 1679 * @task_addr: SAM task attribute 1680 * @data_dir: DMA data direction 1681 * @flags: flags for command submission from target_sc_flags_tables 1682 * 1683 * This may only be called from process context, and also currently 1684 * assumes internal allocation of fabric payload buffer by target-core. 1685 **/ 1686 void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, 1687 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, 1688 u32 data_length, int task_attr, int data_dir, int flags) 1689 { 1690 struct se_portal_group *se_tpg; 1691 int rc; 1692 1693 se_tpg = se_sess->se_tpg; 1694 BUG_ON(!se_tpg); 1695 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); 1696 BUG_ON(in_interrupt()); 1697 /* 1698 * Initialize se_cmd for target operation. From this point 1699 * exceptions are handled by sending exception status via 1700 * target_core_fabric_ops->queue_status() callback 1701 */ 1702 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1703 data_length, data_dir, task_attr, sense); 1704 /* 1705 * Obtain struct se_cmd->cmd_kref reference and add new cmd to 1706 * se_sess->sess_cmd_list. A second kref_get here is necessary 1707 * for fabrics using TARGET_SCF_ACK_KREF that expect a second 1708 * kref_put() to happen during fabric packet acknowledgement. 1709 */ 1710 target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); 1711 /* 1712 * Signal bidirectional data payloads to target-core 1713 */ 1714 if (flags & TARGET_SCF_BIDI_OP) 1715 se_cmd->se_cmd_flags |= SCF_BIDI; 1716 /* 1717 * Locate se_lun pointer and attach it to struct se_cmd 1718 */ 1719 if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) { 1720 transport_send_check_condition_and_sense(se_cmd, 1721 se_cmd->scsi_sense_reason, 0); 1722 target_put_sess_cmd(se_sess, se_cmd); 1723 return; 1724 } 1725 /* 1726 * Sanitize CDBs via transport_generic_cmd_sequencer() and 1727 * allocate the necessary tasks to complete the received CDB+data 1728 */ 1729 rc = transport_generic_allocate_tasks(se_cmd, cdb); 1730 if (rc != 0) { 1731 transport_generic_request_failure(se_cmd); 1732 return; 1733 } 1734 /* 1735 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend 1736 * for immediate execution of READs, otherwise wait for 1737 * transport_generic_handle_data() to be called for WRITEs 1738 * when fabric has filled the incoming buffer. 
1739 */ 1740 transport_handle_cdb_direct(se_cmd); 1741 return; 1742 } 1743 EXPORT_SYMBOL(target_submit_cmd); 1744 1745 static void target_complete_tmr_failure(struct work_struct *work) 1746 { 1747 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); 1748 1749 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; 1750 se_cmd->se_tfo->queue_tm_rsp(se_cmd); 1751 transport_generic_free_cmd(se_cmd, 0); 1752 } 1753 1754 /** 1755 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd 1756 * for TMR CDBs 1757 * 1758 * @se_cmd: command descriptor to submit 1759 * @se_sess: associated se_sess for endpoint 1760 * @sense: pointer to SCSI sense buffer 1761 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1762 * @fabric_context: fabric context for TMR req 1763 * @tm_type: Type of TM request 1764 * @gfp: gfp type for caller 1765 * @tag: referenced task tag for TMR_ABORT_TASK 1766 * @flags: submit cmd flags 1767 * 1768 * Callable from all contexts. 1769 **/ 1770 1771 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 1772 unsigned char *sense, u32 unpacked_lun, 1773 void *fabric_tmr_ptr, unsigned char tm_type, 1774 gfp_t gfp, unsigned int tag, int flags) 1775 { 1776 struct se_portal_group *se_tpg; 1777 int ret; 1778 1779 se_tpg = se_sess->se_tpg; 1780 BUG_ON(!se_tpg); 1781 1782 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1783 0, DMA_NONE, MSG_SIMPLE_TAG, sense); 1784 /* 1785 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req 1786 * allocation failure. 1787 */ 1788 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); 1789 if (ret < 0) 1790 return -ENOMEM; 1791 1792 if (tm_type == TMR_ABORT_TASK) 1793 se_cmd->se_tmr_req->ref_task_tag = tag; 1794 1795 /* See target_submit_cmd for commentary */ 1796 target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); 1797 1798 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); 1799 if (ret) { 1800 /* 1801 * For callback during failure handling, push this work off 1802 * to process context with TMR_LUN_DOES_NOT_EXIST status. 1803 */ 1804 INIT_WORK(&se_cmd->work, target_complete_tmr_failure); 1805 schedule_work(&se_cmd->work); 1806 return 0; 1807 } 1808 transport_generic_handle_tmr(se_cmd); 1809 return 0; 1810 } 1811 EXPORT_SYMBOL(target_submit_tmr); 1812 1813 /* 1814 * Used by fabric module frontends defining a TFO->new_cmd_map() caller 1815 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to 1816 * complete setup in TCM process context w/ TFO->new_cmd_map(). 1817 */ 1818 int transport_generic_handle_cdb_map( 1819 struct se_cmd *cmd) 1820 { 1821 if (!cmd->se_lun) { 1822 dump_stack(); 1823 pr_err("cmd->se_lun is NULL\n"); 1824 return -EINVAL; 1825 } 1826 1827 transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false); 1828 return 0; 1829 } 1830 EXPORT_SYMBOL(transport_generic_handle_cdb_map); 1831 1832 /* transport_generic_handle_data(): 1833 * 1834 * 1835 */ 1836 int transport_generic_handle_data( 1837 struct se_cmd *cmd) 1838 { 1839 /* 1840 * For the software fabric case, then we assume the nexus is being 1841 * failed/shutdown when signals are pending from the kthread context 1842 * caller, so we return a failure. For the HW target mode case running 1843 * in interrupt code, the signal_pending() check is skipped. 
1844 */ 1845 if (!in_interrupt() && signal_pending(current)) 1846 return -EPERM; 1847 /* 1848 * If the received CDB has aleady been ABORTED by the generic 1849 * target engine, we now call transport_check_aborted_status() 1850 * to queue any delated TASK_ABORTED status for the received CDB to the 1851 * fabric module as we are expecting no further incoming DATA OUT 1852 * sequences at this point. 1853 */ 1854 if (transport_check_aborted_status(cmd, 1) != 0) 1855 return 0; 1856 1857 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false); 1858 return 0; 1859 } 1860 EXPORT_SYMBOL(transport_generic_handle_data); 1861 1862 /* transport_generic_handle_tmr(): 1863 * 1864 * 1865 */ 1866 int transport_generic_handle_tmr( 1867 struct se_cmd *cmd) 1868 { 1869 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false); 1870 return 0; 1871 } 1872 EXPORT_SYMBOL(transport_generic_handle_tmr); 1873 1874 /* 1875 * If the task is active, request it to be stopped and sleep until it 1876 * has completed. 1877 */ 1878 bool target_stop_task(struct se_task *task, unsigned long *flags) 1879 { 1880 struct se_cmd *cmd = task->task_se_cmd; 1881 bool was_active = false; 1882 1883 if (task->task_flags & TF_ACTIVE) { 1884 task->task_flags |= TF_REQUEST_STOP; 1885 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 1886 1887 pr_debug("Task %p waiting to complete\n", task); 1888 wait_for_completion(&task->task_stop_comp); 1889 pr_debug("Task %p stopped successfully\n", task); 1890 1891 spin_lock_irqsave(&cmd->t_state_lock, *flags); 1892 atomic_dec(&cmd->t_task_cdbs_left); 1893 task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP); 1894 was_active = true; 1895 } 1896 1897 return was_active; 1898 } 1899 1900 static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) 1901 { 1902 struct se_task *task, *task_tmp; 1903 unsigned long flags; 1904 int ret = 0; 1905 1906 pr_debug("ITT[0x%08x] - Stopping tasks\n", 1907 cmd->se_tfo->get_task_tag(cmd)); 1908 1909 /* 1910 * No tasks remain in the execution queue 1911 */ 1912 spin_lock_irqsave(&cmd->t_state_lock, flags); 1913 list_for_each_entry_safe(task, task_tmp, 1914 &cmd->t_task_list, t_list) { 1915 pr_debug("Processing task %p\n", task); 1916 /* 1917 * If the struct se_task has not been sent and is not active, 1918 * remove the struct se_task from the execution queue. 1919 */ 1920 if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) { 1921 spin_unlock_irqrestore(&cmd->t_state_lock, 1922 flags); 1923 transport_remove_task_from_execute_queue(task, 1924 cmd->se_dev); 1925 1926 pr_debug("Task %p removed from execute queue\n", task); 1927 spin_lock_irqsave(&cmd->t_state_lock, flags); 1928 continue; 1929 } 1930 1931 if (!target_stop_task(task, &flags)) { 1932 pr_debug("Task %p - did nothing\n", task); 1933 ret++; 1934 } 1935 } 1936 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 1937 1938 return ret; 1939 } 1940 1941 /* 1942 * Handle SAM-esque emulation for generic transport request failures. 
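 *
 * This completes any SAM Task Attribute bookkeeping for the failed
 * command, translates cmd->scsi_sense_reason into either a CHECK
 * CONDITION with sense data or a RESERVATION CONFLICT status, queues
 * the response to the fabric, and falls back to QUEUE_FULL handling
 * when the fabric callback returns -EAGAIN or -ENOMEM.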
1943 */ 1944 void transport_generic_request_failure(struct se_cmd *cmd) 1945 { 1946 int ret = 0; 1947 1948 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" 1949 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), 1950 cmd->t_task_cdb[0]); 1951 pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n", 1952 cmd->se_tfo->get_cmd_state(cmd), 1953 cmd->t_state, cmd->scsi_sense_reason); 1954 pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" 1955 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" 1956 " CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", 1957 cmd->t_task_list_num, 1958 atomic_read(&cmd->t_task_cdbs_left), 1959 atomic_read(&cmd->t_task_cdbs_sent), 1960 atomic_read(&cmd->t_task_cdbs_ex_left), 1961 (cmd->transport_state & CMD_T_ACTIVE) != 0, 1962 (cmd->transport_state & CMD_T_STOP) != 0, 1963 (cmd->transport_state & CMD_T_SENT) != 0); 1964 1965 /* 1966 * For SAM Task Attribute emulation for failed struct se_cmd 1967 */ 1968 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 1969 transport_complete_task_attr(cmd); 1970 1971 switch (cmd->scsi_sense_reason) { 1972 case TCM_NON_EXISTENT_LUN: 1973 case TCM_UNSUPPORTED_SCSI_OPCODE: 1974 case TCM_INVALID_CDB_FIELD: 1975 case TCM_INVALID_PARAMETER_LIST: 1976 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 1977 case TCM_UNKNOWN_MODE_PAGE: 1978 case TCM_WRITE_PROTECTED: 1979 case TCM_CHECK_CONDITION_ABORT_CMD: 1980 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1981 case TCM_CHECK_CONDITION_NOT_READY: 1982 break; 1983 case TCM_RESERVATION_CONFLICT: 1984 /* 1985 * No SENSE Data payload for this case, set SCSI Status 1986 * and queue the response to $FABRIC_MOD. 1987 * 1988 * Uses linux/include/scsi/scsi.h SAM status codes defs 1989 */ 1990 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1991 /* 1992 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 1993 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 1994 * CONFLICT STATUS. 1995 * 1996 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1997 */ 1998 if (cmd->se_sess && 1999 cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) 2000 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, 2001 cmd->orig_fe_lun, 0x2C, 2002 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 2003 2004 ret = cmd->se_tfo->queue_status(cmd); 2005 if (ret == -EAGAIN || ret == -ENOMEM) 2006 goto queue_full; 2007 goto check_stop; 2008 default: 2009 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 2010 cmd->t_task_cdb[0], cmd->scsi_sense_reason); 2011 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 2012 break; 2013 } 2014 /* 2015 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller, 2016 * make the call to transport_send_check_condition_and_sense() 2017 * directly. Otherwise expect the fabric to make the call to 2018 * transport_send_check_condition_and_sense() after handling 2019 * possible unsoliticied write data payloads. 
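 *
 * If queueing the CHECK CONDITION to the fabric fails with -EAGAIN or
 * -ENOMEM, the command is parked via transport_handle_queue_full() and
 * the response is retried later from the device qf_work_queue.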
2020 */ 2021 ret = transport_send_check_condition_and_sense(cmd, 2022 cmd->scsi_sense_reason, 0); 2023 if (ret == -EAGAIN || ret == -ENOMEM) 2024 goto queue_full; 2025 2026 check_stop: 2027 transport_lun_remove_cmd(cmd); 2028 if (!transport_cmd_check_stop_to_fabric(cmd)) 2029 ; 2030 return; 2031 2032 queue_full: 2033 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 2034 transport_handle_queue_full(cmd, cmd->se_dev); 2035 } 2036 EXPORT_SYMBOL(transport_generic_request_failure); 2037 2038 static inline u32 transport_lba_21(unsigned char *cdb) 2039 { 2040 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; 2041 } 2042 2043 static inline u32 transport_lba_32(unsigned char *cdb) 2044 { 2045 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 2046 } 2047 2048 static inline unsigned long long transport_lba_64(unsigned char *cdb) 2049 { 2050 unsigned int __v1, __v2; 2051 2052 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 2053 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 2054 2055 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 2056 } 2057 2058 /* 2059 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs 2060 */ 2061 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) 2062 { 2063 unsigned int __v1, __v2; 2064 2065 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; 2066 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; 2067 2068 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 2069 } 2070 2071 static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) 2072 { 2073 unsigned long flags; 2074 2075 spin_lock_irqsave(&se_cmd->t_state_lock, flags); 2076 se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 2077 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 2078 } 2079 2080 /* 2081 * Called from Fabric Module context from transport_execute_tasks() 2082 * 2083 * The return of this function determins if the tasks from struct se_cmd 2084 * get added to the execution queue in transport_execute_tasks(), 2085 * or are added to the delayed or ordered lists here. 2086 */ 2087 static inline int transport_execute_task_attr(struct se_cmd *cmd) 2088 { 2089 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) 2090 return 1; 2091 /* 2092 * Check for the existence of HEAD_OF_QUEUE, and if true return 1 2093 * to allow the passed struct se_cmd list of tasks to the front of the list. 2094 */ 2095 if (cmd->sam_task_attr == MSG_HEAD_TAG) { 2096 pr_debug("Added HEAD_OF_QUEUE for CDB:" 2097 " 0x%02x, se_ordered_id: %u\n", 2098 cmd->t_task_cdb[0], 2099 cmd->se_ordered_id); 2100 return 1; 2101 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 2102 atomic_inc(&cmd->se_dev->dev_ordered_sync); 2103 smp_mb__after_atomic_inc(); 2104 2105 pr_debug("Added ORDERED for CDB: 0x%02x to ordered" 2106 " list, se_ordered_id: %u\n", 2107 cmd->t_task_cdb[0], 2108 cmd->se_ordered_id); 2109 /* 2110 * Add ORDERED command to tail of execution queue if 2111 * no other older commands exist that need to be 2112 * completed first. 
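 *
 * To summarize the attribute handling in this function: HEAD_OF_QUEUE
 * commands always execute immediately, ORDERED commands execute
 * immediately only when no SIMPLE commands are still outstanding, and
 * SIMPLE/UNTAGGED commands bump dev->simple_cmds before execution.
 * Anything that cannot run yet is parked on dev->delayed_cmd_list
 * below and released later by transport_complete_task_attr().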
2113 */ 2114 if (!atomic_read(&cmd->se_dev->simple_cmds)) 2115 return 1; 2116 } else { 2117 /* 2118 * For SIMPLE and UNTAGGED Task Attribute commands 2119 */ 2120 atomic_inc(&cmd->se_dev->simple_cmds); 2121 smp_mb__after_atomic_inc(); 2122 } 2123 /* 2124 * Otherwise if one or more outstanding ORDERED task attribute exist, 2125 * add the dormant task(s) built for the passed struct se_cmd to the 2126 * execution queue and become in Active state for this struct se_device. 2127 */ 2128 if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) { 2129 /* 2130 * Otherwise, add cmd w/ tasks to delayed cmd queue that 2131 * will be drained upon completion of HEAD_OF_QUEUE task. 2132 */ 2133 spin_lock(&cmd->se_dev->delayed_cmd_lock); 2134 cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; 2135 list_add_tail(&cmd->se_delayed_node, 2136 &cmd->se_dev->delayed_cmd_list); 2137 spin_unlock(&cmd->se_dev->delayed_cmd_lock); 2138 2139 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" 2140 " delayed CMD list, se_ordered_id: %u\n", 2141 cmd->t_task_cdb[0], cmd->sam_task_attr, 2142 cmd->se_ordered_id); 2143 /* 2144 * Return zero to let transport_execute_tasks() know 2145 * not to add the delayed tasks to the execution list. 2146 */ 2147 return 0; 2148 } 2149 /* 2150 * Otherwise, no ORDERED task attributes exist.. 2151 */ 2152 return 1; 2153 } 2154 2155 /* 2156 * Called from fabric module context in transport_generic_new_cmd() and 2157 * transport_generic_process_write() 2158 */ 2159 static int transport_execute_tasks(struct se_cmd *cmd) 2160 { 2161 int add_tasks; 2162 struct se_device *se_dev = cmd->se_dev; 2163 /* 2164 * Call transport_cmd_check_stop() to see if a fabric exception 2165 * has occurred that prevents execution. 2166 */ 2167 if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) { 2168 /* 2169 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE 2170 * attribute for the tasks of the received struct se_cmd CDB 2171 */ 2172 add_tasks = transport_execute_task_attr(cmd); 2173 if (!add_tasks) 2174 goto execute_tasks; 2175 /* 2176 * __transport_execute_tasks() -> __transport_add_tasks_from_cmd() 2177 * adds associated se_tasks while holding dev->execute_task_lock 2178 * before I/O dispath to avoid a double spinlock access. 
2179 */ 2180 __transport_execute_tasks(se_dev, cmd); 2181 return 0; 2182 } 2183 2184 execute_tasks: 2185 __transport_execute_tasks(se_dev, NULL); 2186 return 0; 2187 } 2188 2189 /* 2190 * Called to check struct se_device tcq depth window, and once open pull struct se_task 2191 * from struct se_device->execute_task_list and 2192 * 2193 * Called from transport_processing_thread() 2194 */ 2195 static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd) 2196 { 2197 int error; 2198 struct se_cmd *cmd = NULL; 2199 struct se_task *task = NULL; 2200 unsigned long flags; 2201 2202 check_depth: 2203 spin_lock_irq(&dev->execute_task_lock); 2204 if (new_cmd != NULL) 2205 __transport_add_tasks_from_cmd(new_cmd); 2206 2207 if (list_empty(&dev->execute_task_list)) { 2208 spin_unlock_irq(&dev->execute_task_lock); 2209 return 0; 2210 } 2211 task = list_first_entry(&dev->execute_task_list, 2212 struct se_task, t_execute_list); 2213 __transport_remove_task_from_execute_queue(task, dev); 2214 spin_unlock_irq(&dev->execute_task_lock); 2215 2216 cmd = task->task_se_cmd; 2217 spin_lock_irqsave(&cmd->t_state_lock, flags); 2218 task->task_flags |= (TF_ACTIVE | TF_SENT); 2219 atomic_inc(&cmd->t_task_cdbs_sent); 2220 2221 if (atomic_read(&cmd->t_task_cdbs_sent) == 2222 cmd->t_task_list_num) 2223 cmd->transport_state |= CMD_T_SENT; 2224 2225 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2226 2227 if (cmd->execute_task) 2228 error = cmd->execute_task(task); 2229 else 2230 error = dev->transport->do_task(task); 2231 if (error != 0) { 2232 spin_lock_irqsave(&cmd->t_state_lock, flags); 2233 task->task_flags &= ~TF_ACTIVE; 2234 cmd->transport_state &= ~CMD_T_SENT; 2235 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2236 2237 transport_stop_tasks_for_cmd(cmd); 2238 transport_generic_request_failure(cmd); 2239 } 2240 2241 new_cmd = NULL; 2242 goto check_depth; 2243 2244 return 0; 2245 } 2246 2247 static inline u32 transport_get_sectors_6( 2248 unsigned char *cdb, 2249 struct se_cmd *cmd, 2250 int *ret) 2251 { 2252 struct se_device *dev = cmd->se_dev; 2253 2254 /* 2255 * Assume TYPE_DISK for non struct se_device objects. 2256 * Use 8-bit sector value. 2257 */ 2258 if (!dev) 2259 goto type_disk; 2260 2261 /* 2262 * Use 24-bit allocation length for TYPE_TAPE. 2263 */ 2264 if (dev->transport->get_device_type(dev) == TYPE_TAPE) 2265 return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; 2266 2267 /* 2268 * Everything else assume TYPE_DISK Sector CDB location. 2269 * Use 8-bit sector value. SBC-3 says: 2270 * 2271 * A TRANSFER LENGTH field set to zero specifies that 256 2272 * logical blocks shall be written. Any other value 2273 * specifies the number of logical blocks that shall be 2274 * written. 2275 */ 2276 type_disk: 2277 return cdb[4] ? : 256; 2278 } 2279 2280 static inline u32 transport_get_sectors_10( 2281 unsigned char *cdb, 2282 struct se_cmd *cmd, 2283 int *ret) 2284 { 2285 struct se_device *dev = cmd->se_dev; 2286 2287 /* 2288 * Assume TYPE_DISK for non struct se_device objects. 2289 * Use 16-bit sector value. 2290 */ 2291 if (!dev) 2292 goto type_disk; 2293 2294 /* 2295 * XXX_10 is not defined in SSC, throw an exception 2296 */ 2297 if (dev->transport->get_device_type(dev) == TYPE_TAPE) { 2298 *ret = -EINVAL; 2299 return 0; 2300 } 2301 2302 /* 2303 * Everything else assume TYPE_DISK Sector CDB location. 2304 * Use 16-bit sector value. 
2305 */ 2306 type_disk: 2307 return (u32)(cdb[7] << 8) + cdb[8]; 2308 } 2309 2310 static inline u32 transport_get_sectors_12( 2311 unsigned char *cdb, 2312 struct se_cmd *cmd, 2313 int *ret) 2314 { 2315 struct se_device *dev = cmd->se_dev; 2316 2317 /* 2318 * Assume TYPE_DISK for non struct se_device objects. 2319 * Use 32-bit sector value. 2320 */ 2321 if (!dev) 2322 goto type_disk; 2323 2324 /* 2325 * XXX_12 is not defined in SSC, throw an exception 2326 */ 2327 if (dev->transport->get_device_type(dev) == TYPE_TAPE) { 2328 *ret = -EINVAL; 2329 return 0; 2330 } 2331 2332 /* 2333 * Everything else assume TYPE_DISK Sector CDB location. 2334 * Use 32-bit sector value. 2335 */ 2336 type_disk: 2337 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; 2338 } 2339 2340 static inline u32 transport_get_sectors_16( 2341 unsigned char *cdb, 2342 struct se_cmd *cmd, 2343 int *ret) 2344 { 2345 struct se_device *dev = cmd->se_dev; 2346 2347 /* 2348 * Assume TYPE_DISK for non struct se_device objects. 2349 * Use 32-bit sector value. 2350 */ 2351 if (!dev) 2352 goto type_disk; 2353 2354 /* 2355 * Use 24-bit allocation length for TYPE_TAPE. 2356 */ 2357 if (dev->transport->get_device_type(dev) == TYPE_TAPE) 2358 return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; 2359 2360 type_disk: 2361 return (u32)(cdb[10] << 24) + (cdb[11] << 16) + 2362 (cdb[12] << 8) + cdb[13]; 2363 } 2364 2365 /* 2366 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants 2367 */ 2368 static inline u32 transport_get_sectors_32( 2369 unsigned char *cdb, 2370 struct se_cmd *cmd, 2371 int *ret) 2372 { 2373 /* 2374 * Assume TYPE_DISK for non struct se_device objects. 2375 * Use 32-bit sector value. 2376 */ 2377 return (u32)(cdb[28] << 24) + (cdb[29] << 16) + 2378 (cdb[30] << 8) + cdb[31]; 2379 2380 } 2381 2382 static inline u32 transport_get_size( 2383 u32 sectors, 2384 unsigned char *cdb, 2385 struct se_cmd *cmd) 2386 { 2387 struct se_device *dev = cmd->se_dev; 2388 2389 if (dev->transport->get_device_type(dev) == TYPE_TAPE) { 2390 if (cdb[1] & 1) { /* sectors */ 2391 return dev->se_sub_dev->se_dev_attrib.block_size * sectors; 2392 } else /* bytes */ 2393 return sectors; 2394 } 2395 #if 0 2396 pr_debug("Returning block_size: %u, sectors: %u == %u for" 2397 " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors, 2398 dev->se_sub_dev->se_dev_attrib.block_size * sectors, 2399 dev->transport->name); 2400 #endif 2401 return dev->se_sub_dev->se_dev_attrib.block_size * sectors; 2402 } 2403 2404 static void transport_xor_callback(struct se_cmd *cmd) 2405 { 2406 unsigned char *buf, *addr; 2407 struct scatterlist *sg; 2408 unsigned int offset; 2409 int i; 2410 int count; 2411 /* 2412 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command 2413 * 2414 * 1) read the specified logical block(s); 2415 * 2) transfer logical blocks from the data-out buffer; 2416 * 3) XOR the logical blocks transferred from the data-out buffer with 2417 * the logical blocks read, storing the resulting XOR data in a buffer; 2418 * 4) if the DISABLE WRITE bit is set to zero, then write the logical 2419 * blocks transferred from the data-out buffer; and 2420 * 5) transfer the resulting XOR data to the data-in buffer. 
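 *
 * Steps 3) and 5) are what is emulated below: the data-out scatterlist
 * at cmd->t_data_sg is copied into a temporary linear buffer, and that
 * buffer is then XORed in place into the BIDI read scatterlist at
 * cmd->t_bidi_data_sg, which is subsequently returned to the initiator
 * as the data-in payload.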
2421 */ 2422 buf = kmalloc(cmd->data_length, GFP_KERNEL); 2423 if (!buf) { 2424 pr_err("Unable to allocate xor_callback buf\n"); 2425 return; 2426 } 2427 /* 2428 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg 2429 * into the locally allocated *buf 2430 */ 2431 sg_copy_to_buffer(cmd->t_data_sg, 2432 cmd->t_data_nents, 2433 buf, 2434 cmd->data_length); 2435 2436 /* 2437 * Now perform the XOR against the BIDI read memory located at 2438 * cmd->t_mem_bidi_list 2439 */ 2440 2441 offset = 0; 2442 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { 2443 addr = kmap_atomic(sg_page(sg)); 2444 if (!addr) 2445 goto out; 2446 2447 for (i = 0; i < sg->length; i++) 2448 *(addr + sg->offset + i) ^= *(buf + offset + i); 2449 2450 offset += sg->length; 2451 kunmap_atomic(addr); 2452 } 2453 2454 out: 2455 kfree(buf); 2456 } 2457 2458 /* 2459 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd 2460 */ 2461 static int transport_get_sense_data(struct se_cmd *cmd) 2462 { 2463 unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; 2464 struct se_device *dev = cmd->se_dev; 2465 struct se_task *task = NULL, *task_tmp; 2466 unsigned long flags; 2467 u32 offset = 0; 2468 2469 WARN_ON(!cmd->se_lun); 2470 2471 if (!dev) 2472 return 0; 2473 2474 spin_lock_irqsave(&cmd->t_state_lock, flags); 2475 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 2476 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2477 return 0; 2478 } 2479 2480 list_for_each_entry_safe(task, task_tmp, 2481 &cmd->t_task_list, t_list) { 2482 if (!(task->task_flags & TF_HAS_SENSE)) 2483 continue; 2484 2485 if (!dev->transport->get_sense_buffer) { 2486 pr_err("dev->transport->get_sense_buffer" 2487 " is NULL\n"); 2488 continue; 2489 } 2490 2491 sense_buffer = dev->transport->get_sense_buffer(task); 2492 if (!sense_buffer) { 2493 pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate" 2494 " sense buffer for task with sense\n", 2495 cmd->se_tfo->get_task_tag(cmd), task); 2496 continue; 2497 } 2498 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2499 2500 offset = cmd->se_tfo->set_fabric_sense_len(cmd, 2501 TRANSPORT_SENSE_BUFFER); 2502 2503 memcpy(&buffer[offset], sense_buffer, 2504 TRANSPORT_SENSE_BUFFER); 2505 cmd->scsi_status = task->task_scsi_status; 2506 /* Automatically padded */ 2507 cmd->scsi_sense_length = 2508 (TRANSPORT_SENSE_BUFFER + offset); 2509 2510 pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" 2511 " and sense\n", 2512 dev->se_hba->hba_id, dev->transport->name, 2513 cmd->scsi_status); 2514 return 0; 2515 } 2516 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2517 2518 return -1; 2519 } 2520 2521 static inline long long transport_dev_end_lba(struct se_device *dev) 2522 { 2523 return dev->transport->get_blocks(dev) + 1; 2524 } 2525 2526 static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) 2527 { 2528 struct se_device *dev = cmd->se_dev; 2529 u32 sectors; 2530 2531 if (dev->transport->get_device_type(dev) != TYPE_DISK) 2532 return 0; 2533 2534 sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); 2535 2536 if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) { 2537 pr_err("LBA: %llu Sectors: %u exceeds" 2538 " transport_dev_end_lba(): %llu\n", 2539 cmd->t_task_lba, sectors, 2540 transport_dev_end_lba(dev)); 2541 return -EINVAL; 2542 } 2543 2544 return 0; 2545 } 2546 2547 static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev) 2548 { 2549 /* 2550 * Determine if the received WRITE_SAME is used to for 
direct 2551 * passthrough into Linux/SCSI with struct request via TCM/pSCSI 2552 * or we are signaling the use of internal WRITE_SAME + UNMAP=1 2553 * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK code. 2554 */ 2555 int passthrough = (dev->transport->transport_type == 2556 TRANSPORT_PLUGIN_PHBA_PDEV); 2557 2558 if (!passthrough) { 2559 if ((flags[0] & 0x04) || (flags[0] & 0x02)) { 2560 pr_err("WRITE_SAME PBDATA and LBDATA" 2561 " bits not supported for Block Discard" 2562 " Emulation\n"); 2563 return -ENOSYS; 2564 } 2565 /* 2566 * Currently for the emulated case we only accept 2567 * tpws with the UNMAP=1 bit set. 2568 */ 2569 if (!(flags[0] & 0x08)) { 2570 pr_err("WRITE_SAME w/o UNMAP bit not" 2571 " supported for Block Discard Emulation\n"); 2572 return -ENOSYS; 2573 } 2574 } 2575 2576 return 0; 2577 } 2578 2579 /* transport_generic_cmd_sequencer(): 2580 * 2581 * Generic Command Sequencer that should work for most DAS transport 2582 * drivers. 2583 * 2584 * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD 2585 * RX Thread. 2586 * 2587 * FIXME: Need to support other SCSI OPCODES where as well. 2588 */ 2589 static int transport_generic_cmd_sequencer( 2590 struct se_cmd *cmd, 2591 unsigned char *cdb) 2592 { 2593 struct se_device *dev = cmd->se_dev; 2594 struct se_subsystem_dev *su_dev = dev->se_sub_dev; 2595 int ret = 0, sector_ret = 0, passthrough; 2596 u32 sectors = 0, size = 0, pr_reg_type = 0; 2597 u16 service_action; 2598 u8 alua_ascq = 0; 2599 /* 2600 * Check for an existing UNIT ATTENTION condition 2601 */ 2602 if (core_scsi3_ua_check(cmd, cdb) < 0) { 2603 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 2604 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; 2605 return -EINVAL; 2606 } 2607 /* 2608 * Check status of Asymmetric Logical Unit Assignment port 2609 */ 2610 ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); 2611 if (ret != 0) { 2612 /* 2613 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; 2614 * The ALUA additional sense code qualifier (ASCQ) is determined 2615 * by the ALUA primary or secondary access state.. 2616 */ 2617 if (ret > 0) { 2618 #if 0 2619 pr_debug("[%s]: ALUA TG Port not available," 2620 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", 2621 cmd->se_tfo->get_fabric_name(), alua_ascq); 2622 #endif 2623 transport_set_sense_codes(cmd, 0x04, alua_ascq); 2624 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 2625 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; 2626 return -EINVAL; 2627 } 2628 goto out_invalid_cdb_field; 2629 } 2630 /* 2631 * Check status for SPC-3 Persistent Reservations 2632 */ 2633 if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) { 2634 if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( 2635 cmd, cdb, pr_reg_type) != 0) { 2636 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 2637 cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; 2638 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 2639 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 2640 return -EBUSY; 2641 } 2642 /* 2643 * This means the CDB is allowed for the SCSI Initiator port 2644 * when said port is *NOT* holding the legacy SPC-2 or 2645 * SPC-3 Persistent Reservation. 2646 */ 2647 } 2648 2649 /* 2650 * If we operate in passthrough mode we skip most CDB emulation and 2651 * instead hand the commands down to the physical SCSI device. 
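 *
 * "Passthrough" here means a TRANSPORT_PLUGIN_PHBA_PDEV backend such
 * as TCM/pSCSI: for those, cmd->execute_task is generally left unset
 * in the switch below and the CDB is passed through unmodified, while
 * the emulated backends (e.g. IBLOCK and FILEIO) get the various
 * target_emulate_*() handlers assigned instead.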
2652 */
2653 passthrough =
2654 (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
2655
2656 switch (cdb[0]) {
2657 case READ_6:
2658 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2659 if (sector_ret)
2660 goto out_unsupported_cdb;
2661 size = transport_get_size(sectors, cdb, cmd);
2662 cmd->t_task_lba = transport_lba_21(cdb);
2663 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2664 break;
2665 case READ_10:
2666 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2667 if (sector_ret)
2668 goto out_unsupported_cdb;
2669 size = transport_get_size(sectors, cdb, cmd);
2670 cmd->t_task_lba = transport_lba_32(cdb);
2671 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2672 break;
2673 case READ_12:
2674 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2675 if (sector_ret)
2676 goto out_unsupported_cdb;
2677 size = transport_get_size(sectors, cdb, cmd);
2678 cmd->t_task_lba = transport_lba_32(cdb);
2679 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2680 break;
2681 case READ_16:
2682 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2683 if (sector_ret)
2684 goto out_unsupported_cdb;
2685 size = transport_get_size(sectors, cdb, cmd);
2686 cmd->t_task_lba = transport_lba_64(cdb);
2687 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2688 break;
2689 case WRITE_6:
2690 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2691 if (sector_ret)
2692 goto out_unsupported_cdb;
2693 size = transport_get_size(sectors, cdb, cmd);
2694 cmd->t_task_lba = transport_lba_21(cdb);
2695 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2696 break;
2697 case WRITE_10:
2698 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2699 if (sector_ret)
2700 goto out_unsupported_cdb;
2701 size = transport_get_size(sectors, cdb, cmd);
2702 cmd->t_task_lba = transport_lba_32(cdb);
2703 if (cdb[1] & 0x8)
2704 cmd->se_cmd_flags |= SCF_FUA;
2705 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2706 break;
2707 case WRITE_12:
2708 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2709 if (sector_ret)
2710 goto out_unsupported_cdb;
2711 size = transport_get_size(sectors, cdb, cmd);
2712 cmd->t_task_lba = transport_lba_32(cdb);
2713 if (cdb[1] & 0x8)
2714 cmd->se_cmd_flags |= SCF_FUA;
2715 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2716 break;
2717 case WRITE_16:
2718 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2719 if (sector_ret)
2720 goto out_unsupported_cdb;
2721 size = transport_get_size(sectors, cdb, cmd);
2722 cmd->t_task_lba = transport_lba_64(cdb);
2723 if (cdb[1] & 0x8)
2724 cmd->se_cmd_flags |= SCF_FUA;
2725 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2726 break;
2727 case XDWRITEREAD_10:
2728 if ((cmd->data_direction != DMA_TO_DEVICE) ||
2729 !(cmd->se_cmd_flags & SCF_BIDI))
2730 goto out_invalid_cdb_field;
2731 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2732 if (sector_ret)
2733 goto out_unsupported_cdb;
2734 size = transport_get_size(sectors, cdb, cmd);
2735 cmd->t_task_lba = transport_lba_32(cdb);
2736 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2737
2738 /*
2739 * Do not allow BIDI commands for passthrough mode.
2740 */
2741 if (passthrough)
2742 goto out_unsupported_cdb;
2743
2744 /*
2745 * Setup BIDI XOR callback to be run after I/O completion.
2746 */
2747 cmd->transport_complete_callback = &transport_xor_callback;
2748 if (cdb[1] & 0x8)
2749 cmd->se_cmd_flags |= SCF_FUA;
2750 break;
2751 case VARIABLE_LENGTH_CMD:
2752 service_action = get_unaligned_be16(&cdb[8]);
2753 switch (service_action) {
2754 case XDWRITEREAD_32:
2755 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2756 if (sector_ret)
2757 goto out_unsupported_cdb;
2758 size = transport_get_size(sectors, cdb, cmd);
2759 /*
2760 * Use WRITE_32 and READ_32 opcodes for the emulated
2761 * XDWRITE_READ_32 logic.
2762 */
2763 cmd->t_task_lba = transport_lba_64_ext(cdb);
2764 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2765
2766 /*
2767 * Do not allow BIDI commands for passthrough mode.
2768 */
2769 if (passthrough)
2770 goto out_unsupported_cdb;
2771
2772 /*
2773 * Setup BIDI XOR callback to be run after I/O
2774 * completion.
2775 */
2776 cmd->transport_complete_callback = &transport_xor_callback;
2777 if (cdb[1] & 0x8)
2778 cmd->se_cmd_flags |= SCF_FUA;
2779 break;
2780 case WRITE_SAME_32:
2781 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2782 if (sector_ret)
2783 goto out_unsupported_cdb;
2784
2785 if (sectors)
2786 size = transport_get_size(1, cdb, cmd);
2787 else {
2788 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
2789 " supported\n");
2790 goto out_invalid_cdb_field;
2791 }
2792
2793 cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
2794 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2795
2796 if (target_check_write_same_discard(&cdb[10], dev) < 0)
2797 goto out_unsupported_cdb;
2798 if (!passthrough)
2799 cmd->execute_task = target_emulate_write_same;
2800 break;
2801 default:
2802 pr_err("VARIABLE_LENGTH_CMD service action"
2803 " 0x%04x not supported\n", service_action);
2804 goto out_unsupported_cdb;
2805 }
2806 break;
2807 case MAINTENANCE_IN:
2808 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
2809 /* MAINTENANCE_IN from SCC-2 */
2810 /*
2811 * Check for emulated MI_REPORT_TARGET_PGS.
2812 */ 2813 if (cdb[1] == MI_REPORT_TARGET_PGS && 2814 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 2815 cmd->execute_task = 2816 target_emulate_report_target_port_groups; 2817 } 2818 size = (cdb[6] << 24) | (cdb[7] << 16) | 2819 (cdb[8] << 8) | cdb[9]; 2820 } else { 2821 /* GPCMD_SEND_KEY from multi media commands */ 2822 size = (cdb[8] << 8) + cdb[9]; 2823 } 2824 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2825 break; 2826 case MODE_SELECT: 2827 size = cdb[4]; 2828 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2829 break; 2830 case MODE_SELECT_10: 2831 size = (cdb[7] << 8) + cdb[8]; 2832 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2833 break; 2834 case MODE_SENSE: 2835 size = cdb[4]; 2836 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2837 if (!passthrough) 2838 cmd->execute_task = target_emulate_modesense; 2839 break; 2840 case MODE_SENSE_10: 2841 size = (cdb[7] << 8) + cdb[8]; 2842 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2843 if (!passthrough) 2844 cmd->execute_task = target_emulate_modesense; 2845 break; 2846 case GPCMD_READ_BUFFER_CAPACITY: 2847 case GPCMD_SEND_OPC: 2848 case LOG_SELECT: 2849 case LOG_SENSE: 2850 size = (cdb[7] << 8) + cdb[8]; 2851 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2852 break; 2853 case READ_BLOCK_LIMITS: 2854 size = READ_BLOCK_LEN; 2855 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2856 break; 2857 case GPCMD_GET_CONFIGURATION: 2858 case GPCMD_READ_FORMAT_CAPACITIES: 2859 case GPCMD_READ_DISC_INFO: 2860 case GPCMD_READ_TRACK_RZONE_INFO: 2861 size = (cdb[7] << 8) + cdb[8]; 2862 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2863 break; 2864 case PERSISTENT_RESERVE_IN: 2865 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) 2866 cmd->execute_task = target_scsi3_emulate_pr_in; 2867 size = (cdb[7] << 8) + cdb[8]; 2868 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2869 break; 2870 case PERSISTENT_RESERVE_OUT: 2871 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) 2872 cmd->execute_task = target_scsi3_emulate_pr_out; 2873 size = (cdb[7] << 8) + cdb[8]; 2874 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2875 break; 2876 case GPCMD_MECHANISM_STATUS: 2877 case GPCMD_READ_DVD_STRUCTURE: 2878 size = (cdb[8] << 8) + cdb[9]; 2879 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2880 break; 2881 case READ_POSITION: 2882 size = READ_POSITION_LEN; 2883 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2884 break; 2885 case MAINTENANCE_OUT: 2886 if (dev->transport->get_device_type(dev) != TYPE_ROM) { 2887 /* MAINTENANCE_OUT from SCC-2 2888 * 2889 * Check for emulated MO_SET_TARGET_PGS. 2890 */ 2891 if (cdb[1] == MO_SET_TARGET_PGS && 2892 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 2893 cmd->execute_task = 2894 target_emulate_set_target_port_groups; 2895 } 2896 2897 size = (cdb[6] << 24) | (cdb[7] << 16) | 2898 (cdb[8] << 8) | cdb[9]; 2899 } else { 2900 /* GPCMD_REPORT_KEY from multi media commands */ 2901 size = (cdb[8] << 8) + cdb[9]; 2902 } 2903 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2904 break; 2905 case INQUIRY: 2906 size = (cdb[3] << 8) + cdb[4]; 2907 /* 2908 * Do implict HEAD_OF_QUEUE processing for INQUIRY. 
2909 * See spc4r17 section 5.3 2910 */ 2911 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 2912 cmd->sam_task_attr = MSG_HEAD_TAG; 2913 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2914 if (!passthrough) 2915 cmd->execute_task = target_emulate_inquiry; 2916 break; 2917 case READ_BUFFER: 2918 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; 2919 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2920 break; 2921 case READ_CAPACITY: 2922 size = READ_CAP_LEN; 2923 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2924 if (!passthrough) 2925 cmd->execute_task = target_emulate_readcapacity; 2926 break; 2927 case READ_MEDIA_SERIAL_NUMBER: 2928 case SECURITY_PROTOCOL_IN: 2929 case SECURITY_PROTOCOL_OUT: 2930 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 2931 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2932 break; 2933 case SERVICE_ACTION_IN: 2934 switch (cmd->t_task_cdb[1] & 0x1f) { 2935 case SAI_READ_CAPACITY_16: 2936 if (!passthrough) 2937 cmd->execute_task = 2938 target_emulate_readcapacity_16; 2939 break; 2940 default: 2941 if (passthrough) 2942 break; 2943 2944 pr_err("Unsupported SA: 0x%02x\n", 2945 cmd->t_task_cdb[1] & 0x1f); 2946 goto out_invalid_cdb_field; 2947 } 2948 /*FALLTHROUGH*/ 2949 case ACCESS_CONTROL_IN: 2950 case ACCESS_CONTROL_OUT: 2951 case EXTENDED_COPY: 2952 case READ_ATTRIBUTE: 2953 case RECEIVE_COPY_RESULTS: 2954 case WRITE_ATTRIBUTE: 2955 size = (cdb[10] << 24) | (cdb[11] << 16) | 2956 (cdb[12] << 8) | cdb[13]; 2957 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2958 break; 2959 case RECEIVE_DIAGNOSTIC: 2960 case SEND_DIAGNOSTIC: 2961 size = (cdb[3] << 8) | cdb[4]; 2962 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2963 break; 2964 /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ 2965 #if 0 2966 case GPCMD_READ_CD: 2967 sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; 2968 size = (2336 * sectors); 2969 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2970 break; 2971 #endif 2972 case READ_TOC: 2973 size = cdb[8]; 2974 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2975 break; 2976 case REQUEST_SENSE: 2977 size = cdb[4]; 2978 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2979 if (!passthrough) 2980 cmd->execute_task = target_emulate_request_sense; 2981 break; 2982 case READ_ELEMENT_STATUS: 2983 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; 2984 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2985 break; 2986 case WRITE_BUFFER: 2987 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; 2988 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2989 break; 2990 case RESERVE: 2991 case RESERVE_10: 2992 /* 2993 * The SPC-2 RESERVE does not contain a size in the SCSI CDB. 2994 * Assume the passthrough or $FABRIC_MOD will tell us about it. 2995 */ 2996 if (cdb[0] == RESERVE_10) 2997 size = (cdb[7] << 8) | cdb[8]; 2998 else 2999 size = cmd->data_length; 3000 3001 /* 3002 * Setup the legacy emulated handler for SPC-2 and 3003 * >= SPC-3 compatible reservation handling (CRH=1) 3004 * Otherwise, we assume the underlying SCSI logic is 3005 * is running in SPC_PASSTHROUGH, and wants reservations 3006 * emulation disabled. 3007 */ 3008 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) 3009 cmd->execute_task = target_scsi2_reservation_reserve; 3010 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; 3011 break; 3012 case RELEASE: 3013 case RELEASE_10: 3014 /* 3015 * The SPC-2 RELEASE does not contain a size in the SCSI CDB. 3016 * Assume the passthrough or $FABRIC_MOD will tell us about it. 
3017 */
3018 if (cdb[0] == RELEASE_10)
3019 size = (cdb[7] << 8) | cdb[8];
3020 else
3021 size = cmd->data_length;
3022
3023 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
3024 cmd->execute_task = target_scsi2_reservation_release;
3025 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3026 break;
3027 case SYNCHRONIZE_CACHE:
3028 case SYNCHRONIZE_CACHE_16:
3029 /*
3030 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
3031 */
3032 if (cdb[0] == SYNCHRONIZE_CACHE) {
3033 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3034 cmd->t_task_lba = transport_lba_32(cdb);
3035 } else {
3036 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3037 cmd->t_task_lba = transport_lba_64(cdb);
3038 }
3039 if (sector_ret)
3040 goto out_unsupported_cdb;
3041
3042 size = transport_get_size(sectors, cdb, cmd);
3043 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3044
3045 if (passthrough)
3046 break;
3047
3048 /*
3049 * Check to ensure that LBA + Range does not extend past the end of
3050 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
3051 */
3052 if ((cmd->t_task_lba != 0) || (sectors != 0)) {
3053 if (transport_cmd_get_valid_sectors(cmd) < 0)
3054 goto out_invalid_cdb_field;
3055 }
3056 cmd->execute_task = target_emulate_synchronize_cache;
3057 break;
3058 case UNMAP:
3059 size = get_unaligned_be16(&cdb[7]);
3060 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3061 if (!passthrough)
3062 cmd->execute_task = target_emulate_unmap;
3063 break;
3064 case WRITE_SAME_16:
3065 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3066 if (sector_ret)
3067 goto out_unsupported_cdb;
3068
3069 if (sectors)
3070 size = transport_get_size(1, cdb, cmd);
3071 else {
3072 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3073 goto out_invalid_cdb_field;
3074 }
3075
3076 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
3077 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3078
3079 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3080 goto out_unsupported_cdb;
3081 if (!passthrough)
3082 cmd->execute_task = target_emulate_write_same;
3083 break;
3084 case WRITE_SAME:
3085 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3086 if (sector_ret)
3087 goto out_unsupported_cdb;
3088
3089 if (sectors)
3090 size = transport_get_size(1, cdb, cmd);
3091 else {
3092 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3093 goto out_invalid_cdb_field;
3094 }
3095
3096 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
3097 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3098 /*
3099 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
3100 * of byte 1 bit 3 UNMAP instead of original reserved field
3101 */
3102 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3103 goto out_unsupported_cdb;
3104 if (!passthrough)
3105 cmd->execute_task = target_emulate_write_same;
3106 break;
3107 case ALLOW_MEDIUM_REMOVAL:
3108 case ERASE:
3109 case REZERO_UNIT:
3110 case SEEK_10:
3111 case SPACE:
3112 case START_STOP:
3113 case TEST_UNIT_READY:
3114 case VERIFY:
3115 case WRITE_FILEMARKS:
3116 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3117 if (!passthrough)
3118 cmd->execute_task = target_emulate_noop;
3119 break;
3120 case GPCMD_CLOSE_TRACK:
3121 case INITIALIZE_ELEMENT_STATUS:
3122 case GPCMD_LOAD_UNLOAD:
3123 case GPCMD_SET_SPEED:
3124 case MOVE_MEDIUM:
3125 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3126 break;
3127 case REPORT_LUNS:
3128 cmd->execute_task = target_report_luns;
3129 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3130 /*
3131 * Do implicit
HEAD_OF_QUEUE processing for REPORT_LUNS 3132 * See spc4r17 section 5.3 3133 */ 3134 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 3135 cmd->sam_task_attr = MSG_HEAD_TAG; 3136 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3137 break; 3138 default: 3139 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" 3140 " 0x%02x, sending CHECK_CONDITION.\n", 3141 cmd->se_tfo->get_fabric_name(), cdb[0]); 3142 goto out_unsupported_cdb; 3143 } 3144 3145 if (size != cmd->data_length) { 3146 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" 3147 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 3148 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 3149 cmd->data_length, size, cdb[0]); 3150 3151 cmd->cmd_spdtl = size; 3152 3153 if (cmd->data_direction == DMA_TO_DEVICE) { 3154 pr_err("Rejecting underflow/overflow" 3155 " WRITE data\n"); 3156 goto out_invalid_cdb_field; 3157 } 3158 /* 3159 * Reject READ_* or WRITE_* with overflow/underflow for 3160 * type SCF_SCSI_DATA_SG_IO_CDB. 3161 */ 3162 if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { 3163 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 3164 " CDB on non 512-byte sector setup subsystem" 3165 " plugin: %s\n", dev->transport->name); 3166 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ 3167 goto out_invalid_cdb_field; 3168 } 3169 3170 if (size > cmd->data_length) { 3171 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; 3172 cmd->residual_count = (size - cmd->data_length); 3173 } else { 3174 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 3175 cmd->residual_count = (cmd->data_length - size); 3176 } 3177 cmd->data_length = size; 3178 } 3179 3180 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB && 3181 sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) { 3182 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too big sectors %u\n", 3183 cdb[0], sectors); 3184 goto out_invalid_cdb_field; 3185 } 3186 3187 /* reject any command that we don't have a handler for */ 3188 if (!(passthrough || cmd->execute_task || 3189 (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) 3190 goto out_unsupported_cdb; 3191 3192 transport_set_supported_SAM_opcode(cmd); 3193 return ret; 3194 3195 out_unsupported_cdb: 3196 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3197 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 3198 return -EINVAL; 3199 out_invalid_cdb_field: 3200 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3201 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 3202 return -EINVAL; 3203 } 3204 3205 /* 3206 * Called from I/O completion to determine which dormant/delayed 3207 * and ordered cmds need to have their tasks added to the execution queue. 
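 *
 * Concretely: the counters taken in transport_execute_task_attr() are
 * dropped here (dev->simple_cmds for SIMPLE, dev->dev_ordered_sync for
 * ORDERED), dev->dev_cur_ordered_id is advanced, and commands parked
 * on dev->delayed_cmd_list are re-added up to and including the next
 * ORDERED command, which re-establishes the ordering boundary.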
3208 */ 3209 static void transport_complete_task_attr(struct se_cmd *cmd) 3210 { 3211 struct se_device *dev = cmd->se_dev; 3212 struct se_cmd *cmd_p, *cmd_tmp; 3213 int new_active_tasks = 0; 3214 3215 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { 3216 atomic_dec(&dev->simple_cmds); 3217 smp_mb__after_atomic_dec(); 3218 dev->dev_cur_ordered_id++; 3219 pr_debug("Incremented dev->dev_cur_ordered_id: %u for" 3220 " SIMPLE: %u\n", dev->dev_cur_ordered_id, 3221 cmd->se_ordered_id); 3222 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { 3223 dev->dev_cur_ordered_id++; 3224 pr_debug("Incremented dev_cur_ordered_id: %u for" 3225 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, 3226 cmd->se_ordered_id); 3227 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 3228 atomic_dec(&dev->dev_ordered_sync); 3229 smp_mb__after_atomic_dec(); 3230 3231 dev->dev_cur_ordered_id++; 3232 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" 3233 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); 3234 } 3235 /* 3236 * Process all commands up to the last received 3237 * ORDERED task attribute which requires another blocking 3238 * boundary 3239 */ 3240 spin_lock(&dev->delayed_cmd_lock); 3241 list_for_each_entry_safe(cmd_p, cmd_tmp, 3242 &dev->delayed_cmd_list, se_delayed_node) { 3243 3244 list_del(&cmd_p->se_delayed_node); 3245 spin_unlock(&dev->delayed_cmd_lock); 3246 3247 pr_debug("Calling add_tasks() for" 3248 " cmd_p: 0x%02x Task Attr: 0x%02x" 3249 " Dormant -> Active, se_ordered_id: %u\n", 3250 cmd_p->t_task_cdb[0], 3251 cmd_p->sam_task_attr, cmd_p->se_ordered_id); 3252 3253 transport_add_tasks_from_cmd(cmd_p); 3254 new_active_tasks++; 3255 3256 spin_lock(&dev->delayed_cmd_lock); 3257 if (cmd_p->sam_task_attr == MSG_ORDERED_TAG) 3258 break; 3259 } 3260 spin_unlock(&dev->delayed_cmd_lock); 3261 /* 3262 * If new tasks have become active, wake up the transport thread 3263 * to do the processing of the Active tasks. 
3264 */ 3265 if (new_active_tasks != 0) 3266 wake_up_interruptible(&dev->dev_queue_obj.thread_wq); 3267 } 3268 3269 static void transport_complete_qf(struct se_cmd *cmd) 3270 { 3271 int ret = 0; 3272 3273 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 3274 transport_complete_task_attr(cmd); 3275 3276 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 3277 ret = cmd->se_tfo->queue_status(cmd); 3278 if (ret) 3279 goto out; 3280 } 3281 3282 switch (cmd->data_direction) { 3283 case DMA_FROM_DEVICE: 3284 ret = cmd->se_tfo->queue_data_in(cmd); 3285 break; 3286 case DMA_TO_DEVICE: 3287 if (cmd->t_bidi_data_sg) { 3288 ret = cmd->se_tfo->queue_data_in(cmd); 3289 if (ret < 0) 3290 break; 3291 } 3292 /* Fall through for DMA_TO_DEVICE */ 3293 case DMA_NONE: 3294 ret = cmd->se_tfo->queue_status(cmd); 3295 break; 3296 default: 3297 break; 3298 } 3299 3300 out: 3301 if (ret < 0) { 3302 transport_handle_queue_full(cmd, cmd->se_dev); 3303 return; 3304 } 3305 transport_lun_remove_cmd(cmd); 3306 transport_cmd_check_stop_to_fabric(cmd); 3307 } 3308 3309 static void transport_handle_queue_full( 3310 struct se_cmd *cmd, 3311 struct se_device *dev) 3312 { 3313 spin_lock_irq(&dev->qf_cmd_lock); 3314 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 3315 atomic_inc(&dev->dev_qf_count); 3316 smp_mb__after_atomic_inc(); 3317 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 3318 3319 schedule_work(&cmd->se_dev->qf_work_queue); 3320 } 3321 3322 static void target_complete_ok_work(struct work_struct *work) 3323 { 3324 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 3325 int reason = 0, ret; 3326 3327 /* 3328 * Check if we need to move delayed/dormant tasks from cmds on the 3329 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 3330 * Attribute. 3331 */ 3332 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 3333 transport_complete_task_attr(cmd); 3334 /* 3335 * Check to schedule QUEUE_FULL work, or execute an existing 3336 * cmd->transport_qf_callback() 3337 */ 3338 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 3339 schedule_work(&cmd->se_dev->qf_work_queue); 3340 3341 /* 3342 * Check if we need to retrieve a sense buffer from 3343 * the struct se_cmd in question. 3344 */ 3345 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 3346 if (transport_get_sense_data(cmd) < 0) 3347 reason = TCM_NON_EXISTENT_LUN; 3348 3349 /* 3350 * Only set when an struct se_task->task_scsi_status returned 3351 * a non GOOD status. 3352 */ 3353 if (cmd->scsi_status) { 3354 ret = transport_send_check_condition_and_sense( 3355 cmd, reason, 1); 3356 if (ret == -EAGAIN || ret == -ENOMEM) 3357 goto queue_full; 3358 3359 transport_lun_remove_cmd(cmd); 3360 transport_cmd_check_stop_to_fabric(cmd); 3361 return; 3362 } 3363 } 3364 /* 3365 * Check for a callback, used by amongst other things 3366 * XDWRITE_READ_10 emulation. 
3367 */ 3368 if (cmd->transport_complete_callback) 3369 cmd->transport_complete_callback(cmd); 3370 3371 switch (cmd->data_direction) { 3372 case DMA_FROM_DEVICE: 3373 spin_lock(&cmd->se_lun->lun_sep_lock); 3374 if (cmd->se_lun->lun_sep) { 3375 cmd->se_lun->lun_sep->sep_stats.tx_data_octets += 3376 cmd->data_length; 3377 } 3378 spin_unlock(&cmd->se_lun->lun_sep_lock); 3379 3380 ret = cmd->se_tfo->queue_data_in(cmd); 3381 if (ret == -EAGAIN || ret == -ENOMEM) 3382 goto queue_full; 3383 break; 3384 case DMA_TO_DEVICE: 3385 spin_lock(&cmd->se_lun->lun_sep_lock); 3386 if (cmd->se_lun->lun_sep) { 3387 cmd->se_lun->lun_sep->sep_stats.rx_data_octets += 3388 cmd->data_length; 3389 } 3390 spin_unlock(&cmd->se_lun->lun_sep_lock); 3391 /* 3392 * Check if we need to send READ payload for BIDI-COMMAND 3393 */ 3394 if (cmd->t_bidi_data_sg) { 3395 spin_lock(&cmd->se_lun->lun_sep_lock); 3396 if (cmd->se_lun->lun_sep) { 3397 cmd->se_lun->lun_sep->sep_stats.tx_data_octets += 3398 cmd->data_length; 3399 } 3400 spin_unlock(&cmd->se_lun->lun_sep_lock); 3401 ret = cmd->se_tfo->queue_data_in(cmd); 3402 if (ret == -EAGAIN || ret == -ENOMEM) 3403 goto queue_full; 3404 break; 3405 } 3406 /* Fall through for DMA_TO_DEVICE */ 3407 case DMA_NONE: 3408 ret = cmd->se_tfo->queue_status(cmd); 3409 if (ret == -EAGAIN || ret == -ENOMEM) 3410 goto queue_full; 3411 break; 3412 default: 3413 break; 3414 } 3415 3416 transport_lun_remove_cmd(cmd); 3417 transport_cmd_check_stop_to_fabric(cmd); 3418 return; 3419 3420 queue_full: 3421 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 3422 " data_direction: %d\n", cmd, cmd->data_direction); 3423 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 3424 transport_handle_queue_full(cmd, cmd->se_dev); 3425 } 3426 3427 static void transport_free_dev_tasks(struct se_cmd *cmd) 3428 { 3429 struct se_task *task, *task_tmp; 3430 unsigned long flags; 3431 LIST_HEAD(dispose_list); 3432 3433 spin_lock_irqsave(&cmd->t_state_lock, flags); 3434 list_for_each_entry_safe(task, task_tmp, 3435 &cmd->t_task_list, t_list) { 3436 if (!(task->task_flags & TF_ACTIVE)) 3437 list_move_tail(&task->t_list, &dispose_list); 3438 } 3439 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3440 3441 while (!list_empty(&dispose_list)) { 3442 task = list_first_entry(&dispose_list, struct se_task, t_list); 3443 3444 if (task->task_sg != cmd->t_data_sg && 3445 task->task_sg != cmd->t_bidi_data_sg) 3446 kfree(task->task_sg); 3447 3448 list_del(&task->t_list); 3449 3450 cmd->se_dev->transport->free_task(task); 3451 } 3452 } 3453 3454 static inline void transport_free_sgl(struct scatterlist *sgl, int nents) 3455 { 3456 struct scatterlist *sg; 3457 int count; 3458 3459 for_each_sg(sgl, sg, nents, count) 3460 __free_page(sg_page(sg)); 3461 3462 kfree(sgl); 3463 } 3464 3465 static inline void transport_free_pages(struct se_cmd *cmd) 3466 { 3467 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) 3468 return; 3469 3470 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 3471 cmd->t_data_sg = NULL; 3472 cmd->t_data_nents = 0; 3473 3474 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 3475 cmd->t_bidi_data_sg = NULL; 3476 cmd->t_bidi_data_nents = 0; 3477 } 3478 3479 /** 3480 * transport_release_cmd - free a command 3481 * @cmd: command to free 3482 * 3483 * This routine unconditionally frees a command, and reference counting 3484 * or list removal must be done in the caller. 
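 *
 * Note that when the command was set up through target_get_sess_cmd()
 * (cmd->check_release != 0), the final free is deferred to the kref
 * release callback via target_put_sess_cmd(); otherwise the fabric's
 * ->release_cmd() is invoked directly.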
3485 */ 3486 static void transport_release_cmd(struct se_cmd *cmd) 3487 { 3488 BUG_ON(!cmd->se_tfo); 3489 3490 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 3491 core_tmr_release_req(cmd->se_tmr_req); 3492 if (cmd->t_task_cdb != cmd->__t_task_cdb) 3493 kfree(cmd->t_task_cdb); 3494 /* 3495 * If this cmd has been setup with target_get_sess_cmd(), drop 3496 * the kref and call ->release_cmd() in kref callback. 3497 */ 3498 if (cmd->check_release != 0) { 3499 target_put_sess_cmd(cmd->se_sess, cmd); 3500 return; 3501 } 3502 cmd->se_tfo->release_cmd(cmd); 3503 } 3504 3505 /** 3506 * transport_put_cmd - release a reference to a command 3507 * @cmd: command to release 3508 * 3509 * This routine releases our reference to the command and frees it if possible. 3510 */ 3511 static void transport_put_cmd(struct se_cmd *cmd) 3512 { 3513 unsigned long flags; 3514 int free_tasks = 0; 3515 3516 spin_lock_irqsave(&cmd->t_state_lock, flags); 3517 if (atomic_read(&cmd->t_fe_count)) { 3518 if (!atomic_dec_and_test(&cmd->t_fe_count)) 3519 goto out_busy; 3520 } 3521 3522 if (atomic_read(&cmd->t_se_count)) { 3523 if (!atomic_dec_and_test(&cmd->t_se_count)) 3524 goto out_busy; 3525 } 3526 3527 if (cmd->transport_state & CMD_T_DEV_ACTIVE) { 3528 cmd->transport_state &= ~CMD_T_DEV_ACTIVE; 3529 transport_all_task_dev_remove_state(cmd); 3530 free_tasks = 1; 3531 } 3532 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3533 3534 if (free_tasks != 0) 3535 transport_free_dev_tasks(cmd); 3536 3537 transport_free_pages(cmd); 3538 transport_release_cmd(cmd); 3539 return; 3540 out_busy: 3541 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3542 } 3543 3544 /* 3545 * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of 3546 * allocating in the core. 3547 * @cmd: Associated se_cmd descriptor 3548 * @mem: SGL style memory for TCM WRITE / READ 3549 * @sg_mem_num: Number of SGL elements 3550 * @mem_bidi_in: SGL style memory for TCM BIDI READ 3551 * @sg_mem_bidi_num: Number of BIDI READ SGL elements 3552 * 3553 * Return: nonzero return cmd was rejected for -ENOMEM or inproper usage 3554 * of parameters. 3555 */ 3556 int transport_generic_map_mem_to_cmd( 3557 struct se_cmd *cmd, 3558 struct scatterlist *sgl, 3559 u32 sgl_count, 3560 struct scatterlist *sgl_bidi, 3561 u32 sgl_bidi_count) 3562 { 3563 if (!sgl || !sgl_count) 3564 return 0; 3565 3566 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || 3567 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { 3568 /* 3569 * Reject SCSI data overflow with map_mem_to_cmd() as incoming 3570 * scatterlists already have been set to follow what the fabric 3571 * passes for the original expected data transfer length. 
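 *
 * For reference, a fabric that owns the payload scatterlist maps it in
 * before kicking off execution, roughly as (illustrative only; a real
 * caller has already initialized the se_cmd and its CDB):
 *
 *   rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
 *                                         NULL, 0);
 *   if (!rc)
 *           rc = transport_handle_cdb_direct(se_cmd);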
3572 */ 3573 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 3574 pr_warn("Rejecting SCSI DATA overflow for fabric using" 3575 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); 3576 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3577 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 3578 return -EINVAL; 3579 } 3580 3581 cmd->t_data_sg = sgl; 3582 cmd->t_data_nents = sgl_count; 3583 3584 if (sgl_bidi && sgl_bidi_count) { 3585 cmd->t_bidi_data_sg = sgl_bidi; 3586 cmd->t_bidi_data_nents = sgl_bidi_count; 3587 } 3588 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 3589 } 3590 3591 return 0; 3592 } 3593 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); 3594 3595 void *transport_kmap_data_sg(struct se_cmd *cmd) 3596 { 3597 struct scatterlist *sg = cmd->t_data_sg; 3598 struct page **pages; 3599 int i; 3600 3601 BUG_ON(!sg); 3602 /* 3603 * We need to take into account a possible offset here for fabrics like 3604 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 3605 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 3606 */ 3607 if (!cmd->t_data_nents) 3608 return NULL; 3609 else if (cmd->t_data_nents == 1) 3610 return kmap(sg_page(sg)) + sg->offset; 3611 3612 /* >1 page. use vmap */ 3613 pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL); 3614 if (!pages) 3615 return NULL; 3616 3617 /* convert sg[] to pages[] */ 3618 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 3619 pages[i] = sg_page(sg); 3620 } 3621 3622 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 3623 kfree(pages); 3624 if (!cmd->t_data_vmap) 3625 return NULL; 3626 3627 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 3628 } 3629 EXPORT_SYMBOL(transport_kmap_data_sg); 3630 3631 void transport_kunmap_data_sg(struct se_cmd *cmd) 3632 { 3633 if (!cmd->t_data_nents) { 3634 return; 3635 } else if (cmd->t_data_nents == 1) { 3636 kunmap(sg_page(cmd->t_data_sg)); 3637 return; 3638 } 3639 3640 vunmap(cmd->t_data_vmap); 3641 cmd->t_data_vmap = NULL; 3642 } 3643 EXPORT_SYMBOL(transport_kunmap_data_sg); 3644 3645 static int 3646 transport_generic_get_mem(struct se_cmd *cmd) 3647 { 3648 u32 length = cmd->data_length; 3649 unsigned int nents; 3650 struct page *page; 3651 gfp_t zero_flag; 3652 int i = 0; 3653 3654 nents = DIV_ROUND_UP(length, PAGE_SIZE); 3655 cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); 3656 if (!cmd->t_data_sg) 3657 return -ENOMEM; 3658 3659 cmd->t_data_nents = nents; 3660 sg_init_table(cmd->t_data_sg, nents); 3661 3662 zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 
0 : __GFP_ZERO; 3663 3664 while (length) { 3665 u32 page_len = min_t(u32, length, PAGE_SIZE); 3666 page = alloc_page(GFP_KERNEL | zero_flag); 3667 if (!page) 3668 goto out; 3669 3670 sg_set_page(&cmd->t_data_sg[i], page, page_len, 0); 3671 length -= page_len; 3672 i++; 3673 } 3674 return 0; 3675 3676 out: 3677 while (i >= 0) { 3678 __free_page(sg_page(&cmd->t_data_sg[i])); 3679 i--; 3680 } 3681 kfree(cmd->t_data_sg); 3682 cmd->t_data_sg = NULL; 3683 return -ENOMEM; 3684 } 3685 3686 /* Reduce sectors if they are too long for the device */ 3687 static inline sector_t transport_limit_task_sectors( 3688 struct se_device *dev, 3689 unsigned long long lba, 3690 sector_t sectors) 3691 { 3692 sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); 3693 3694 if (dev->transport->get_device_type(dev) == TYPE_DISK) 3695 if ((lba + sectors) > transport_dev_end_lba(dev)) 3696 sectors = ((transport_dev_end_lba(dev) - lba) + 1); 3697 3698 return sectors; 3699 } 3700 3701 3702 /* 3703 * This function can be used by HW target mode drivers to create a linked 3704 * scatterlist from all contiguously allocated struct se_task->task_sg[]. 3705 * This is intended to be called during the completion path by TCM Core 3706 * when struct target_core_fabric_ops->check_task_sg_chaining is enabled. 3707 */ 3708 void transport_do_task_sg_chain(struct se_cmd *cmd) 3709 { 3710 struct scatterlist *sg_first = NULL; 3711 struct scatterlist *sg_prev = NULL; 3712 int sg_prev_nents = 0; 3713 struct scatterlist *sg; 3714 struct se_task *task; 3715 u32 chained_nents = 0; 3716 int i; 3717 3718 BUG_ON(!cmd->se_tfo->task_sg_chaining); 3719 3720 /* 3721 * Walk the struct se_task list and setup scatterlist chains 3722 * for each contiguously allocated struct se_task->task_sg[]. 3723 */ 3724 list_for_each_entry(task, &cmd->t_task_list, t_list) { 3725 if (!task->task_sg) 3726 continue; 3727 3728 if (!sg_first) { 3729 sg_first = task->task_sg; 3730 chained_nents = task->task_sg_nents; 3731 } else { 3732 sg_chain(sg_prev, sg_prev_nents, task->task_sg); 3733 chained_nents += task->task_sg_nents; 3734 } 3735 /* 3736 * For the padded tasks, use the extra SGL vector allocated 3737 * in transport_allocate_data_tasks() for the sg_prev_nents 3738 * offset into sg_chain() above. 3739 * 3740 * We do not need the padding for the last task (or a single 3741 * task), but in that case we will never use the sg_prev_nents 3742 * value below which would be incorrect. 3743 */ 3744 sg_prev_nents = (task->task_sg_nents + 1); 3745 sg_prev = task->task_sg; 3746 } 3747 /* 3748 * Setup the starting pointer and total t_tasks_sg_linked_no including 3749 * padding SGs for linking and to mark the end. 
3750 */ 3751 cmd->t_tasks_sg_chained = sg_first; 3752 cmd->t_tasks_sg_chained_no = chained_nents; 3753 3754 pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" 3755 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, 3756 cmd->t_tasks_sg_chained_no); 3757 3758 for_each_sg(cmd->t_tasks_sg_chained, sg, 3759 cmd->t_tasks_sg_chained_no, i) { 3760 3761 pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n", 3762 i, sg, sg_page(sg), sg->length, sg->offset); 3763 if (sg_is_chain(sg)) 3764 pr_debug("SG: %p sg_is_chain=1\n", sg); 3765 if (sg_is_last(sg)) 3766 pr_debug("SG: %p sg_is_last=1\n", sg); 3767 } 3768 } 3769 EXPORT_SYMBOL(transport_do_task_sg_chain); 3770 3771 /* 3772 * Break up cmd into chunks transport can handle 3773 */ 3774 static int 3775 transport_allocate_data_tasks(struct se_cmd *cmd, 3776 enum dma_data_direction data_direction, 3777 struct scatterlist *cmd_sg, unsigned int sgl_nents) 3778 { 3779 struct se_device *dev = cmd->se_dev; 3780 int task_count, i; 3781 unsigned long long lba; 3782 sector_t sectors, dev_max_sectors; 3783 u32 sector_size; 3784 3785 if (transport_cmd_get_valid_sectors(cmd) < 0) 3786 return -EINVAL; 3787 3788 dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; 3789 sector_size = dev->se_sub_dev->se_dev_attrib.block_size; 3790 3791 WARN_ON(cmd->data_length % sector_size); 3792 3793 lba = cmd->t_task_lba; 3794 sectors = DIV_ROUND_UP(cmd->data_length, sector_size); 3795 task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors); 3796 3797 /* 3798 * If we need just a single task reuse the SG list in the command 3799 * and avoid a lot of work. 3800 */ 3801 if (task_count == 1) { 3802 struct se_task *task; 3803 unsigned long flags; 3804 3805 task = transport_generic_get_task(cmd, data_direction); 3806 if (!task) 3807 return -ENOMEM; 3808 3809 task->task_sg = cmd_sg; 3810 task->task_sg_nents = sgl_nents; 3811 3812 task->task_lba = lba; 3813 task->task_sectors = sectors; 3814 task->task_size = task->task_sectors * sector_size; 3815 3816 spin_lock_irqsave(&cmd->t_state_lock, flags); 3817 list_add_tail(&task->t_list, &cmd->t_task_list); 3818 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3819 3820 return task_count; 3821 } 3822 3823 for (i = 0; i < task_count; i++) { 3824 struct se_task *task; 3825 unsigned int task_size, task_sg_nents_padded; 3826 struct scatterlist *sg; 3827 unsigned long flags; 3828 int count; 3829 3830 task = transport_generic_get_task(cmd, data_direction); 3831 if (!task) 3832 return -ENOMEM; 3833 3834 task->task_lba = lba; 3835 task->task_sectors = min(sectors, dev_max_sectors); 3836 task->task_size = task->task_sectors * sector_size; 3837 3838 /* 3839 * This now assumes that passed sg_ents are in PAGE_SIZE chunks 3840 * in order to calculate the number per task SGL entries 3841 */ 3842 task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE); 3843 /* 3844 * Check if the fabric module driver is requesting that all 3845 * struct se_task->task_sg[] be chained together.. If so, 3846 * then allocate an extra padding SG entry for linking and 3847 * marking the end of the chained SGL for every task except 3848 * the last one for (task_count > 1) operation, or skipping 3849 * the extra padding for the (task_count == 1) case. 
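		 *
		 * Worked example (illustrative): with task_sg_chaining
		 * enabled, task_count == 3 and 8 SGL entries per task,
		 * tasks 0 and 1 each allocate 8 + 1 entries so sg_chain()
		 * can turn the extra entry into a chain link to the next
		 * task's SGL, while the final task allocates only the 8
		 * entries it actually needs:
		 *
		 *	nents_padded = (i < task_count - 1) ?
		 *			task->task_sg_nents + 1 :
		 *			task->task_sg_nents;
		 *
		 * which is exactly what the conditional below computes.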
3850 		 */
3851 		if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
3852 			task_sg_nents_padded = (task->task_sg_nents + 1);
3853 		} else
3854 			task_sg_nents_padded = task->task_sg_nents;
3855 
3856 		task->task_sg = kmalloc(sizeof(struct scatterlist) *
3857 					task_sg_nents_padded, GFP_KERNEL);
3858 		if (!task->task_sg) {
3859 			cmd->se_dev->transport->free_task(task);
3860 			return -ENOMEM;
3861 		}
3862 
3863 		sg_init_table(task->task_sg, task_sg_nents_padded);
3864 
3865 		task_size = task->task_size;
3866 
3867 		/* Build new sgl, only up to task_size */
3868 		for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
3869 			if (cmd_sg->length > task_size)
3870 				break;
3871 
3872 			*sg = *cmd_sg;
3873 			task_size -= cmd_sg->length;
3874 			cmd_sg = sg_next(cmd_sg);
3875 		}
3876 
3877 		lba += task->task_sectors;
3878 		sectors -= task->task_sectors;
3879 
3880 		spin_lock_irqsave(&cmd->t_state_lock, flags);
3881 		list_add_tail(&task->t_list, &cmd->t_task_list);
3882 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3883 	}
3884 
3885 	return task_count;
3886 }
3887 
3888 static int
3889 transport_allocate_control_task(struct se_cmd *cmd)
3890 {
3891 	struct se_task *task;
3892 	unsigned long flags;
3893 
3894 	/* Workaround for handling zero-length control CDBs */
3895 	if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3896 	    !cmd->data_length)
3897 		return 0;
3898 
3899 	task = transport_generic_get_task(cmd, cmd->data_direction);
3900 	if (!task)
3901 		return -ENOMEM;
3902 
3903 	task->task_sg = cmd->t_data_sg;
3904 	task->task_size = cmd->data_length;
3905 	task->task_sg_nents = cmd->t_data_nents;
3906 
3907 	spin_lock_irqsave(&cmd->t_state_lock, flags);
3908 	list_add_tail(&task->t_list, &cmd->t_task_list);
3909 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3910 
3911 	/* Success! Return number of tasks allocated */
3912 	return 1;
3913 }
3914 
3915 /*
3916  * Allocate any required resources to execute the command, and place it
3917  * on the execution queue if possible. For writes we might not have the
3918  * payload yet, thus notify the fabric via a call to ->write_pending instead.
3919  */
3920 int transport_generic_new_cmd(struct se_cmd *cmd)
3921 {
3922 	struct se_device *dev = cmd->se_dev;
3923 	int task_cdbs, task_cdbs_bidi = 0;
3924 	int set_counts = 1;
3925 	int ret = 0;
3926 
3927 	/*
3928 	 * Determine if the TCM fabric module has already allocated physical
3929 	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
3930 	 * beforehand.
3931 	 */
3932 	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
3933 	    cmd->data_length) {
3934 		ret = transport_generic_get_mem(cmd);
3935 		if (ret < 0)
3936 			goto out_fail;
3937 	}
3938 
3939 	/*
3940 	 * For BIDI command set up the read tasks first.
3941 */ 3942 if (cmd->t_bidi_data_sg && 3943 dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { 3944 BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)); 3945 3946 task_cdbs_bidi = transport_allocate_data_tasks(cmd, 3947 DMA_FROM_DEVICE, cmd->t_bidi_data_sg, 3948 cmd->t_bidi_data_nents); 3949 if (task_cdbs_bidi <= 0) 3950 goto out_fail; 3951 3952 atomic_inc(&cmd->t_fe_count); 3953 atomic_inc(&cmd->t_se_count); 3954 set_counts = 0; 3955 } 3956 3957 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { 3958 task_cdbs = transport_allocate_data_tasks(cmd, 3959 cmd->data_direction, cmd->t_data_sg, 3960 cmd->t_data_nents); 3961 } else { 3962 task_cdbs = transport_allocate_control_task(cmd); 3963 } 3964 3965 if (task_cdbs < 0) 3966 goto out_fail; 3967 else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { 3968 spin_lock_irq(&cmd->t_state_lock); 3969 cmd->t_state = TRANSPORT_COMPLETE; 3970 cmd->transport_state |= CMD_T_ACTIVE; 3971 spin_unlock_irq(&cmd->t_state_lock); 3972 3973 if (cmd->t_task_cdb[0] == REQUEST_SENSE) { 3974 u8 ua_asc = 0, ua_ascq = 0; 3975 3976 core_scsi3_ua_clear_for_request_sense(cmd, 3977 &ua_asc, &ua_ascq); 3978 } 3979 3980 INIT_WORK(&cmd->work, target_complete_ok_work); 3981 queue_work(target_completion_wq, &cmd->work); 3982 return 0; 3983 } 3984 3985 if (set_counts) { 3986 atomic_inc(&cmd->t_fe_count); 3987 atomic_inc(&cmd->t_se_count); 3988 } 3989 3990 cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi); 3991 atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num); 3992 atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num); 3993 3994 /* 3995 * For WRITEs, let the fabric know its buffer is ready.. 3996 * This WRITE struct se_cmd (and all of its associated struct se_task's) 3997 * will be added to the struct se_device execution queue after its WRITE 3998 * data has arrived. (ie: It gets handled by the transport processing 3999 * thread a second time) 4000 */ 4001 if (cmd->data_direction == DMA_TO_DEVICE) { 4002 transport_add_tasks_to_state_queue(cmd); 4003 return transport_generic_write_pending(cmd); 4004 } 4005 /* 4006 * Everything else but a WRITE, add the struct se_cmd's struct se_task's 4007 * to the execution queue. 4008 */ 4009 transport_execute_tasks(cmd); 4010 return 0; 4011 4012 out_fail: 4013 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 4014 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 4015 return -EINVAL; 4016 } 4017 EXPORT_SYMBOL(transport_generic_new_cmd); 4018 4019 /* transport_generic_process_write(): 4020 * 4021 * 4022 */ 4023 void transport_generic_process_write(struct se_cmd *cmd) 4024 { 4025 transport_execute_tasks(cmd); 4026 } 4027 EXPORT_SYMBOL(transport_generic_process_write); 4028 4029 static void transport_write_pending_qf(struct se_cmd *cmd) 4030 { 4031 int ret; 4032 4033 ret = cmd->se_tfo->write_pending(cmd); 4034 if (ret == -EAGAIN || ret == -ENOMEM) { 4035 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", 4036 cmd); 4037 transport_handle_queue_full(cmd, cmd->se_dev); 4038 } 4039 } 4040 4041 static int transport_generic_write_pending(struct se_cmd *cmd) 4042 { 4043 unsigned long flags; 4044 int ret; 4045 4046 spin_lock_irqsave(&cmd->t_state_lock, flags); 4047 cmd->t_state = TRANSPORT_WRITE_PENDING; 4048 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4049 4050 /* 4051 * Clear the se_cmd for WRITE_PENDING status in order to set 4052 * CMD_T_ACTIVE so that transport_generic_handle_data can be called 4053 * from HW target mode interrupt code. 
This is safe to be called
4054 	 * with transport_off=1 before the cmd->se_tfo->write_pending
4055 	 * because the se_cmd->se_lun pointer is not being cleared.
4056 	 */
4057 	transport_cmd_check_stop(cmd, 1, 0);
4058 
4059 	/*
4060 	 * Call the fabric write_pending function here to let the
4061 	 * frontend know that WRITE buffers are ready.
4062 	 */
4063 	ret = cmd->se_tfo->write_pending(cmd);
4064 	if (ret == -EAGAIN || ret == -ENOMEM)
4065 		goto queue_full;
4066 	else if (ret < 0)
4067 		return ret;
4068 
4069 	return 1;
4070 
4071 queue_full:
4072 	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
4073 	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
4074 	transport_handle_queue_full(cmd, cmd->se_dev);
4075 	return 0;
4076 }
4077 
4078 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
4079 {
4080 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
4081 		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
4082 			transport_wait_for_tasks(cmd);
4083 
4084 		transport_release_cmd(cmd);
4085 	} else {
4086 		if (wait_for_tasks)
4087 			transport_wait_for_tasks(cmd);
4088 
4089 		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
4090 
4091 		if (cmd->se_lun)
4092 			transport_lun_remove_cmd(cmd);
4093 
4094 		transport_free_dev_tasks(cmd);
4095 
4096 		transport_put_cmd(cmd);
4097 	}
4098 }
4099 EXPORT_SYMBOL(transport_generic_free_cmd);
4100 
4101 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
4102  * @se_sess:	session to reference
4103  * @se_cmd:	command descriptor to add
4104  * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
4105  */
4106 void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
4107 			bool ack_kref)
4108 {
4109 	unsigned long flags;
4110 
4111 	kref_init(&se_cmd->cmd_kref);
4112 	/*
4113 	 * Add a second kref if the fabric caller is expecting to handle
4114 	 * fabric acknowledgement that requires two target_put_sess_cmd()
4115 	 * invocations before se_cmd descriptor release.
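	 *
	 * Illustrative call sequence (not from this file) for a fabric that
	 * sets ack_kref, where command completion and the initiator's
	 * acknowledgement are the fabric's own events:
	 *
	 *	target_get_sess_cmd(se_sess, se_cmd, true);	kref == 2
	 *	... I/O completes, response is queued to the fabric ...
	 *	target_put_sess_cmd(se_sess, se_cmd);		kref == 1
	 *	... initiator acknowledges the response ...
	 *	target_put_sess_cmd(se_sess, se_cmd);		kref == 0,
	 *						->release_cmd() runs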
4116 */ 4117 if (ack_kref == true) { 4118 kref_get(&se_cmd->cmd_kref); 4119 se_cmd->se_cmd_flags |= SCF_ACK_KREF; 4120 } 4121 4122 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 4123 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 4124 se_cmd->check_release = 1; 4125 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 4126 } 4127 EXPORT_SYMBOL(target_get_sess_cmd); 4128 4129 static void target_release_cmd_kref(struct kref *kref) 4130 { 4131 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 4132 struct se_session *se_sess = se_cmd->se_sess; 4133 unsigned long flags; 4134 4135 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 4136 if (list_empty(&se_cmd->se_cmd_list)) { 4137 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 4138 se_cmd->se_tfo->release_cmd(se_cmd); 4139 return; 4140 } 4141 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { 4142 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 4143 complete(&se_cmd->cmd_wait_comp); 4144 return; 4145 } 4146 list_del(&se_cmd->se_cmd_list); 4147 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 4148 4149 se_cmd->se_tfo->release_cmd(se_cmd); 4150 } 4151 4152 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put 4153 * @se_sess: session to reference 4154 * @se_cmd: command descriptor to drop 4155 */ 4156 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) 4157 { 4158 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); 4159 } 4160 EXPORT_SYMBOL(target_put_sess_cmd); 4161 4162 /* target_splice_sess_cmd_list - Split active cmds into sess_wait_list 4163 * @se_sess: session to split 4164 */ 4165 void target_splice_sess_cmd_list(struct se_session *se_sess) 4166 { 4167 struct se_cmd *se_cmd; 4168 unsigned long flags; 4169 4170 WARN_ON(!list_empty(&se_sess->sess_wait_list)); 4171 INIT_LIST_HEAD(&se_sess->sess_wait_list); 4172 4173 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 4174 se_sess->sess_tearing_down = 1; 4175 4176 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); 4177 4178 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) 4179 se_cmd->cmd_wait_set = 1; 4180 4181 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 4182 } 4183 EXPORT_SYMBOL(target_splice_sess_cmd_list); 4184 4185 /* target_wait_for_sess_cmds - Wait for outstanding descriptors 4186 * @se_sess: session to wait for active I/O 4187 * @wait_for_tasks: Make extra transport_wait_for_tasks call 4188 */ 4189 void target_wait_for_sess_cmds( 4190 struct se_session *se_sess, 4191 int wait_for_tasks) 4192 { 4193 struct se_cmd *se_cmd, *tmp_cmd; 4194 bool rc = false; 4195 4196 list_for_each_entry_safe(se_cmd, tmp_cmd, 4197 &se_sess->sess_wait_list, se_cmd_list) { 4198 list_del(&se_cmd->se_cmd_list); 4199 4200 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" 4201 " %d\n", se_cmd, se_cmd->t_state, 4202 se_cmd->se_tfo->get_cmd_state(se_cmd)); 4203 4204 if (wait_for_tasks) { 4205 pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d," 4206 " fabric state: %d\n", se_cmd, se_cmd->t_state, 4207 se_cmd->se_tfo->get_cmd_state(se_cmd)); 4208 4209 rc = transport_wait_for_tasks(se_cmd); 4210 4211 pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d," 4212 " fabric state: %d\n", se_cmd, se_cmd->t_state, 4213 se_cmd->se_tfo->get_cmd_state(se_cmd)); 4214 } 4215 4216 if (!rc) { 4217 wait_for_completion(&se_cmd->cmd_wait_comp); 4218 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" 4219 " fabric state: %d\n", se_cmd, 
se_cmd->t_state, 4220 se_cmd->se_tfo->get_cmd_state(se_cmd)); 4221 } 4222 4223 se_cmd->se_tfo->release_cmd(se_cmd); 4224 } 4225 } 4226 EXPORT_SYMBOL(target_wait_for_sess_cmds); 4227 4228 /* transport_lun_wait_for_tasks(): 4229 * 4230 * Called from ConfigFS context to stop the passed struct se_cmd to allow 4231 * an struct se_lun to be successfully shutdown. 4232 */ 4233 static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) 4234 { 4235 unsigned long flags; 4236 int ret; 4237 /* 4238 * If the frontend has already requested this struct se_cmd to 4239 * be stopped, we can safely ignore this struct se_cmd. 4240 */ 4241 spin_lock_irqsave(&cmd->t_state_lock, flags); 4242 if (cmd->transport_state & CMD_T_STOP) { 4243 cmd->transport_state &= ~CMD_T_LUN_STOP; 4244 4245 pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n", 4246 cmd->se_tfo->get_task_tag(cmd)); 4247 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4248 transport_cmd_check_stop(cmd, 1, 0); 4249 return -EPERM; 4250 } 4251 cmd->transport_state |= CMD_T_LUN_FE_STOP; 4252 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4253 4254 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); 4255 4256 ret = transport_stop_tasks_for_cmd(cmd); 4257 4258 pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:" 4259 " %d\n", cmd, cmd->t_task_list_num, ret); 4260 if (!ret) { 4261 pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", 4262 cmd->se_tfo->get_task_tag(cmd)); 4263 wait_for_completion(&cmd->transport_lun_stop_comp); 4264 pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", 4265 cmd->se_tfo->get_task_tag(cmd)); 4266 } 4267 transport_remove_cmd_from_queue(cmd); 4268 4269 return 0; 4270 } 4271 4272 static void __transport_clear_lun_from_sessions(struct se_lun *lun) 4273 { 4274 struct se_cmd *cmd = NULL; 4275 unsigned long lun_flags, cmd_flags; 4276 /* 4277 * Do exception processing and return CHECK_CONDITION status to the 4278 * Initiator Port. 4279 */ 4280 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 4281 while (!list_empty(&lun->lun_cmd_list)) { 4282 cmd = list_first_entry(&lun->lun_cmd_list, 4283 struct se_cmd, se_lun_node); 4284 list_del_init(&cmd->se_lun_node); 4285 4286 /* 4287 * This will notify iscsi_target_transport.c: 4288 * transport_cmd_check_stop() that a LUN shutdown is in 4289 * progress for the iscsi_cmd_t. 4290 */ 4291 spin_lock(&cmd->t_state_lock); 4292 pr_debug("SE_LUN[%d] - Setting cmd->transport" 4293 "_lun_stop for ITT: 0x%08x\n", 4294 cmd->se_lun->unpacked_lun, 4295 cmd->se_tfo->get_task_tag(cmd)); 4296 cmd->transport_state |= CMD_T_LUN_STOP; 4297 spin_unlock(&cmd->t_state_lock); 4298 4299 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); 4300 4301 if (!cmd->se_lun) { 4302 pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n", 4303 cmd->se_tfo->get_task_tag(cmd), 4304 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); 4305 BUG(); 4306 } 4307 /* 4308 * If the Storage engine still owns the iscsi_cmd_t, determine 4309 * and/or stop its context. 
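		 *
		 * In summary, each command still on lun_cmd_list goes through
		 * (illustrative outline of the loop body that follows):
		 *
		 *	transport_lun_wait_for_tasks(cmd, cmd->se_lun);
		 *	transport_all_task_dev_remove_state(cmd);
		 *	transport_free_dev_tasks(cmd);
		 *	transport_send_check_condition_and_sense(cmd,
		 *			TCM_NON_EXISTENT_LUN, 0);
		 *
		 * with the CMD_T_* state checks deciding whether individual
		 * steps are skipped for a given command.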
4310 */ 4311 pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport" 4312 "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, 4313 cmd->se_tfo->get_task_tag(cmd)); 4314 4315 if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { 4316 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 4317 continue; 4318 } 4319 4320 pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun" 4321 "_wait_for_tasks(): SUCCESS\n", 4322 cmd->se_lun->unpacked_lun, 4323 cmd->se_tfo->get_task_tag(cmd)); 4324 4325 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); 4326 if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) { 4327 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); 4328 goto check_cond; 4329 } 4330 cmd->transport_state &= ~CMD_T_DEV_ACTIVE; 4331 transport_all_task_dev_remove_state(cmd); 4332 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); 4333 4334 transport_free_dev_tasks(cmd); 4335 /* 4336 * The Storage engine stopped this struct se_cmd before it was 4337 * send to the fabric frontend for delivery back to the 4338 * Initiator Node. Return this SCSI CDB back with an 4339 * CHECK_CONDITION status. 4340 */ 4341 check_cond: 4342 transport_send_check_condition_and_sense(cmd, 4343 TCM_NON_EXISTENT_LUN, 0); 4344 /* 4345 * If the fabric frontend is waiting for this iscsi_cmd_t to 4346 * be released, notify the waiting thread now that LU has 4347 * finished accessing it. 4348 */ 4349 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); 4350 if (cmd->transport_state & CMD_T_LUN_FE_STOP) { 4351 pr_debug("SE_LUN[%d] - Detected FE stop for" 4352 " struct se_cmd: %p ITT: 0x%08x\n", 4353 lun->unpacked_lun, 4354 cmd, cmd->se_tfo->get_task_tag(cmd)); 4355 4356 spin_unlock_irqrestore(&cmd->t_state_lock, 4357 cmd_flags); 4358 transport_cmd_check_stop(cmd, 1, 0); 4359 complete(&cmd->transport_lun_fe_stop_comp); 4360 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 4361 continue; 4362 } 4363 pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", 4364 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); 4365 4366 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); 4367 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 4368 } 4369 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); 4370 } 4371 4372 static int transport_clear_lun_thread(void *p) 4373 { 4374 struct se_lun *lun = p; 4375 4376 __transport_clear_lun_from_sessions(lun); 4377 complete(&lun->lun_shutdown_comp); 4378 4379 return 0; 4380 } 4381 4382 int transport_clear_lun_from_sessions(struct se_lun *lun) 4383 { 4384 struct task_struct *kt; 4385 4386 kt = kthread_run(transport_clear_lun_thread, lun, 4387 "tcm_cl_%u", lun->unpacked_lun); 4388 if (IS_ERR(kt)) { 4389 pr_err("Unable to start clear_lun thread\n"); 4390 return PTR_ERR(kt); 4391 } 4392 wait_for_completion(&lun->lun_shutdown_comp); 4393 4394 return 0; 4395 } 4396 4397 /** 4398 * transport_wait_for_tasks - wait for completion to occur 4399 * @cmd: command to wait 4400 * 4401 * Called from frontend fabric context to wait for storage engine 4402 * to pause and/or release frontend generated struct se_cmd. 4403 */ 4404 bool transport_wait_for_tasks(struct se_cmd *cmd) 4405 { 4406 unsigned long flags; 4407 4408 spin_lock_irqsave(&cmd->t_state_lock, flags); 4409 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 4410 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 4411 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4412 return false; 4413 } 4414 /* 4415 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE 4416 * has been set in transport_set_supported_SAM_opcode(). 
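	 *
	 * Illustrative caller (not from this file): a fabric teardown path
	 * typically reaches this function indirectly, e.g.:
	 *
	 *	transport_generic_free_cmd(se_cmd, 1);
	 *		-> transport_wait_for_tasks(se_cmd)
	 *		-> transport_put_cmd(se_cmd)  (for SCF_SE_LUN_CMD
	 *					       descriptors)
	 *
	 * so the early-return checks in this function keep descriptors that
	 * never reached the storage engine from blocking that path.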
4417 */ 4418 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 4419 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 4420 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4421 return false; 4422 } 4423 /* 4424 * If we are already stopped due to an external event (ie: LUN shutdown) 4425 * sleep until the connection can have the passed struct se_cmd back. 4426 * The cmd->transport_lun_stopped_sem will be upped by 4427 * transport_clear_lun_from_sessions() once the ConfigFS context caller 4428 * has completed its operation on the struct se_cmd. 4429 */ 4430 if (cmd->transport_state & CMD_T_LUN_STOP) { 4431 pr_debug("wait_for_tasks: Stopping" 4432 " wait_for_completion(&cmd->t_tasktransport_lun_fe" 4433 "_stop_comp); for ITT: 0x%08x\n", 4434 cmd->se_tfo->get_task_tag(cmd)); 4435 /* 4436 * There is a special case for WRITES where a FE exception + 4437 * LUN shutdown means ConfigFS context is still sleeping on 4438 * transport_lun_stop_comp in transport_lun_wait_for_tasks(). 4439 * We go ahead and up transport_lun_stop_comp just to be sure 4440 * here. 4441 */ 4442 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4443 complete(&cmd->transport_lun_stop_comp); 4444 wait_for_completion(&cmd->transport_lun_fe_stop_comp); 4445 spin_lock_irqsave(&cmd->t_state_lock, flags); 4446 4447 transport_all_task_dev_remove_state(cmd); 4448 /* 4449 * At this point, the frontend who was the originator of this 4450 * struct se_cmd, now owns the structure and can be released through 4451 * normal means below. 4452 */ 4453 pr_debug("wait_for_tasks: Stopped" 4454 " wait_for_completion(&cmd->t_tasktransport_lun_fe_" 4455 "stop_comp); for ITT: 0x%08x\n", 4456 cmd->se_tfo->get_task_tag(cmd)); 4457 4458 cmd->transport_state &= ~CMD_T_LUN_STOP; 4459 } 4460 4461 if (!(cmd->transport_state & CMD_T_ACTIVE)) { 4462 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4463 return false; 4464 } 4465 4466 cmd->transport_state |= CMD_T_STOP; 4467 4468 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" 4469 " i_state: %d, t_state: %d, CMD_T_STOP\n", 4470 cmd, cmd->se_tfo->get_task_tag(cmd), 4471 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); 4472 4473 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4474 4475 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); 4476 4477 wait_for_completion(&cmd->t_transport_stop_comp); 4478 4479 spin_lock_irqsave(&cmd->t_state_lock, flags); 4480 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 4481 4482 pr_debug("wait_for_tasks: Stopped wait_for_compltion(" 4483 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", 4484 cmd->se_tfo->get_task_tag(cmd)); 4485 4486 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4487 4488 return true; 4489 } 4490 EXPORT_SYMBOL(transport_wait_for_tasks); 4491 4492 static int transport_get_sense_codes( 4493 struct se_cmd *cmd, 4494 u8 *asc, 4495 u8 *ascq) 4496 { 4497 *asc = cmd->scsi_asc; 4498 *ascq = cmd->scsi_ascq; 4499 4500 return 0; 4501 } 4502 4503 static int transport_set_sense_codes( 4504 struct se_cmd *cmd, 4505 u8 asc, 4506 u8 ascq) 4507 { 4508 cmd->scsi_asc = asc; 4509 cmd->scsi_ascq = ascq; 4510 4511 return 0; 4512 } 4513 4514 int transport_send_check_condition_and_sense( 4515 struct se_cmd *cmd, 4516 u8 reason, 4517 int from_transport) 4518 { 4519 unsigned char *buffer = cmd->sense_buffer; 4520 unsigned long flags; 4521 int offset; 4522 u8 asc = 0, ascq = 0; 4523 4524 spin_lock_irqsave(&cmd->t_state_lock, flags); 4525 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 4526 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4527 return 0; 
4528 } 4529 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 4530 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4531 4532 if (!reason && from_transport) 4533 goto after_reason; 4534 4535 if (!from_transport) 4536 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 4537 /* 4538 * Data Segment and SenseLength of the fabric response PDU. 4539 * 4540 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE 4541 * from include/scsi/scsi_cmnd.h 4542 */ 4543 offset = cmd->se_tfo->set_fabric_sense_len(cmd, 4544 TRANSPORT_SENSE_BUFFER); 4545 /* 4546 * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses 4547 * SENSE KEY values from include/scsi/scsi.h 4548 */ 4549 switch (reason) { 4550 case TCM_NON_EXISTENT_LUN: 4551 /* CURRENT ERROR */ 4552 buffer[offset] = 0x70; 4553 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4554 /* ILLEGAL REQUEST */ 4555 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4556 /* LOGICAL UNIT NOT SUPPORTED */ 4557 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25; 4558 break; 4559 case TCM_UNSUPPORTED_SCSI_OPCODE: 4560 case TCM_SECTOR_COUNT_TOO_MANY: 4561 /* CURRENT ERROR */ 4562 buffer[offset] = 0x70; 4563 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4564 /* ILLEGAL REQUEST */ 4565 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4566 /* INVALID COMMAND OPERATION CODE */ 4567 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20; 4568 break; 4569 case TCM_UNKNOWN_MODE_PAGE: 4570 /* CURRENT ERROR */ 4571 buffer[offset] = 0x70; 4572 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4573 /* ILLEGAL REQUEST */ 4574 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4575 /* INVALID FIELD IN CDB */ 4576 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; 4577 break; 4578 case TCM_CHECK_CONDITION_ABORT_CMD: 4579 /* CURRENT ERROR */ 4580 buffer[offset] = 0x70; 4581 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4582 /* ABORTED COMMAND */ 4583 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4584 /* BUS DEVICE RESET FUNCTION OCCURRED */ 4585 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29; 4586 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03; 4587 break; 4588 case TCM_INCORRECT_AMOUNT_OF_DATA: 4589 /* CURRENT ERROR */ 4590 buffer[offset] = 0x70; 4591 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4592 /* ABORTED COMMAND */ 4593 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4594 /* WRITE ERROR */ 4595 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; 4596 /* NOT ENOUGH UNSOLICITED DATA */ 4597 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d; 4598 break; 4599 case TCM_INVALID_CDB_FIELD: 4600 /* CURRENT ERROR */ 4601 buffer[offset] = 0x70; 4602 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4603 /* ILLEGAL REQUEST */ 4604 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4605 /* INVALID FIELD IN CDB */ 4606 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; 4607 break; 4608 case TCM_INVALID_PARAMETER_LIST: 4609 /* CURRENT ERROR */ 4610 buffer[offset] = 0x70; 4611 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4612 /* ILLEGAL REQUEST */ 4613 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4614 /* INVALID FIELD IN PARAMETER LIST */ 4615 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; 4616 break; 4617 case TCM_UNEXPECTED_UNSOLICITED_DATA: 4618 /* CURRENT ERROR */ 4619 buffer[offset] = 0x70; 4620 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4621 /* ABORTED COMMAND */ 4622 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4623 /* WRITE ERROR */ 4624 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; 4625 /* UNEXPECTED_UNSOLICITED_DATA */ 4626 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c; 4627 break; 4628 case 
TCM_SERVICE_CRC_ERROR: 4629 /* CURRENT ERROR */ 4630 buffer[offset] = 0x70; 4631 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4632 /* ABORTED COMMAND */ 4633 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4634 /* PROTOCOL SERVICE CRC ERROR */ 4635 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47; 4636 /* N/A */ 4637 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05; 4638 break; 4639 case TCM_SNACK_REJECTED: 4640 /* CURRENT ERROR */ 4641 buffer[offset] = 0x70; 4642 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4643 /* ABORTED COMMAND */ 4644 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4645 /* READ ERROR */ 4646 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11; 4647 /* FAILED RETRANSMISSION REQUEST */ 4648 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13; 4649 break; 4650 case TCM_WRITE_PROTECTED: 4651 /* CURRENT ERROR */ 4652 buffer[offset] = 0x70; 4653 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4654 /* DATA PROTECT */ 4655 buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; 4656 /* WRITE PROTECTED */ 4657 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; 4658 break; 4659 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 4660 /* CURRENT ERROR */ 4661 buffer[offset] = 0x70; 4662 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4663 /* UNIT ATTENTION */ 4664 buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; 4665 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); 4666 buffer[offset+SPC_ASC_KEY_OFFSET] = asc; 4667 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; 4668 break; 4669 case TCM_CHECK_CONDITION_NOT_READY: 4670 /* CURRENT ERROR */ 4671 buffer[offset] = 0x70; 4672 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4673 /* Not Ready */ 4674 buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; 4675 transport_get_sense_codes(cmd, &asc, &ascq); 4676 buffer[offset+SPC_ASC_KEY_OFFSET] = asc; 4677 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; 4678 break; 4679 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 4680 default: 4681 /* CURRENT ERROR */ 4682 buffer[offset] = 0x70; 4683 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4684 /* ILLEGAL REQUEST */ 4685 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4686 /* LOGICAL UNIT COMMUNICATION FAILURE */ 4687 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80; 4688 break; 4689 } 4690 /* 4691 * This code uses linux/include/scsi/scsi.h SAM status codes! 4692 */ 4693 cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 4694 /* 4695 * Automatically padded, this value is encoded in the fabric's 4696 * data_length response PDU containing the SCSI defined sense data. 
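	 *
	 * Worked example (illustrative): an iSCSI fabric that reserves a
	 * two-byte SenseLength field in front of the sense data would return
	 * 2 from ->set_fabric_sense_len(), so the sense bytes above are
	 * written starting at buffer[2] and the total length reported back
	 * to the fabric becomes TRANSPORT_SENSE_BUFFER + 2.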
4697 	 */
4698 	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
4699 
4700 after_reason:
4701 	return cmd->se_tfo->queue_status(cmd);
4702 }
4703 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
4704 
4705 int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
4706 {
4707 	int ret = 0;
4708 
4709 	if (cmd->transport_state & CMD_T_ABORTED) {
4710 		if (!send_status ||
4711 		    (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
4712 			return 1;
4713 #if 0
4714 		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
4715 			" status for CDB: 0x%02x ITT: 0x%08x\n",
4716 			cmd->t_task_cdb[0],
4717 			cmd->se_tfo->get_task_tag(cmd));
4718 #endif
4719 		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
4720 		cmd->se_tfo->queue_status(cmd);
4721 		ret = 1;
4722 	}
4723 	return ret;
4724 }
4725 EXPORT_SYMBOL(transport_check_aborted_status);
4726 
4727 void transport_send_task_abort(struct se_cmd *cmd)
4728 {
4729 	unsigned long flags;
4730 
4731 	spin_lock_irqsave(&cmd->t_state_lock, flags);
4732 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
4733 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4734 		return;
4735 	}
4736 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4737 
4738 	/*
4739 	 * If there are still expected incoming fabric WRITEs, we wait
4740 	 * until they have completed before sending a TASK_ABORTED
4741 	 * response. This response with TASK_ABORTED status will be
4742 	 * queued back to the fabric module by transport_check_aborted_status().
4743 	 */
4744 	if (cmd->data_direction == DMA_TO_DEVICE) {
4745 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
4746 			cmd->transport_state |= CMD_T_ABORTED;
4747 			smp_mb__after_atomic_inc();
4748 		}
4749 	}
4750 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
4751 #if 0
4752 	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
4753 		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
4754 		cmd->se_tfo->get_task_tag(cmd));
4755 #endif
4756 	cmd->se_tfo->queue_status(cmd);
4757 }
4758 
4759 static int transport_generic_do_tmr(struct se_cmd *cmd)
4760 {
4761 	struct se_device *dev = cmd->se_dev;
4762 	struct se_tmr_req *tmr = cmd->se_tmr_req;
4763 	int ret;
4764 
4765 	switch (tmr->function) {
4766 	case TMR_ABORT_TASK:
4767 		core_tmr_abort_task(dev, tmr, cmd->se_sess);
4768 		break;
4769 	case TMR_ABORT_TASK_SET:
4770 	case TMR_CLEAR_ACA:
4771 	case TMR_CLEAR_TASK_SET:
4772 		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
4773 		break;
4774 	case TMR_LUN_RESET:
4775 		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
4776 		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
4777 					 TMR_FUNCTION_REJECTED;
4778 		break;
4779 	case TMR_TARGET_WARM_RESET:
4780 		tmr->response = TMR_FUNCTION_REJECTED;
4781 		break;
4782 	case TMR_TARGET_COLD_RESET:
4783 		tmr->response = TMR_FUNCTION_REJECTED;
4784 		break;
4785 	default:
4786 		pr_err("Unknown TMR function: 0x%02x.\n",
4787 				tmr->function);
4788 		tmr->response = TMR_FUNCTION_REJECTED;
4789 		break;
4790 	}
4791 
4792 	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
4793 	cmd->se_tfo->queue_tm_rsp(cmd);
4794 
4795 	transport_cmd_check_stop_to_fabric(cmd);
4796 	return 0;
4797 }
4798 
4799 /* transport_processing_thread():
4800  *
4801  *
4802  */
4803 static int transport_processing_thread(void *param)
4804 {
4805 	int ret;
4806 	struct se_cmd *cmd;
4807 	struct se_device *dev = param;
4808 
4809 	while (!kthread_should_stop()) {
4810 		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
4811 				atomic_read(&dev->dev_queue_obj.queue_cnt) ||
4812 				kthread_should_stop());
4813 		if (ret < 0)
4814 			goto out;
4815 
4816 get_cmd:
4817 		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
4818 		if (!cmd)
4819 			continue;
4820 
4821 		switch (cmd->t_state) {
4822 		case TRANSPORT_NEW_CMD:
4823 			BUG();
4824 			break;
4825 		case TRANSPORT_NEW_CMD_MAP:
4826 			if (!cmd->se_tfo->new_cmd_map) {
4827 				pr_err("cmd->se_tfo->new_cmd_map is"
4828 					" NULL for TRANSPORT_NEW_CMD_MAP\n");
4829 				BUG();
4830 			}
4831 			ret = cmd->se_tfo->new_cmd_map(cmd);
4832 			if (ret < 0) {
4833 				transport_generic_request_failure(cmd);
4834 				break;
4835 			}
4836 			ret = transport_generic_new_cmd(cmd);
4837 			if (ret < 0) {
4838 				transport_generic_request_failure(cmd);
4839 				break;
4840 			}
4841 			break;
4842 		case TRANSPORT_PROCESS_WRITE:
4843 			transport_generic_process_write(cmd);
4844 			break;
4845 		case TRANSPORT_PROCESS_TMR:
4846 			transport_generic_do_tmr(cmd);
4847 			break;
4848 		case TRANSPORT_COMPLETE_QF_WP:
4849 			transport_write_pending_qf(cmd);
4850 			break;
4851 		case TRANSPORT_COMPLETE_QF_OK:
4852 			transport_complete_qf(cmd);
4853 			break;
4854 		default:
4855 			pr_err("Unknown t_state: %d for ITT: 0x%08x "
4856 				"i_state: %d on SE LUN: %u\n",
4857 				cmd->t_state,
4858 				cmd->se_tfo->get_task_tag(cmd),
4859 				cmd->se_tfo->get_cmd_state(cmd),
4860 				cmd->se_lun->unpacked_lun);
4861 			BUG();
4862 		}
4863 
4864 		goto get_cmd;
4865 	}
4866 
4867 out:
4868 	WARN_ON(!list_empty(&dev->state_task_list));
4869 	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
4870 	dev->process_thread = NULL;
4871 	return 0;
4872 }
4873 
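
/*
 * Illustrative sketch, not part of this file: how a fabric module might use
 * the session draining helpers above when a session is torn down. The
 * function name example_fabric_close_session() is hypothetical; the target_*
 * and transport_* calls are existing target core interfaces exported from
 * this file and declared in target_core_fabric.h.
 */
#if 0
static void example_fabric_close_session(struct se_session *se_sess)
{
	/*
	 * Stop accepting new commands, then move everything in flight
	 * from sess_cmd_list onto sess_wait_list.
	 */
	target_splice_sess_cmd_list(se_sess);

	/*
	 * Block until every outstanding se_cmd has been released; passing
	 * 0 skips the extra per-descriptor transport_wait_for_tasks() call.
	 */
	target_wait_for_sess_cmds(se_sess, 0);

	/* The session itself can now be unregistered from the core. */
	transport_deregister_session(se_sess);
}
#endif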