1 /* 2 * scsi.c Copyright (C) 1992 Drew Eckhardt 3 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale 4 * Copyright (C) 2002, 2003 Christoph Hellwig 5 * 6 * generic mid-level SCSI driver 7 * Initial versions: Drew Eckhardt 8 * Subsequent revisions: Eric Youngdale 9 * 10 * <drew@colorado.edu> 11 * 12 * Bug correction thanks go to : 13 * Rik Faith <faith@cs.unc.edu> 14 * Tommy Thorn <tthorn> 15 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de> 16 * 17 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to 18 * add scatter-gather, multiple outstanding request, and other 19 * enhancements. 20 * 21 * Native multichannel, wide scsi, /proc/scsi and hot plugging 22 * support added by Michael Neuffer <mike@i-connect.net> 23 * 24 * Added request_module("scsi_hostadapter") for kerneld: 25 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf) 26 * Bjorn Ekwall <bj0rn@blox.se> 27 * (changed to kmod) 28 * 29 * Major improvements to the timeout, abort, and reset processing, 30 * as well as performance modifications for large queue depths by 31 * Leonard N. Zubkoff <lnz@dandelion.com> 32 * 33 * Converted cli() code to spinlocks, Ingo Molnar 34 * 35 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli 36 * 37 * out_of_space hacks, D. 
Gilbert (dpg) 990608
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/async.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

/* Instantiate the SCSI tracepoints declared in <trace/events/scsi.h> */
#define CREATE_TRACE_POINTS
#include <trace/events/scsi.h>

/* Completion callback handed to LLDDs; defined later in this file. */
static void scsi_done(struct scsi_cmnd *cmd);

/*
 * Definitions and constants.
 */

/*
 * Note - the initial logging level can be set here to log events at boot time.
 * After the system is up, you may enable logging via the /proc interface.
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif

/* sd, scsi core and power management need to coordinate flushing async actions */
ASYNC_DOMAIN(scsi_sd_probe_domain);
EXPORT_SYMBOL(scsi_sd_probe_domain);

/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
 * You may not alter any existing entry (although adding new ones is
 * encouraged once assigned by ANSI/INCITS T10
 */
static const char *const scsi_device_types[] = {
	"Direct-Access    ",
	"Sequential-Access",
	"Printer          ",
	"Processor        ",
	"WORM             ",
	"CD-ROM           ",
	"Scanner          ",
	"Optical Device   ",
	"Medium Changer   ",
	"Communications   ",
	"ASC IT8          ",	/* 0x0a and 0x0b are both assigned to ASC IT8 */
	"ASC IT8          ",
	"RAID             ",
	"Enclosure        ",
	"Direct-Access-RBC",
	"Optical card     ",
	"Bridge controller",
	"Object storage   ",
	"Automation/Drive ",
};

/**
 * scsi_device_type - Return 17 char string indicating device type.
 * @type: type number to look up
 *
 * Types 0x1e (well-known LUN) and 0x1f (no device) are outside the
 * table and handled explicitly; other out-of-range values map to
 * "Unknown".
 */

const char * scsi_device_type(unsigned type)
{
	if (type == 0x1e)
		return "Well-known LUN   ";
	if (type == 0x1f)
		return "No Device        ";
	if (type >= ARRAY_SIZE(scsi_device_types))
		return "Unknown          ";
	return scsi_device_types[type];
}

EXPORT_SYMBOL(scsi_device_type);

/*
 * A command pool pairs a slab for struct scsi_cmnd with a slab for the
 * accompanying sense buffers.  Pools are shared (reference counted via
 * @users) between all hosts with the same allocation constraints.
 */
struct scsi_host_cmd_pool {
	struct kmem_cache *cmd_slab;	/* slab for struct scsi_cmnd */
	struct kmem_cache *sense_slab;	/* slab for sense buffers */
	unsigned int users;		/* reference count on this pool */
	char *cmd_name;			/* name for the command slab */
	char *sense_name;		/* name for the sense slab */
	unsigned int slab_flags;	/* flags used to create the slabs */
	gfp_t gfp_mask;			/* extra gfp bits (e.g. __GFP_DMA) */
};

static struct scsi_host_cmd_pool scsi_cmd_pool = {
	.cmd_name = "scsi_cmd_cache",
	.sense_name = "scsi_sense_cache",
	.slab_flags = SLAB_HWCACHE_ALIGN,
};

/* Pool for hosts that require ISA-style DMA-able command allocations. */
static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
	.cmd_name = "scsi_cmd_cache(DMA)",
	.sense_name = "scsi_sense_cache(DMA)",
	.slab_flags = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
	.gfp_mask = __GFP_DMA,
};

/* Serializes pool creation/destruction and the @users refcounts. */
static DEFINE_MUTEX(host_cmd_pool_mutex);

/**
 * scsi_pool_alloc_command - internal function to get a fully allocated command
 * @pool: slab pool to allocate the command from
 * @gfp_mask: mask for the allocation
 *
 * Returns a fully allocated command (with the associated sense buffer) or
 * NULL on failure
 */
static struct scsi_cmnd *
scsi_pool_alloc_command(struct scsi_host_cmd_pool *pool, gfp_t gfp_mask)
{
	struct scsi_cmnd *cmd;

	cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
	if (!cmd)
		return NULL;

	cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
					     gfp_mask | pool->gfp_mask);
	if (!cmd->sense_buffer) {
		kmem_cache_free(pool->cmd_slab, cmd);
		return NULL;
	}

	return cmd;
}

/**
 * scsi_pool_free_command - internal function to release a command
 * @pool: slab pool the command was allocated from
 * @cmd: command to release
 *
 * the command must previously have been allocated by
 * scsi_pool_alloc_command.
 */
static void
scsi_pool_free_command(struct scsi_host_cmd_pool *pool,
		       struct scsi_cmnd *cmd)
{
	/* Protection data buffer is allocated separately (see
	 * scsi_host_alloc_command) and may be absent. */
	if (cmd->prot_sdb)
		kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);

	kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
	kmem_cache_free(pool->cmd_slab, cmd);
}

/**
 * scsi_host_alloc_command - internal function to allocate command
 * @shost: SCSI host whose pool to allocate from
 * @gfp_mask: mask for the allocation
 *
 * Returns a fully allocated command with sense buffer and protection
 * data buffer (where applicable) or NULL on failure
 */
static struct scsi_cmnd *
scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
{
	struct scsi_cmnd *cmd;

	cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
	if (!cmd)
		return NULL;

	/* Hosts doing DIX protection also need a protection scatterlist. */
	if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
		cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);

		if (!cmd->prot_sdb) {
			scsi_pool_free_command(shost->cmd_pool, cmd);
			return NULL;
		}
	}

	return cmd;
}

/**
 * __scsi_get_command - Allocate a struct scsi_cmnd
 * @shost: host to transmit command
 * @gfp_mask: allocation mask
 *
 * Description: allocate a
struct scsi_cmd from host's slab, recycling from the 244 * host's free_list if necessary. 245 */ 246 struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask) 247 { 248 struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask); 249 250 if (unlikely(!cmd)) { 251 unsigned long flags; 252 253 spin_lock_irqsave(&shost->free_list_lock, flags); 254 if (likely(!list_empty(&shost->free_list))) { 255 cmd = list_entry(shost->free_list.next, 256 struct scsi_cmnd, list); 257 list_del_init(&cmd->list); 258 } 259 spin_unlock_irqrestore(&shost->free_list_lock, flags); 260 261 if (cmd) { 262 void *buf, *prot; 263 264 buf = cmd->sense_buffer; 265 prot = cmd->prot_sdb; 266 267 memset(cmd, 0, sizeof(*cmd)); 268 269 cmd->sense_buffer = buf; 270 cmd->prot_sdb = prot; 271 } 272 } 273 274 return cmd; 275 } 276 EXPORT_SYMBOL_GPL(__scsi_get_command); 277 278 /** 279 * scsi_get_command - Allocate and setup a scsi command block 280 * @dev: parent scsi device 281 * @gfp_mask: allocator flags 282 * 283 * Returns: The allocated scsi command structure. 
 */
struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
{
	struct scsi_cmnd *cmd;

	/* Bail if we can't get a reference to the device */
	if (!get_device(&dev->sdev_gendev))
		return NULL;

	cmd = __scsi_get_command(dev->host, gfp_mask);

	if (likely(cmd != NULL)) {
		unsigned long flags;

		cmd->device = dev;
		INIT_LIST_HEAD(&cmd->list);
		/* Track the command on the device's cmd_list so error
		 * handling can find outstanding commands. */
		spin_lock_irqsave(&dev->list_lock, flags);
		list_add_tail(&cmd->list, &dev->cmd_list);
		spin_unlock_irqrestore(&dev->list_lock, flags);
		cmd->jiffies_at_alloc = jiffies;
	} else
		put_device(&dev->sdev_gendev);

	return cmd;
}
EXPORT_SYMBOL(scsi_get_command);

/**
 * __scsi_put_command - Free a struct scsi_cmnd
 * @shost: dev->host
 * @cmd: Command to free
 * @dev: parent scsi device
 */
void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
			struct device *dev)
{
	unsigned long flags;

	/* If the host's reserve free_list is empty, park this command
	 * there instead of freeing it, so one command is always held
	 * back for out-of-memory situations. */
	spin_lock_irqsave(&shost->free_list_lock, flags);
	if (unlikely(list_empty(&shost->free_list))) {
		list_add(&cmd->list, &shost->free_list);
		cmd = NULL;
	}
	spin_unlock_irqrestore(&shost->free_list_lock, flags);

	if (likely(cmd != NULL))
		scsi_pool_free_command(shost->cmd_pool, cmd);

	put_device(dev);
}
EXPORT_SYMBOL(__scsi_put_command);

/**
 * scsi_put_command - Free a scsi command block
 * @cmd: command block to free
 *
 * Returns: Nothing.
 *
 * Notes: The command must not belong to any lists.
 */
void scsi_put_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	unsigned long flags;

	/* serious error if the command hasn't come from a device list */
	spin_lock_irqsave(&cmd->device->list_lock, flags);
	BUG_ON(list_empty(&cmd->list));
	list_del_init(&cmd->list);
	spin_unlock_irqrestore(&cmd->device->list_lock, flags);

	__scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_put_command);

/*
 * scsi_get_host_cmd_pool - select (lazily creating) the command pool
 * matching @gfp_mask and take a reference on it.  Returns NULL if the
 * pool's slabs could not be created.
 */
static struct scsi_host_cmd_pool *scsi_get_host_cmd_pool(gfp_t gfp_mask)
{
	struct scsi_host_cmd_pool *retval = NULL, *pool;
	/*
	 * Select a command slab for this host and create it if not
	 * yet existent.
	 */
	mutex_lock(&host_cmd_pool_mutex);
	pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
		&scsi_cmd_pool;
	if (!pool->users) {
		pool->cmd_slab = kmem_cache_create(pool->cmd_name,
						   sizeof(struct scsi_cmnd), 0,
						   pool->slab_flags, NULL);
		if (!pool->cmd_slab)
			goto fail;

		pool->sense_slab = kmem_cache_create(pool->sense_name,
						     SCSI_SENSE_BUFFERSIZE, 0,
						     pool->slab_flags, NULL);
		if (!pool->sense_slab) {
			kmem_cache_destroy(pool->cmd_slab);
			goto fail;
		}
	}

	pool->users++;
	retval = pool;
 fail:
	mutex_unlock(&host_cmd_pool_mutex);
	return retval;
}

/*
 * scsi_put_host_cmd_pool - drop a reference on the pool matching
 * @gfp_mask, destroying its slabs when the last user goes away.
 */
static void scsi_put_host_cmd_pool(gfp_t gfp_mask)
{
	struct scsi_host_cmd_pool *pool;

	mutex_lock(&host_cmd_pool_mutex);
	pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
		&scsi_cmd_pool;
	/*
	 * This may happen if a driver has a mismatched get and put
	 * of the command pool; the driver should be implicated in
	 * the stack trace
	 */
	BUG_ON(pool->users == 0);

	if (!--pool->users) {
		kmem_cache_destroy(pool->cmd_slab);
		kmem_cache_destroy(pool->sense_slab);
	}
	mutex_unlock(&host_cmd_pool_mutex);
}

/**
 * scsi_allocate_command - get a fully allocated SCSI command
 * @gfp_mask: allocation mask
 *
 * This function is for use outside of the normal host based pools.
 * It allocates the relevant command and takes an additional reference
 * on the pool it used.  This function *must* be paired with
 * scsi_free_command which also has the identical mask, otherwise the
 * free pool counts will eventually go wrong and you'll trigger a bug.
 *
 * This function should *only* be used by drivers that need a static
 * command allocation at start of day for internal functions.
 */
struct scsi_cmnd *scsi_allocate_command(gfp_t gfp_mask)
{
	struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);

	if (!pool)
		return NULL;

	return scsi_pool_alloc_command(pool, gfp_mask);
}
EXPORT_SYMBOL(scsi_allocate_command);

/**
 * scsi_free_command - free a command allocated by scsi_allocate_command
 * @gfp_mask: mask used in the original allocation
 * @cmd: command to free
 *
 * Note: using the original allocation mask is vital because that's
 * what determines which command pool we use to free the command.  Any
 * mismatch will cause the system to BUG eventually.
 */
void scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd)
{
	struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);

	/*
	 * this could trigger if the mask to scsi_allocate_command
	 * doesn't match this mask.  Otherwise we're guaranteed that this
	 * succeeds because scsi_allocate_command must have taken a reference
	 * on the pool
	 */
	BUG_ON(!pool);

	scsi_pool_free_command(pool, cmd);
	/*
	 * scsi_put_host_cmd_pool is called twice; once to release the
	 * reference we took above, and once to release the reference
	 * originally taken by scsi_allocate_command
	 */
	scsi_put_host_cmd_pool(gfp_mask);
	scsi_put_host_cmd_pool(gfp_mask);
}
EXPORT_SYMBOL(scsi_free_command);

/**
 * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
 * @shost: host to allocate the freelist for.
 *
 * Description: The command freelist protects against system-wide out of memory
 * deadlock by preallocating one SCSI command structure for each host, so the
 * system can always write to a swap file on a device associated with that host.
 *
 * Returns: 0 on success, -ENOMEM if the pool or the backup command
 * could not be allocated.
 */
int scsi_setup_command_freelist(struct Scsi_Host *shost)
{
	struct scsi_cmnd *cmd;
	const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;

	spin_lock_init(&shost->free_list_lock);
	INIT_LIST_HEAD(&shost->free_list);

	shost->cmd_pool = scsi_get_host_cmd_pool(gfp_mask);

	if (!shost->cmd_pool)
		return -ENOMEM;

	/*
	 * Get one backup command for this host.
	 */
	cmd = scsi_host_alloc_command(shost, gfp_mask);
	if (!cmd) {
		scsi_put_host_cmd_pool(gfp_mask);
		shost->cmd_pool = NULL;
		return -ENOMEM;
	}
	list_add(&cmd->list, &shost->free_list);
	return 0;
}

/**
 * scsi_destroy_command_freelist - Release the command freelist for a scsi host.
 * @shost: host whose freelist is going to be destroyed
 */
void scsi_destroy_command_freelist(struct Scsi_Host *shost)
{
	/*
	 * If cmd_pool is NULL the free list was not initialized, so
	 * do not attempt to release resources.
	 */
	if (!shost->cmd_pool)
		return;

	while (!list_empty(&shost->free_list)) {
		struct scsi_cmnd *cmd;

		cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
		list_del_init(&cmd->list);
		scsi_pool_free_command(shost->cmd_pool, cmd);
	}
	shost->cmd_pool = NULL;
	/* Must mirror the mask chosen in scsi_setup_command_freelist(). */
	scsi_put_host_cmd_pool(shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL);
}

#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
	unsigned int level;

	/*
	 * If ML QUEUE log level is greater than or equal to:
	 *
	 * 1: nothing (match completion)
	 *
	 * 2: log opcode + command of all commands
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
				       SCSI_LOG_MLQUEUE_BITS);
		if (level > 1) {
			scmd_printk(KERN_INFO, cmd, "Send: ");
			if (level > 2)
				printk("0x%p ", cmd);
			printk("\n");
			scsi_print_command(cmd);
			if (level > 3) {
				printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
				       " queuecommand 0x%p\n",
					scsi_sglist(cmd), scsi_bufflen(cmd),
					cmd->device->host->hostt->queuecommand);

			}
		}
	}
}

void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
	unsigned int level;

	/*
	 * If ML COMPLETE log level is greater than or equal to:
	 *
	 * 1: log disposition, result, opcode + command, and conditionally
	 * sense data for failures or non SUCCESS dispositions.
	 *
	 * 2: same as 1 but for all command completions.
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
				       SCSI_LOG_MLCOMPLETE_BITS);
		if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
		    (level > 1)) {
			scmd_printk(KERN_INFO, cmd, "Done: ");
			if (level > 2)
				printk("0x%p ", cmd);
			/*
			 * Dump truncated values, so we usually fit within
			 * 80 chars.
			 */
			switch (disposition) {
			case SUCCESS:
				printk("SUCCESS\n");
				break;
			case NEEDS_RETRY:
				printk("RETRY\n");
				break;
			case ADD_TO_MLQUEUE:
				printk("MLQUEUE\n");
				break;
			case FAILED:
				printk("FAILED\n");
				break;
			case TIMEOUT_ERROR:
				/*
				 * If called via scsi_times_out.
				 */
				printk("TIMEOUT\n");
				break;
			default:
				printk("UNKNOWN\n");
			}
			scsi_print_result(cmd);
			scsi_print_command(cmd);
			if (status_byte(cmd->result) & CHECK_CONDITION)
				scsi_print_sense("", cmd);
			if (level > 3)
				scmd_printk(KERN_INFO, cmd,
					    "scsi host busy %d failed %d\n",
					    cmd->device->host->host_busy,
					    cmd->device->host->host_failed);
		}
	}
}
#endif

/**
 * scsi_cmd_get_serial - Assign a serial number to a command
 * @host: the scsi host
 * @cmd: command to assign serial number to
 *
 * Description: a serial number identifies a request for error recovery
 * and debugging purposes.  Protected by the Host_Lock of host.
 */
void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	/* Serial number 0 is reserved as "unassigned"; skip it on wrap. */
	cmd->serial_number = host->cmd_serial_number++;
	if (cmd->serial_number == 0)
		cmd->serial_number = host->cmd_serial_number++;
}
EXPORT_SYMBOL(scsi_cmd_get_serial);

/**
 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
649 * 650 * Return: nonzero return request was rejected and device's queue needs to be 651 * plugged. 652 */ 653 int scsi_dispatch_cmd(struct scsi_cmnd *cmd) 654 { 655 struct Scsi_Host *host = cmd->device->host; 656 int rtn = 0; 657 658 atomic_inc(&cmd->device->iorequest_cnt); 659 660 /* check if the device is still usable */ 661 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { 662 /* in SDEV_DEL we error all commands. DID_NO_CONNECT 663 * returns an immediate error upwards, and signals 664 * that the device is no longer present */ 665 cmd->result = DID_NO_CONNECT << 16; 666 scsi_done(cmd); 667 /* return 0 (because the command has been processed) */ 668 goto out; 669 } 670 671 /* Check to see if the scsi lld made this device blocked. */ 672 if (unlikely(scsi_device_blocked(cmd->device))) { 673 /* 674 * in blocked state, the command is just put back on 675 * the device queue. The suspend state has already 676 * blocked the queue so future requests should not 677 * occur until the device transitions out of the 678 * suspend state. 679 */ 680 681 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); 682 683 SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n")); 684 685 /* 686 * NOTE: rtn is still zero here because we don't need the 687 * queue to be plugged on return (it's already stopped) 688 */ 689 goto out; 690 } 691 692 /* 693 * If SCSI-2 or lower, store the LUN value in cmnd. 694 */ 695 if (cmd->device->scsi_level <= SCSI_2 && 696 cmd->device->scsi_level != SCSI_UNKNOWN) { 697 cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) | 698 (cmd->device->lun << 5 & 0xe0); 699 } 700 701 scsi_log_send(cmd); 702 703 /* 704 * Before we queue this command, check if the command 705 * length exceeds what the host adapter can handle. 706 */ 707 if (cmd->cmd_len > cmd->device->host->max_cmd_len) { 708 SCSI_LOG_MLQUEUE(3, 709 printk("queuecommand : command too long. 
" 710 "cdb_size=%d host->max_cmd_len=%d\n", 711 cmd->cmd_len, cmd->device->host->max_cmd_len)); 712 cmd->result = (DID_ABORT << 16); 713 714 scsi_done(cmd); 715 goto out; 716 } 717 718 if (unlikely(host->shost_state == SHOST_DEL)) { 719 cmd->result = (DID_NO_CONNECT << 16); 720 scsi_done(cmd); 721 } else { 722 trace_scsi_dispatch_cmd_start(cmd); 723 cmd->scsi_done = scsi_done; 724 rtn = host->hostt->queuecommand(host, cmd); 725 } 726 727 if (rtn) { 728 trace_scsi_dispatch_cmd_error(cmd, rtn); 729 if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && 730 rtn != SCSI_MLQUEUE_TARGET_BUSY) 731 rtn = SCSI_MLQUEUE_HOST_BUSY; 732 733 scsi_queue_insert(cmd, rtn); 734 735 SCSI_LOG_MLQUEUE(3, 736 printk("queuecommand : request rejected\n")); 737 } 738 739 out: 740 SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n")); 741 return rtn; 742 } 743 744 /** 745 * scsi_done - Enqueue the finished SCSI command into the done queue. 746 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives 747 * ownership back to SCSI Core -- i.e. the LLDD has finished with it. 748 * 749 * Description: This function is the mid-level's (SCSI Core) interrupt routine, 750 * which regains ownership of the SCSI command (de facto) from a LLDD, and 751 * enqueues the command to the done queue for further processing. 752 * 753 * This is the producer of the done queue who enqueues at the tail. 754 * 755 * This function is interrupt context safe. 756 */ 757 static void scsi_done(struct scsi_cmnd *cmd) 758 { 759 trace_scsi_dispatch_cmd_done(cmd); 760 blk_complete_request(cmd->request); 761 } 762 763 /** 764 * scsi_finish_command - cleanup and pass command back to upper layer 765 * @cmd: the command 766 * 767 * Description: Pass command off to upper layer for finishing of I/O 768 * request, waking processes that are waiting on results, 769 * etc. 
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = sdev->host;
	struct scsi_driver *drv;
	unsigned int good_bytes;

	scsi_device_unbusy(sdev);

	/*
	 * Clear the flags which say that the device/host is no longer
	 * capable of accepting new commands.  These are set in scsi_queue.c
	 * for both the queue full condition on a device, and for a
	 * host full condition on the host.
	 *
	 * XXX(hch): What about locking?
	 */
	shost->host_blocked = 0;
	starget->target_blocked = 0;
	sdev->device_blocked = 0;

	/*
	 * If we have valid sense information, then some kind of recovery
	 * must have taken place.  Make a note of this.
	 */
	if (SCSI_SENSE_VALID(cmd))
		cmd->result |= (DRIVER_SENSE << 24);

	SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
				"Notifying upper driver of completion "
				"(result %x)\n", cmd->result));

	good_bytes = scsi_bufflen(cmd);
	if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
		int old_good_bytes = good_bytes;
		drv = scsi_cmd_to_driver(cmd);
		if (drv->done)
			good_bytes = drv->done(cmd);
		/*
		 * USB may not give sense identifying bad sector and
		 * simply return a residue instead, so subtract off the
		 * residue if drv->done() error processing indicates no
		 * change to the completion length.
		 */
		if (good_bytes == old_good_bytes)
			good_bytes -= scsi_get_resid(cmd);
	}
	scsi_io_completion(cmd, good_bytes);
}
EXPORT_SYMBOL(scsi_finish_command);

/**
 * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth
 * @sdev: SCSI Device in question
 * @tagged: Do we use tagged queueing (non-0) or do we treat
 *          this device as an untagged device (0)
 * @tags: Number of tags allowed if tagged queueing enabled,
 *        or number of commands the low level driver can
 *        queue up in non-tagged mode (as per cmd_per_lun).
 *
 * Returns: Nothing
 *
 * Lock Status: None held on entry
 *
 * Notes: Low level drivers may call this at any time and we will do
 *        the right thing depending on whether or not the device is
 *        currently active and whether or not it even has the
 *        command blocks built yet.
 */
void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
{
	unsigned long flags;

	/*
	 * refuse to set tagged depth to an unworkable size
	 */
	if (tags <= 0)
		return;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);

	/*
	 * Check to see if the queue is managed by the block layer.
	 * If it is, and we fail to adjust the depth, exit.
	 *
	 * Do not resize the tag map if it is a host wide share bqt,
	 * because the size should be the hosts's can_queue.  If there
	 * is more IO than the LLD's can_queue (so there are not enough
	 * tags) request_fn's host queue ready check will handle it.
	 */
	if (!sdev->host->bqt) {
		if (blk_queue_tagged(sdev->request_queue) &&
		    blk_queue_resize_tags(sdev->request_queue, tags) != 0)
			goto out;
	}

	sdev->queue_depth = tags;
	switch (tagged) {
	case MSG_ORDERED_TAG:
		sdev->ordered_tags = 1;
		sdev->simple_tags = 1;
		break;
	case MSG_SIMPLE_TAG:
		sdev->ordered_tags = 0;
		sdev->simple_tags = 1;
		break;
	default:
		sdev_printk(KERN_WARNING, sdev,
			    "scsi_adjust_queue_depth, bad queue type, "
			    "disabled\n");
		/* fall through: disable tagged queueing, as for case 0 */
	case 0:
		sdev->ordered_tags = sdev->simple_tags = 0;
		sdev->queue_depth = tags;
		break;
	}
 out:
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
EXPORT_SYMBOL(scsi_adjust_queue_depth);

/**
 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
 * @sdev: SCSI Device in question
 * @depth: Current number of outstanding SCSI commands on this device,
 *         not counting the one returned as QUEUE_FULL.
 *
 * Description: This function will track successive QUEUE_FULL events on a
 *              specific SCSI device to determine if and when there is a
 *              need to adjust the queue depth on the device.
 *
 * Returns: 0 - No change needed, >0 - Adjust queue depth to this new depth,
 *          -1 - Drop back to untagged operation using host->cmd_per_lun
 *               as the untagged command depth
 *
 * Lock Status: None held on entry
 *
 * Notes: Low level drivers may call this at any time and we will do
 *        "The Right Thing."  We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{

	/*
	 * Don't let QUEUE_FULLs on the same
	 * jiffies count, they could all be from
	 * same event.
	 */
	if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4))
		return 0;

	sdev->last_queue_full_time = jiffies;
	if (sdev->last_queue_full_depth != depth) {
		sdev->last_queue_full_count = 1;
		sdev->last_queue_full_depth = depth;
	} else {
		sdev->last_queue_full_count++;
	}

	/* Require a sustained run of QUEUE_FULLs before reacting. */
	if (sdev->last_queue_full_count <= 10)
		return 0;
	if (sdev->last_queue_full_depth < 8) {
		/* Drop back to untagged */
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		return -1;
	}

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
	return depth;
}
EXPORT_SYMBOL(scsi_track_queue_full);

/**
 * scsi_vpd_inquiry - Request a device provide us with a VPD page
 * @sdev: The device to ask
 * @buffer: Where to put the result
 * @page: Which Vital Product Data to return
 * @len: The length of the buffer
 *
 * This is an internal helper function.  You probably want to use
 * scsi_get_vpd_page instead.
 *
 * Returns 0 on success or a negative error number.
 */
static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
			    u8 page, unsigned len)
{
	int result;
	unsigned char cmd[16];

	cmd[0] = INQUIRY;
	cmd[1] = 1;		/* EVPD */
	cmd[2] = page;
	cmd[3] = len >> 8;
	cmd[4] = len & 0xff;
	cmd[5] = 0;		/* Control byte */

	/*
	 * I'm not convinced we need to try quite this hard to get VPD, but
	 * all the existing users tried this hard.
	 */
	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
				  len, NULL, 30 * HZ, 3, NULL);
	if (result)
		return result;

	/* Sanity check that we got the page back that we asked for */
	if (buffer[1] != page)
		return -EIO;

	return 0;
}

/**
 * scsi_get_vpd_page - Get Vital Product Data from a SCSI device
 * @sdev: The device to ask
 * @page: Which Vital Product Data to return
 * @buf: where to store the VPD
 * @buf_len: number of bytes in the VPD buffer area
 *
 * SCSI devices may optionally supply Vital Product Data.  Each 'page'
 * of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
 * The supported-pages list (page 0) is fetched first to verify that
 * the device implements @page before the page itself is read into
 * @buf.
 *
 * Returns 0 on success, or -EINVAL if the page is not supported or
 * could not be retrieved.
 */
int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
		      int buf_len)
{
	int i, result;

	if (sdev->skip_vpd_pages)
		goto fail;

	/* Ask for all the pages supported by this device */
	result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
	if (result)
		goto fail;

	/* If the user actually wanted this page, we can skip the rest */
	if (page == 0)
		return 0;

	for (i = 0; i < min((int)buf[3], buf_len - 4); i++)
		if (buf[i + 4] == page)
			goto found;

	if (i < buf[3] && i >= buf_len - 4)
		/* ran off the end of the buffer, give us benefit of doubt */
		goto found;
	/* The device claims it doesn't support the requested page */
	goto fail;

 found:
	result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
	if (result)
		goto fail;

	return 0;

 fail:
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(scsi_get_vpd_page);

/**
 *
scsi_report_opcode - Find out if a given command opcode is supported 1042 * @sdev: scsi device to query 1043 * @buffer: scratch buffer (must be at least 20 bytes long) 1044 * @len: length of buffer 1045 * @opcode: opcode for command to look up 1046 * 1047 * Uses the REPORT SUPPORTED OPERATION CODES to look up the given 1048 * opcode. Returns -EINVAL if RSOC fails, 0 if the command opcode is 1049 * unsupported and 1 if the device claims to support the command. 1050 */ 1051 int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, 1052 unsigned int len, unsigned char opcode) 1053 { 1054 unsigned char cmd[16]; 1055 struct scsi_sense_hdr sshdr; 1056 int result; 1057 1058 if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3) 1059 return -EINVAL; 1060 1061 memset(cmd, 0, 16); 1062 cmd[0] = MAINTENANCE_IN; 1063 cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES; 1064 cmd[2] = 1; /* One command format */ 1065 cmd[3] = opcode; 1066 put_unaligned_be32(len, &cmd[6]); 1067 memset(buffer, 0, len); 1068 1069 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, 1070 &sshdr, 30 * HZ, 3, NULL); 1071 1072 if (result && scsi_sense_valid(&sshdr) && 1073 sshdr.sense_key == ILLEGAL_REQUEST && 1074 (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00) 1075 return -EINVAL; 1076 1077 if ((buffer[1] & 3) == 3) /* Command supported */ 1078 return 1; 1079 1080 return 0; 1081 } 1082 EXPORT_SYMBOL(scsi_report_opcode); 1083 1084 /** 1085 * scsi_device_get - get an additional reference to a scsi_device 1086 * @sdev: device to get a reference to 1087 * 1088 * Description: Gets a reference to the scsi_device and increments the use count 1089 * of the underlying LLDD module. You must hold host_lock of the 1090 * parent Scsi_Host or already have a reference when calling this. 
 */
int scsi_device_get(struct scsi_device *sdev)
{
	/* A device in SDEV_DEL is already on its way out; refuse it. */
	if (sdev->sdev_state == SDEV_DEL)
		return -ENXIO;
	if (!get_device(&sdev->sdev_gendev))
		return -ENXIO;
	/* We can fail this if we're doing SCSI operations
	 * from module exit (like cache flush) */
	try_module_get(sdev->host->hostt->module);

	return 0;
}
EXPORT_SYMBOL(scsi_device_get);

/**
 * scsi_device_put  -  release a reference to a scsi_device
 * @sdev:	device to release a reference on.
 *
 * Description: Release a reference to the scsi_device and decrements the use
 * count of the underlying LLDD module.  The device is freed once the last
 * user vanishes.
 */
void scsi_device_put(struct scsi_device *sdev)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module *module = sdev->host->hostt->module;

	/* The module refcount will be zero if scsi_device_get()
	 * was called from a module removal routine */
	if (module && module_refcount(module) != 0)
		module_put(module);
#endif
	put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);

/* helper for shost_for_each_device, see that for documentation */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
					   struct scsi_device *prev)
{
	/* Resume the walk after @prev, or start at the head of the
	 * host's device list when @prev is NULL. */
	struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
	struct scsi_device *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	while (list->next != &shost->__devices) {
		next = list_entry(list->next, struct scsi_device, siblings);
		/* skip devices that we can't get a reference to
		 * (scsi_device_get() returns 0 on success, so !ret
		 * means the reference was taken and we can stop) */
		if (!scsi_device_get(next))
			break;
		next = NULL;
		list = list->next;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* Drop the reference the previous iteration step took. */
	if (prev)
		scsi_device_put(prev);
	return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);

/**
 * starget_for_each_device  -  helper to walk all devices of a target
 * @starget:	target whose devices we want to iterate over.
 * @data:	Opaque passed to each function call.
 * @fn:		Function to call on each device
 *
 * This traverses over each device of @starget.  The devices have
 * a reference that must be released by scsi_device_put when breaking
 * out of the loop.
 */
void starget_for_each_device(struct scsi_target *starget, void *data,
			     void (*fn)(struct scsi_device *, void *))
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost) {
		if ((sdev->channel == starget->channel) &&
		    (sdev->id == starget->id))
			fn(sdev, data);
	}
}
EXPORT_SYMBOL(starget_for_each_device);

/**
 * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED)
 * @starget:	target whose devices we want to iterate over.
 * @data:	parameter for callback @fn()
 * @fn:		callback function that is invoked for each device
 *
 * This traverses over each device of @starget.  It does _not_
 * take a reference on the scsi_device, so the whole loop must be
 * protected by shost->host_lock.
1186 * 1187 * Note: The only reason why drivers would want to use this is because 1188 * they need to access the device list in irq context. Otherwise you 1189 * really want to use starget_for_each_device instead. 1190 **/ 1191 void __starget_for_each_device(struct scsi_target *starget, void *data, 1192 void (*fn)(struct scsi_device *, void *)) 1193 { 1194 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1195 struct scsi_device *sdev; 1196 1197 __shost_for_each_device(sdev, shost) { 1198 if ((sdev->channel == starget->channel) && 1199 (sdev->id == starget->id)) 1200 fn(sdev, data); 1201 } 1202 } 1203 EXPORT_SYMBOL(__starget_for_each_device); 1204 1205 /** 1206 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED) 1207 * @starget: SCSI target pointer 1208 * @lun: SCSI Logical Unit Number 1209 * 1210 * Description: Looks up the scsi_device with the specified @lun for a given 1211 * @starget. The returned scsi_device does not have an additional 1212 * reference. You must hold the host's host_lock over this call and 1213 * any access to the returned scsi_device. A scsi_device in state 1214 * SDEV_DEL is skipped. 1215 * 1216 * Note: The only reason why drivers should use this is because 1217 * they need to access the device list in irq context. Otherwise you 1218 * really want to use scsi_device_lookup_by_target instead. 
1219 **/ 1220 struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget, 1221 uint lun) 1222 { 1223 struct scsi_device *sdev; 1224 1225 list_for_each_entry(sdev, &starget->devices, same_target_siblings) { 1226 if (sdev->sdev_state == SDEV_DEL) 1227 continue; 1228 if (sdev->lun ==lun) 1229 return sdev; 1230 } 1231 1232 return NULL; 1233 } 1234 EXPORT_SYMBOL(__scsi_device_lookup_by_target); 1235 1236 /** 1237 * scsi_device_lookup_by_target - find a device given the target 1238 * @starget: SCSI target pointer 1239 * @lun: SCSI Logical Unit Number 1240 * 1241 * Description: Looks up the scsi_device with the specified @lun for a given 1242 * @starget. The returned scsi_device has an additional reference that 1243 * needs to be released with scsi_device_put once you're done with it. 1244 **/ 1245 struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, 1246 uint lun) 1247 { 1248 struct scsi_device *sdev; 1249 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1250 unsigned long flags; 1251 1252 spin_lock_irqsave(shost->host_lock, flags); 1253 sdev = __scsi_device_lookup_by_target(starget, lun); 1254 if (sdev && scsi_device_get(sdev)) 1255 sdev = NULL; 1256 spin_unlock_irqrestore(shost->host_lock, flags); 1257 1258 return sdev; 1259 } 1260 EXPORT_SYMBOL(scsi_device_lookup_by_target); 1261 1262 /** 1263 * __scsi_device_lookup - find a device given the host (UNLOCKED) 1264 * @shost: SCSI host pointer 1265 * @channel: SCSI channel (zero if only one channel) 1266 * @id: SCSI target number (physical unit number) 1267 * @lun: SCSI Logical Unit Number 1268 * 1269 * Description: Looks up the scsi_device with the specified @channel, @id, @lun 1270 * for a given host. The returned scsi_device does not have an additional 1271 * reference. You must hold the host's host_lock over this call and any access 1272 * to the returned scsi_device. 
1273 * 1274 * Note: The only reason why drivers would want to use this is because 1275 * they need to access the device list in irq context. Otherwise you 1276 * really want to use scsi_device_lookup instead. 1277 **/ 1278 struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost, 1279 uint channel, uint id, uint lun) 1280 { 1281 struct scsi_device *sdev; 1282 1283 list_for_each_entry(sdev, &shost->__devices, siblings) { 1284 if (sdev->channel == channel && sdev->id == id && 1285 sdev->lun ==lun) 1286 return sdev; 1287 } 1288 1289 return NULL; 1290 } 1291 EXPORT_SYMBOL(__scsi_device_lookup); 1292 1293 /** 1294 * scsi_device_lookup - find a device given the host 1295 * @shost: SCSI host pointer 1296 * @channel: SCSI channel (zero if only one channel) 1297 * @id: SCSI target number (physical unit number) 1298 * @lun: SCSI Logical Unit Number 1299 * 1300 * Description: Looks up the scsi_device with the specified @channel, @id, @lun 1301 * for a given host. The returned scsi_device has an additional reference that 1302 * needs to be released with scsi_device_put once you're done with it. 
 **/
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
		uint channel, uint id, uint lun)
{
	struct scsi_device *sdev;
	unsigned long flags;

	/* Hold host_lock across lookup + get so the device cannot be
	 * removed between finding it and taking the reference. */
	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup(shost, channel, id, lun);
	/* scsi_device_get() returns 0 on success; a non-zero return
	 * means no reference could be taken, so report no match. */
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);

MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

/* Bring up the SCSI core subsystems in dependency order; on failure,
 * unwind the ones already initialized in reverse order via the goto
 * ladder below. */
static int __init init_scsi(void)
{
	int error;

	error = scsi_init_queue();
	if (error)
		return error;
	error = scsi_init_procfs();
	if (error)
		goto cleanup_queue;
	error = scsi_init_devinfo();
	if (error)
		goto cleanup_procfs;
	error = scsi_init_hosts();
	if (error)
		goto cleanup_devlist;
	error = scsi_init_sysctl();
	if (error)
		goto cleanup_hosts;
	error = scsi_sysfs_register();
	if (error)
		goto cleanup_sysctl;

	scsi_netlink_init();

	printk(KERN_NOTICE "SCSI subsystem initialized\n");
	return 0;

cleanup_sysctl:
	scsi_exit_sysctl();
cleanup_hosts:
	scsi_exit_hosts();
cleanup_devlist:
	scsi_exit_devinfo();
cleanup_procfs:
	scsi_exit_procfs();
cleanup_queue:
	scsi_exit_queue();
	printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
	       -error);
	return error;
}

/* Tear down in the reverse order of init_scsi(), then drain the async
 * probing domain shared with sd and power management. */
static void __exit exit_scsi(void)
{
	scsi_netlink_exit();
	scsi_sysfs_unregister();
	scsi_exit_sysctl();
	scsi_exit_hosts();
	scsi_exit_devinfo();
	scsi_exit_procfs();
	scsi_exit_queue();
	async_unregister_domain(&scsi_sd_probe_domain);
}

subsys_initcall(init_scsi);
module_exit(exit_scsi);