xref: /openbmc/linux/drivers/scsi/scsi.c (revision 5f32c314)
/*
 *  scsi.c Copyright (C) 1992 Drew Eckhardt
 *         Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *         Copyright (C) 2002, 2003 Christoph Hellwig
 *
 *  generic mid-level SCSI driver
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *  <drew@colorado.edu>
 *
 *  Bug correction thanks go to:
 *      Rik Faith <faith@cs.unc.edu>
 *      Tommy Thorn <tthorn>
 *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
 *
 *  Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
 *  add scatter-gather, multiple outstanding requests, and other
 *  enhancements.
 *
 *  Native multichannel, wide scsi, /proc/scsi and hot plugging
 *  support added by Michael Neuffer <mike@i-connect.net>
 *
 *  Added request_module("scsi_hostadapter") for kerneld:
 *  (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
 *  Bjorn Ekwall  <bj0rn@blox.se>
 *  (changed to kmod)
 *
 *  Major improvements to the timeout, abort, and reset processing,
 *  as well as performance modifications for large queue depths by
 *  Leonard N. Zubkoff <lnz@dandelion.com>
 *
 *  Converted cli() code to spinlocks, Ingo Molnar
 *
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *
 *  out_of_space hacks, D. Gilbert (dpg) 990608
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/async.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scsi.h>

static void scsi_done(struct scsi_cmnd *cmd);

/*
 * Definitions and constants.
 */

/*
 * Note - the initial logging level can be set here to log events at boot time.
 * After the system is up, you may enable logging via the /proc interface.
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif
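
/*
 * Usage note (illustrative, not part of the original file): since
 * scsi_logging_level is registered as a module parameter at the bottom of
 * this file, on typical builds where the SCSI core is scsi_mod it can be
 * set at boot with something like scsi_mod.scsi_logging_level=<mask>, or
 * changed at run time through the writable parameter/procfs interface
 * mentioned above.  The exact path may vary by kernel configuration.
 */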

/* sd, scsi core and power management need to coordinate flushing async actions */
ASYNC_DOMAIN(scsi_sd_probe_domain);
EXPORT_SYMBOL(scsi_sd_probe_domain);

/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
 * You may not alter any existing entry (although adding new ones is
 * encouraged once assigned by ANSI/INCITS T10).
 */
static const char *const scsi_device_types[] = {
	"Direct-Access    ",
	"Sequential-Access",
	"Printer          ",
	"Processor        ",
	"WORM             ",
	"CD-ROM           ",
	"Scanner          ",
	"Optical Device   ",
	"Medium Changer   ",
	"Communications   ",
	"ASC IT8          ",
	"ASC IT8          ",
	"RAID             ",
	"Enclosure        ",
	"Direct-Access-RBC",
	"Optical card     ",
	"Bridge controller",
	"Object storage   ",
	"Automation/Drive ",
};

/**
 * scsi_device_type - Return a 17-character string describing the device type
 * @type: type number to look up
 */
const char *scsi_device_type(unsigned type)
{
	if (type == 0x1e)
		return "Well-known LUN   ";
	if (type == 0x1f)
		return "No Device        ";
	if (type >= ARRAY_SIZE(scsi_device_types))
		return "Unknown          ";
	return scsi_device_types[type];
}
EXPORT_SYMBOL(scsi_device_type);
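
/*
 * Example (illustrative, not part of the original file): TYPE_DISK is 0x00
 * in <scsi/scsi.h>, so scsi_device_type(TYPE_DISK) returns the fixed-width
 * string "Direct-Access    ", suitable for the aligned /proc/scsi/scsi
 * output format.
 */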

struct scsi_host_cmd_pool {
	struct kmem_cache	*cmd_slab;
	struct kmem_cache	*sense_slab;
	unsigned int		users;
	char			*cmd_name;
	char			*sense_name;
	unsigned int		slab_flags;
	gfp_t			gfp_mask;
};

static struct scsi_host_cmd_pool scsi_cmd_pool = {
	.cmd_name	= "scsi_cmd_cache",
	.sense_name	= "scsi_sense_cache",
	.slab_flags	= SLAB_HWCACHE_ALIGN,
};

static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
	.cmd_name	= "scsi_cmd_cache(DMA)",
	.sense_name	= "scsi_sense_cache(DMA)",
	.slab_flags	= SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
	.gfp_mask	= __GFP_DMA,
};

static DEFINE_MUTEX(host_cmd_pool_mutex);

/**
 * scsi_pool_alloc_command - internal function to get a fully allocated command
 * @pool:	slab pool to allocate the command from
 * @gfp_mask:	mask for the allocation
 *
 * Returns a fully allocated command (with the associated sense buffer) or
 * NULL on failure
 */
static struct scsi_cmnd *
scsi_pool_alloc_command(struct scsi_host_cmd_pool *pool, gfp_t gfp_mask)
{
	struct scsi_cmnd *cmd;

	cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
	if (!cmd)
		return NULL;

	cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
					     gfp_mask | pool->gfp_mask);
	if (!cmd->sense_buffer) {
		kmem_cache_free(pool->cmd_slab, cmd);
		return NULL;
	}

	return cmd;
}

/**
 * scsi_pool_free_command - internal function to release a command
 * @pool:	slab pool the command was originally allocated from
 * @cmd:	command to release
 *
 * The command must previously have been allocated by
 * scsi_pool_alloc_command.
 */
static void
scsi_pool_free_command(struct scsi_host_cmd_pool *pool,
			 struct scsi_cmnd *cmd)
{
	if (cmd->prot_sdb)
		kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);

	kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
	kmem_cache_free(pool->cmd_slab, cmd);
}

/**
 * scsi_host_alloc_command - internal function to allocate command
 * @shost:	SCSI host whose pool to allocate from
 * @gfp_mask:	mask for the allocation
 *
 * Returns a fully allocated command with sense buffer and protection
 * data buffer (where applicable) or NULL on failure
 */
static struct scsi_cmnd *
scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
{
	struct scsi_cmnd *cmd;

	cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
	if (!cmd)
		return NULL;

	if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
		cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);

		if (!cmd->prot_sdb) {
			scsi_pool_free_command(shost->cmd_pool, cmd);
			return NULL;
		}
	}

	return cmd;
}

/**
 * __scsi_get_command - Allocate a struct scsi_cmnd
 * @shost: host to transmit command
 * @gfp_mask: allocation mask
 *
 * Description: allocate a struct scsi_cmnd from host's slab, recycling from the
 *              host's free_list if necessary.
 */
struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
{
	struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask);

	if (unlikely(!cmd)) {
		unsigned long flags;

		spin_lock_irqsave(&shost->free_list_lock, flags);
		if (likely(!list_empty(&shost->free_list))) {
			cmd = list_entry(shost->free_list.next,
					 struct scsi_cmnd, list);
			list_del_init(&cmd->list);
		}
		spin_unlock_irqrestore(&shost->free_list_lock, flags);

		if (cmd) {
			void *buf, *prot;

			buf = cmd->sense_buffer;
			prot = cmd->prot_sdb;

			memset(cmd, 0, sizeof(*cmd));

			cmd->sense_buffer = buf;
			cmd->prot_sdb = prot;
		}
	}

	return cmd;
}
EXPORT_SYMBOL_GPL(__scsi_get_command);

/**
 * scsi_get_command - Allocate and setup a scsi command block
 * @dev: parent scsi device
 * @gfp_mask: allocator flags
 *
 * Returns:	The allocated scsi command structure.
 */
struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
{
	struct scsi_cmnd *cmd;

	/* Bail if we can't get a reference to the device */
	if (!get_device(&dev->sdev_gendev))
		return NULL;

	cmd = __scsi_get_command(dev->host, gfp_mask);

	if (likely(cmd != NULL)) {
		unsigned long flags;

		cmd->device = dev;
		INIT_LIST_HEAD(&cmd->list);
		INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
		spin_lock_irqsave(&dev->list_lock, flags);
		list_add_tail(&cmd->list, &dev->cmd_list);
		spin_unlock_irqrestore(&dev->list_lock, flags);
		cmd->jiffies_at_alloc = jiffies;
	} else
		put_device(&dev->sdev_gendev);

	return cmd;
}
EXPORT_SYMBOL(scsi_get_command);

/**
 * __scsi_put_command - Free a struct scsi_cmnd
 * @shost: dev->host
 * @cmd: Command to free
 * @dev: parent scsi device
 */
void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
			struct device *dev)
{
	unsigned long flags;

	/* refill the host's reserve command if it has been used up */
	spin_lock_irqsave(&shost->free_list_lock, flags);
	if (unlikely(list_empty(&shost->free_list))) {
		list_add(&cmd->list, &shost->free_list);
		cmd = NULL;
	}
	spin_unlock_irqrestore(&shost->free_list_lock, flags);

	if (likely(cmd != NULL))
		scsi_pool_free_command(shost->cmd_pool, cmd);

	put_device(dev);
}
EXPORT_SYMBOL(__scsi_put_command);

/**
 * scsi_put_command - Free a scsi command block
 * @cmd: command block to free
 *
 * Returns:	Nothing.
 *
 * Notes:	The command must not belong to any lists.
 */
void scsi_put_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	unsigned long flags;

	/* serious error if the command hasn't come from a device list */
	spin_lock_irqsave(&cmd->device->list_lock, flags);
	BUG_ON(list_empty(&cmd->list));
	list_del_init(&cmd->list);
	spin_unlock_irqrestore(&cmd->device->list_lock, flags);

	cancel_delayed_work(&cmd->abort_work);

	__scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_put_command);
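
/*
 * Usage sketch (illustrative, not part of the original file): code that
 * needs a command outside the normal request path pairs the two calls,
 * assuming it already holds a valid scsi_device pointer sdev:
 *
 *	struct scsi_cmnd *cmd = scsi_get_command(sdev, GFP_KERNEL);
 *
 *	if (cmd) {
 *		... set up cmd->cmnd, data buffers, etc. and use it ...
 *		scsi_put_command(cmd);
 *	}
 */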

static struct scsi_host_cmd_pool *scsi_get_host_cmd_pool(gfp_t gfp_mask)
{
	struct scsi_host_cmd_pool *retval = NULL, *pool;
	/*
	 * Select a command slab for this host and create it if not
	 * yet existent.
	 */
	mutex_lock(&host_cmd_pool_mutex);
	pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
		&scsi_cmd_pool;
	if (!pool->users) {
		pool->cmd_slab = kmem_cache_create(pool->cmd_name,
						   sizeof(struct scsi_cmnd), 0,
						   pool->slab_flags, NULL);
		if (!pool->cmd_slab)
			goto fail;

		pool->sense_slab = kmem_cache_create(pool->sense_name,
						     SCSI_SENSE_BUFFERSIZE, 0,
						     pool->slab_flags, NULL);
		if (!pool->sense_slab) {
			kmem_cache_destroy(pool->cmd_slab);
			goto fail;
		}
	}

	pool->users++;
	retval = pool;
 fail:
	mutex_unlock(&host_cmd_pool_mutex);
	return retval;
}

static void scsi_put_host_cmd_pool(gfp_t gfp_mask)
{
	struct scsi_host_cmd_pool *pool;

	mutex_lock(&host_cmd_pool_mutex);
	pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
		&scsi_cmd_pool;
	/*
	 * This may happen if a driver has a mismatched get and put
	 * of the command pool; the driver should be implicated in
	 * the stack trace
	 */
	BUG_ON(pool->users == 0);

	if (!--pool->users) {
		kmem_cache_destroy(pool->cmd_slab);
		kmem_cache_destroy(pool->sense_slab);
	}
	mutex_unlock(&host_cmd_pool_mutex);
}

/**
 * scsi_allocate_command - get a fully allocated SCSI command
 * @gfp_mask:	allocation mask
 *
 * This function is for use outside of the normal host based pools.
 * It allocates the relevant command and takes an additional reference
 * on the pool it used.  This function *must* be paired with
 * scsi_free_command, called with the identical mask, otherwise the
 * pool reference counts will eventually go wrong and you'll trigger a bug.
 *
 * This function should *only* be used by drivers that need a static
 * command allocation at start of day for internal functions.
 */
struct scsi_cmnd *scsi_allocate_command(gfp_t gfp_mask)
{
	struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);

	if (!pool)
		return NULL;

	return scsi_pool_alloc_command(pool, gfp_mask);
}
EXPORT_SYMBOL(scsi_allocate_command);

/**
 * scsi_free_command - free a command allocated by scsi_allocate_command
 * @gfp_mask:	mask used in the original allocation
 * @cmd:	command to free
 *
 * Note: using the original allocation mask is vital because that's
 * what determines which command pool we use to free the command.  Any
 * mismatch will cause the system to BUG eventually.
 */
void scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd)
{
	struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);

	/*
	 * This could trigger if the mask to scsi_allocate_command
	 * doesn't match this mask.  Otherwise we're guaranteed that this
	 * succeeds because scsi_allocate_command must have taken a reference
	 * on the pool
	 */
	BUG_ON(!pool);

	scsi_pool_free_command(pool, cmd);
	/*
	 * scsi_put_host_cmd_pool is called twice; once to release the
	 * reference we took above, and once to release the reference
	 * originally taken by scsi_allocate_command
	 */
	scsi_put_host_cmd_pool(gfp_mask);
	scsi_put_host_cmd_pool(gfp_mask);
}
EXPORT_SYMBOL(scsi_free_command);
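
/*
 * Pairing sketch (illustrative, not part of the original file): the mask
 * passed to scsi_free_command() must match the one used at allocation so
 * that the same pool is referenced and then fully released:
 *
 *	struct scsi_cmnd *cmd = scsi_allocate_command(GFP_KERNEL);
 *
 *	if (cmd) {
 *		... use the command for internal start-of-day work ...
 *		scsi_free_command(GFP_KERNEL, cmd);
 *	}
 */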

/**
 * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
 * @shost: host to allocate the freelist for.
 *
 * Description: The command freelist protects against system-wide out of memory
 * deadlock by preallocating one SCSI command structure for each host, so the
 * system can always write to a swap file on a device associated with that host.
 *
 * Returns:	0 on success, -ENOMEM on failure.
 */
int scsi_setup_command_freelist(struct Scsi_Host *shost)
{
	struct scsi_cmnd *cmd;
	const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;

	spin_lock_init(&shost->free_list_lock);
	INIT_LIST_HEAD(&shost->free_list);

	shost->cmd_pool = scsi_get_host_cmd_pool(gfp_mask);

	if (!shost->cmd_pool)
		return -ENOMEM;

	/*
	 * Get one backup command for this host.
	 */
	cmd = scsi_host_alloc_command(shost, gfp_mask);
	if (!cmd) {
		scsi_put_host_cmd_pool(gfp_mask);
		shost->cmd_pool = NULL;
		return -ENOMEM;
	}
	list_add(&cmd->list, &shost->free_list);
	return 0;
}

/**
 * scsi_destroy_command_freelist - Release the command freelist for a scsi host.
 * @shost: host whose freelist is going to be destroyed
 */
void scsi_destroy_command_freelist(struct Scsi_Host *shost)
{
	/*
	 * If cmd_pool is NULL the free list was not initialized, so
	 * do not attempt to release resources.
	 */
	if (!shost->cmd_pool)
		return;

	while (!list_empty(&shost->free_list)) {
		struct scsi_cmnd *cmd;

		cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
		list_del_init(&cmd->list);
		scsi_pool_free_command(shost->cmd_pool, cmd);
	}
	shost->cmd_pool = NULL;
	scsi_put_host_cmd_pool(shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL);
}

#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
	unsigned int level;

	/*
	 * If ML QUEUE log level is greater than or equal to:
	 *
	 * 1: nothing (match completion)
	 *
	 * 2: log opcode + command of all commands
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
				       SCSI_LOG_MLQUEUE_BITS);
		if (level > 1) {
			scmd_printk(KERN_INFO, cmd, "Send: ");
			if (level > 2)
				printk("0x%p ", cmd);
			printk("\n");
			scsi_print_command(cmd);
			if (level > 3) {
				printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
				       " queuecommand 0x%p\n",
					scsi_sglist(cmd), scsi_bufflen(cmd),
					cmd->device->host->hostt->queuecommand);
			}
		}
	}
}

void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
	unsigned int level;

	/*
	 * If ML COMPLETE log level is greater than or equal to:
	 *
	 * 1: log disposition, result, opcode + command, and conditionally
	 * sense data for failures or non SUCCESS dispositions.
	 *
	 * 2: same as 1 but for all command completions.
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
				       SCSI_LOG_MLCOMPLETE_BITS);
		if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
		    (level > 1)) {
			scmd_printk(KERN_INFO, cmd, "Done: ");
			if (level > 2)
				printk("0x%p ", cmd);
			/*
			 * Dump truncated values, so we usually fit within
			 * 80 chars.
			 */
			switch (disposition) {
			case SUCCESS:
				printk("SUCCESS\n");
				break;
			case NEEDS_RETRY:
				printk("RETRY\n");
				break;
			case ADD_TO_MLQUEUE:
				printk("MLQUEUE\n");
				break;
			case FAILED:
				printk("FAILED\n");
				break;
			case TIMEOUT_ERROR:
				/*
				 * If called via scsi_times_out.
				 */
				printk("TIMEOUT\n");
				break;
			default:
				printk("UNKNOWN\n");
			}
			scsi_print_result(cmd);
			scsi_print_command(cmd);
			if (status_byte(cmd->result) & CHECK_CONDITION)
				scsi_print_sense("", cmd);
			if (level > 3)
				scmd_printk(KERN_INFO, cmd,
					    "scsi host busy %d failed %d\n",
					    cmd->device->host->host_busy,
					    cmd->device->host->host_failed);
		}
	}
}
#endif

/**
 * scsi_cmd_get_serial - Assign a serial number to a command
 * @host: the scsi host
 * @cmd: command to assign serial number to
 *
 * Description: a serial number identifies a request for error recovery
 * and debugging purposes.  Protected by the host's host_lock.
 */
void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	cmd->serial_number = host->cmd_serial_number++;
	if (cmd->serial_number == 0)
		cmd->serial_number = host->cmd_serial_number++;
}
EXPORT_SYMBOL(scsi_cmd_get_serial);

/**
 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: nonzero if the request was rejected and the device's queue
 * needs to be plugged.
 */
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int rtn = 0;

	atomic_inc(&cmd->device->iorequest_cnt);

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		scsi_done(cmd);
		/* return 0 (because the command has been processed) */
		goto out;
	}

	/* Check to see if the scsi lld made this device blocked. */
	if (unlikely(scsi_device_blocked(cmd->device))) {
		/*
		 * in blocked state, the command is just put back on
		 * the device queue.  The suspend state has already
		 * blocked the queue so future requests should not
		 * occur until the device transitions out of the
		 * suspend state.
		 */
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);

		SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked\n"));

		/*
		 * NOTE: rtn is still zero here because we don't need the
		 * queue to be plugged on return (it's already stopped)
		 */
		goto out;
	}

	/*
	 * If SCSI-2 or lower, store the LUN value in cmnd.
	 */
	if (cmd->device->scsi_level <= SCSI_2 &&
	    cmd->device->scsi_level != SCSI_UNKNOWN) {
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);
	}

	scsi_log_send(cmd);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3,
			printk("queuecommand : command too long. "
			       "cdb_size=%d host->max_cmd_len=%d\n",
			       cmd->cmd_len, cmd->device->host->max_cmd_len));
		cmd->result = (DID_ABORT << 16);

		scsi_done(cmd);
		goto out;
	}

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		scsi_done(cmd);
	} else {
		trace_scsi_dispatch_cmd_start(cmd);
		cmd->scsi_done = scsi_done;
		rtn = host->hostt->queuecommand(host, cmd);
	}

	if (rtn) {
		trace_scsi_dispatch_cmd_error(cmd, rtn);
		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
			rtn = SCSI_MLQUEUE_HOST_BUSY;

		scsi_queue_insert(cmd, rtn);

		SCSI_LOG_MLQUEUE(3,
		    printk("queuecommand : request rejected\n"));
	}

 out:
	SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmd()\n"));
	return rtn;
}

/**
 * scsi_done - Invoke completion on finished SCSI command.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * Description: This function is the mid-level's (SCSI Core) interrupt routine,
 * which regains ownership of the SCSI command (de facto) from a LLDD, and
 * calls blk_complete_request() for further processing.
 *
 * This function is interrupt context safe.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
	trace_scsi_dispatch_cmd_done(cmd);
	blk_complete_request(cmd->request);
}

/**
 * scsi_finish_command - cleanup and pass command back to upper layer
 * @cmd: the command
 *
 * Description: Pass command off to upper layer for finishing of I/O
 *              request, waking processes that are waiting on results,
 *              etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = sdev->host;
	struct scsi_driver *drv;
	unsigned int good_bytes;

	scsi_device_unbusy(sdev);

	/*
	 * Clear the flags which say that the device/host is no longer
	 * capable of accepting new commands.  These are set in scsi_queue.c
	 * for both the queue full condition on a device, and for a
	 * host full condition on the host.
	 *
	 * XXX(hch): What about locking?
	 */
	shost->host_blocked = 0;
	starget->target_blocked = 0;
	sdev->device_blocked = 0;

	/*
	 * If we have valid sense information, then some kind of recovery
	 * must have taken place.  Make a note of this.
	 */
	if (SCSI_SENSE_VALID(cmd))
		cmd->result |= (DRIVER_SENSE << 24);

	SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
				"Notifying upper driver of completion "
				"(result %x)\n", cmd->result));

	good_bytes = scsi_bufflen(cmd);
	if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
		int old_good_bytes = good_bytes;
		drv = scsi_cmd_to_driver(cmd);
		if (drv->done)
			good_bytes = drv->done(cmd);
		/*
		 * USB may not give sense identifying bad sector and
		 * simply return a residue instead, so subtract off the
		 * residue if drv->done() error processing indicates no
		 * change to the completion length.
		 */
		if (good_bytes == old_good_bytes)
			good_bytes -= scsi_get_resid(cmd);
	}
	scsi_io_completion(cmd, good_bytes);
}
EXPORT_SYMBOL(scsi_finish_command);

/**
 * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth
 * @sdev: SCSI Device in question
 * @tagged: Do we use tagged queueing (non-0) or do we treat
 *          this device as an untagged device (0)
 * @tags: Number of tags allowed if tagged queueing enabled,
 *        or number of commands the low level driver can
 *        queue up in non-tagged mode (as per cmd_per_lun).
 *
 * Returns:	Nothing
 *
 * Lock Status:	None held on entry
 *
 * Notes:	Low level drivers may call this at any time and we will do
 * 		the right thing depending on whether or not the device is
 * 		currently active and whether or not it even has the
 * 		command blocks built yet.
 */
void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
{
	unsigned long flags;

	/*
	 * refuse to set tagged depth to an unworkable size
	 */
	if (tags <= 0)
		return;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);

	/*
	 * Check to see if the queue is managed by the block layer.
	 * If it is, and we fail to adjust the depth, exit.
	 *
	 * Do not resize the tag map if it is a host wide share bqt,
	 * because the size should be the host's can_queue. If there
	 * is more IO than the LLD's can_queue (so there are not enough
	 * tags), request_fn's host queue ready check will handle it.
	 */
	if (!sdev->host->bqt) {
		if (blk_queue_tagged(sdev->request_queue) &&
		    blk_queue_resize_tags(sdev->request_queue, tags) != 0)
			goto out;
	}

	sdev->queue_depth = tags;
	switch (tagged) {
		case MSG_ORDERED_TAG:
			sdev->ordered_tags = 1;
			sdev->simple_tags = 1;
			break;
		case MSG_SIMPLE_TAG:
			sdev->ordered_tags = 0;
			sdev->simple_tags = 1;
			break;
		default:
			sdev_printk(KERN_WARNING, sdev,
				    "scsi_adjust_queue_depth, bad queue type, "
				    "disabled\n");
			/* fall through */
		case 0:
			sdev->ordered_tags = sdev->simple_tags = 0;
			sdev->queue_depth = tags;
			break;
	}
 out:
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
EXPORT_SYMBOL(scsi_adjust_queue_depth);
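
/*
 * Usage sketch (illustrative, not part of the original file): a LLDD
 * typically calls this from its ->slave_configure() hook, for example:
 *
 *	static int example_slave_configure(struct scsi_device *sdev)
 *	{
 *		if (sdev->tagged_supported)
 *			scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 64);
 *		else
 *			scsi_adjust_queue_depth(sdev, 0,
 *						sdev->host->cmd_per_lun);
 *		return 0;
 *	}
 *
 * example_slave_configure and the depth of 64 are made up for illustration.
 */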

/**
 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
 * @sdev: SCSI Device in question
 * @depth: Current number of outstanding SCSI commands on this device,
 *         not counting the one returned as QUEUE_FULL.
 *
 * Description:	This function will track successive QUEUE_FULL events on a
 * 		specific SCSI device to determine if and when there is a
 * 		need to adjust the queue depth on the device.
 *
 * Returns:	0 - No change needed, >0 - Adjust queue depth to this new depth,
 * 		-1 - Drop back to untagged operation using host->cmd_per_lun
 * 			as the untagged command depth
 *
 * Lock Status:	None held on entry
 *
 * Notes:	Low level drivers may call this at any time and we will do
 * 		"The Right Thing."  We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{
	/*
	 * Ignore QUEUE_FULLs that occur within the same jiffies window;
	 * they could all stem from the same event.
	 */
	if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4))
		return 0;

	sdev->last_queue_full_time = jiffies;
	if (sdev->last_queue_full_depth != depth) {
		sdev->last_queue_full_count = 1;
		sdev->last_queue_full_depth = depth;
	} else {
		sdev->last_queue_full_count++;
	}

	if (sdev->last_queue_full_count <= 10)
		return 0;
	if (sdev->last_queue_full_depth < 8) {
		/* Drop back to untagged */
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		return -1;
	}

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
	return depth;
}
EXPORT_SYMBOL(scsi_track_queue_full);
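
/*
 * Usage sketch (illustrative, not part of the original file): a LLDD that
 * sees a QUEUE FULL (task set full) status in its completion path might do:
 *
 *	if (status_byte(cmd->result) == QUEUE_FULL)
 *		scsi_track_queue_full(cmd->device, outstanding_cmds - 1);
 *
 * where outstanding_cmds is the driver's own count of commands currently
 * queued on that device (a made-up name here).
 */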

/**
 * scsi_vpd_inquiry - Request a device provide us with a VPD page
 * @sdev: The device to ask
 * @buffer: Where to put the result
 * @page: Which Vital Product Data to return
 * @len: The length of the buffer
 *
 * This is an internal helper function.  You probably want to use
 * scsi_get_vpd_page instead.
 *
 * Returns 0 on success or a nonzero value on failure.
 */
static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
							u8 page, unsigned len)
{
	int result;
	unsigned char cmd[16];

	cmd[0] = INQUIRY;
	cmd[1] = 1;		/* EVPD */
	cmd[2] = page;
	cmd[3] = len >> 8;
	cmd[4] = len & 0xff;
	cmd[5] = 0;		/* Control byte */

	/*
	 * I'm not convinced we need to try quite this hard to get VPD, but
	 * all the existing users tried this hard.
	 */
	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
				  len, NULL, 30 * HZ, 3, NULL);
	if (result)
		return result;

	/* Sanity check that we got the page back that we asked for */
	if (buffer[1] != page)
		return -EIO;

	return 0;
}

/**
 * scsi_get_vpd_page - Get Vital Product Data from a SCSI device
 * @sdev: The device to ask
 * @page: Which Vital Product Data to return
 * @buf: where to store the VPD
 * @buf_len: number of bytes in the VPD buffer area
 *
 * SCSI devices may optionally supply Vital Product Data.  Each 'page'
 * of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
 * If the device supports this VPD page, this routine fills @buf with
 * the data from that page and returns 0.  If the page is not supported
 * or cannot be retrieved, -EINVAL is returned.
 */
int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
		      int buf_len)
{
	int i, result;

	if (sdev->skip_vpd_pages)
		goto fail;

	/* Ask for all the pages supported by this device */
	result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
	if (result)
		goto fail;

	/* If the user actually wanted this page, we can skip the rest */
	if (page == 0)
		return 0;

	for (i = 0; i < min((int)buf[3], buf_len - 4); i++)
		if (buf[i + 4] == page)
			goto found;

	if (i < buf[3] && i >= buf_len - 4)
		/* ran off the end of the buffer, give us benefit of doubt */
		goto found;
	/* The device claims it doesn't support the requested page */
	goto fail;

 found:
	result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
	if (result)
		goto fail;

	return 0;

 fail:
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
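
/*
 * Usage sketch (illustrative, not part of the original file): fetching the
 * Block Limits VPD page (0xb0, defined in SBC) into a caller-supplied
 * buffer:
 *
 *	unsigned char vpd[64];
 *
 *	if (scsi_get_vpd_page(sdev, 0xb0, vpd, sizeof(vpd)) == 0) {
 *		... parse the page; bytes 2-3 carry its length ...
 *	}
 *
 * The 64-byte buffer size is arbitrary for the example.
 */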

/**
 * scsi_report_opcode - Find out if a given command opcode is supported
 * @sdev:	scsi device to query
 * @buffer:	scratch buffer (must be at least 20 bytes long)
 * @len:	length of buffer
 * @opcode:	opcode for command to look up
 *
 * Uses the REPORT SUPPORTED OPERATION CODES command to look up the given
 * opcode. Returns -EINVAL if RSOC fails, 0 if the command opcode is
 * unsupported and 1 if the device claims to support the command.
 */
int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
		       unsigned int len, unsigned char opcode)
{
	unsigned char cmd[16];
	struct scsi_sense_hdr sshdr;
	int result;

	if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
		return -EINVAL;

	memset(cmd, 0, 16);
	cmd[0] = MAINTENANCE_IN;
	cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
	cmd[2] = 1;		/* One command format */
	cmd[3] = opcode;
	put_unaligned_be32(len, &cmd[6]);
	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  &sshdr, 30 * HZ, 3, NULL);

	if (result && scsi_sense_valid(&sshdr) &&
	    sshdr.sense_key == ILLEGAL_REQUEST &&
	    (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
		return -EINVAL;

	if ((buffer[1] & 3) == 3) /* Command supported */
		return 1;

	return 0;
}
EXPORT_SYMBOL(scsi_report_opcode);
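
/*
 * Usage sketch (illustrative, not part of the original file): sd-style
 * probing for WRITE SAME (16) support, assuming a scratch buffer:
 *
 *	unsigned char buf[512];
 *
 *	if (scsi_report_opcode(sdev, buf, sizeof(buf), WRITE_SAME_16) == 1)
 *		... device claims to support WRITE SAME (16) ...
 *
 * WRITE_SAME_16 is defined in <scsi/scsi.h>; the 512-byte buffer here is
 * arbitrary (the function only needs roughly 20 bytes).
 */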

/**
 * scsi_device_get  -  get an additional reference to a scsi_device
 * @sdev:	device to get a reference to
 *
 * Description: Gets a reference to the scsi_device and increments the use count
 * of the underlying LLDD module.  You must hold host_lock of the
 * parent Scsi_Host or already have a reference when calling this.
 */
int scsi_device_get(struct scsi_device *sdev)
{
	if (sdev->sdev_state == SDEV_DEL)
		return -ENXIO;
	if (!get_device(&sdev->sdev_gendev))
		return -ENXIO;
	/* We can fail this if we're doing SCSI operations
	 * from module exit (like cache flush) */
	try_module_get(sdev->host->hostt->module);

	return 0;
}
EXPORT_SYMBOL(scsi_device_get);

/**
 * scsi_device_put  -  release a reference to a scsi_device
 * @sdev:	device to release a reference on.
 *
 * Description: Release a reference to the scsi_device and decrements the use
 * count of the underlying LLDD module.  The device is freed once the last
 * user vanishes.
 */
void scsi_device_put(struct scsi_device *sdev)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module *module = sdev->host->hostt->module;

	/* The module refcount will be zero if scsi_device_get()
	 * was called from a module removal routine */
	if (module && module_refcount(module) != 0)
		module_put(module);
#endif
	put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);

/* helper for shost_for_each_device, see that for documentation */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
					   struct scsi_device *prev)
{
	struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
	struct scsi_device *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	while (list->next != &shost->__devices) {
		next = list_entry(list->next, struct scsi_device, siblings);
		/* skip devices that we can't get a reference to */
		if (!scsi_device_get(next))
			break;
		next = NULL;
		list = list->next;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (prev)
		scsi_device_put(prev);
	return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);

/**
 * starget_for_each_device  -  helper to walk all devices of a target
 * @starget:	target whose devices we want to iterate over.
 * @data:	Opaque passed to each function call.
 * @fn:		Function to call on each device
 *
 * This traverses over each device of @starget.  The devices have
 * a reference that must be released by scsi_device_put when breaking
 * out of the loop.
 */
void starget_for_each_device(struct scsi_target *starget, void *data,
		     void (*fn)(struct scsi_device *, void *))
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost) {
		if ((sdev->channel == starget->channel) &&
		    (sdev->id == starget->id))
			fn(sdev, data);
	}
}
EXPORT_SYMBOL(starget_for_each_device);
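
/*
 * Usage sketch (illustrative, not part of the original file): counting the
 * devices attached to a target with a small callback:
 *
 *	static void example_count_sdev(struct scsi_device *sdev, void *data)
 *	{
 *		(*(int *)data)++;
 *	}
 *
 *	int count = 0;
 *	starget_for_each_device(starget, &count, example_count_sdev);
 *
 * example_count_sdev is a made-up name for illustration.
 */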

/**
 * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED)
 * @starget:	target whose devices we want to iterate over.
 * @data:	parameter for callback @fn()
 * @fn:		callback function that is invoked for each device
 *
 * This traverses over each device of @starget.  It does _not_
 * take a reference on the scsi_device, so the whole loop must be
 * protected by shost->host_lock.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use starget_for_each_device instead.
 **/
void __starget_for_each_device(struct scsi_target *starget, void *data,
			       void (*fn)(struct scsi_device *, void *))
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct scsi_device *sdev;

	__shost_for_each_device(sdev, shost) {
		if ((sdev->channel == starget->channel) &&
		    (sdev->id == starget->id))
			fn(sdev, data);
	}
}
EXPORT_SYMBOL(__starget_for_each_device);

/**
 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
 * @starget:	SCSI target pointer
 * @lun:	SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and
 * any access to the returned scsi_device. A scsi_device in state
 * SDEV_DEL is skipped.
 *
 * Note:  The only reason why drivers should use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup_by_target instead.
 **/
struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
						   uint lun)
{
	struct scsi_device *sdev;

	list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
		if (sdev->sdev_state == SDEV_DEL)
			continue;
		if (sdev->lun == lun)
			return sdev;
	}

	return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup_by_target);

/**
 * scsi_device_lookup_by_target - find a device given the target
 * @starget:	SCSI target pointer
 * @lun:	SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
						 uint lun)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup_by_target(starget, lun);
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup_by_target);

/**
 * __scsi_device_lookup - find a device given the host (UNLOCKED)
 * @shost:	SCSI host pointer
 * @channel:	SCSI channel (zero if only one channel)
 * @id:		SCSI target number (physical unit number)
 * @lun:	SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
 * for a given host. The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and any access
 * to the returned scsi_device.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup instead.
 **/
struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
		uint channel, uint id, uint lun)
{
	struct scsi_device *sdev;

	list_for_each_entry(sdev, &shost->__devices, siblings) {
		if (sdev->channel == channel && sdev->id == id &&
				sdev->lun == lun)
			return sdev;
	}

	return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup);

/**
 * scsi_device_lookup - find a device given the host
 * @shost:	SCSI host pointer
 * @channel:	SCSI channel (zero if only one channel)
 * @id:		SCSI target number (physical unit number)
 * @lun:	SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
 * for a given host.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
		uint channel, uint id, uint lun)
{
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup(shost, channel, id, lun);
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);
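
/*
 * Usage sketch (illustrative, not part of the original file): looking up
 * channel 0, id 1, lun 0 on a host and dropping the reference when done,
 * assuming a valid shost pointer:
 *
 *	struct scsi_device *sdev = scsi_device_lookup(shost, 0, 1, 0);
 *
 *	if (sdev) {
 *		... use sdev ...
 *		scsi_device_put(sdev);
 *	}
 */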

MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

static int __init init_scsi(void)
{
	int error;

	error = scsi_init_queue();
	if (error)
		return error;
	error = scsi_init_procfs();
	if (error)
		goto cleanup_queue;
	error = scsi_init_devinfo();
	if (error)
		goto cleanup_procfs;
	error = scsi_init_hosts();
	if (error)
		goto cleanup_devlist;
	error = scsi_init_sysctl();
	if (error)
		goto cleanup_hosts;
	error = scsi_sysfs_register();
	if (error)
		goto cleanup_sysctl;

	scsi_netlink_init();

	printk(KERN_NOTICE "SCSI subsystem initialized\n");
	return 0;

cleanup_sysctl:
	scsi_exit_sysctl();
cleanup_hosts:
	scsi_exit_hosts();
cleanup_devlist:
	scsi_exit_devinfo();
cleanup_procfs:
	scsi_exit_procfs();
cleanup_queue:
	scsi_exit_queue();
	printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
	       -error);
	return error;
}

static void __exit exit_scsi(void)
{
	scsi_netlink_exit();
	scsi_sysfs_unregister();
	scsi_exit_sysctl();
	scsi_exit_hosts();
	scsi_exit_devinfo();
	scsi_exit_procfs();
	scsi_exit_queue();
	async_unregister_domain(&scsi_sd_probe_domain);
}

subsys_initcall(init_scsi);
module_exit(exit_scsi);