xref: /openbmc/linux/drivers/scsi/scsi.c (revision c1f51218)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  scsi.c Copyright (C) 1992 Drew Eckhardt
4  *         Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
5  *         Copyright (C) 2002, 2003 Christoph Hellwig
6  *
7  *  generic mid-level SCSI driver
8  *      Initial versions: Drew Eckhardt
9  *      Subsequent revisions: Eric Youngdale
10  *
11  *  <drew@colorado.edu>
12  *
13  *  Bug correction thanks go to :
14  *      Rik Faith <faith@cs.unc.edu>
15  *      Tommy Thorn <tthorn>
16  *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
17  *
18  *  Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
19  *  add scatter-gather, multiple outstanding request, and other
20  *  enhancements.
21  *
22  *  Native multichannel, wide scsi, /proc/scsi and hot plugging
23  *  support added by Michael Neuffer <mike@i-connect.net>
24  *
25  *  Added request_module("scsi_hostadapter") for kerneld:
26  *  (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
27  *  Bjorn Ekwall  <bj0rn@blox.se>
28  *  (changed to kmod)
29  *
30  *  Major improvements to the timeout, abort, and reset processing,
31  *  as well as performance modifications for large queue depths by
32  *  Leonard N. Zubkoff <lnz@dandelion.com>
33  *
34  *  Converted cli() code to spinlocks, Ingo Molnar
35  *
36  *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
37  *
38  *  out_of_space hacks, D. Gilbert (dpg) 990608
39  */
40 
41 #include <linux/module.h>
42 #include <linux/moduleparam.h>
43 #include <linux/kernel.h>
44 #include <linux/timer.h>
45 #include <linux/string.h>
46 #include <linux/slab.h>
47 #include <linux/blkdev.h>
48 #include <linux/delay.h>
49 #include <linux/init.h>
50 #include <linux/completion.h>
51 #include <linux/unistd.h>
52 #include <linux/spinlock.h>
53 #include <linux/kmod.h>
54 #include <linux/interrupt.h>
55 #include <linux/notifier.h>
56 #include <linux/cpu.h>
57 #include <linux/mutex.h>
58 #include <linux/async.h>
59 #include <asm/unaligned.h>
60 
61 #include <scsi/scsi.h>
62 #include <scsi/scsi_cmnd.h>
63 #include <scsi/scsi_dbg.h>
64 #include <scsi/scsi_device.h>
65 #include <scsi/scsi_driver.h>
66 #include <scsi/scsi_eh.h>
67 #include <scsi/scsi_host.h>
68 #include <scsi/scsi_tcq.h>
69 
70 #include "scsi_priv.h"
71 #include "scsi_logging.h"
72 
73 #define CREATE_TRACE_POINTS
74 #include <trace/events/scsi.h>
75 
76 /*
77  * Definitions and constants.
78  */
79 
80 /*
81  * Note - the initial logging level can be set here to log events at boot time.
82  * After the system is up, you may enable logging via the /proc interface.
83  */
84 unsigned int scsi_logging_level;
85 #if defined(CONFIG_SCSI_LOGGING)
86 EXPORT_SYMBOL(scsi_logging_level);
87 #endif
88 
89 /*
90  * Domain for asynchronous system resume operations.  It is marked 'exclusive'
91  * to avoid being included in the async_synchronize_full() that is invoked by
92  * dpm_resume().
93  */
94 ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
95 EXPORT_SYMBOL(scsi_sd_pm_domain);
96 
97 #ifdef CONFIG_SCSI_LOGGING
98 void scsi_log_send(struct scsi_cmnd *cmd)
99 {
100 	unsigned int level;
101 
102 	/*
103 	 * If ML QUEUE log level is greater than or equal to:
104 	 *
105 	 * 1: nothing (match completion)
106 	 *
107 	 * 2: log opcode + command of all commands + cmd address
108 	 *
109 	 * 3: same as 2
110 	 *
111 	 * 4: same as 3
112 	 */
113 	if (unlikely(scsi_logging_level)) {
114 		level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
115 				       SCSI_LOG_MLQUEUE_BITS);
116 		if (level > 1) {
117 			scmd_printk(KERN_INFO, cmd,
118 				    "Send: scmd 0x%p\n", cmd);
119 			scsi_print_command(cmd);
120 		}
121 	}
122 }
123 
/*
 * scsi_log_completion - log a command completion, gated on the ML COMPLETE
 * level encoded in scsi_logging_level.
 */
void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
	unsigned int level;

	/*
	 * If ML COMPLETE log level is greater than or equal to:
	 *
	 * 1: log disposition, result, opcode + command, and conditionally
	 * sense data for failures or non SUCCESS dispositions.
	 *
	 * 2: same as 1 but for all command completions.
	 *
	 * 3: same as 2
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
				       SCSI_LOG_MLCOMPLETE_BITS);
		/* level 1 logs failures only; level 2+ logs every completion */
		if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
		    (level > 1)) {
			scsi_print_result(cmd, "Done", disposition);
			scsi_print_command(cmd);
			/* CHECK CONDITION status: sense data should be set */
			if (status_byte(cmd->result) == CHECK_CONDITION)
				scsi_print_sense(cmd);
			/* level 4: also dump host busy/failed counters */
			if (level > 3)
				scmd_printk(KERN_INFO, cmd,
					    "scsi host busy %d failed %d\n",
					    scsi_host_busy(cmd->device->host),
					    cmd->device->host->host_failed);
		}
	}
}
157 #endif
158 
/**
 * scsi_finish_command - cleanup and pass command back to upper layer
 * @cmd: the command
 *
 * Description: Pass command off to upper layer for finishing of I/O
 *              request, waking processes that are waiting on results,
 *              etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = sdev->host;
	struct scsi_driver *drv;
	unsigned int good_bytes;

	/* Release the budget/busy accounting taken when cmd was queued */
	scsi_device_unbusy(sdev, cmd);

	/*
	 * Clear the flags that say that the device/target/host is no longer
	 * capable of accepting new commands.
	 * (Read first so the common, already-unblocked case avoids a write.)
	 */
	if (atomic_read(&shost->host_blocked))
		atomic_set(&shost->host_blocked, 0);
	if (atomic_read(&starget->target_blocked))
		atomic_set(&starget->target_blocked, 0);
	if (atomic_read(&sdev->device_blocked))
		atomic_set(&sdev->device_blocked, 0);

	/*
	 * If we have valid sense information, then some kind of recovery
	 * must have taken place.  Make a note of this.
	 */
	if (SCSI_SENSE_VALID(cmd))
		cmd->result |= (DRIVER_SENSE << 24);

	SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
				"Notifying upper driver of completion "
				"(result %x)\n", cmd->result));

	/* Assume the full transfer succeeded; let the ULD refine that */
	good_bytes = scsi_bufflen(cmd);
	if (!blk_rq_is_passthrough(cmd->request)) {
		int old_good_bytes = good_bytes;
		drv = scsi_cmd_to_driver(cmd);
		if (drv->done)
			good_bytes = drv->done(cmd);
		/*
		 * USB may not give sense identifying bad sector and
		 * simply return a residue instead, so subtract off the
		 * residue if drv->done() error processing indicates no
		 * change to the completion length.
		 */
		if (good_bytes == old_good_bytes)
			good_bytes -= scsi_get_resid(cmd);
	}
	scsi_io_completion(cmd, good_bytes);
}
216 
217 
218 /*
219  * 1024 is big enough for saturating the fast scsi LUN now
220  */
221 int scsi_device_max_queue_depth(struct scsi_device *sdev)
222 {
223 	return max_t(int, sdev->host->can_queue, 1024);
224 }
225 
226 /**
227  * scsi_change_queue_depth - change a device's queue depth
228  * @sdev: SCSI Device in question
229  * @depth: number of commands allowed to be queued to the driver
230  *
231  * Sets the device queue depth and returns the new value.
232  */
233 int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
234 {
235 	depth = min_t(int, depth, scsi_device_max_queue_depth(sdev));
236 
237 	if (depth > 0) {
238 		sdev->queue_depth = depth;
239 		wmb();
240 	}
241 
242 	if (sdev->request_queue)
243 		blk_set_queue_depth(sdev->request_queue, depth);
244 
245 	sbitmap_resize(&sdev->budget_map, sdev->queue_depth);
246 
247 	return sdev->queue_depth;
248 }
249 EXPORT_SYMBOL(scsi_change_queue_depth);
250 
/**
 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
 * @sdev: SCSI Device in question
 * @depth: Current number of outstanding SCSI commands on this device,
 *         not counting the one returned as QUEUE_FULL.
 *
 * Description:	This function will track successive QUEUE_FULL events on a
 * 		specific SCSI device to determine if and when there is a
 * 		need to adjust the queue depth on the device.
 *
 * Returns:	0 - No change needed, >0 - Adjust queue depth to this new depth,
 * 		-1 - Drop back to untagged operation using host->cmd_per_lun
 * 			as the untagged command depth
 *
 * Lock Status:	None held on entry
 *
 * Notes:	Low level drivers may call this at any time and we will do
 * 		"The Right Thing."  We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{

	/*
	 * Don't let QUEUE_FULLs on the same
	 * jiffies count, they could all be from
	 * same event.
	 * (>> 4 buckets time into 16-jiffy windows; at most one event
	 * is counted per window.)
	 */
	if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4))
		return 0;

	sdev->last_queue_full_time = jiffies;
	if (sdev->last_queue_full_depth != depth) {
		/* Depth changed since the last event: restart the count */
		sdev->last_queue_full_count = 1;
		sdev->last_queue_full_depth = depth;
	} else {
		sdev->last_queue_full_count++;
	}

	/* Require more than 10 events at the same depth before adjusting */
	if (sdev->last_queue_full_count <= 10)
		return 0;

	return scsi_change_queue_depth(sdev, depth);
}
EXPORT_SYMBOL(scsi_track_queue_full);
295 
296 /**
297  * scsi_vpd_inquiry - Request a device provide us with a VPD page
298  * @sdev: The device to ask
299  * @buffer: Where to put the result
300  * @page: Which Vital Product Data to return
301  * @len: The length of the buffer
302  *
303  * This is an internal helper function.  You probably want to use
304  * scsi_get_vpd_page instead.
305  *
306  * Returns size of the vpd page on success or a negative error number.
307  */
308 static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
309 							u8 page, unsigned len)
310 {
311 	int result;
312 	unsigned char cmd[16];
313 
314 	if (len < 4)
315 		return -EINVAL;
316 
317 	cmd[0] = INQUIRY;
318 	cmd[1] = 1;		/* EVPD */
319 	cmd[2] = page;
320 	cmd[3] = len >> 8;
321 	cmd[4] = len & 0xff;
322 	cmd[5] = 0;		/* Control byte */
323 
324 	/*
325 	 * I'm not convinced we need to try quite this hard to get VPD, but
326 	 * all the existing users tried this hard.
327 	 */
328 	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
329 				  len, NULL, 30 * HZ, 3, NULL);
330 	if (result)
331 		return -EIO;
332 
333 	/* Sanity check that we got the page back that we asked for */
334 	if (buffer[1] != page)
335 		return -EIO;
336 
337 	return get_unaligned_be16(&buffer[2]) + 4;
338 }
339 
340 /**
341  * scsi_get_vpd_page - Get Vital Product Data from a SCSI device
342  * @sdev: The device to ask
343  * @page: Which Vital Product Data to return
344  * @buf: where to store the VPD
345  * @buf_len: number of bytes in the VPD buffer area
346  *
347  * SCSI devices may optionally supply Vital Product Data.  Each 'page'
348  * of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
349  * If the device supports this VPD page, this routine returns a pointer
350  * to a buffer containing the data from that page.  The caller is
351  * responsible for calling kfree() on this pointer when it is no longer
352  * needed.  If we cannot retrieve the VPD page this routine returns %NULL.
353  */
354 int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
355 		      int buf_len)
356 {
357 	int i, result;
358 
359 	if (sdev->skip_vpd_pages)
360 		goto fail;
361 
362 	/* Ask for all the pages supported by this device */
363 	result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
364 	if (result < 4)
365 		goto fail;
366 
367 	/* If the user actually wanted this page, we can skip the rest */
368 	if (page == 0)
369 		return 0;
370 
371 	for (i = 4; i < min(result, buf_len); i++)
372 		if (buf[i] == page)
373 			goto found;
374 
375 	if (i < result && i >= buf_len)
376 		/* ran off the end of the buffer, give us benefit of doubt */
377 		goto found;
378 	/* The device claims it doesn't support the requested page */
379 	goto fail;
380 
381  found:
382 	result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
383 	if (result < 0)
384 		goto fail;
385 
386 	return 0;
387 
388  fail:
389 	return -EINVAL;
390 }
391 EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
392 
393 /**
394  * scsi_get_vpd_buf - Get Vital Product Data from a SCSI device
395  * @sdev: The device to ask
396  * @page: Which Vital Product Data to return
397  *
398  * Returns %NULL upon failure.
399  */
400 static struct scsi_vpd *scsi_get_vpd_buf(struct scsi_device *sdev, u8 page)
401 {
402 	struct scsi_vpd *vpd_buf;
403 	int vpd_len = SCSI_VPD_PG_LEN, result;
404 
405 retry_pg:
406 	vpd_buf = kmalloc(sizeof(*vpd_buf) + vpd_len, GFP_KERNEL);
407 	if (!vpd_buf)
408 		return NULL;
409 
410 	result = scsi_vpd_inquiry(sdev, vpd_buf->data, page, vpd_len);
411 	if (result < 0) {
412 		kfree(vpd_buf);
413 		return NULL;
414 	}
415 	if (result > vpd_len) {
416 		vpd_len = result;
417 		kfree(vpd_buf);
418 		goto retry_pg;
419 	}
420 
421 	vpd_buf->len = result;
422 
423 	return vpd_buf;
424 }
425 
/*
 * scsi_update_vpd_page - fetch a VPD page and publish it on the device
 * @sdev: device whose cached VPD copy is being refreshed
 * @page: VPD page number to fetch
 * @sdev_vpd_buf: RCU-protected slot holding the cached copy
 *
 * On fetch failure the previously cached buffer is left untouched.  The
 * replaced buffer, if any, is freed only after an RCU grace period so
 * that concurrent readers can finish with it.
 */
static void scsi_update_vpd_page(struct scsi_device *sdev, u8 page,
				 struct scsi_vpd __rcu **sdev_vpd_buf)
{
	struct scsi_vpd *vpd_buf;

	vpd_buf = scsi_get_vpd_buf(sdev, page);
	if (!vpd_buf)
		return;

	mutex_lock(&sdev->inquiry_mutex);
	/* rcu_replace_pointer() hands back the old buffer (may be NULL) */
	vpd_buf = rcu_replace_pointer(*sdev_vpd_buf, vpd_buf,
				      lockdep_is_held(&sdev->inquiry_mutex));
	mutex_unlock(&sdev->inquiry_mutex);

	if (vpd_buf)
		kfree_rcu(vpd_buf, rcu);
}
443 
444 /**
445  * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure
446  * @sdev: The device to ask
447  *
448  * Attach the 'Device Identification' VPD page (0x83) and the
449  * 'Unit Serial Number' VPD page (0x80) to a SCSI device
450  * structure. This information can be used to identify the device
451  * uniquely.
452  */
453 void scsi_attach_vpd(struct scsi_device *sdev)
454 {
455 	int i;
456 	struct scsi_vpd *vpd_buf;
457 
458 	if (!scsi_device_supports_vpd(sdev))
459 		return;
460 
461 	/* Ask for all the pages supported by this device */
462 	vpd_buf = scsi_get_vpd_buf(sdev, 0);
463 	if (!vpd_buf)
464 		return;
465 
466 	for (i = 4; i < vpd_buf->len; i++) {
467 		if (vpd_buf->data[i] == 0x0)
468 			scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0);
469 		if (vpd_buf->data[i] == 0x80)
470 			scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80);
471 		if (vpd_buf->data[i] == 0x83)
472 			scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
473 		if (vpd_buf->data[i] == 0x89)
474 			scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89);
475 	}
476 	kfree(vpd_buf);
477 }
478 
/**
 * scsi_report_opcode - Find out if a given command opcode is supported
 * @sdev:	scsi device to query
 * @buffer:	scratch buffer (must be at least 20 bytes long)
 * @len:	length of buffer
 * @opcode:	opcode for command to look up
 *
 * Uses the REPORT SUPPORTED OPERATION CODES to look up the given
 * opcode. Returns -EINVAL if RSOC fails, 0 if the command opcode is
 * unsupported and 1 if the device claims to support the command.
 */
int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
		       unsigned int len, unsigned char opcode)
{
	unsigned char cmd[16];
	struct scsi_sense_hdr sshdr;
	int result;

	/* RSOC is an SPC-3 feature; skip devices that predate it */
	if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
		return -EINVAL;

	memset(cmd, 0, 16);
	cmd[0] = MAINTENANCE_IN;
	cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
	cmd[2] = 1;		/* One command format */
	cmd[3] = opcode;
	put_unaligned_be32(len, &cmd[6]);
	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  &sshdr, 30 * HZ, 3, NULL);

	/*
	 * ILLEGAL REQUEST with asc 0x20 (invalid command operation code)
	 * or 0x24 (invalid field in CDB) means the device itself does not
	 * implement RSOC, per SPC.
	 */
	if (result && scsi_sense_valid(&sshdr) &&
	    sshdr.sense_key == ILLEGAL_REQUEST &&
	    (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
		return -EINVAL;

	/* SUPPORT field of 011b: supported in conformance with the standard */
	if ((buffer[1] & 3) == 3) /* Command supported */
		return 1;

	return 0;
}
EXPORT_SYMBOL(scsi_report_opcode);
522 
523 /**
524  * scsi_device_get  -  get an additional reference to a scsi_device
525  * @sdev:	device to get a reference to
526  *
527  * Description: Gets a reference to the scsi_device and increments the use count
528  * of the underlying LLDD module.  You must hold host_lock of the
529  * parent Scsi_Host or already have a reference when calling this.
530  *
531  * This will fail if a device is deleted or cancelled, or when the LLD module
532  * is in the process of being unloaded.
533  */
534 int scsi_device_get(struct scsi_device *sdev)
535 {
536 	if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
537 		goto fail;
538 	if (!get_device(&sdev->sdev_gendev))
539 		goto fail;
540 	if (!try_module_get(sdev->host->hostt->module))
541 		goto fail_put_device;
542 	return 0;
543 
544 fail_put_device:
545 	put_device(&sdev->sdev_gendev);
546 fail:
547 	return -ENXIO;
548 }
549 EXPORT_SYMBOL(scsi_device_get);
550 
551 /**
552  * scsi_device_put  -  release a reference to a scsi_device
553  * @sdev:	device to release a reference on.
554  *
555  * Description: Release a reference to the scsi_device and decrements the use
556  * count of the underlying LLDD module.  The device is freed once the last
557  * user vanishes.
558  */
559 void scsi_device_put(struct scsi_device *sdev)
560 {
561 	module_put(sdev->host->hostt->module);
562 	put_device(&sdev->sdev_gendev);
563 }
564 EXPORT_SYMBOL(scsi_device_put);
565 
/*
 * helper for shost_for_each_device, see that for documentation.
 *
 * Walks shost->__devices under host_lock, starting after @prev (or from
 * the head when @prev is NULL), and returns the next device a reference
 * could be taken on; devices being torn down are skipped.  @prev's
 * reference (handed out by the previous call) is dropped before return.
 */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
					   struct scsi_device *prev)
{
	struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
	struct scsi_device *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	while (list->next != &shost->__devices) {
		next = list_entry(list->next, struct scsi_device, siblings);
		/* skip devices that we can't get a reference to */
		if (!scsi_device_get(next))
			break;	/* scsi_device_get() returned 0: got a ref */
		next = NULL;	/* reference failed; keep scanning */
		list = list->next;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* drop the reference the previous iteration handed out */
	if (prev)
		scsi_device_put(prev);
	return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);
590 
591 /**
592  * starget_for_each_device  -  helper to walk all devices of a target
593  * @starget:	target whose devices we want to iterate over.
594  * @data:	Opaque passed to each function call.
595  * @fn:		Function to call on each device
596  *
597  * This traverses over each device of @starget.  The devices have
598  * a reference that must be released by scsi_host_put when breaking
599  * out of the loop.
600  */
601 void starget_for_each_device(struct scsi_target *starget, void *data,
602 		     void (*fn)(struct scsi_device *, void *))
603 {
604 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
605 	struct scsi_device *sdev;
606 
607 	shost_for_each_device(sdev, shost) {
608 		if ((sdev->channel == starget->channel) &&
609 		    (sdev->id == starget->id))
610 			fn(sdev, data);
611 	}
612 }
613 EXPORT_SYMBOL(starget_for_each_device);
614 
615 /**
616  * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED)
617  * @starget:	target whose devices we want to iterate over.
618  * @data:	parameter for callback @fn()
619  * @fn:		callback function that is invoked for each device
620  *
621  * This traverses over each device of @starget.  It does _not_
622  * take a reference on the scsi_device, so the whole loop must be
623  * protected by shost->host_lock.
624  *
625  * Note:  The only reason why drivers would want to use this is because
626  * they need to access the device list in irq context.  Otherwise you
627  * really want to use starget_for_each_device instead.
628  **/
629 void __starget_for_each_device(struct scsi_target *starget, void *data,
630 			       void (*fn)(struct scsi_device *, void *))
631 {
632 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
633 	struct scsi_device *sdev;
634 
635 	__shost_for_each_device(sdev, shost) {
636 		if ((sdev->channel == starget->channel) &&
637 		    (sdev->id == starget->id))
638 			fn(sdev, data);
639 	}
640 }
641 EXPORT_SYMBOL(__starget_for_each_device);
642 
643 /**
644  * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
645  * @starget:	SCSI target pointer
646  * @lun:	SCSI Logical Unit Number
647  *
648  * Description: Looks up the scsi_device with the specified @lun for a given
649  * @starget.  The returned scsi_device does not have an additional
650  * reference.  You must hold the host's host_lock over this call and
651  * any access to the returned scsi_device. A scsi_device in state
652  * SDEV_DEL is skipped.
653  *
654  * Note:  The only reason why drivers should use this is because
655  * they need to access the device list in irq context.  Otherwise you
656  * really want to use scsi_device_lookup_by_target instead.
657  **/
658 struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
659 						   u64 lun)
660 {
661 	struct scsi_device *sdev;
662 
663 	list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
664 		if (sdev->sdev_state == SDEV_DEL)
665 			continue;
666 		if (sdev->lun ==lun)
667 			return sdev;
668 	}
669 
670 	return NULL;
671 }
672 EXPORT_SYMBOL(__scsi_device_lookup_by_target);
673 
674 /**
675  * scsi_device_lookup_by_target - find a device given the target
676  * @starget:	SCSI target pointer
677  * @lun:	SCSI Logical Unit Number
678  *
679  * Description: Looks up the scsi_device with the specified @lun for a given
680  * @starget.  The returned scsi_device has an additional reference that
681  * needs to be released with scsi_device_put once you're done with it.
682  **/
683 struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
684 						 u64 lun)
685 {
686 	struct scsi_device *sdev;
687 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
688 	unsigned long flags;
689 
690 	spin_lock_irqsave(shost->host_lock, flags);
691 	sdev = __scsi_device_lookup_by_target(starget, lun);
692 	if (sdev && scsi_device_get(sdev))
693 		sdev = NULL;
694 	spin_unlock_irqrestore(shost->host_lock, flags);
695 
696 	return sdev;
697 }
698 EXPORT_SYMBOL(scsi_device_lookup_by_target);
699 
700 /**
701  * __scsi_device_lookup - find a device given the host (UNLOCKED)
702  * @shost:	SCSI host pointer
703  * @channel:	SCSI channel (zero if only one channel)
704  * @id:		SCSI target number (physical unit number)
705  * @lun:	SCSI Logical Unit Number
706  *
707  * Description: Looks up the scsi_device with the specified @channel, @id, @lun
708  * for a given host. The returned scsi_device does not have an additional
709  * reference.  You must hold the host's host_lock over this call and any access
710  * to the returned scsi_device.
711  *
712  * Note:  The only reason why drivers would want to use this is because
713  * they need to access the device list in irq context.  Otherwise you
714  * really want to use scsi_device_lookup instead.
715  **/
716 struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
717 		uint channel, uint id, u64 lun)
718 {
719 	struct scsi_device *sdev;
720 
721 	list_for_each_entry(sdev, &shost->__devices, siblings) {
722 		if (sdev->sdev_state == SDEV_DEL)
723 			continue;
724 		if (sdev->channel == channel && sdev->id == id &&
725 				sdev->lun ==lun)
726 			return sdev;
727 	}
728 
729 	return NULL;
730 }
731 EXPORT_SYMBOL(__scsi_device_lookup);
732 
733 /**
734  * scsi_device_lookup - find a device given the host
735  * @shost:	SCSI host pointer
736  * @channel:	SCSI channel (zero if only one channel)
737  * @id:		SCSI target number (physical unit number)
738  * @lun:	SCSI Logical Unit Number
739  *
740  * Description: Looks up the scsi_device with the specified @channel, @id, @lun
741  * for a given host.  The returned scsi_device has an additional reference that
742  * needs to be released with scsi_device_put once you're done with it.
743  **/
744 struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
745 		uint channel, uint id, u64 lun)
746 {
747 	struct scsi_device *sdev;
748 	unsigned long flags;
749 
750 	spin_lock_irqsave(shost->host_lock, flags);
751 	sdev = __scsi_device_lookup(shost, channel, id, lun);
752 	if (sdev && scsi_device_get(sdev))
753 		sdev = NULL;
754 	spin_unlock_irqrestore(shost->host_lock, flags);
755 
756 	return sdev;
757 }
758 EXPORT_SYMBOL(scsi_device_lookup);
759 
760 MODULE_DESCRIPTION("SCSI core");
761 MODULE_LICENSE("GPL");
762 
763 module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
764 MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
765 
/*
 * init_scsi - bring up the SCSI mid-layer subsystems in order.
 *
 * On failure, control jumps to the cleanup label that unwinds everything
 * successfully initialized before the failing step; the labels run in
 * reverse initialization order.
 */
static int __init init_scsi(void)
{
	int error;

	error = scsi_init_procfs();
	if (error)
		goto cleanup_queue;
	error = scsi_init_devinfo();
	if (error)
		goto cleanup_procfs;
	error = scsi_init_hosts();
	if (error)
		goto cleanup_devlist;
	error = scsi_init_sysctl();
	if (error)
		goto cleanup_hosts;
	error = scsi_sysfs_register();
	if (error)
		goto cleanup_sysctl;

	/* scsi_netlink_init() reports its own errors; nothing to check */
	scsi_netlink_init();

	printk(KERN_NOTICE "SCSI subsystem initialized\n");
	return 0;

cleanup_sysctl:
	scsi_exit_sysctl();
cleanup_hosts:
	scsi_exit_hosts();
cleanup_devlist:
	scsi_exit_devinfo();
cleanup_procfs:
	scsi_exit_procfs();
cleanup_queue:
	scsi_exit_queue();
	printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
	       -error);
	return error;
}
805 
/* Module unload: tear the subsystems down in reverse order of init_scsi(). */
static void __exit exit_scsi(void)
{
	scsi_netlink_exit();
	scsi_sysfs_unregister();
	scsi_exit_sysctl();
	scsi_exit_hosts();
	scsi_exit_devinfo();
	scsi_exit_procfs();
	scsi_exit_queue();
}
816 
817 subsys_initcall(init_scsi);
818 module_exit(exit_scsi);
819