xref: /openbmc/linux/drivers/scsi/scsi_lib.c (revision baa7eb025ab14f3cba2e35c0a8648f9c9f01d24f)
1 /*
2  *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
3  *
4  *  SCSI queueing library.
5  *      Initial versions: Eric Youngdale (eric@andante.org).
6  *                        Based upon conversations with large numbers
7  *                        of people at Linux Expo.
8  */
9 
10 #include <linux/bio.h>
11 #include <linux/bitops.h>
12 #include <linux/blkdev.h>
13 #include <linux/completion.h>
14 #include <linux/kernel.h>
15 #include <linux/mempool.h>
16 #include <linux/slab.h>
17 #include <linux/init.h>
18 #include <linux/pci.h>
19 #include <linux/delay.h>
20 #include <linux/hardirq.h>
21 #include <linux/scatterlist.h>
22 
23 #include <scsi/scsi.h>
24 #include <scsi/scsi_cmnd.h>
25 #include <scsi/scsi_dbg.h>
26 #include <scsi/scsi_device.h>
27 #include <scsi/scsi_driver.h>
28 #include <scsi/scsi_eh.h>
29 #include <scsi/scsi_host.h>
30 
31 #include "scsi_priv.h"
32 #include "scsi_logging.h"
33 
34 
35 #define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
36 #define SG_MEMPOOL_SIZE		2
37 
38 struct scsi_host_sg_pool {
39 	size_t		size;
40 	char		*name;
41 	struct kmem_cache	*slab;
42 	mempool_t	*pool;
43 };
44 
45 #define SP(x) { x, "sgpool-" __stringify(x) }
46 #if (SCSI_MAX_SG_SEGMENTS < 32)
47 #error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
48 #endif
49 static struct scsi_host_sg_pool scsi_sg_pools[] = {
50 	SP(8),
51 	SP(16),
52 #if (SCSI_MAX_SG_SEGMENTS > 32)
53 	SP(32),
54 #if (SCSI_MAX_SG_SEGMENTS > 64)
55 	SP(64),
56 #if (SCSI_MAX_SG_SEGMENTS > 128)
57 	SP(128),
58 #if (SCSI_MAX_SG_SEGMENTS > 256)
59 #error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
60 #endif
61 #endif
62 #endif
63 #endif
64 	SP(SCSI_MAX_SG_SEGMENTS)
65 };
66 #undef SP
67 
68 struct kmem_cache *scsi_sdb_cache;
69 
70 static void scsi_run_queue(struct request_queue *q);
71 
72 /*
73  * Function:	scsi_unprep_request()
74  *
75  * Purpose:	Remove all preparation done for a request, including its
76  *		associated scsi_cmnd, so that it can be requeued.
77  *
78  * Arguments:	req	- request to unprepare
79  *
80  * Lock status:	Assumed that no locks are held upon entry.
81  *
82  * Returns:	Nothing.
83  */
84 static void scsi_unprep_request(struct request *req)
85 {
86 	struct scsi_cmnd *cmd = req->special;
87 
88 	blk_unprep_request(req);
89 	req->special = NULL;
90 
91 	scsi_put_command(cmd);
92 }
93 
94 /**
95  * __scsi_queue_insert - private queue insertion
96  * @cmd: The SCSI command being requeued
97  * @reason:  The reason for the requeue
98  * @unbusy: Whether the queue should be unbusied
99  *
100  * This is a private queue insertion.  The public interface
101  * scsi_queue_insert() always assumes the queue should be unbusied
102  * because it's always called before the completion.  This function is
103  * for a requeue after completion, which should only occur in this
104  * file.
105  */
106 static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
107 {
108 	struct Scsi_Host *host = cmd->device->host;
109 	struct scsi_device *device = cmd->device;
110 	struct scsi_target *starget = scsi_target(device);
111 	struct request_queue *q = device->request_queue;
112 	unsigned long flags;
113 
114 	SCSI_LOG_MLQUEUE(1,
115 		 printk("Inserting command %p into mlqueue\n", cmd));
116 
117 	/*
118 	 * Set the appropriate busy bit for the device/host.
119 	 *
120 	 * If the host/device isn't busy, assume that something actually
121 	 * completed, and that we should be able to queue a command now.
122 	 *
123 	 * Note that the prior mid-layer assumption that any host could
124 	 * always queue at least one command is now broken.  The mid-layer
125 	 * will implement a user specifiable stall (see
126 	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
127 	 * if a command is requeued with no other commands outstanding
128 	 * either for the device or for the host.
129 	 */
130 	switch (reason) {
131 	case SCSI_MLQUEUE_HOST_BUSY:
132 		host->host_blocked = host->max_host_blocked;
133 		break;
134 	case SCSI_MLQUEUE_DEVICE_BUSY:
135 		device->device_blocked = device->max_device_blocked;
136 		break;
137 	case SCSI_MLQUEUE_TARGET_BUSY:
138 		starget->target_blocked = starget->max_target_blocked;
139 		break;
140 	}
141 
142 	/*
143 	 * Decrement the counters, since these commands are no longer
144 	 * active on the host/device.
145 	 */
146 	if (unbusy)
147 		scsi_device_unbusy(device);
148 
149 	/*
150 	 * Requeue this command.  It will go before all other commands
151 	 * that are already in the queue.
152 	 *
153 	 * NOTE: there is magic here about the way the queue is plugged if
154 	 * we have no outstanding commands.
155 	 *
156 	 * Although we *don't* plug the queue, we call the request
157 	 * function.  The SCSI request function detects the blocked condition
158 	 * and plugs the queue appropriately.
159 	 */
160 	spin_lock_irqsave(q->queue_lock, flags);
161 	blk_requeue_request(q, cmd->request);
162 	spin_unlock_irqrestore(q->queue_lock, flags);
163 
164 	scsi_run_queue(q);
165 
166 	return 0;
167 }
168 
169 /*
170  * Function:    scsi_queue_insert()
171  *
172  * Purpose:     Insert a command in the midlevel queue.
173  *
174  * Arguments:   cmd    - command that we are adding to queue.
175  *              reason - why we are inserting command to queue.
176  *
177  * Lock status: Assumed that lock is not held upon entry.
178  *
179  * Returns:     Nothing.
180  *
181  * Notes:       We do this for one of two cases.  Either the host is busy
182  *              and it cannot accept any more commands for the time being,
183  *              or the device returned QUEUE_FULL and can accept no more
184  *              commands.
185  * Notes:       This could be called either from an interrupt context or a
186  *              normal process context.
187  */
188 int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
189 {
190 	return __scsi_queue_insert(cmd, reason, 1);
191 }
192 /**
193  * scsi_execute - insert request and wait for the result
194  * @sdev:	scsi device
195  * @cmd:	scsi command
196  * @data_direction: data direction
197  * @buffer:	data buffer
198  * @bufflen:	len of buffer
199  * @sense:	optional sense buffer
200  * @timeout:	request timeout in seconds
201  * @retries:	number of times to retry request
202  * @flags:	flags to OR into the request flags
203  * @resid:	optional residual length
204  *
205  * Returns the req->errors value, which is the scsi_cmnd result
206  * field.
207  */
208 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
209 		 int data_direction, void *buffer, unsigned bufflen,
210 		 unsigned char *sense, int timeout, int retries, int flags,
211 		 int *resid)
212 {
213 	struct request *req;
214 	int write = (data_direction == DMA_TO_DEVICE);
215 	int ret = DRIVER_ERROR << 24;
216 
217 	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
218 
219 	if (bufflen &&	blk_rq_map_kern(sdev->request_queue, req,
220 					buffer, bufflen, __GFP_WAIT))
221 		goto out;
222 
223 	req->cmd_len = COMMAND_SIZE(cmd[0]);
224 	memcpy(req->cmd, cmd, req->cmd_len);
225 	req->sense = sense;
226 	req->sense_len = 0;
227 	req->retries = retries;
228 	req->timeout = timeout;
229 	req->cmd_type = REQ_TYPE_BLOCK_PC;
230 	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
231 
232 	/*
233 	 * head injection *required* here otherwise quiesce won't work
234 	 */
235 	blk_execute_rq(req->q, NULL, req, 1);
236 
237 	/*
238 	 * Some devices (USB mass-storage in particular) may transfer
239 	 * garbage data together with a residue indicating that the data
240 	 * is invalid.  Prevent the garbage from being misinterpreted
241 	 * and prevent security leaks by zeroing out the excess data.
242 	 */
243 	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
244 		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
245 
246 	if (resid)
247 		*resid = req->resid_len;
248 	ret = req->errors;
249  out:
250 	blk_put_request(req);
251 
252 	return ret;
253 }
254 EXPORT_SYMBOL(scsi_execute);
255 
256 
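/*
 * scsi_execute_req - wrapper around scsi_execute() that allocates a
 * temporary sense buffer when the caller supplies @sshdr, decodes the
 * raw sense data into it after the command finishes, and returns the
 * scsi_execute() result.
 *
 * Typical usage (illustrative sketch only, not taken from a specific
 * caller):
 *
 *	struct scsi_sense_hdr sshdr;
 *	unsigned char cmd[6] = { TEST_UNIT_READY, };
 *	int res = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
 *				   &sshdr, 30 * HZ, 3, NULL);
 */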
257 int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
258 		     int data_direction, void *buffer, unsigned bufflen,
259 		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
260 		     int *resid)
261 {
262 	char *sense = NULL;
263 	int result;
264 
265 	if (sshdr) {
266 		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
267 		if (!sense)
268 			return DRIVER_ERROR << 24;
269 	}
270 	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
271 			      sense, timeout, retries, 0, resid);
272 	if (sshdr)
273 		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
274 
275 	kfree(sense);
276 	return result;
277 }
278 EXPORT_SYMBOL(scsi_execute_req);
279 
280 /*
281  * Function:    scsi_init_cmd_errh()
282  *
283  * Purpose:     Initialize cmd fields related to error handling.
284  *
285  * Arguments:   cmd	- command that is ready to be queued.
286  *
287  * Notes:       This function has the job of initializing a number of
288  *              fields related to error handling.   Typically this will
289  *              be called once for each command, as required.
290  */
291 static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
292 {
293 	cmd->serial_number = 0;
294 	scsi_set_resid(cmd, 0);
295 	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
296 	if (cmd->cmd_len == 0)
297 		cmd->cmd_len = scsi_command_size(cmd->cmnd);
298 }
299 
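/*
 * scsi_device_unbusy - a command has completed, so drop the per-host,
 * per-target and per-device busy counts taken when it was dispatched,
 * and wake the error handler if the host is in recovery and there are
 * failed or EH-scheduled commands waiting for it.
 */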
300 void scsi_device_unbusy(struct scsi_device *sdev)
301 {
302 	struct Scsi_Host *shost = sdev->host;
303 	struct scsi_target *starget = scsi_target(sdev);
304 	unsigned long flags;
305 
306 	spin_lock_irqsave(shost->host_lock, flags);
307 	shost->host_busy--;
308 	starget->target_busy--;
309 	if (unlikely(scsi_host_in_recovery(shost) &&
310 		     (shost->host_failed || shost->host_eh_scheduled)))
311 		scsi_eh_wakeup(shost);
312 	spin_unlock(shost->host_lock);
313 	spin_lock(sdev->request_queue->queue_lock);
314 	sdev->device_busy--;
315 	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
316 }
317 
318 /*
319  * Called for single_lun devices on IO completion. Clear starget_sdev_user,
320  * and call blk_run_queue for all the scsi_devices on the target -
321  * including current_sdev first.
322  *
323  * Called with *no* scsi locks held.
324  */
325 static void scsi_single_lun_run(struct scsi_device *current_sdev)
326 {
327 	struct Scsi_Host *shost = current_sdev->host;
328 	struct scsi_device *sdev, *tmp;
329 	struct scsi_target *starget = scsi_target(current_sdev);
330 	unsigned long flags;
331 
332 	spin_lock_irqsave(shost->host_lock, flags);
333 	starget->starget_sdev_user = NULL;
334 	spin_unlock_irqrestore(shost->host_lock, flags);
335 
336 	/*
337 	 * Call blk_run_queue for all LUNs on the target, starting with
338 	 * current_sdev. We race with others (to set starget_sdev_user),
339 	 * but in most cases, we will be first. Ideally, each LU on the
340 	 * target would get some limited time or number of requests on the target.
341 	 */
342 	blk_run_queue(current_sdev->request_queue);
343 
344 	spin_lock_irqsave(shost->host_lock, flags);
345 	if (starget->starget_sdev_user)
346 		goto out;
347 	list_for_each_entry_safe(sdev, tmp, &starget->devices,
348 			same_target_siblings) {
349 		if (sdev == current_sdev)
350 			continue;
351 		if (scsi_device_get(sdev))
352 			continue;
353 
354 		spin_unlock_irqrestore(shost->host_lock, flags);
355 		blk_run_queue(sdev->request_queue);
356 		spin_lock_irqsave(shost->host_lock, flags);
357 
358 		scsi_device_put(sdev);
359 	}
360  out:
361 	spin_unlock_irqrestore(shost->host_lock, flags);
362 }
363 
364 static inline int scsi_device_is_busy(struct scsi_device *sdev)
365 {
366 	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
367 		return 1;
368 
369 	return 0;
370 }
371 
372 static inline int scsi_target_is_busy(struct scsi_target *starget)
373 {
374 	return ((starget->can_queue > 0 &&
375 		 starget->target_busy >= starget->can_queue) ||
376 		 starget->target_blocked);
377 }
378 
379 static inline int scsi_host_is_busy(struct Scsi_Host *shost)
380 {
381 	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
382 	    shost->host_blocked || shost->host_self_blocked)
383 		return 1;
384 
385 	return 0;
386 }
387 
388 /*
389  * Function:	scsi_run_queue()
390  *
391  * Purpose:	Select a proper request queue to serve next
392  *
393  * Arguments:	q	- last request's queue
394  *
395  * Returns:     Nothing
396  *
397  * Notes:	The previous command was completely finished, start
398  *		a new one if possible.
399  */
400 static void scsi_run_queue(struct request_queue *q)
401 {
402 	struct scsi_device *sdev = q->queuedata;
403 	struct Scsi_Host *shost = sdev->host;
404 	LIST_HEAD(starved_list);
405 	unsigned long flags;
406 
407 	if (scsi_target(sdev)->single_lun)
408 		scsi_single_lun_run(sdev);
409 
410 	spin_lock_irqsave(shost->host_lock, flags);
411 	list_splice_init(&shost->starved_list, &starved_list);
412 
413 	while (!list_empty(&starved_list)) {
414 		int flagset;
415 
416 		/*
417 		 * As long as shost is accepting commands and we have
418 		 * starved queues, call blk_run_queue. scsi_request_fn
419 		 * drops the queue_lock and can add us back to the
420 		 * starved_list.
421 		 *
422 		 * host_lock protects the starved_list and starved_entry.
423 		 * scsi_request_fn must get the host_lock before checking
424 		 * or modifying starved_list or starved_entry.
425 		 */
426 		if (scsi_host_is_busy(shost))
427 			break;
428 
429 		sdev = list_entry(starved_list.next,
430 				  struct scsi_device, starved_entry);
431 		list_del_init(&sdev->starved_entry);
432 		if (scsi_target_is_busy(scsi_target(sdev))) {
433 			list_move_tail(&sdev->starved_entry,
434 				       &shost->starved_list);
435 			continue;
436 		}
437 
438 		spin_unlock(shost->host_lock);
439 
440 		spin_lock(sdev->request_queue->queue_lock);
441 		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
442 				!test_bit(QUEUE_FLAG_REENTER,
443 					&sdev->request_queue->queue_flags);
444 		if (flagset)
445 			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
446 		__blk_run_queue(sdev->request_queue);
447 		if (flagset)
448 			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
449 		spin_unlock(sdev->request_queue->queue_lock);
450 
451 		spin_lock(shost->host_lock);
452 	}
453 	/* put any unprocessed entries back */
454 	list_splice(&starved_list, &shost->starved_list);
455 	spin_unlock_irqrestore(shost->host_lock, flags);
456 
457 	blk_run_queue(q);
458 }
459 
460 /*
461  * Function:	scsi_requeue_command()
462  *
463  * Purpose:	Handle post-processing of completed commands.
464  *
465  * Arguments:	q	- queue to operate on
466  *		cmd	- command that may need to be requeued.
467  *
468  * Returns:	Nothing
469  *
470  * Notes:	After command completion, there may be blocks left
471  *		over which weren't finished by the previous command;
472  *		this can be for a number of reasons - the main one is
473  *		I/O errors in the middle of the request, in which case
474  *		we need to request the blocks that come after the bad
475  *		sector.
476  * Notes:	Upon return, cmd is a stale pointer.
477  */
478 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
479 {
480 	struct request *req = cmd->request;
481 	unsigned long flags;
482 
483 	spin_lock_irqsave(q->queue_lock, flags);
484 	scsi_unprep_request(req);
485 	blk_requeue_request(q, req);
486 	spin_unlock_irqrestore(q->queue_lock, flags);
487 
488 	scsi_run_queue(q);
489 }
490 
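/*
 * scsi_next_command - release a completed command and run the device's
 * request queue so the next request can be dispatched.  A reference on
 * the scsi_device is held across the call, since dropping the command
 * may otherwise release the last reference to the device.
 */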
491 void scsi_next_command(struct scsi_cmnd *cmd)
492 {
493 	struct scsi_device *sdev = cmd->device;
494 	struct request_queue *q = sdev->request_queue;
495 
496 	/* need to hold a reference on the device before we let go of the cmd */
497 	get_device(&sdev->sdev_gendev);
498 
499 	scsi_put_command(cmd);
500 	scsi_run_queue(q);
501 
502 	/* ok to remove device now */
503 	put_device(&sdev->sdev_gendev);
504 }
505 
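/* Run the request queue of every scsi_device attached to @shost. */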
506 void scsi_run_host_queues(struct Scsi_Host *shost)
507 {
508 	struct scsi_device *sdev;
509 
510 	shost_for_each_device(sdev, shost)
511 		scsi_run_queue(sdev->request_queue);
512 }
513 
514 static void __scsi_release_buffers(struct scsi_cmnd *, int);
515 
516 /*
517  * Function:    scsi_end_request()
518  *
519  * Purpose:     Post-processing of completed commands (usually invoked at end
520  *		of upper level post-processing and scsi_io_completion).
521  *
522  * Arguments:   cmd	 - command that is complete.
523  *              error    - 0 if I/O indicates success, < 0 for I/O error.
524  *              bytes    - number of bytes of completed I/O
525  *		requeue  - indicates whether we should requeue leftovers.
526  *
527  * Lock status: Assumed that lock is not held upon entry.
528  *
529  * Returns:     cmd if requeue required, NULL otherwise.
530  *
531  * Notes:       This is called for block device requests in order to
532  *              mark some number of sectors as complete.
533  *
534  *		We are guaranteeing that the request queue will be goosed
535  *		at some point during this call.
536  * Notes:	If cmd was requeued, upon return it will be a stale pointer.
537  */
538 static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
539 					  int bytes, int requeue)
540 {
541 	struct request_queue *q = cmd->device->request_queue;
542 	struct request *req = cmd->request;
543 
544 	/*
545 	 * If there are blocks left over at the end, set up the command
546 	 * to queue the remainder of them.
547 	 */
548 	if (blk_end_request(req, error, bytes)) {
549 		/* kill remainder if no retries */
550 		if (error && scsi_noretry_cmd(cmd))
551 			blk_end_request_all(req, error);
552 		else {
553 			if (requeue) {
554 				/*
555 				 * Bleah.  Leftovers again.  Stick the
556 				 * leftovers in the front of the
557 				 * queue, and goose the queue again.
558 				 */
559 				scsi_release_buffers(cmd);
560 				scsi_requeue_command(q, cmd);
561 				cmd = NULL;
562 			}
563 			return cmd;
564 		}
565 	}
566 
567 	/*
568 	 * This will goose the queue request function at the end, so we don't
569 	 * need to worry about launching another command.
570 	 */
571 	__scsi_release_buffers(cmd, 0);
572 	scsi_next_command(cmd);
573 	return NULL;
574 }
575 
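/*
 * scsi_sgtable_index - map a segment count onto the index of the
 * smallest scsi_sg_pools[] entry able to hold it (8, 16, 32, ... up
 * to SCSI_MAX_SG_SEGMENTS).
 */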
576 static inline unsigned int scsi_sgtable_index(unsigned short nents)
577 {
578 	unsigned int index;
579 
580 	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
581 
582 	if (nents <= 8)
583 		index = 0;
584 	else
585 		index = get_count_order(nents) - 3;
586 
587 	return index;
588 }
589 
590 static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
591 {
592 	struct scsi_host_sg_pool *sgp;
593 
594 	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
595 	mempool_free(sgl, sgp->pool);
596 }
597 
598 static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
599 {
600 	struct scsi_host_sg_pool *sgp;
601 
602 	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
603 	return mempool_alloc(sgp->pool, gfp_mask);
604 }
605 
606 static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
607 			      gfp_t gfp_mask)
608 {
609 	int ret;
610 
611 	BUG_ON(!nents);
612 
613 	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
614 			       gfp_mask, scsi_sg_alloc);
615 	if (unlikely(ret))
616 		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
617 				scsi_sg_free);
618 
619 	return ret;
620 }
621 
622 static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
623 {
624 	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
625 }
626 
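/*
 * __scsi_release_buffers - free the command's data scatterlist (and,
 * when @do_bidi_check is set, the bidirectional scatterlist hanging off
 * the paired request), clear the data buffer descriptor, and free the
 * protection scatterlist if one was allocated.
 */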
627 static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
628 {
629 
630 	if (cmd->sdb.table.nents)
631 		scsi_free_sgtable(&cmd->sdb);
632 
633 	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
634 
635 	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
636 		struct scsi_data_buffer *bidi_sdb =
637 			cmd->request->next_rq->special;
638 		scsi_free_sgtable(bidi_sdb);
639 		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
640 		cmd->request->next_rq->special = NULL;
641 	}
642 
643 	if (scsi_prot_sg_count(cmd))
644 		scsi_free_sgtable(cmd->prot_sdb);
645 }
646 
647 /*
648  * Function:    scsi_release_buffers()
649  *
650  * Purpose:     Completion processing for block device I/O requests.
651  *
652  * Arguments:   cmd	- command that we are bailing.
653  *
654  * Lock status: Assumed that no lock is held upon entry.
655  *
656  * Returns:     Nothing
657  *
658  * Notes:       In the event that an upper level driver rejects a
659  *		command, we must release resources allocated during
660  *		the __init_io() function.  Primarily this would involve
661  *		the scatter-gather table, and potentially any bounce
662  *		buffers.
663  */
664 void scsi_release_buffers(struct scsi_cmnd *cmd)
665 {
666 	__scsi_release_buffers(cmd, 1);
667 }
668 EXPORT_SYMBOL(scsi_release_buffers);
669 
670 /*
671  * Function:    scsi_io_completion()
672  *
673  * Purpose:     Completion processing for block device I/O requests.
674  *
675  * Arguments:   cmd   - command that is finished.
676  *
677  * Lock status: Assumed that no lock is held upon entry.
678  *
679  * Returns:     Nothing
680  *
681  * Notes:       This function is matched in terms of capabilities to
682  *              the function that created the scatter-gather list.
683  *              In other words, if there are no bounce buffers
684  *              (the normal case for most drivers), we don't need
685  *              the logic to deal with cleaning up afterwards.
686  *
687  *		We must call scsi_end_request().  This will finish off
688  *		the specified number of sectors.  If we are done, the
689  *		command block will be released and the queue function
690  *		will be goosed.  If we are not done then we have to
691  *		figure out what to do next:
692  *
693  *		a) We can call scsi_requeue_command().  The request
694  *		   will be unprepared and put back on the queue.  Then
695  *		   a new command will be created for it.  This should
696  *		   be used if we made forward progress, or if we want
697  *		   to switch from READ(10) to READ(6) for example.
698  *
699  *		b) We can call scsi_queue_insert().  The request will
700  *		   be put back on the queue and retried using the same
701  *		   command as before, possibly after a delay.
702  *
703  *		c) We can call blk_end_request() with -EIO to fail
704  *		   the remainder of the request.
705  */
706 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
707 {
708 	int result = cmd->result;
709 	struct request_queue *q = cmd->device->request_queue;
710 	struct request *req = cmd->request;
711 	int error = 0;
712 	struct scsi_sense_hdr sshdr;
713 	int sense_valid = 0;
714 	int sense_deferred = 0;
715 	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
716 	      ACTION_DELAYED_RETRY} action;
717 	char *description = NULL;
718 
719 	if (result) {
720 		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
721 		if (sense_valid)
722 			sense_deferred = scsi_sense_is_deferred(&sshdr);
723 	}
724 
725 	if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
726 		req->errors = result;
727 		if (result) {
728 			if (sense_valid && req->sense) {
729 				/*
730 				 * SG_IO wants current and deferred errors
731 				 */
732 				int len = 8 + cmd->sense_buffer[7];
733 
734 				if (len > SCSI_SENSE_BUFFERSIZE)
735 					len = SCSI_SENSE_BUFFERSIZE;
736 				memcpy(req->sense, cmd->sense_buffer,  len);
737 				req->sense_len = len;
738 			}
739 			if (!sense_deferred)
740 				error = -EIO;
741 		}
742 
743 		req->resid_len = scsi_get_resid(cmd);
744 
745 		if (scsi_bidi_cmnd(cmd)) {
746 			/*
747 			 * Bidi commands must be completed as a whole,
748 			 * both sides at once.
749 			 */
750 			req->next_rq->resid_len = scsi_in(cmd)->resid;
751 
752 			scsi_release_buffers(cmd);
753 			blk_end_request_all(req, 0);
754 
755 			scsi_next_command(cmd);
756 			return;
757 		}
758 	}
759 
760 	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
761 	BUG_ON(blk_bidi_rq(req));
762 
763 	/*
764 	 * Next deal with any sectors which we were able to correctly
765 	 * handle.
766 	 */
767 	SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
768 				      "%d bytes done.\n",
769 				      blk_rq_sectors(req), good_bytes));
770 
771 	/*
772 	 * Recovered errors need reporting, but they're always treated
773 	 * as success, so fiddle the result code here.  For BLOCK_PC
774 	 * we already took a copy of the original into rq->errors which
775  * is what gets returned to the user.
776 	 */
777 	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
778 		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
779 		 * print since caller wants ATA registers. Only occurs on
780 		 * SCSI ATA PASS_THROUGH commands when CK_COND=1
781 		 */
782 		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
783 			;
784 		else if (!(req->cmd_flags & REQ_QUIET))
785 			scsi_print_sense("", cmd);
786 		result = 0;
787 		/* BLOCK_PC may have set error */
788 		error = 0;
789 	}
790 
791 	/*
792 	 * A number of bytes were successfully read.  If there
793 	 * are leftovers and there is some kind of error
794 	 * (result != 0), retry the rest.
795 	 */
796 	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
797 		return;
798 
799 	error = -EIO;
800 
801 	if (host_byte(result) == DID_RESET) {
802 		/* Third party bus reset or reset for error recovery
803 		 * reasons.  Just retry the command and see what
804 		 * happens.
805 		 */
806 		action = ACTION_RETRY;
807 	} else if (sense_valid && !sense_deferred) {
808 		switch (sshdr.sense_key) {
809 		case UNIT_ATTENTION:
810 			if (cmd->device->removable) {
811 				/* Detected disc change.  Set a bit
812 				 * and quietly refuse further access.
813 				 */
814 				cmd->device->changed = 1;
815 				description = "Media Changed";
816 				action = ACTION_FAIL;
817 			} else {
818 				/* Must have been a power glitch, or a
819 				 * bus reset.  Could not have been a
820 				 * media change, so we just retry the
821 				 * command and see what happens.
822 				 */
823 				action = ACTION_RETRY;
824 			}
825 			break;
826 		case ILLEGAL_REQUEST:
827 			/* If we had an ILLEGAL REQUEST returned, then
828 			 * we may have performed an unsupported
829 			 * command.  The only thing this should be
830 			 * would be a ten byte read where only a six
831 			 * byte read was supported.  Also, on a system
832 			 * where READ CAPACITY failed, we may have
833 			 * read past the end of the disk.
834 			 */
835 			if ((cmd->device->use_10_for_rw &&
836 			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
837 			    (cmd->cmnd[0] == READ_10 ||
838 			     cmd->cmnd[0] == WRITE_10)) {
839 				/* This will issue a new 6-byte command. */
840 				cmd->device->use_10_for_rw = 0;
841 				action = ACTION_REPREP;
842 			} else if (sshdr.asc == 0x10) /* DIX */ {
843 				description = "Host Data Integrity Failure";
844 				action = ACTION_FAIL;
845 				error = -EILSEQ;
846 			} else
847 				action = ACTION_FAIL;
848 			break;
849 		case ABORTED_COMMAND:
850 			action = ACTION_FAIL;
851 			if (sshdr.asc == 0x10) { /* DIF */
852 				description = "Target Data Integrity Failure";
853 				error = -EILSEQ;
854 			}
855 			break;
856 		case NOT_READY:
857 			/* If the device is in the process of becoming
858 			 * ready, or has a temporary blockage, retry.
859 			 */
860 			if (sshdr.asc == 0x04) {
861 				switch (sshdr.ascq) {
862 				case 0x01: /* becoming ready */
863 				case 0x04: /* format in progress */
864 				case 0x05: /* rebuild in progress */
865 				case 0x06: /* recalculation in progress */
866 				case 0x07: /* operation in progress */
867 				case 0x08: /* Long write in progress */
868 				case 0x09: /* self test in progress */
869 				case 0x14: /* space allocation in progress */
870 					action = ACTION_DELAYED_RETRY;
871 					break;
872 				default:
873 					description = "Device not ready";
874 					action = ACTION_FAIL;
875 					break;
876 				}
877 			} else {
878 				description = "Device not ready";
879 				action = ACTION_FAIL;
880 			}
881 			break;
882 		case VOLUME_OVERFLOW:
883 			/* See SSC3rXX or current. */
884 			action = ACTION_FAIL;
885 			break;
886 		default:
887 			description = "Unhandled sense code";
888 			action = ACTION_FAIL;
889 			break;
890 		}
891 	} else {
892 		description = "Unhandled error code";
893 		action = ACTION_FAIL;
894 	}
895 
896 	switch (action) {
897 	case ACTION_FAIL:
898 		/* Give up and fail the remainder of the request */
899 		scsi_release_buffers(cmd);
900 		if (!(req->cmd_flags & REQ_QUIET)) {
901 			if (description)
902 				scmd_printk(KERN_INFO, cmd, "%s\n",
903 					    description);
904 			scsi_print_result(cmd);
905 			if (driver_byte(result) & DRIVER_SENSE)
906 				scsi_print_sense("", cmd);
907 			scsi_print_command(cmd);
908 		}
909 		if (blk_end_request_err(req, error))
910 			scsi_requeue_command(q, cmd);
911 		else
912 			scsi_next_command(cmd);
913 		break;
914 	case ACTION_REPREP:
915 		/* Unprep the request and put it back at the head of the queue.
916 		 * A new command will be prepared and issued.
917 		 */
918 		scsi_release_buffers(cmd);
919 		scsi_requeue_command(q, cmd);
920 		break;
921 	case ACTION_RETRY:
922 		/* Retry the same command immediately */
923 		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
924 		break;
925 	case ACTION_DELAYED_RETRY:
926 		/* Retry the same command after a delay */
927 		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
928 		break;
929 	}
930 }
931 
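/*
 * scsi_init_sgtable - allocate a scatterlist table large enough for the
 * request's physical segments, map the request into it and record the
 * transfer length.  Returns BLKPREP_DEFER if the allocation fails so
 * that the request can be retried later.
 */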
932 static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
933 			     gfp_t gfp_mask)
934 {
935 	int count;
936 
937 	/*
938 	 * If sg table allocation fails, requeue request later.
939 	 */
940 	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
941 					gfp_mask))) {
942 		return BLKPREP_DEFER;
943 	}
944 
945 	req->buffer = NULL;
946 
947 	/*
948 	 * Next, walk the list, and fill in the addresses and sizes of
949 	 * each segment.
950 	 */
951 	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
952 	BUG_ON(count > sdb->table.nents);
953 	sdb->table.nents = count;
954 	sdb->length = blk_rq_bytes(req);
955 	return BLKPREP_OK;
956 }
957 
958 /*
959  * Function:    scsi_init_io()
960  *
961  * Purpose:     SCSI I/O initialize function.
962  *
963  * Arguments:   cmd   - Command descriptor we wish to initialize
964  *
965  * Returns:     0 on success
966  *		BLKPREP_DEFER if the failure is retryable
967  *		BLKPREP_KILL if the failure is fatal
968  */
969 int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
970 {
971 	struct request *rq = cmd->request;
972 
973 	int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
974 	if (error)
975 		goto err_exit;
976 
977 	if (blk_bidi_rq(rq)) {
978 		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
979 			scsi_sdb_cache, GFP_ATOMIC);
980 		if (!bidi_sdb) {
981 			error = BLKPREP_DEFER;
982 			goto err_exit;
983 		}
984 
985 		rq->next_rq->special = bidi_sdb;
986 		error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
987 		if (error)
988 			goto err_exit;
989 	}
990 
991 	if (blk_integrity_rq(rq)) {
992 		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
993 		int ivecs, count;
994 
995 		BUG_ON(prot_sdb == NULL);
996 		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
997 
998 		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
999 			error = BLKPREP_DEFER;
1000 			goto err_exit;
1001 		}
1002 
1003 		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
1004 						prot_sdb->table.sgl);
1005 		BUG_ON(unlikely(count > ivecs));
1006 		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));
1007 
1008 		cmd->prot_sdb = prot_sdb;
1009 		cmd->prot_sdb->table.nents = count;
1010 	}
1011 
1012 	return BLKPREP_OK;
1013 
1014 err_exit:
1015 	scsi_release_buffers(cmd);
1016 	cmd->request->special = NULL;
1017 	scsi_put_command(cmd);
1018 	return error;
1019 }
1020 EXPORT_SYMBOL(scsi_init_io);
1021 
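/*
 * scsi_get_cmd_from_req - return the scsi_cmnd already attached to @req,
 * or allocate a new one and stash it in req->special.  Either way the
 * command's tag, request pointer and CDB pointer are refreshed from the
 * request.
 */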
1022 static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
1023 		struct request *req)
1024 {
1025 	struct scsi_cmnd *cmd;
1026 
1027 	if (!req->special) {
1028 		cmd = scsi_get_command(sdev, GFP_ATOMIC);
1029 		if (unlikely(!cmd))
1030 			return NULL;
1031 		req->special = cmd;
1032 	} else {
1033 		cmd = req->special;
1034 	}
1035 
1036 	/* pull a tag out of the request if we have one */
1037 	cmd->tag = req->tag;
1038 	cmd->request = req;
1039 
1040 	cmd->cmnd = req->cmd;
1041 
1042 	return cmd;
1043 }
1044 
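/*
 * scsi_setup_blk_pc_cmnd - prepare a REQ_TYPE_BLOCK_PC (SG_IO style)
 * request: attach a scsi_cmnd, map any attached bio into a scatterlist,
 * and copy the CDB length, transfer size, retry count and data
 * direction over from the request.
 */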
1045 int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1046 {
1047 	struct scsi_cmnd *cmd;
1048 	int ret = scsi_prep_state_check(sdev, req);
1049 
1050 	if (ret != BLKPREP_OK)
1051 		return ret;
1052 
1053 	cmd = scsi_get_cmd_from_req(sdev, req);
1054 	if (unlikely(!cmd))
1055 		return BLKPREP_DEFER;
1056 
1057 	/*
1058 	 * BLOCK_PC requests may transfer data, in which case they must
1059 	 * have a bio attached to them.  Or they might contain a SCSI command
1060 	 * that does not transfer data, in which case they may optionally
1061 	 * submit a request without an attached bio.
1062 	 */
1063 	if (req->bio) {
1064 		int ret;
1065 
1066 		BUG_ON(!req->nr_phys_segments);
1067 
1068 		ret = scsi_init_io(cmd, GFP_ATOMIC);
1069 		if (unlikely(ret))
1070 			return ret;
1071 	} else {
1072 		BUG_ON(blk_rq_bytes(req));
1073 
1074 		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1075 		req->buffer = NULL;
1076 	}
1077 
1078 	cmd->cmd_len = req->cmd_len;
1079 	if (!blk_rq_bytes(req))
1080 		cmd->sc_data_direction = DMA_NONE;
1081 	else if (rq_data_dir(req) == WRITE)
1082 		cmd->sc_data_direction = DMA_TO_DEVICE;
1083 	else
1084 		cmd->sc_data_direction = DMA_FROM_DEVICE;
1085 
1086 	cmd->transfersize = blk_rq_bytes(req);
1087 	cmd->allowed = req->retries;
1088 	return BLKPREP_OK;
1089 }
1090 EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
1091 
1092 /*
1093  * Set up a REQ_TYPE_FS command.  These are simple read/write requests
1094  * from filesystems that still need to be translated into SCSI CDBs by
1095  * the ULD.
1096  */
1097 int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1098 {
1099 	struct scsi_cmnd *cmd;
1100 	int ret = scsi_prep_state_check(sdev, req);
1101 
1102 	if (ret != BLKPREP_OK)
1103 		return ret;
1104 
1105 	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
1106 			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
1107 		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
1108 		if (ret != BLKPREP_OK)
1109 			return ret;
1110 	}
1111 
1112 	/*
1113 	 * Filesystem requests must transfer data.
1114 	 */
1115 	BUG_ON(!req->nr_phys_segments);
1116 
1117 	cmd = scsi_get_cmd_from_req(sdev, req);
1118 	if (unlikely(!cmd))
1119 		return BLKPREP_DEFER;
1120 
1121 	memset(cmd->cmnd, 0, BLK_MAX_CDB);
1122 	return scsi_init_io(cmd, GFP_ATOMIC);
1123 }
1124 EXPORT_SYMBOL(scsi_setup_fs_cmnd);
1125 
1126 int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1127 {
1128 	int ret = BLKPREP_OK;
1129 
1130 	/*
1131 	 * If the device is not in running state we will reject some
1132 	 * or all commands.
1133 	 */
1134 	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1135 		switch (sdev->sdev_state) {
1136 		case SDEV_OFFLINE:
1137 			/*
1138 			 * If the device is offline we refuse to process any
1139 			 * commands.  The device must be brought online
1140 			 * before trying any recovery commands.
1141 			 */
1142 			sdev_printk(KERN_ERR, sdev,
1143 				    "rejecting I/O to offline device\n");
1144 			ret = BLKPREP_KILL;
1145 			break;
1146 		case SDEV_DEL:
1147 			/*
1148 			 * If the device is fully deleted, we refuse to
1149 			 * process any commands as well.
1150 			 */
1151 			sdev_printk(KERN_ERR, sdev,
1152 				    "rejecting I/O to dead device\n");
1153 			ret = BLKPREP_KILL;
1154 			break;
1155 		case SDEV_QUIESCE:
1156 		case SDEV_BLOCK:
1157 		case SDEV_CREATED_BLOCK:
1158 			/*
1159 			 * If the device is blocked we defer normal commands.
1160 			 */
1161 			if (!(req->cmd_flags & REQ_PREEMPT))
1162 				ret = BLKPREP_DEFER;
1163 			break;
1164 		default:
1165 			/*
1166 			 * For any other not fully online state we only allow
1167 			 * special commands.  In particular any user initiated
1168 			 * command is not allowed.
1169 			 */
1170 			if (!(req->cmd_flags & REQ_PREEMPT))
1171 				ret = BLKPREP_KILL;
1172 			break;
1173 		}
1174 	}
1175 	return ret;
1176 }
1177 EXPORT_SYMBOL(scsi_prep_state_check);
1178 
1179 int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1180 {
1181 	struct scsi_device *sdev = q->queuedata;
1182 
1183 	switch (ret) {
1184 	case BLKPREP_KILL:
1185 		req->errors = DID_NO_CONNECT << 16;
1186 		/* release the command and kill it */
1187 		if (req->special) {
1188 			struct scsi_cmnd *cmd = req->special;
1189 			scsi_release_buffers(cmd);
1190 			scsi_put_command(cmd);
1191 			req->special = NULL;
1192 		}
1193 		break;
1194 	case BLKPREP_DEFER:
1195 		/*
1196 		 * If we defer, blk_peek_request() returns NULL, but the
1197 		 * queue must still be restarted, so plug it here in case no
1198 		 * completing command will do that for us.
1199 		 */
1200 		if (sdev->device_busy == 0)
1201 			blk_plug_device(q);
1202 		break;
1203 	default:
1204 		req->cmd_flags |= REQ_DONTPREP;
1205 	}
1206 
1207 	return ret;
1208 }
1209 EXPORT_SYMBOL(scsi_prep_return);
1210 
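/*
 * scsi_prep_fn - default request preparation hook for SCSI queues.
 * Only BLOCK_PC requests are handled here; anything else is failed
 * with BLKPREP_KILL.  Upper-level drivers that service filesystem
 * requests install their own prep function in its place.
 */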
1211 int scsi_prep_fn(struct request_queue *q, struct request *req)
1212 {
1213 	struct scsi_device *sdev = q->queuedata;
1214 	int ret = BLKPREP_KILL;
1215 
1216 	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
1217 		ret = scsi_setup_blk_pc_cmnd(sdev, req);
1218 	return scsi_prep_return(q, req, ret);
1219 }
1220 EXPORT_SYMBOL(scsi_prep_fn);
1221 
1222 /*
1223  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1224  * return 0.
1225  *
1226  * Called with the queue_lock held.
1227  */
1228 static inline int scsi_dev_queue_ready(struct request_queue *q,
1229 				  struct scsi_device *sdev)
1230 {
1231 	if (sdev->device_busy == 0 && sdev->device_blocked) {
1232 		/*
1233 		 * unblock after device_blocked iterates to zero
1234 		 */
1235 		if (--sdev->device_blocked == 0) {
1236 			SCSI_LOG_MLQUEUE(3,
1237 				   sdev_printk(KERN_INFO, sdev,
1238 				   "unblocking device at zero depth\n"));
1239 		} else {
1240 			blk_plug_device(q);
1241 			return 0;
1242 		}
1243 	}
1244 	if (scsi_device_is_busy(sdev))
1245 		return 0;
1246 
1247 	return 1;
1248 }
1249 
1250 
1251 /*
1252  * scsi_target_queue_ready: checks whether we can send commands to the target
1253  * @sdev: scsi device on starget to check.
1254  *
1255  * Called with the host lock held.
1256  */
1257 static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1258 					   struct scsi_device *sdev)
1259 {
1260 	struct scsi_target *starget = scsi_target(sdev);
1261 
1262 	if (starget->single_lun) {
1263 		if (starget->starget_sdev_user &&
1264 		    starget->starget_sdev_user != sdev)
1265 			return 0;
1266 		starget->starget_sdev_user = sdev;
1267 	}
1268 
1269 	if (starget->target_busy == 0 && starget->target_blocked) {
1270 		/*
1271 		 * unblock after target_blocked iterates to zero
1272 		 */
1273 		if (--starget->target_blocked == 0) {
1274 			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
1275 					 "unblocking target at zero depth\n"));
1276 		} else
1277 			return 0;
1278 	}
1279 
1280 	if (scsi_target_is_busy(starget)) {
1281 		if (list_empty(&sdev->starved_entry)) {
1282 			list_add_tail(&sdev->starved_entry,
1283 				      &shost->starved_list);
1284 			return 0;
1285 		}
1286 	}
1287 
1288 	/* We're OK to process the command, so we can't be starved */
1289 	if (!list_empty(&sdev->starved_entry))
1290 		list_del_init(&sdev->starved_entry);
1291 	return 1;
1292 }
1293 
1294 /*
1295  * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1296  * return 0. We must end up running the queue again whenever 0 is
1297  * returned, else IO can hang.
1298  *
1299  * Called with host_lock held.
1300  */
1301 static inline int scsi_host_queue_ready(struct request_queue *q,
1302 				   struct Scsi_Host *shost,
1303 				   struct scsi_device *sdev)
1304 {
1305 	if (scsi_host_in_recovery(shost))
1306 		return 0;
1307 	if (shost->host_busy == 0 && shost->host_blocked) {
1308 		/*
1309 		 * unblock after host_blocked iterates to zero
1310 		 */
1311 		if (--shost->host_blocked == 0) {
1312 			SCSI_LOG_MLQUEUE(3,
1313 				printk("scsi%d unblocking host at zero depth\n",
1314 					shost->host_no));
1315 		} else {
1316 			return 0;
1317 		}
1318 	}
1319 	if (scsi_host_is_busy(shost)) {
1320 		if (list_empty(&sdev->starved_entry))
1321 			list_add_tail(&sdev->starved_entry, &shost->starved_list);
1322 		return 0;
1323 	}
1324 
1325 	/* We're OK to process the command, so we can't be starved */
1326 	if (!list_empty(&sdev->starved_entry))
1327 		list_del_init(&sdev->starved_entry);
1328 
1329 	return 1;
1330 }
1331 
1332 /*
1333  * Busy state exporting function for request stacking drivers.
1334  *
1335  * For efficiency, no lock is taken to check the busy state of
1336  * shost/starget/sdev, since the returned value is not guaranteed and
1337  * may change after a request stacking driver calls this function,
1338  * whether or not a lock is held.
1339  *
1340  * When SCSI can no longer dispatch I/Os and needs to kill them
1341  * (e.g. !sdev), it must return 'not busy'; otherwise request stacking
1342  * drivers may hold requests forever.
1343  */
1344 static int scsi_lld_busy(struct request_queue *q)
1345 {
1346 	struct scsi_device *sdev = q->queuedata;
1347 	struct Scsi_Host *shost;
1348 	struct scsi_target *starget;
1349 
1350 	if (!sdev)
1351 		return 0;
1352 
1353 	shost = sdev->host;
1354 	starget = scsi_target(sdev);
1355 
1356 	if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
1357 	    scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
1358 		return 1;
1359 
1360 	return 0;
1361 }
1362 
1363 /*
1364  * Kill a request for a dead device
1365  */
1366 static void scsi_kill_request(struct request *req, struct request_queue *q)
1367 {
1368 	struct scsi_cmnd *cmd = req->special;
1369 	struct scsi_device *sdev;
1370 	struct scsi_target *starget;
1371 	struct Scsi_Host *shost;
1372 
1373 	blk_start_request(req);
1374 
1375 	sdev = cmd->device;
1376 	starget = scsi_target(sdev);
1377 	shost = sdev->host;
1378 	scsi_init_cmd_errh(cmd);
1379 	cmd->result = DID_NO_CONNECT << 16;
1380 	atomic_inc(&cmd->device->iorequest_cnt);
1381 
1382 	/*
1383 	 * The SCSI completion path will call scsi_device_unbusy(), which
1384 	 * decrements the busy counts, so bump them here first.  To do that
1385 	 * we need to dance with the locks as the normal issue path does.
1386 	 */
1387 	sdev->device_busy++;
1388 	spin_unlock(sdev->request_queue->queue_lock);
1389 	spin_lock(shost->host_lock);
1390 	shost->host_busy++;
1391 	starget->target_busy++;
1392 	spin_unlock(shost->host_lock);
1393 	spin_lock(sdev->request_queue->queue_lock);
1394 
1395 	blk_complete_request(req);
1396 }
1397 
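/*
 * scsi_softirq_done - completion work run from block softirq context:
 * update the iodone/ioerr statistics, ask scsi_decide_disposition()
 * whether the command succeeded, needs a retry or belongs to the error
 * handler, and act on that disposition.  Commands that have already
 * been in flight longer than (allowed + 1) * timeout are forced to
 * complete instead of being retried again.
 */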
1398 static void scsi_softirq_done(struct request *rq)
1399 {
1400 	struct scsi_cmnd *cmd = rq->special;
1401 	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1402 	int disposition;
1403 
1404 	INIT_LIST_HEAD(&cmd->eh_entry);
1405 
1406 	atomic_inc(&cmd->device->iodone_cnt);
1407 	if (cmd->result)
1408 		atomic_inc(&cmd->device->ioerr_cnt);
1409 
1410 	disposition = scsi_decide_disposition(cmd);
1411 	if (disposition != SUCCESS &&
1412 	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1413 		sdev_printk(KERN_ERR, cmd->device,
1414 			    "timing out command, waited %lus\n",
1415 			    wait_for/HZ);
1416 		disposition = SUCCESS;
1417 	}
1418 
1419 	scsi_log_completion(cmd, disposition);
1420 
1421 	switch (disposition) {
1422 		case SUCCESS:
1423 			scsi_finish_command(cmd);
1424 			break;
1425 		case NEEDS_RETRY:
1426 			scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1427 			break;
1428 		case ADD_TO_MLQUEUE:
1429 			scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1430 			break;
1431 		default:
1432 			if (!scsi_eh_scmd_add(cmd, 0))
1433 				scsi_finish_command(cmd);
1434 	}
1435 }
1436 
1437 /*
1438  * Function:    scsi_request_fn()
1439  *
1440  * Purpose:     Main strategy routine for SCSI.
1441  *
1442  * Arguments:   q       - Pointer to actual queue.
1443  *
1444  * Returns:     Nothing
1445  *
1446  * Lock status: IO request lock assumed to be held when called.
1447  */
1448 static void scsi_request_fn(struct request_queue *q)
1449 {
1450 	struct scsi_device *sdev = q->queuedata;
1451 	struct Scsi_Host *shost;
1452 	struct scsi_cmnd *cmd;
1453 	struct request *req;
1454 
1455 	if (!sdev) {
1456 		printk("scsi: killing requests for dead queue\n");
1457 		while ((req = blk_peek_request(q)) != NULL)
1458 			scsi_kill_request(req, q);
1459 		return;
1460 	}
1461 
1462 	if(!get_device(&sdev->sdev_gendev))
1463 		/* We must be tearing the block queue down already */
1464 		return;
1465 
1466 	/*
1467 	 * To start with, we keep looping until the queue is empty, or until
1468 	 * the host is no longer able to accept any more requests.
1469 	 */
1470 	shost = sdev->host;
1471 	while (!blk_queue_plugged(q)) {
1472 		int rtn;
1473 		/*
1474 		 * get next queueable request.  We do this early to make sure
1475 		 * that the request is fully prepared even if we cannot
1476 		 * accept it.
1477 		 */
1478 		req = blk_peek_request(q);
1479 		if (!req || !scsi_dev_queue_ready(q, sdev))
1480 			break;
1481 
1482 		if (unlikely(!scsi_device_online(sdev))) {
1483 			sdev_printk(KERN_ERR, sdev,
1484 				    "rejecting I/O to offline device\n");
1485 			scsi_kill_request(req, q);
1486 			continue;
1487 		}
1488 
1489 
1490 		/*
1491 		 * Remove the request from the request list.
1492 		 */
1493 		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1494 			blk_start_request(req);
1495 		sdev->device_busy++;
1496 
1497 		spin_unlock(q->queue_lock);
1498 		cmd = req->special;
1499 		if (unlikely(cmd == NULL)) {
1500 			printk(KERN_CRIT "impossible request in %s.\n"
1501 					 "please mail a stack trace to "
1502 					 "linux-scsi@vger.kernel.org\n",
1503 					 __func__);
1504 			blk_dump_rq_flags(req, "foo");
1505 			BUG();
1506 		}
1507 		spin_lock(shost->host_lock);
1508 
1509 		/*
1510 		 * We hit this when the driver is using a host wide
1511 		 * tag map. For device level tag maps the queue_depth check
1512 		 * in the device ready fn would prevent us from trying
1513 		 * to allocate a tag. Since the map is a shared host resource
1514 		 * we add the dev to the starved list so it eventually gets
1515 		 * a run when a tag is freed.
1516 		 */
1517 		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
1518 			if (list_empty(&sdev->starved_entry))
1519 				list_add_tail(&sdev->starved_entry,
1520 					      &shost->starved_list);
1521 			goto not_ready;
1522 		}
1523 
1524 		if (!scsi_target_queue_ready(shost, sdev))
1525 			goto not_ready;
1526 
1527 		if (!scsi_host_queue_ready(q, shost, sdev))
1528 			goto not_ready;
1529 
1530 		scsi_target(sdev)->target_busy++;
1531 		shost->host_busy++;
1532 
1533 		/*
1534 		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1535 		 *		take the lock again.
1536 		 */
1537 		spin_unlock_irq(shost->host_lock);
1538 
1539 		/*
1540 		 * Finally, initialize any error handling parameters, and set up
1541 		 * the timers for timeouts.
1542 		 */
1543 		scsi_init_cmd_errh(cmd);
1544 
1545 		/*
1546 		 * Dispatch the command to the low-level driver.
1547 		 */
1548 		rtn = scsi_dispatch_cmd(cmd);
1549 		spin_lock_irq(q->queue_lock);
1550 		if(rtn) {
1551 			/* we're refusing the command; because of
1552 			 * the way locks get dropped, we need to
1553 			 * check here if plugging is required */
1554 			if(sdev->device_busy == 0)
1555 				blk_plug_device(q);
1556 
1557 			break;
1558 		}
1559 	}
1560 
1561 	goto out;
1562 
1563  not_ready:
1564 	spin_unlock_irq(shost->host_lock);
1565 
1566 	/*
1567 	 * lock q, handle tag, requeue req, and decrement device_busy. We
1568 	 * must return with queue_lock held.
1569 	 *
1570 	 * Decrementing device_busy without checking it is OK, as all such
1571 	 * cases (host limits or settings) should run the queue at some
1572 	 * later time.
1573 	 */
1574 	spin_lock_irq(q->queue_lock);
1575 	blk_requeue_request(q, req);
1576 	sdev->device_busy--;
1577 	if(sdev->device_busy == 0)
1578 		blk_plug_device(q);
1579  out:
1580 	/* must be careful here...if we trigger the ->remove() function
1581 	 * we cannot be holding the q lock */
1582 	spin_unlock_irq(q->queue_lock);
1583 	put_device(&sdev->sdev_gendev);
1584 	spin_lock_irq(q->queue_lock);
1585 }
1586 
1587 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1588 {
1589 	struct device *host_dev;
1590 	u64 bounce_limit = 0xffffffff;
1591 
1592 	if (shost->unchecked_isa_dma)
1593 		return BLK_BOUNCE_ISA;
1594 	/*
1595 	 * Platforms with virtual-DMA translation
1596 	 * hardware have no practical limit.
1597 	 */
1598 	if (!PCI_DMA_BUS_IS_PHYS)
1599 		return BLK_BOUNCE_ANY;
1600 
1601 	host_dev = scsi_get_device(shost);
1602 	if (host_dev && host_dev->dma_mask)
1603 		bounce_limit = *host_dev->dma_mask;
1604 
1605 	return bounce_limit;
1606 }
1607 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
1608 
1609 struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1610 					 request_fn_proc *request_fn)
1611 {
1612 	struct request_queue *q;
1613 	struct device *dev = shost->shost_gendev.parent;
1614 
1615 	q = blk_init_queue(request_fn, NULL);
1616 	if (!q)
1617 		return NULL;
1618 
1619 	/*
1620 	 * this limit is imposed by hardware restrictions
1621 	 */
1622 	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
1623 					SCSI_MAX_SG_CHAIN_SEGMENTS));
1624 
1625 	if (scsi_host_prot_dma(shost)) {
1626 		shost->sg_prot_tablesize =
1627 			min_not_zero(shost->sg_prot_tablesize,
1628 				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
1629 		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
1630 		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
1631 	}
1632 
1633 	blk_queue_max_hw_sectors(q, shost->max_sectors);
1634 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1635 	blk_queue_segment_boundary(q, shost->dma_boundary);
1636 	dma_set_seg_boundary(dev, shost->dma_boundary);
1637 
1638 	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
1639 
1640 	if (!shost->use_clustering)
1641 		q->limits.cluster = 0;
1642 
1643 	/*
1644 	 * set a reasonable default alignment on word boundaries: the
1645 	 * host and device may alter it using
1646 	 * blk_queue_update_dma_alignment() later.
1647 	 */
1648 	blk_queue_dma_alignment(q, 0x03);
1649 
1650 	return q;
1651 }
1652 EXPORT_SYMBOL(__scsi_alloc_queue);
1653 
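/*
 * scsi_alloc_queue - allocate the request queue for @sdev and wire up
 * the midlayer hooks: request preparation, softirq completion, timeout
 * handling and the LLD busy check.
 */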
1654 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1655 {
1656 	struct request_queue *q;
1657 
1658 	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
1659 	if (!q)
1660 		return NULL;
1661 
1662 	blk_queue_prep_rq(q, scsi_prep_fn);
1663 	blk_queue_softirq_done(q, scsi_softirq_done);
1664 	blk_queue_rq_timed_out(q, scsi_times_out);
1665 	blk_queue_lld_busy(q, scsi_lld_busy);
1666 	return q;
1667 }
1668 
1669 void scsi_free_queue(struct request_queue *q)
1670 {
1671 	blk_cleanup_queue(q);
1672 }
1673 
1674 /*
1675  * Function:    scsi_block_requests()
1676  *
1677  * Purpose:     Utility function used by low-level drivers to prevent further
1678  *		commands from being queued to the device.
1679  *
1680  * Arguments:   shost       - Host in question
1681  *
1682  * Returns:     Nothing
1683  *
1684  * Lock status: No locks are assumed held.
1685  *
1686  * Notes:       There is no timer nor any other means by which the requests
1687  *		get unblocked other than the low-level driver calling
1688  *		scsi_unblock_requests().
1689  */
1690 void scsi_block_requests(struct Scsi_Host *shost)
1691 {
1692 	shost->host_self_blocked = 1;
1693 }
1694 EXPORT_SYMBOL(scsi_block_requests);
1695 
1696 /*
1697  * Function:    scsi_unblock_requests()
1698  *
1699  * Purpose:     Utility function used by low-level drivers to allow further
1700  *		commands to be queued to the device.
1701  *
1702  * Arguments:   shost       - Host in question
1703  *
1704  * Returns:     Nothing
1705  *
1706  * Lock status: No locks are assumed held.
1707  *
1708  * Notes:       There is no timer nor any other means by which the requests
1709  *		get unblocked other than the low-level driver calling
1710  *		scsi_unblock_requests().
1711  *
1712  *		This is done as an API function so that changes to the
1713  *		internals of the scsi mid-layer won't require wholesale
1714  *		changes to drivers that use this feature.
1715  */
1716 void scsi_unblock_requests(struct Scsi_Host *shost)
1717 {
1718 	shost->host_self_blocked = 0;
1719 	scsi_run_host_queues(shost);
1720 }
1721 EXPORT_SYMBOL(scsi_unblock_requests);
1722 
1723 int __init scsi_init_queue(void)
1724 {
1725 	int i;
1726 
1727 	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
1728 					   sizeof(struct scsi_data_buffer),
1729 					   0, 0, NULL);
1730 	if (!scsi_sdb_cache) {
1731 		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
1732 		return -ENOMEM;
1733 	}
1734 
1735 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1736 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1737 		int size = sgp->size * sizeof(struct scatterlist);
1738 
1739 		sgp->slab = kmem_cache_create(sgp->name, size, 0,
1740 				SLAB_HWCACHE_ALIGN, NULL);
1741 		if (!sgp->slab) {
1742 			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1743 					sgp->name);
1744 			goto cleanup_sdb;
1745 		}
1746 
1747 		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
1748 						     sgp->slab);
1749 		if (!sgp->pool) {
1750 			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1751 					sgp->name);
1752 			goto cleanup_sdb;
1753 		}
1754 	}
1755 
1756 	return 0;
1757 
1758 cleanup_sdb:
1759 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1760 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1761 		if (sgp->pool)
1762 			mempool_destroy(sgp->pool);
1763 		if (sgp->slab)
1764 			kmem_cache_destroy(sgp->slab);
1765 	}
1766 	kmem_cache_destroy(scsi_sdb_cache);
1767 
1768 	return -ENOMEM;
1769 }
1770 
1771 void scsi_exit_queue(void)
1772 {
1773 	int i;
1774 
1775 	kmem_cache_destroy(scsi_sdb_cache);
1776 
1777 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1778 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1779 		mempool_destroy(sgp->pool);
1780 		kmem_cache_destroy(sgp->slab);
1781 	}
1782 }
1783 
1784 /**
1785  *	scsi_mode_select - issue a mode select
1786  *	@sdev:	SCSI device to be queried
1787  *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
1788  *	@sp:	Save page bit (0 == don't save, 1 == save)
1789  *	@modepage: mode page being requested
1790  *	@buffer: request buffer (may not be smaller than eight bytes)
1791  *	@len:	length of request buffer.
1792  *	@timeout: command timeout
1793  *	@retries: number of retries before failing
1794  *	@data: returns a structure abstracting the mode header data
1795  *	@sshdr: place to put sense data (or NULL if no sense to be collected).
1796  *		must be SCSI_SENSE_BUFFERSIZE big.
1797  *
1798  *	Returns zero if successful; negative error number or scsi
1799  *	status on error
1800  *
1801  */
1802 int
1803 scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
1804 		 unsigned char *buffer, int len, int timeout, int retries,
1805 		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1806 {
1807 	unsigned char cmd[10];
1808 	unsigned char *real_buffer;
1809 	int ret;
1810 
1811 	memset(cmd, 0, sizeof(cmd));
1812 	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
1813 
1814 	if (sdev->use_10_for_ms) {
1815 		if (len > 65535)
1816 			return -EINVAL;
1817 		real_buffer = kmalloc(8 + len, GFP_KERNEL);
1818 		if (!real_buffer)
1819 			return -ENOMEM;
1820 		memcpy(real_buffer + 8, buffer, len);
1821 		len += 8;
1822 		real_buffer[0] = 0;
1823 		real_buffer[1] = 0;
1824 		real_buffer[2] = data->medium_type;
1825 		real_buffer[3] = data->device_specific;
1826 		real_buffer[4] = data->longlba ? 0x01 : 0;
1827 		real_buffer[5] = 0;
1828 		real_buffer[6] = data->block_descriptor_length >> 8;
1829 		real_buffer[7] = data->block_descriptor_length;
1830 
1831 		cmd[0] = MODE_SELECT_10;
1832 		cmd[7] = len >> 8;
1833 		cmd[8] = len;
1834 	} else {
1835 		if (len > 255 || data->block_descriptor_length > 255 ||
1836 		    data->longlba)
1837 			return -EINVAL;
1838 
1839 		real_buffer = kmalloc(4 + len, GFP_KERNEL);
1840 		if (!real_buffer)
1841 			return -ENOMEM;
1842 		memcpy(real_buffer + 4, buffer, len);
1843 		len += 4;
1844 		real_buffer[0] = 0;
1845 		real_buffer[1] = data->medium_type;
1846 		real_buffer[2] = data->device_specific;
1847 		real_buffer[3] = data->block_descriptor_length;
1848 
1849 
1850 		cmd[0] = MODE_SELECT;
1851 		cmd[4] = len;
1852 	}
1853 
1854 	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
1855 			       sshdr, timeout, retries, NULL);
1856 	kfree(real_buffer);
1857 	return ret;
1858 }
1859 EXPORT_SYMBOL_GPL(scsi_mode_select);
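
/*
 * Illustrative sketch (compiled out): one way a caller could use
 * scsi_mode_select() to write back a mode page.  The helper name, the
 * 10*HZ timeout and the retry count of 3 are arbitrary choices for the
 * example, not values mandated by this file.
 */
#if 0
static int example_mode_select(struct scsi_device *sdev,
			       unsigned char *page, int page_len)
{
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;

	memset(&data, 0, sizeof(data));
	/* Send no block descriptors; the mode header is rebuilt by
	 * scsi_mode_select() itself. */
	data.block_descriptor_length = 0;

	/* PF=1 (standard page format), SP=0 (do not save pages). */
	return scsi_mode_select(sdev, 1, 0, page[0] & 0x3f, page, page_len,
				10 * HZ, 3, &data, &sshdr);
}
#endif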
1860 
1861 /**
1862  *	scsi_mode_sense - issue a mode sense, falling back from ten to six byte commands if necessary.
1863  *	@sdev:	SCSI device to be queried
1864  *	@dbd:	DBD/LLBAA bits for CDB byte 1 (setting DBD suppresses block descriptors)
1865  *	@modepage: mode page being requested
1866  *	@buffer: request buffer (may not be smaller than eight bytes)
1867  *	@len:	length of request buffer.
1868  *	@timeout: command timeout
1869  *	@retries: number of retries before failing
1870  *	@data: returns a structure abstracting the mode header data
1871  *	@sshdr: place to put decoded sense data, or NULL if no sense is to
1872  *		be collected.
1873  *
1874  *	Returns the command result (zero/good status if successful).  On
1875  *	success, @data->header_length is set to 4 or 8 depending on whether
1876  *	a six or ten byte command was issued.
1877  */
1878 int
1879 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1880 		  unsigned char *buffer, int len, int timeout, int retries,
1881 		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1882 {
1883 	unsigned char cmd[12];
1884 	int use_10_for_ms;
1885 	int header_length;
1886 	int result;
1887 	struct scsi_sense_hdr my_sshdr;
1888 
1889 	memset(data, 0, sizeof(*data));
1890 	memset(&cmd[0], 0, 12);
1891 	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
1892 	cmd[2] = modepage;
1893 
1894 	/* caller might not be interested in sense, but we need it */
1895 	if (!sshdr)
1896 		sshdr = &my_sshdr;
1897 
1898  retry:
1899 	use_10_for_ms = sdev->use_10_for_ms;
1900 
1901 	if (use_10_for_ms) {
1902 		if (len < 8)
1903 			len = 8;
1904 
1905 		cmd[0] = MODE_SENSE_10;
1906 		cmd[8] = len;
1907 		header_length = 8;
1908 	} else {
1909 		if (len < 4)
1910 			len = 4;
1911 
1912 		cmd[0] = MODE_SENSE;
1913 		cmd[4] = len;
1914 		header_length = 4;
1915 	}
1916 
1917 	memset(buffer, 0, len);
1918 
1919 	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1920 				  sshdr, timeout, retries, NULL);
1921 
1922 	/* Make sure an ILLEGAL REQUEST sense really identifies the command
1923 	 * opcode itself (invalid command operation code, ASC 0x20) before
1924 	 * falling back to the six byte command: MODE SENSE can also return
1925 	 * ILLEGAL REQUEST when the requested mode page isn't supported. */
1926 
1927 	if (use_10_for_ms && !scsi_status_is_good(result) &&
1928 	    (driver_byte(result) & DRIVER_SENSE)) {
1929 		if (scsi_sense_valid(sshdr)) {
1930 			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1931 			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1932 				/*
1933 				 * Invalid command operation code
1934 				 */
1935 				sdev->use_10_for_ms = 0;
1936 				goto retry;
1937 			}
1938 		}
1939 	}
1940 
1941 	if (scsi_status_is_good(result)) {
1942 		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
1943 			     (modepage == 6 || modepage == 8))) {
1944 			/* Initio breakage? */
1945 			header_length = 0;
1946 			data->length = 13;
1947 			data->medium_type = 0;
1948 			data->device_specific = 0;
1949 			data->longlba = 0;
1950 			data->block_descriptor_length = 0;
1951 		} else if (use_10_for_ms) {
1952 			data->length = buffer[0]*256 + buffer[1] + 2;
1953 			data->medium_type = buffer[2];
1954 			data->device_specific = buffer[3];
1955 			data->longlba = buffer[4] & 0x01;
1956 			data->block_descriptor_length = buffer[6]*256
1957 				+ buffer[7];
1958 		} else {
1959 			data->length = buffer[0] + 1;
1960 			data->medium_type = buffer[1];
1961 			data->device_specific = buffer[2];
1962 			data->block_descriptor_length = buffer[3];
1963 		}
1964 		data->header_length = header_length;
1965 	}
1966 
1967 	return result;
1968 }
1969 EXPORT_SYMBOL(scsi_mode_sense);
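
/*
 * Illustrative sketch (compiled out): reading a mode page with
 * scsi_mode_sense() and locating the page data from the header and block
 * descriptor lengths returned in @data.  The helper name and the
 * timeout/retry values are assumptions made for the example.
 */
#if 0
static int example_mode_page_offset(struct scsi_device *sdev, int modepage,
				    unsigned char *buf, int buf_len)
{
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int res;

	res = scsi_mode_sense(sdev, 0, modepage, buf, buf_len,
			      10 * HZ, 3, &data, &sshdr);
	if (!scsi_status_is_good(res))
		return -EIO;

	/* The requested page starts after the header and block descriptors. */
	return data.header_length + data.block_descriptor_length;
}
#endif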
1970 
1971 /**
1972  *	scsi_test_unit_ready - test if unit is ready
1973  *	@sdev:	scsi device to change the state of.
1974  *	@timeout: command timeout
1975  *	@retries: number of retries before failing
1976  *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
1977  *		returning sense. Make sure that this is cleared before passing
1978  *		in.
1979  *
1980  *	Returns zero if the device is ready, or the command result if the
1981  *	TUR failed.  For removable media, NOT_READY or UNIT_ATTENTION sense
1982  *	is translated to success, with the ->changed flag updated.
1983  **/
1984 int
1985 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
1986 		     struct scsi_sense_hdr *sshdr_external)
1987 {
1988 	char cmd[] = {
1989 		TEST_UNIT_READY, 0, 0, 0, 0, 0,
1990 	};
1991 	struct scsi_sense_hdr *sshdr;
1992 	int result;
1993 
1994 	if (!sshdr_external)
1995 		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
1996 	else
1997 		sshdr = sshdr_external;
1998 
1999 	/* try to eat the UNIT_ATTENTION if there are enough retries */
2000 	do {
2001 		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2002 					  timeout, retries, NULL);
2003 		if (sdev->removable && scsi_sense_valid(sshdr) &&
2004 		    sshdr->sense_key == UNIT_ATTENTION)
2005 			sdev->changed = 1;
2006 	} while (scsi_sense_valid(sshdr) &&
2007 		 sshdr->sense_key == UNIT_ATTENTION && --retries);
2008 
2009 	if (!sshdr)
2010 		/* could not allocate sense buffer, so can't process it */
2011 		return result;
2012 
2013 	if (sdev->removable && scsi_sense_valid(sshdr) &&
2014 	    (sshdr->sense_key == UNIT_ATTENTION ||
2015 	     sshdr->sense_key == NOT_READY)) {
2016 		sdev->changed = 1;
2017 		result = 0;
2018 	}
2019 	if (!sshdr_external)
2020 		kfree(sshdr);
2021 	return result;
2022 }
2023 EXPORT_SYMBOL(scsi_test_unit_ready);
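
/*
 * Illustrative sketch (compiled out): polling for readiness with
 * scsi_test_unit_ready().  The external sense header must be cleared
 * before it is passed in; the timeout and retry count are arbitrary.
 */
#if 0
static int example_unit_ready(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;

	memset(&sshdr, 0, sizeof(sshdr));
	/* Zero means the device answered TEST UNIT READY successfully. */
	return scsi_test_unit_ready(sdev, 10 * HZ, 3, &sshdr) == 0;
}
#endif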
2024 
2025 /**
2026  *	scsi_device_set_state - Take the given device through the device state model.
2027  *	@sdev:	scsi device to change the state of.
2028  *	@state:	state to change to.
2029  *
2030  *	Returns zero if successful, or -EINVAL if the requested
2031  *	transition is illegal.
2032  */
2033 int
2034 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2035 {
2036 	enum scsi_device_state oldstate = sdev->sdev_state;
2037 
2038 	if (state == oldstate)
2039 		return 0;
2040 
2041 	switch (state) {
2042 	case SDEV_CREATED:
2043 		switch (oldstate) {
2044 		case SDEV_CREATED_BLOCK:
2045 			break;
2046 		default:
2047 			goto illegal;
2048 		}
2049 		break;
2050 
2051 	case SDEV_RUNNING:
2052 		switch (oldstate) {
2053 		case SDEV_CREATED:
2054 		case SDEV_OFFLINE:
2055 		case SDEV_QUIESCE:
2056 		case SDEV_BLOCK:
2057 			break;
2058 		default:
2059 			goto illegal;
2060 		}
2061 		break;
2062 
2063 	case SDEV_QUIESCE:
2064 		switch (oldstate) {
2065 		case SDEV_RUNNING:
2066 		case SDEV_OFFLINE:
2067 			break;
2068 		default:
2069 			goto illegal;
2070 		}
2071 		break;
2072 
2073 	case SDEV_OFFLINE:
2074 		switch (oldstate) {
2075 		case SDEV_CREATED:
2076 		case SDEV_RUNNING:
2077 		case SDEV_QUIESCE:
2078 		case SDEV_BLOCK:
2079 			break;
2080 		default:
2081 			goto illegal;
2082 		}
2083 		break;
2084 
2085 	case SDEV_BLOCK:
2086 		switch (oldstate) {
2087 		case SDEV_RUNNING:
2088 		case SDEV_CREATED_BLOCK:
2089 			break;
2090 		default:
2091 			goto illegal;
2092 		}
2093 		break;
2094 
2095 	case SDEV_CREATED_BLOCK:
2096 		switch (oldstate) {
2097 		case SDEV_CREATED:
2098 			break;
2099 		default:
2100 			goto illegal;
2101 		}
2102 		break;
2103 
2104 	case SDEV_CANCEL:
2105 		switch (oldstate) {
2106 		case SDEV_CREATED:
2107 		case SDEV_RUNNING:
2108 		case SDEV_QUIESCE:
2109 		case SDEV_OFFLINE:
2110 		case SDEV_BLOCK:
2111 			break;
2112 		default:
2113 			goto illegal;
2114 		}
2115 		break;
2116 
2117 	case SDEV_DEL:
2118 		switch (oldstate) {
2119 		case SDEV_CREATED:
2120 		case SDEV_RUNNING:
2121 		case SDEV_OFFLINE:
2122 		case SDEV_CANCEL:
2123 			break;
2124 		default:
2125 			goto illegal;
2126 		}
2127 		break;
2128 
2129 	}
2130 	sdev->sdev_state = state;
2131 	return 0;
2132 
2133  illegal:
2134 	SCSI_LOG_ERROR_RECOVERY(1,
2135 				sdev_printk(KERN_ERR, sdev,
2136 					    "Illegal state transition %s->%s\n",
2137 					    scsi_device_state_name(oldstate),
2138 					    scsi_device_state_name(state))
2139 				);
2140 	return -EINVAL;
2141 }
2142 EXPORT_SYMBOL(scsi_device_set_state);
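
/*
 * Illustrative sketch (compiled out): requesting a state transition and
 * reporting a refusal.  Offlining is used purely as an example; the
 * helper name is an assumption.
 */
#if 0
static void example_offline_device(struct scsi_device *sdev)
{
	/* A non-zero return means the transition from the current state
	 * is illegal and sdev_state is left unchanged. */
	if (scsi_device_set_state(sdev, SDEV_OFFLINE))
		sdev_printk(KERN_WARNING, sdev,
			    "refusing offline in state %d\n",
			    sdev->sdev_state);
}
#endif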
2143 
2144 /**
2145  * 	scsi_evt_emit - emit a single SCSI device uevent
2146  *	@sdev: associated SCSI device
2147  *	@evt: event to emit
2148  *
2149  *	Send a single uevent (scsi_event) to the associated scsi_device.
2150  */
2151 static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2152 {
2153 	int idx = 0;
2154 	char *envp[3];
2155 
2156 	switch (evt->evt_type) {
2157 	case SDEV_EVT_MEDIA_CHANGE:
2158 		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2159 		break;
2160 
2161 	default:
2162 		/* do nothing */
2163 		break;
2164 	}
2165 
2166 	envp[idx++] = NULL;
2167 
2168 	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2169 }
2170 
2171 /**
2172  * 	scsi_evt_thread - send a uevent for each scsi event
2173  *	@work: work struct for scsi_device
2174  *
2175  *	Dispatch queued events to their associated scsi_device kobjects
2176  *	as uevents.
2177  */
2178 void scsi_evt_thread(struct work_struct *work)
2179 {
2180 	struct scsi_device *sdev;
2181 	LIST_HEAD(event_list);
2182 
2183 	sdev = container_of(work, struct scsi_device, event_work);
2184 
2185 	while (1) {
2186 		struct scsi_event *evt;
2187 		struct list_head *this, *tmp;
2188 		unsigned long flags;
2189 
2190 		spin_lock_irqsave(&sdev->list_lock, flags);
2191 		list_splice_init(&sdev->event_list, &event_list);
2192 		spin_unlock_irqrestore(&sdev->list_lock, flags);
2193 
2194 		if (list_empty(&event_list))
2195 			break;
2196 
2197 		list_for_each_safe(this, tmp, &event_list) {
2198 			evt = list_entry(this, struct scsi_event, node);
2199 			list_del(&evt->node);
2200 			scsi_evt_emit(sdev, evt);
2201 			kfree(evt);
2202 		}
2203 	}
2204 }
2205 
2206 /**
2207  * 	sdev_evt_send - send asserted event to uevent thread
2208  *	@sdev: scsi_device event occurred on
2209  *	@evt: event to send
2210  *
2211  *	Assert scsi device event asynchronously.
2212  */
2213 void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2214 {
2215 	unsigned long flags;
2216 
2217 #if 0
2218 	/* FIXME: currently this check eliminates all media change events
2219 	 * for polled devices.  Need to update to discriminate between AN
2220 	 * and polled events */
2221 	if (!test_bit(evt->evt_type, sdev->supported_events)) {
2222 		kfree(evt);
2223 		return;
2224 	}
2225 #endif
2226 
2227 	spin_lock_irqsave(&sdev->list_lock, flags);
2228 	list_add_tail(&evt->node, &sdev->event_list);
2229 	schedule_work(&sdev->event_work);
2230 	spin_unlock_irqrestore(&sdev->list_lock, flags);
2231 }
2232 EXPORT_SYMBOL_GPL(sdev_evt_send);
2233 
2234 /**
2235  * 	sdev_evt_alloc - allocate a new scsi event
2236  *	@evt_type: type of event to allocate
2237  *	@gfpflags: GFP flags for allocation
2238  *
2239  *	Allocates and returns a new scsi_event.
2240  */
2241 struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2242 				  gfp_t gfpflags)
2243 {
2244 	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2245 	if (!evt)
2246 		return NULL;
2247 
2248 	evt->evt_type = evt_type;
2249 	INIT_LIST_HEAD(&evt->node);
2250 
2251 	/* evt_type-specific initialization, if any */
2252 	switch (evt_type) {
2253 	case SDEV_EVT_MEDIA_CHANGE:
2254 	default:
2255 		/* do nothing */
2256 		break;
2257 	}
2258 
2259 	return evt;
2260 }
2261 EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2262 
2263 /**
2264  * 	sdev_evt_send_simple - send asserted event to uevent thread
2265  *	@sdev: scsi_device event occurred on
2266  *	@evt_type: type of event to send
2267  *	@gfpflags: GFP flags for allocation
2268  *
2269  *	Assert scsi device event asynchronously, given an event type.
2270  */
2271 void sdev_evt_send_simple(struct scsi_device *sdev,
2272 			  enum scsi_device_event evt_type, gfp_t gfpflags)
2273 {
2274 	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2275 	if (!evt) {
2276 		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2277 			    evt_type);
2278 		return;
2279 	}
2280 
2281 	sdev_evt_send(sdev, evt);
2282 }
2283 EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
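
/*
 * Illustrative sketch (compiled out): how a driver might report a media
 * change through the event interface.  GFP_ATOMIC is chosen only to show
 * that the call is usable from atomic context.
 */
#if 0
static void example_report_media_change(struct scsi_device *sdev)
{
	/* Allocation, queueing and the uevent emission are all handled
	 * by the functions above. */
	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
}
#endif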
2284 
2285 /**
2286  *	scsi_device_quiesce - Block user issued commands.
2287  *	@sdev:	scsi device to quiesce.
2288  *
2289  *	This works by trying to transition to the SDEV_QUIESCE state
2290  *	(which must be a legal transition).  When the device is in this
2291  *	state, only special requests will be accepted, all others will
2292  *	be deferred.  Since special requests may also be requeued requests,
2293  *	a successful return doesn't guarantee the device will be
2294  *	totally quiescent.
2295  *
2296  *	Must be called with user context, may sleep.
2297  *
2298  *	Returns zero if successful, or an error if the state transition fails.
2299  */
2300 int
2301 scsi_device_quiesce(struct scsi_device *sdev)
2302 {
2303 	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2304 	if (err)
2305 		return err;
2306 
2307 	scsi_run_queue(sdev->request_queue);
2308 	while (sdev->device_busy) {
2309 		msleep_interruptible(200);
2310 		scsi_run_queue(sdev->request_queue);
2311 	}
2312 	return 0;
2313 }
2314 EXPORT_SYMBOL(scsi_device_quiesce);
2315 
2316 /**
2317  *	scsi_device_resume - Restart user issued commands to a quiesced device.
2318  *	@sdev:	scsi device to resume.
2319  *
2320  *	Moves the device from quiesced back to running and restarts the
2321  *	queues.
2322  *
2323  *	Must be called with user context, may sleep.
2324  */
2325 void
2326 scsi_device_resume(struct scsi_device *sdev)
2327 {
2328 	if (scsi_device_set_state(sdev, SDEV_RUNNING))
2329 		return;
2330 	scsi_run_queue(sdev->request_queue);
2331 }
2332 EXPORT_SYMBOL(scsi_device_resume);
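
/*
 * Illustrative sketch (compiled out): the usual quiesce/resume pairing
 * around a maintenance window.  The helper name and the placement of the
 * privileged commands are assumptions for the example.
 */
#if 0
static int example_quiesced_window(struct scsi_device *sdev)
{
	int err;

	err = scsi_device_quiesce(sdev);
	if (err)
		return err;

	/* ... issue special (BLOCK_PC) requests here ... */

	scsi_device_resume(sdev);
	return 0;
}
#endif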
2333 
2334 static void
2335 device_quiesce_fn(struct scsi_device *sdev, void *data)
2336 {
2337 	scsi_device_quiesce(sdev);
2338 }
2339 
2340 void
2341 scsi_target_quiesce(struct scsi_target *starget)
2342 {
2343 	starget_for_each_device(starget, NULL, device_quiesce_fn);
2344 }
2345 EXPORT_SYMBOL(scsi_target_quiesce);
2346 
2347 static void
2348 device_resume_fn(struct scsi_device *sdev, void *data)
2349 {
2350 	scsi_device_resume(sdev);
2351 }
2352 
2353 void
2354 scsi_target_resume(struct scsi_target *starget)
2355 {
2356 	starget_for_each_device(starget, NULL, device_resume_fn);
2357 }
2358 EXPORT_SYMBOL(scsi_target_resume);
2359 
2360 /**
2361  * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
2362  * @sdev:	device to block
2363  *
2364  * Block request made by SCSI LLDs to temporarily stop all
2365  * SCSI commands on the specified device.  May be called from
2366  * interrupt or normal process context.
2367  *
2368  * Returns zero if successful, or an error if not.
2369  *
2370  * Notes:
2371  *	This routine transitions the device to the SDEV_BLOCK state
2372  *	(which must be a legal transition).  When the device is in this
2373  *	state, all commands are deferred until the scsi lld reenables
2374  *	the device with scsi_device_unblock or device_block_tmo fires.
2375  *	This routine assumes the host_lock is held on entry.
2376  */
2377 int
2378 scsi_internal_device_block(struct scsi_device *sdev)
2379 {
2380 	struct request_queue *q = sdev->request_queue;
2381 	unsigned long flags;
2382 	int err = 0;
2383 
2384 	err = scsi_device_set_state(sdev, SDEV_BLOCK);
2385 	if (err) {
2386 		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2387 
2388 		if (err)
2389 			return err;
2390 	}
2391 
2392 	/*
2393 	 * The device has transitioned to SDEV_BLOCK.  Stop the
2394 	 * block layer from calling the midlayer with this device's
2395 	 * request queue.
2396 	 */
2397 	spin_lock_irqsave(q->queue_lock, flags);
2398 	blk_stop_queue(q);
2399 	spin_unlock_irqrestore(q->queue_lock, flags);
2400 
2401 	return 0;
2402 }
2403 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2404 
2405 /**
2406  * scsi_internal_device_unblock - resume a device after a block request
2407  * @sdev:	device to resume
2408  *
2409  * Called by SCSI LLDs or the midlayer to restart the request queue
2410  * of a previously suspended SCSI device.  May be called from interrupt
2411  * or normal process context.
2412  *
2413  * Returns zero if successful or error if not.
2414  *
2415  * Notes:
2416  *	This routine transitions the device to the SDEV_RUNNING state
2417  *	(which must be a legal transition) allowing the midlayer to
2418  *	goose the queue for this device.  This routine assumes the
2419  *	host_lock is held upon entry.
2420  */
2421 int
2422 scsi_internal_device_unblock(struct scsi_device *sdev)
2423 {
2424 	struct request_queue *q = sdev->request_queue;
2425 	unsigned long flags;
2426 
2427 	/*
2428 	 * Try to transition the scsi device to SDEV_RUNNING
2429 	 * and goose the device queue if successful.
2430 	 */
2431 	if (sdev->sdev_state == SDEV_BLOCK)
2432 		sdev->sdev_state = SDEV_RUNNING;
2433 	else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
2434 		sdev->sdev_state = SDEV_CREATED;
2435 	else if (sdev->sdev_state != SDEV_CANCEL &&
2436 		 sdev->sdev_state != SDEV_OFFLINE)
2437 		return -EINVAL;
2438 
2439 	spin_lock_irqsave(q->queue_lock, flags);
2440 	blk_start_queue(q);
2441 	spin_unlock_irqrestore(q->queue_lock, flags);
2442 
2443 	return 0;
2444 }
2445 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
2446 
2447 static void
2448 device_block(struct scsi_device *sdev, void *data)
2449 {
2450 	scsi_internal_device_block(sdev);
2451 }
2452 
2453 static int
2454 target_block(struct device *dev, void *data)
2455 {
2456 	if (scsi_is_target_device(dev))
2457 		starget_for_each_device(to_scsi_target(dev), NULL,
2458 					device_block);
2459 	return 0;
2460 }
2461 
2462 void
2463 scsi_target_block(struct device *dev)
2464 {
2465 	if (scsi_is_target_device(dev))
2466 		starget_for_each_device(to_scsi_target(dev), NULL,
2467 					device_block);
2468 	else
2469 		device_for_each_child(dev, NULL, target_block);
2470 }
2471 EXPORT_SYMBOL_GPL(scsi_target_block);
2472 
2473 static void
2474 device_unblock(struct scsi_device *sdev, void *data)
2475 {
2476 	scsi_internal_device_unblock(sdev);
2477 }
2478 
2479 static int
2480 target_unblock(struct device *dev, void *data)
2481 {
2482 	if (scsi_is_target_device(dev))
2483 		starget_for_each_device(to_scsi_target(dev), NULL,
2484 					device_unblock);
2485 	return 0;
2486 }
2487 
2488 void
2489 scsi_target_unblock(struct device *dev)
2490 {
2491 	if (scsi_is_target_device(dev))
2492 		starget_for_each_device(to_scsi_target(dev), NULL,
2493 					device_unblock);
2494 	else
2495 		device_for_each_child(dev, NULL, target_unblock);
2496 }
2497 EXPORT_SYMBOL_GPL(scsi_target_unblock);
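
/*
 * Illustrative sketch (compiled out): bracketing a disruptive operation
 * with scsi_target_block()/scsi_target_unblock().  The helper name and
 * the nature of the operation are assumptions for the example.
 */
#if 0
static void example_pause_target(struct device *dev)
{
	scsi_target_block(dev);
	/* ... reset or reconfigure the target here ... */
	scsi_target_unblock(dev);
}
#endif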
2498 
2499 /**
2500  * scsi_kmap_atomic_sg - find and atomically map an sg-element
2501  * @sgl:	scatter-gather list
2502  * @sg_count:	number of segments in sg
2503  * @offset:	offset in bytes into sg, on return offset into the mapped area
2504  * @len:	bytes to map, on return number of bytes mapped
2505  *
2506  * Returns virtual address of the start of the mapped page
2507  */
2508 void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2509 			  size_t *offset, size_t *len)
2510 {
2511 	int i;
2512 	size_t sg_len = 0, len_complete = 0;
2513 	struct scatterlist *sg;
2514 	struct page *page;
2515 
2516 	WARN_ON(!irqs_disabled());
2517 
2518 	for_each_sg(sgl, sg, sg_count, i) {
2519 		len_complete = sg_len; /* Complete sg-entries */
2520 		sg_len += sg->length;
2521 		if (sg_len > *offset)
2522 			break;
2523 	}
2524 
2525 	if (unlikely(i == sg_count)) {
2526 		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2527 			"elements %d\n",
2528 		       __func__, sg_len, *offset, sg_count);
2529 		WARN_ON(1);
2530 		return NULL;
2531 	}
2532 
2533 	/* Offset starting from the beginning of first page in this sg-entry */
2534 	*offset = *offset - len_complete + sg->offset;
2535 
2536 	/* Assumption: contiguous pages can be accessed as "page + i" */
2537 	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2538 	*offset &= ~PAGE_MASK;
2539 
2540 	/* Bytes in this sg-entry from *offset to the end of the page */
2541 	sg_len = PAGE_SIZE - *offset;
2542 	if (*len > sg_len)
2543 		*len = sg_len;
2544 
2545 	return kmap_atomic(page, KM_BIO_SRC_IRQ);
2546 }
2547 EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2548 
2549 /**
2550  * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
2551  * @virt:	virtual address to be unmapped
2552  */
2553 void scsi_kunmap_atomic_sg(void *virt)
2554 {
2555 	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
2556 }
2557 EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
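
/*
 * Illustrative sketch (compiled out): copying data out of a scatterlist
 * one mapped chunk at a time.  Interrupts must be disabled around the
 * atomic mapping, as the WARN_ON() above enforces; the helper name is an
 * assumption for the example.
 */
#if 0
static void example_copy_from_sg(struct scatterlist *sgl, int sg_count,
				 unsigned char *dst, size_t count)
{
	size_t done = 0;

	while (done < count) {
		size_t offset = done;		/* in: offset into the sg list */
		size_t len = count - done;	/* in: bytes still wanted      */
		unsigned char *vaddr;

		vaddr = scsi_kmap_atomic_sg(sgl, sg_count, &offset, &len);
		if (!vaddr)
			break;
		/* out: offset is now relative to the mapped page and len is
		 * clamped to what that page can provide. */
		memcpy(dst + done, vaddr + offset, len);
		scsi_kunmap_atomic_sg(vaddr);
		done += len;
	}
}
#endif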
2558