xref: /openbmc/linux/drivers/scsi/scsi_lib.c (revision 545e4006)
1 /*
2  *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
3  *
4  *  SCSI queueing library.
5  *      Initial versions: Eric Youngdale (eric@andante.org).
6  *                        Based upon conversations with large numbers
7  *                        of people at Linux Expo.
8  */
9 
10 #include <linux/bio.h>
11 #include <linux/bitops.h>
12 #include <linux/blkdev.h>
13 #include <linux/completion.h>
14 #include <linux/kernel.h>
15 #include <linux/mempool.h>
16 #include <linux/slab.h>
17 #include <linux/init.h>
18 #include <linux/pci.h>
19 #include <linux/delay.h>
20 #include <linux/hardirq.h>
21 #include <linux/scatterlist.h>
22 
23 #include <scsi/scsi.h>
24 #include <scsi/scsi_cmnd.h>
25 #include <scsi/scsi_dbg.h>
26 #include <scsi/scsi_device.h>
27 #include <scsi/scsi_driver.h>
28 #include <scsi/scsi_eh.h>
29 #include <scsi/scsi_host.h>
30 
31 #include "scsi_priv.h"
32 #include "scsi_logging.h"
33 
34 
35 #define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
36 #define SG_MEMPOOL_SIZE		2
37 
38 struct scsi_host_sg_pool {
39 	size_t		size;
40 	char		*name;
41 	struct kmem_cache	*slab;
42 	mempool_t	*pool;
43 };
44 
45 #define SP(x) { x, "sgpool-" __stringify(x) }
46 #if (SCSI_MAX_SG_SEGMENTS < 32)
47 #error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
48 #endif
49 static struct scsi_host_sg_pool scsi_sg_pools[] = {
50 	SP(8),
51 	SP(16),
52 #if (SCSI_MAX_SG_SEGMENTS > 32)
53 	SP(32),
54 #if (SCSI_MAX_SG_SEGMENTS > 64)
55 	SP(64),
56 #if (SCSI_MAX_SG_SEGMENTS > 128)
57 	SP(128),
58 #if (SCSI_MAX_SG_SEGMENTS > 256)
59 #error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
60 #endif
61 #endif
62 #endif
63 #endif
64 	SP(SCSI_MAX_SG_SEGMENTS)
65 };
66 #undef SP
67 
68 static struct kmem_cache *scsi_sdb_cache;
69 
70 static void scsi_run_queue(struct request_queue *q);
71 
72 /*
73  * Function:	scsi_unprep_request()
74  *
75  * Purpose:	Remove all preparation done for a request, including its
76  *		associated scsi_cmnd, so that it can be requeued.
77  *
78  * Arguments:	req	- request to unprepare
79  *
80  * Lock status:	Assumed that no locks are held upon entry.
81  *
82  * Returns:	Nothing.
83  */
84 static void scsi_unprep_request(struct request *req)
85 {
86 	struct scsi_cmnd *cmd = req->special;
87 
88 	req->cmd_flags &= ~REQ_DONTPREP;
89 	req->special = NULL;
90 
91 	scsi_put_command(cmd);
92 }
93 
94 /*
95  * Function:    scsi_queue_insert()
96  *
97  * Purpose:     Insert a command in the midlevel queue.
98  *
99  * Arguments:   cmd    - command that we are adding to queue.
100  *              reason - why we are inserting command to queue.
101  *
102  * Lock status: Assumed that lock is not held upon entry.
103  *
104  * Returns:     Nothing.
105  *
106  * Notes:       We do this for one of two cases.  Either the host is busy
107  *              and it cannot accept any more commands for the time being,
108  *              or the device returned QUEUE_FULL and can accept no more
109  *              commands.
110  * Notes:       This could be called either from an interrupt context or a
111  *              normal process context.
112  */
113 int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
114 {
115 	struct Scsi_Host *host = cmd->device->host;
116 	struct scsi_device *device = cmd->device;
117 	struct request_queue *q = device->request_queue;
118 	unsigned long flags;
119 
120 	SCSI_LOG_MLQUEUE(1,
121 		 printk("Inserting command %p into mlqueue\n", cmd));
122 
123 	/*
124 	 * Set the appropriate busy bit for the device/host.
125 	 *
126 	 * If the host/device isn't busy, assume that something actually
127 	 * completed, and that we should be able to queue a command now.
128 	 *
129 	 * Note that the prior mid-layer assumption that any host could
130 	 * always queue at least one command is now broken.  The mid-layer
131 	 * will implement a user specifiable stall (see
132 	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
133 	 * if a command is requeued with no other commands outstanding
134 	 * either for the device or for the host.
135 	 */
136 	if (reason == SCSI_MLQUEUE_HOST_BUSY)
137 		host->host_blocked = host->max_host_blocked;
138 	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
139 		device->device_blocked = device->max_device_blocked;
140 
141 	/*
142 	 * Decrement the counters, since these commands are no longer
143 	 * active on the host/device.
144 	 */
145 	scsi_device_unbusy(device);
146 
147 	/*
148 	 * Requeue this command.  It will go before all other commands
149 	 * that are already in the queue.
150 	 *
151 	 * NOTE: there is magic here about the way the queue is plugged if
152 	 * we have no outstanding commands.
153 	 *
154 	 * Although we *don't* plug the queue, we call the request
155 	 * function.  The SCSI request function detects the blocked condition
156 	 * and plugs the queue appropriately.
157          */
158 	spin_lock_irqsave(q->queue_lock, flags);
159 	blk_requeue_request(q, cmd->request);
160 	spin_unlock_irqrestore(q->queue_lock, flags);
161 
162 	scsi_run_queue(q);
163 
164 	return 0;
165 }
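
/*
 * Illustrative sketch (not part of this file): commands typically end up
 * in scsi_queue_insert() after a LLD refuses them, e.g. when its
 * queuecommand handler returns SCSI_MLQUEUE_HOST_BUSY because hardware
 * resources are exhausted; the mid-layer then requeues the command here.
 * "struct my_hba" and "my_ring_full()" below are hypothetical
 * driver-side names.
 *
 *	static int my_queuecommand(struct scsi_cmnd *cmd,
 *				   void (*done)(struct scsi_cmnd *))
 *	{
 *		struct my_hba *hba = shost_priv(cmd->device->host);
 *
 *		if (my_ring_full(hba))
 *			return SCSI_MLQUEUE_HOST_BUSY;
 *		...
 *		return 0;
 *	}
 */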
166 
167 /**
168  * scsi_execute - insert request and wait for the result
169  * @sdev:	scsi device
170  * @cmd:	scsi command
171  * @data_direction: data direction
172  * @buffer:	data buffer
173  * @bufflen:	len of buffer
174  * @sense:	optional sense buffer
175  * @timeout:	request timeout in seconds
176  * @retries:	number of times to retry request
177  * @flags:	flags ORed into the request's cmd_flags
178  *
179  * returns the req->errors value which is the scsi_cmnd result
180  * field.
181  */
182 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
183 		 int data_direction, void *buffer, unsigned bufflen,
184 		 unsigned char *sense, int timeout, int retries, int flags)
185 {
186 	struct request *req;
187 	int write = (data_direction == DMA_TO_DEVICE);
188 	int ret = DRIVER_ERROR << 24;
189 
190 	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
191 
192 	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
193 					buffer, bufflen, __GFP_WAIT))
194 		goto out;
195 
196 	req->cmd_len = COMMAND_SIZE(cmd[0]);
197 	memcpy(req->cmd, cmd, req->cmd_len);
198 	req->sense = sense;
199 	req->sense_len = 0;
200 	req->retries = retries;
201 	req->timeout = timeout;
202 	req->cmd_type = REQ_TYPE_BLOCK_PC;
203 	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
204 
205 	/*
206 	 * head injection *required* here otherwise quiesce won't work
207 	 */
208 	blk_execute_rq(req->q, NULL, req, 1);
209 
210 	/*
211 	 * Some devices (USB mass-storage in particular) may transfer
212 	 * garbage data together with a residue indicating that the data
213 	 * is invalid.  Prevent the garbage from being misinterpreted
214 	 * and prevent security leaks by zeroing out the excess data.
215 	 */
216 	if (unlikely(req->data_len > 0 && req->data_len <= bufflen))
217 		memset(buffer + (bufflen - req->data_len), 0, req->data_len);
218 
219 	ret = req->errors;
220  out:
221 	blk_put_request(req);
222 
223 	return ret;
224 }
225 EXPORT_SYMBOL(scsi_execute);
226 
227 
228 int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
229 		     int data_direction, void *buffer, unsigned bufflen,
230 		     struct scsi_sense_hdr *sshdr, int timeout, int retries)
231 {
232 	char *sense = NULL;
233 	int result;
234 
235 	if (sshdr) {
236 		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
237 		if (!sense)
238 			return DRIVER_ERROR << 24;
239 	}
240 	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
241 			      sense, timeout, retries, 0);
242 	if (sshdr)
243 		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
244 
245 	kfree(sense);
246 	return result;
247 }
248 EXPORT_SYMBOL(scsi_execute_req);
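
/*
 * Usage sketch (illustrative only): issuing a TEST UNIT READY through
 * scsi_execute_req().  The timeout (30 * HZ here, in jiffies as in-tree
 * callers pass it) and the retry count are arbitrary example values.
 *
 *	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	struct scsi_sense_hdr sshdr;
 *	int result;
 *
 *	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
 *				  &sshdr, 30 * HZ, 3);
 *
 * A non-zero result together with scsi_sense_valid(&sshdr) means the
 * normalized sense (sense_key/asc/ascq) can be examined to decide how
 * to proceed.
 */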
249 
250 struct scsi_io_context {
251 	void *data;
252 	void (*done)(void *data, char *sense, int result, int resid);
253 	char sense[SCSI_SENSE_BUFFERSIZE];
254 };
255 
256 static struct kmem_cache *scsi_io_context_cache;
257 
258 static void scsi_end_async(struct request *req, int uptodate)
259 {
260 	struct scsi_io_context *sioc = req->end_io_data;
261 
262 	if (sioc->done)
263 		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
264 
265 	kmem_cache_free(scsi_io_context_cache, sioc);
266 	__blk_put_request(req->q, req);
267 }
268 
269 static int scsi_merge_bio(struct request *rq, struct bio *bio)
270 {
271 	struct request_queue *q = rq->q;
272 
273 	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
274 	if (rq_data_dir(rq) == WRITE)
275 		bio->bi_rw |= (1 << BIO_RW);
276 	blk_queue_bounce(q, &bio);
277 
278 	return blk_rq_append_bio(q, rq, bio);
279 }
280 
281 static void scsi_bi_endio(struct bio *bio, int error)
282 {
283 	bio_put(bio);
284 }
285 
286 /**
287  * scsi_req_map_sg - map a scatterlist into a request
288  * @rq:		request to fill
289  * @sgl:	scatterlist
290  * @nsegs:	number of elements
291  * @bufflen:	len of buffer
292  * @gfp:	memory allocation flags
293  *
294  * scsi_req_map_sg maps a scatterlist into a request so that the
295  * request can be sent to the block layer. We do not trust the scatterlist
296  * sent to us, as some ULDs use that struct only to organize the pages.
297  */
298 static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
299 			   int nsegs, unsigned bufflen, gfp_t gfp)
300 {
301 	struct request_queue *q = rq->q;
302 	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
303 	unsigned int data_len = bufflen, len, bytes, off;
304 	struct scatterlist *sg;
305 	struct page *page;
306 	struct bio *bio = NULL;
307 	int i, err, nr_vecs = 0;
308 
309 	for_each_sg(sgl, sg, nsegs, i) {
310 		page = sg_page(sg);
311 		off = sg->offset;
312 		len = sg->length;
313 
314 		while (len > 0 && data_len > 0) {
315 			/*
316 			 * sg sends a scatterlist that is larger than
317 			 * the data_len it wants transferred for certain
318 			 * IO sizes
319 			 */
320 			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
321 			bytes = min(bytes, data_len);
322 
323 			if (!bio) {
324 				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
325 				nr_pages -= nr_vecs;
326 
327 				bio = bio_alloc(gfp, nr_vecs);
328 				if (!bio) {
329 					err = -ENOMEM;
330 					goto free_bios;
331 				}
332 				bio->bi_end_io = scsi_bi_endio;
333 			}
334 
335 			if (bio_add_pc_page(q, bio, page, bytes, off) !=
336 			    bytes) {
337 				bio_put(bio);
338 				err = -EINVAL;
339 				goto free_bios;
340 			}
341 
342 			if (bio->bi_vcnt >= nr_vecs) {
343 				err = scsi_merge_bio(rq, bio);
344 				if (err) {
345 					bio_endio(bio, 0);
346 					goto free_bios;
347 				}
348 				bio = NULL;
349 			}
350 
351 			page++;
352 			len -= bytes;
353 			data_len -= bytes;
354 			off = 0;
355 		}
356 	}
357 
358 	rq->buffer = rq->data = NULL;
359 	rq->data_len = bufflen;
360 	return 0;
361 
362 free_bios:
363 	while ((bio = rq->bio) != NULL) {
364 		rq->bio = bio->bi_next;
365 		/*
366 		 * call endio instead of bio_put in case it was bounced
367 		 */
368 		bio_endio(bio, 0);
369 	}
370 
371 	return err;
372 }
373 
374 /**
375  * scsi_execute_async - insert request
376  * @sdev:	scsi device
377  * @cmd:	scsi command
378  * @cmd_len:	length of scsi cdb
379  * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE
380  * @buffer:	data buffer (this can be a kernel buffer or scatterlist)
381  * @bufflen:	len of buffer
382  * @use_sg:	if buffer is a scatterlist this is the number of elements
383  * @timeout:	request timeout in seconds
384  * @retries:	number of times to retry request
385  * @privdata:	data passed to done()
386  * @done:	callback function when done
387  * @gfp:	memory allocation flags
388  */
389 int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
390 		       int cmd_len, int data_direction, void *buffer, unsigned bufflen,
391 		       int use_sg, int timeout, int retries, void *privdata,
392 		       void (*done)(void *, char *, int, int), gfp_t gfp)
393 {
394 	struct request *req;
395 	struct scsi_io_context *sioc;
396 	int err = 0;
397 	int write = (data_direction == DMA_TO_DEVICE);
398 
399 	sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
400 	if (!sioc)
401 		return DRIVER_ERROR << 24;
402 
403 	req = blk_get_request(sdev->request_queue, write, gfp);
404 	if (!req)
405 		goto free_sense;
406 	req->cmd_type = REQ_TYPE_BLOCK_PC;
407 	req->cmd_flags |= REQ_QUIET;
408 
409 	if (use_sg)
410 		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
411 	else if (bufflen)
412 		err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);
413 
414 	if (err)
415 		goto free_req;
416 
417 	req->cmd_len = cmd_len;
418 	memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
419 	memcpy(req->cmd, cmd, req->cmd_len);
420 	req->sense = sioc->sense;
421 	req->sense_len = 0;
422 	req->timeout = timeout;
423 	req->retries = retries;
424 	req->end_io_data = sioc;
425 
426 	sioc->data = privdata;
427 	sioc->done = done;
428 
429 	blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
430 	return 0;
431 
432 free_req:
433 	blk_put_request(req);
434 free_sense:
435 	kmem_cache_free(scsi_io_context_cache, sioc);
436 	return DRIVER_ERROR << 24;
437 }
438 EXPORT_SYMBOL_GPL(scsi_execute_async);
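
/*
 * Usage sketch (illustrative only): asynchronous submission with a
 * completion callback.  "my_done" and "struct my_ctx" are hypothetical
 * caller-side names; the callback runs when the request completes.
 *
 *	static void my_done(void *data, char *sense, int result, int resid)
 *	{
 *		struct my_ctx *ctx = data;
 *		...	check result, inspect the sense buffer, free ctx
 *	}
 *
 *	err = scsi_execute_async(sdev, cdb, COMMAND_SIZE(cdb[0]),
 *				 DMA_FROM_DEVICE, buf, buf_len, 0,
 *				 30 * HZ, 3, ctx, my_done, GFP_KERNEL);
 */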
439 
440 /*
441  * Function:    scsi_init_cmd_errh()
442  *
443  * Purpose:     Initialize cmd fields related to error handling.
444  *
445  * Arguments:   cmd	- command that is ready to be queued.
446  *
447  * Notes:       This function has the job of initializing a number of
448  *              fields related to error handling.   Typically this will
449  *              be called once for each command, as required.
450  */
451 static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
452 {
453 	cmd->serial_number = 0;
454 	scsi_set_resid(cmd, 0);
455 	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
456 	if (cmd->cmd_len == 0)
457 		cmd->cmd_len = scsi_command_size(cmd->cmnd);
458 }
459 
460 void scsi_device_unbusy(struct scsi_device *sdev)
461 {
462 	struct Scsi_Host *shost = sdev->host;
463 	unsigned long flags;
464 
465 	spin_lock_irqsave(shost->host_lock, flags);
466 	shost->host_busy--;
467 	if (unlikely(scsi_host_in_recovery(shost) &&
468 		     (shost->host_failed || shost->host_eh_scheduled)))
469 		scsi_eh_wakeup(shost);
470 	spin_unlock(shost->host_lock);
471 	spin_lock(sdev->request_queue->queue_lock);
472 	sdev->device_busy--;
473 	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
474 }
475 
476 /*
477  * Called for single_lun devices on IO completion. Clear starget_sdev_user,
478  * and call blk_run_queue for all the scsi_devices on the target -
479  * including current_sdev first.
480  *
481  * Called with *no* scsi locks held.
482  */
483 static void scsi_single_lun_run(struct scsi_device *current_sdev)
484 {
485 	struct Scsi_Host *shost = current_sdev->host;
486 	struct scsi_device *sdev, *tmp;
487 	struct scsi_target *starget = scsi_target(current_sdev);
488 	unsigned long flags;
489 
490 	spin_lock_irqsave(shost->host_lock, flags);
491 	starget->starget_sdev_user = NULL;
492 	spin_unlock_irqrestore(shost->host_lock, flags);
493 
494 	/*
495 	 * Call blk_run_queue for all LUNs on the target, starting with
496 	 * current_sdev. We race with others (to set starget_sdev_user),
497 	 * but in most cases, we will be first. Ideally, each LU on the
498 	 * target would get some limited time or requests on the target.
499 	 */
500 	blk_run_queue(current_sdev->request_queue);
501 
502 	spin_lock_irqsave(shost->host_lock, flags);
503 	if (starget->starget_sdev_user)
504 		goto out;
505 	list_for_each_entry_safe(sdev, tmp, &starget->devices,
506 			same_target_siblings) {
507 		if (sdev == current_sdev)
508 			continue;
509 		if (scsi_device_get(sdev))
510 			continue;
511 
512 		spin_unlock_irqrestore(shost->host_lock, flags);
513 		blk_run_queue(sdev->request_queue);
514 		spin_lock_irqsave(shost->host_lock, flags);
515 
516 		scsi_device_put(sdev);
517 	}
518  out:
519 	spin_unlock_irqrestore(shost->host_lock, flags);
520 }
521 
522 /*
523  * Function:	scsi_run_queue()
524  *
525  * Purpose:	Select a proper request queue to serve next
526  *
527  * Arguments:	q	- last request's queue
528  *
529  * Returns:     Nothing
530  *
531  * Notes:	The previous command was completely finished, start
532  *		a new one if possible.
533  */
534 static void scsi_run_queue(struct request_queue *q)
535 {
536 	struct scsi_device *sdev = q->queuedata;
537 	struct Scsi_Host *shost = sdev->host;
538 	unsigned long flags;
539 
540 	if (scsi_target(sdev)->single_lun)
541 		scsi_single_lun_run(sdev);
542 
543 	spin_lock_irqsave(shost->host_lock, flags);
544 	while (!list_empty(&shost->starved_list) &&
545 	       !shost->host_blocked && !shost->host_self_blocked &&
546 		!((shost->can_queue > 0) &&
547 		  (shost->host_busy >= shost->can_queue))) {
548 
549 		int flagset;
550 
551 		/*
552 		 * As long as shost is accepting commands and we have
553 		 * starved queues, call blk_run_queue. scsi_request_fn
554 		 * drops the queue_lock and can add us back to the
555 		 * starved_list.
556 		 *
557 		 * host_lock protects the starved_list and starved_entry.
558 		 * scsi_request_fn must get the host_lock before checking
559 		 * or modifying starved_list or starved_entry.
560 		 */
561 		sdev = list_entry(shost->starved_list.next,
562 					  struct scsi_device, starved_entry);
563 		list_del_init(&sdev->starved_entry);
564 		spin_unlock(shost->host_lock);
565 
566 		spin_lock(sdev->request_queue->queue_lock);
567 		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
568 				!test_bit(QUEUE_FLAG_REENTER,
569 					&sdev->request_queue->queue_flags);
570 		if (flagset)
571 			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
572 		__blk_run_queue(sdev->request_queue);
573 		if (flagset)
574 			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
575 		spin_unlock(sdev->request_queue->queue_lock);
576 
577 		spin_lock(shost->host_lock);
578 		if (unlikely(!list_empty(&sdev->starved_entry)))
579 			/*
580 			 * sdev lost a race, and was put back on the
581 			 * starved list. This is unlikely but without this
582 			 * in theory we could loop forever.
583 			 */
584 			break;
585 	}
586 	spin_unlock_irqrestore(shost->host_lock, flags);
587 
588 	blk_run_queue(q);
589 }
590 
591 /*
592  * Function:	scsi_requeue_command()
593  *
594  * Purpose:	Handle post-processing of completed commands.
595  *
596  * Arguments:	q	- queue to operate on
597  *		cmd	- command that may need to be requeued.
598  *
599  * Returns:	Nothing
600  *
601  * Notes:	After command completion, there may be blocks left
602  *		over which weren't finished by the previous command;
603  *		this can be for a number of reasons - the main one is
604  *		I/O errors in the middle of the request, in which case
605  *		we need to request the blocks that come after the bad
606  *		sector.
607  * Notes:	Upon return, cmd is a stale pointer.
608  */
609 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
610 {
611 	struct request *req = cmd->request;
612 	unsigned long flags;
613 
614 	scsi_unprep_request(req);
615 	spin_lock_irqsave(q->queue_lock, flags);
616 	blk_requeue_request(q, req);
617 	spin_unlock_irqrestore(q->queue_lock, flags);
618 
619 	scsi_run_queue(q);
620 }
621 
622 void scsi_next_command(struct scsi_cmnd *cmd)
623 {
624 	struct scsi_device *sdev = cmd->device;
625 	struct request_queue *q = sdev->request_queue;
626 
627 	/* need to hold a reference on the device before we let go of the cmd */
628 	get_device(&sdev->sdev_gendev);
629 
630 	scsi_put_command(cmd);
631 	scsi_run_queue(q);
632 
633 	/* ok to remove device now */
634 	put_device(&sdev->sdev_gendev);
635 }
636 
637 void scsi_run_host_queues(struct Scsi_Host *shost)
638 {
639 	struct scsi_device *sdev;
640 
641 	shost_for_each_device(sdev, shost)
642 		scsi_run_queue(sdev->request_queue);
643 }
644 
645 /*
646  * Function:    scsi_end_request()
647  *
648  * Purpose:     Post-processing of completed commands (usually invoked at end
649  *		of upper level post-processing and scsi_io_completion).
650  *
651  * Arguments:   cmd	 - command that is complete.
652  *              error    - 0 if I/O indicates success, < 0 for I/O error.
653  *              bytes    - number of bytes of completed I/O
654  *		requeue  - indicates whether we should requeue leftovers.
655  *
656  * Lock status: Assumed that lock is not held upon entry.
657  *
658  * Returns:     cmd if requeue required, NULL otherwise.
659  *
660  * Notes:       This is called for block device requests in order to
661  *              mark some number of sectors as complete.
662  *
663  *		We are guaranteeing that the request queue will be goosed
664  *		at some point during this call.
665  * Notes:	If cmd was requeued, upon return it will be a stale pointer.
666  */
667 static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
668 					  int bytes, int requeue)
669 {
670 	struct request_queue *q = cmd->device->request_queue;
671 	struct request *req = cmd->request;
672 
673 	/*
674 	 * If there are blocks left over at the end, set up the command
675 	 * to queue the remainder of them.
676 	 */
677 	if (blk_end_request(req, error, bytes)) {
678 		int leftover = (req->hard_nr_sectors << 9);
679 
680 		if (blk_pc_request(req))
681 			leftover = req->data_len;
682 
683 		/* kill remainder if no retries */
684 		if (error && blk_noretry_request(req))
685 			blk_end_request(req, error, leftover);
686 		else {
687 			if (requeue) {
688 				/*
689 				 * Bleah.  Leftovers again.  Stick the
690 				 * leftovers in the front of the
691 				 * queue, and goose the queue again.
692 				 */
693 				scsi_requeue_command(q, cmd);
694 				cmd = NULL;
695 			}
696 			return cmd;
697 		}
698 	}
699 
700 	/*
701 	 * This will goose the queue request function at the end, so we don't
702 	 * need to worry about launching another command.
703 	 */
704 	scsi_next_command(cmd);
705 	return NULL;
706 }
707 
708 static inline unsigned int scsi_sgtable_index(unsigned short nents)
709 {
710 	unsigned int index;
711 
712 	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
713 
714 	if (nents <= 8)
715 		index = 0;
716 	else
717 		index = get_count_order(nents) - 3;
718 
719 	return index;
720 }
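
/*
 * Worked example of the index mapping above, with scsi_sg_pools[]
 * holding the sgpool-8/16/32/64/... entries (the last line assumes
 * SCSI_MAX_SG_SEGMENTS == 128):
 *
 *	nents =   5  ->  index 0                       ->  sgpool-8
 *	nents =  24  ->  get_count_order(24)  = 5 - 3  ->  index 2  ->  sgpool-32
 *	nents = 100  ->  get_count_order(100) = 7 - 3  ->  index 4  ->  sgpool-128
 */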
721 
722 static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
723 {
724 	struct scsi_host_sg_pool *sgp;
725 
726 	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
727 	mempool_free(sgl, sgp->pool);
728 }
729 
730 static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
731 {
732 	struct scsi_host_sg_pool *sgp;
733 
734 	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
735 	return mempool_alloc(sgp->pool, gfp_mask);
736 }
737 
738 static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
739 			      gfp_t gfp_mask)
740 {
741 	int ret;
742 
743 	BUG_ON(!nents);
744 
745 	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
746 			       gfp_mask, scsi_sg_alloc);
747 	if (unlikely(ret))
748 		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
749 				scsi_sg_free);
750 
751 	return ret;
752 }
753 
754 static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
755 {
756 	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
757 }
758 
759 /*
760  * Function:    scsi_release_buffers()
761  *
762  * Purpose:     Completion processing for block device I/O requests.
763  *
764  * Arguments:   cmd	- command that we are bailing.
765  *
766  * Lock status: Assumed that no lock is held upon entry.
767  *
768  * Returns:     Nothing
769  *
770  * Notes:       In the event that an upper level driver rejects a
771  *		command, we must release resources allocated during
772  *		the __init_io() function.  Primarily this would involve
773  *		the scatter-gather table, and potentially any bounce
774  *		buffers.
775  */
776 void scsi_release_buffers(struct scsi_cmnd *cmd)
777 {
778 	if (cmd->sdb.table.nents)
779 		scsi_free_sgtable(&cmd->sdb);
780 
781 	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
782 
783 	if (scsi_bidi_cmnd(cmd)) {
784 		struct scsi_data_buffer *bidi_sdb =
785 			cmd->request->next_rq->special;
786 		scsi_free_sgtable(bidi_sdb);
787 		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
788 		cmd->request->next_rq->special = NULL;
789 	}
790 }
791 EXPORT_SYMBOL(scsi_release_buffers);
792 
793 /*
794  * Bidi commands must be completed as a whole, both sides at once.  If
795  * part of the bytes were written and the LLD returned scsi_in()->resid
796  * and/or scsi_out()->resid, this information is left in req->data_len
797  * and req->next_rq->data_len.  The upper-layer driver can decide what
798  * to do with this information.
799  */
800 static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
801 {
802 	struct request *req = cmd->request;
803 	unsigned int dlen = req->data_len;
804 	unsigned int next_dlen = req->next_rq->data_len;
805 
806 	req->data_len = scsi_out(cmd)->resid;
807 	req->next_rq->data_len = scsi_in(cmd)->resid;
808 
809 	/* The req and req->next_rq have not been completed */
810 	BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));
811 
812 	scsi_release_buffers(cmd);
813 
814 	/*
815 	 * This will goose the queue request function at the end, so we don't
816 	 * need to worry about launching another command.
817 	 */
818 	scsi_next_command(cmd);
819 }
820 
821 /*
822  * Function:    scsi_io_completion()
823  *
824  * Purpose:     Completion processing for block device I/O requests.
825  *
826  * Arguments:   cmd   - command that is finished.
827  *
828  * Lock status: Assumed that no lock is held upon entry.
829  *
830  * Returns:     Nothing
831  *
832  * Notes:       This function is matched in terms of capabilities to
833  *              the function that created the scatter-gather list.
834  *              In other words, if there are no bounce buffers
835  *              (the normal case for most drivers), we don't need
836  *              the logic to deal with cleaning up afterwards.
837  *
838  *		We must do one of several things here:
839  *
840  *		a) Call scsi_end_request.  This will finish off the
841  *		   specified number of sectors.  If we are done, the
842  *		   command block will be released, and the queue
843  *		   function will be goosed.  If we are not done, then
844  *		   scsi_end_request will directly goose the queue.
845  *
846  *		b) We can just use scsi_requeue_command() here.  This would
847  *		   be used if we just wanted to retry, for example.
848  */
849 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
850 {
851 	int result = cmd->result;
852 	int this_count = scsi_bufflen(cmd);
853 	struct request_queue *q = cmd->device->request_queue;
854 	struct request *req = cmd->request;
855 	int error = 0;
856 	struct scsi_sense_hdr sshdr;
857 	int sense_valid = 0;
858 	int sense_deferred = 0;
859 
860 	if (result) {
861 		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
862 		if (sense_valid)
863 			sense_deferred = scsi_sense_is_deferred(&sshdr);
864 	}
865 
866 	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
867 		req->errors = result;
868 		if (result) {
869 			if (sense_valid && req->sense) {
870 				/*
871 				 * SG_IO wants current and deferred errors
872 				 */
873 				int len = 8 + cmd->sense_buffer[7];
874 
875 				if (len > SCSI_SENSE_BUFFERSIZE)
876 					len = SCSI_SENSE_BUFFERSIZE;
877 				memcpy(req->sense, cmd->sense_buffer,  len);
878 				req->sense_len = len;
879 			}
880 			if (!sense_deferred)
881 				error = -EIO;
882 		}
883 		if (scsi_bidi_cmnd(cmd)) {
884 			/* will also release_buffers */
885 			scsi_end_bidi_request(cmd);
886 			return;
887 		}
888 		req->data_len = scsi_get_resid(cmd);
889 	}
890 
891 	BUG_ON(blk_bidi_rq(req)); /* bidi is not supported for !blk_pc_request yet */
892 	scsi_release_buffers(cmd);
893 
894 	/*
895 	 * Next deal with any sectors which we were able to correctly
896 	 * handle.
897 	 */
898 	SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
899 				      "%d bytes done.\n",
900 				      req->nr_sectors, good_bytes));
901 
902 	/* A number of bytes were successfully read.  If there
903 	 * are leftovers and there is some kind of error
904 	 * (result != 0), retry the rest.
905 	 */
906 	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
907 		return;
908 
909 	/* good_bytes = 0, or (inclusive) there were leftovers and
910 	 * result = 0, so scsi_end_request couldn't retry.
911 	 */
912 	if (sense_valid && !sense_deferred) {
913 		switch (sshdr.sense_key) {
914 		case UNIT_ATTENTION:
915 			if (cmd->device->removable) {
916 				/* Detected disc change.  Set a bit
917 				 * and quietly refuse further access.
918 				 */
919 				cmd->device->changed = 1;
920 				scsi_end_request(cmd, -EIO, this_count, 1);
921 				return;
922 			} else {
923 				/* Must have been a power glitch, or a
924 				 * bus reset.  Could not have been a
925 				 * media change, so we just retry the
926 				 * request and see what happens.
927 				 */
928 				scsi_requeue_command(q, cmd);
929 				return;
930 			}
931 			break;
932 		case ILLEGAL_REQUEST:
933 			/* If we had an ILLEGAL REQUEST returned, then
934 			 * we may have performed an unsupported
935 			 * command.  The only thing this should be
936 			 * would be a ten byte read where only a six
937 			 * byte read was supported.  Also, on a system
938 			 * where READ CAPACITY failed, we may have
939 			 * read past the end of the disk.
940 			 */
941 			if ((cmd->device->use_10_for_rw &&
942 			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
943 			    (cmd->cmnd[0] == READ_10 ||
944 			     cmd->cmnd[0] == WRITE_10)) {
945 				cmd->device->use_10_for_rw = 0;
946 				/* This will cause a retry with a
947 				 * 6-byte command.
948 				 */
949 				scsi_requeue_command(q, cmd);
950 				return;
951 			} else {
952 				scsi_end_request(cmd, -EIO, this_count, 1);
953 				return;
954 			}
955 			break;
956 		case NOT_READY:
957 			/* If the device is in the process of becoming
958 			 * ready, or has a temporary blockage, retry.
959 			 */
960 			if (sshdr.asc == 0x04) {
961 				switch (sshdr.ascq) {
962 				case 0x01: /* becoming ready */
963 				case 0x04: /* format in progress */
964 				case 0x05: /* rebuild in progress */
965 				case 0x06: /* recalculation in progress */
966 				case 0x07: /* operation in progress */
967 				case 0x08: /* Long write in progress */
968 				case 0x09: /* self test in progress */
969 					scsi_requeue_command(q, cmd);
970 					return;
971 				default:
972 					break;
973 				}
974 			}
975 			if (!(req->cmd_flags & REQ_QUIET))
976 				scsi_cmd_print_sense_hdr(cmd,
977 							 "Device not ready",
978 							 &sshdr);
979 
980 			scsi_end_request(cmd, -EIO, this_count, 1);
981 			return;
982 		case VOLUME_OVERFLOW:
983 			if (!(req->cmd_flags & REQ_QUIET)) {
984 				scmd_printk(KERN_INFO, cmd,
985 					    "Volume overflow, CDB: ");
986 				__scsi_print_command(cmd->cmnd);
987 				scsi_print_sense("", cmd);
988 			}
989 			/* See SSC3rXX or current. */
990 			scsi_end_request(cmd, -EIO, this_count, 1);
991 			return;
992 		default:
993 			break;
994 		}
995 	}
996 	if (host_byte(result) == DID_RESET) {
997 		/* Third party bus reset or reset for error recovery
998 		 * reasons.  Just retry the request and see what
999 		 * happens.
1000 		 */
1001 		scsi_requeue_command(q, cmd);
1002 		return;
1003 	}
1004 	if (result) {
1005 		if (!(req->cmd_flags & REQ_QUIET)) {
1006 			scsi_print_result(cmd);
1007 			if (driver_byte(result) & DRIVER_SENSE)
1008 				scsi_print_sense("", cmd);
1009 		}
1010 	}
1011 	scsi_end_request(cmd, -EIO, this_count, !result);
1012 }
1013 
1014 static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
1015 			     gfp_t gfp_mask)
1016 {
1017 	int count;
1018 
1019 	/*
1020 	 * If sg table allocation fails, requeue request later.
1021 	 */
1022 	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
1023 					gfp_mask))) {
1024 		return BLKPREP_DEFER;
1025 	}
1026 
1027 	req->buffer = NULL;
1028 
1029 	/*
1030 	 * Next, walk the list, and fill in the addresses and sizes of
1031 	 * each segment.
1032 	 */
1033 	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
1034 	BUG_ON(count > sdb->table.nents);
1035 	sdb->table.nents = count;
1036 	if (blk_pc_request(req))
1037 		sdb->length = req->data_len;
1038 	else
1039 		sdb->length = req->nr_sectors << 9;
1040 	return BLKPREP_OK;
1041 }
1042 
1043 /*
1044  * Function:    scsi_init_io()
1045  *
1046  * Purpose:     SCSI I/O initialize function.
1047  *
1048  * Arguments:   cmd   - Command descriptor we wish to initialize
1049  *
1050  * Returns:     0 on success
1051  *		BLKPREP_DEFER if the failure is retryable
1052  *		BLKPREP_KILL if the failure is fatal
1053  */
1054 int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
1055 {
1056 	int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
1057 	if (error)
1058 		goto err_exit;
1059 
1060 	if (blk_bidi_rq(cmd->request)) {
1061 		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
1062 			scsi_sdb_cache, GFP_ATOMIC);
1063 		if (!bidi_sdb) {
1064 			error = BLKPREP_DEFER;
1065 			goto err_exit;
1066 		}
1067 
1068 		cmd->request->next_rq->special = bidi_sdb;
1069 		error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
1070 								    GFP_ATOMIC);
1071 		if (error)
1072 			goto err_exit;
1073 	}
1074 
1075 	return BLKPREP_OK ;
1076 
1077 err_exit:
1078 	scsi_release_buffers(cmd);
1079 	if (error == BLKPREP_KILL)
1080 		scsi_put_command(cmd);
1081 	else /* BLKPREP_DEFER */
1082 		scsi_unprep_request(cmd->request);
1083 
1084 	return error;
1085 }
1086 EXPORT_SYMBOL(scsi_init_io);
1087 
1088 static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
1089 		struct request *req)
1090 {
1091 	struct scsi_cmnd *cmd;
1092 
1093 	if (!req->special) {
1094 		cmd = scsi_get_command(sdev, GFP_ATOMIC);
1095 		if (unlikely(!cmd))
1096 			return NULL;
1097 		req->special = cmd;
1098 	} else {
1099 		cmd = req->special;
1100 	}
1101 
1102 	/* pull a tag out of the request if we have one */
1103 	cmd->tag = req->tag;
1104 	cmd->request = req;
1105 
1106 	cmd->cmnd = req->cmd;
1107 
1108 	return cmd;
1109 }
1110 
1111 int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1112 {
1113 	struct scsi_cmnd *cmd;
1114 	int ret = scsi_prep_state_check(sdev, req);
1115 
1116 	if (ret != BLKPREP_OK)
1117 		return ret;
1118 
1119 	cmd = scsi_get_cmd_from_req(sdev, req);
1120 	if (unlikely(!cmd))
1121 		return BLKPREP_DEFER;
1122 
1123 	/*
1124 	 * BLOCK_PC requests may transfer data, in which case they must have
1125 	 * a bio attached to them.  Or they might contain a SCSI command
1126 	 * that does not transfer data, in which case they may optionally
1127 	 * submit a request without an attached bio.
1128 	 */
1129 	if (req->bio) {
1130 		int ret;
1131 
1132 		BUG_ON(!req->nr_phys_segments);
1133 
1134 		ret = scsi_init_io(cmd, GFP_ATOMIC);
1135 		if (unlikely(ret))
1136 			return ret;
1137 	} else {
1138 		BUG_ON(req->data_len);
1139 		BUG_ON(req->data);
1140 
1141 		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1142 		req->buffer = NULL;
1143 	}
1144 
1145 	cmd->cmd_len = req->cmd_len;
1146 	if (!req->data_len)
1147 		cmd->sc_data_direction = DMA_NONE;
1148 	else if (rq_data_dir(req) == WRITE)
1149 		cmd->sc_data_direction = DMA_TO_DEVICE;
1150 	else
1151 		cmd->sc_data_direction = DMA_FROM_DEVICE;
1152 
1153 	cmd->transfersize = req->data_len;
1154 	cmd->allowed = req->retries;
1155 	cmd->timeout_per_command = req->timeout;
1156 	return BLKPREP_OK;
1157 }
1158 EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
1159 
1160 /*
1161  * Setup a REQ_TYPE_FS command.  These are simple read/write requests
1162  * from filesystems that still need to be translated into SCSI CDBs by
1163  * the ULD.
1164  */
1165 int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1166 {
1167 	struct scsi_cmnd *cmd;
1168 	int ret = scsi_prep_state_check(sdev, req);
1169 
1170 	if (ret != BLKPREP_OK)
1171 		return ret;
1172 
1173 	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
1174 			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
1175 		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
1176 		if (ret != BLKPREP_OK)
1177 			return ret;
1178 	}
1179 
1180 	/*
1181 	 * Filesystem requests must transfer data.
1182 	 */
1183 	BUG_ON(!req->nr_phys_segments);
1184 
1185 	cmd = scsi_get_cmd_from_req(sdev, req);
1186 	if (unlikely(!cmd))
1187 		return BLKPREP_DEFER;
1188 
1189 	memset(cmd->cmnd, 0, BLK_MAX_CDB);
1190 	return scsi_init_io(cmd, GFP_ATOMIC);
1191 }
1192 EXPORT_SYMBOL(scsi_setup_fs_cmnd);
1193 
1194 int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1195 {
1196 	int ret = BLKPREP_OK;
1197 
1198 	/*
1199 	 * If the device is not in running state we will reject some
1200 	 * or all commands.
1201 	 */
1202 	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1203 		switch (sdev->sdev_state) {
1204 		case SDEV_OFFLINE:
1205 			/*
1206 			 * If the device is offline we refuse to process any
1207 			 * commands.  The device must be brought online
1208 			 * before trying any recovery commands.
1209 			 */
1210 			sdev_printk(KERN_ERR, sdev,
1211 				    "rejecting I/O to offline device\n");
1212 			ret = BLKPREP_KILL;
1213 			break;
1214 		case SDEV_DEL:
1215 			/*
1216 			 * If the device is fully deleted, we refuse to
1217 			 * process any commands as well.
1218 			 */
1219 			sdev_printk(KERN_ERR, sdev,
1220 				    "rejecting I/O to dead device\n");
1221 			ret = BLKPREP_KILL;
1222 			break;
1223 		case SDEV_QUIESCE:
1224 		case SDEV_BLOCK:
1225 			/*
1226 			 * If the device is blocked we defer normal commands.
1227 			 */
1228 			if (!(req->cmd_flags & REQ_PREEMPT))
1229 				ret = BLKPREP_DEFER;
1230 			break;
1231 		default:
1232 			/*
1233 			 * For any other not fully online state we only allow
1234 			 * special commands.  In particular any user initiated
1235 			 * command is not allowed.
1236 			 */
1237 			if (!(req->cmd_flags & REQ_PREEMPT))
1238 				ret = BLKPREP_KILL;
1239 			break;
1240 		}
1241 	}
1242 	return ret;
1243 }
1244 EXPORT_SYMBOL(scsi_prep_state_check);
1245 
1246 int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1247 {
1248 	struct scsi_device *sdev = q->queuedata;
1249 
1250 	switch (ret) {
1251 	case BLKPREP_KILL:
1252 		req->errors = DID_NO_CONNECT << 16;
1253 		/* release the command and kill it */
1254 		if (req->special) {
1255 			struct scsi_cmnd *cmd = req->special;
1256 			scsi_release_buffers(cmd);
1257 			scsi_put_command(cmd);
1258 			req->special = NULL;
1259 		}
1260 		break;
1261 	case BLKPREP_DEFER:
1262 		/*
1263 		 * If we defer, elv_next_request() returns NULL, but the
1264 		 * queue must be restarted, so we plug here if no completing
1265 		 * command will restart it for us automatically.
1266 		 */
1267 		if (sdev->device_busy == 0)
1268 			blk_plug_device(q);
1269 		break;
1270 	default:
1271 		req->cmd_flags |= REQ_DONTPREP;
1272 	}
1273 
1274 	return ret;
1275 }
1276 EXPORT_SYMBOL(scsi_prep_return);
1277 
1278 int scsi_prep_fn(struct request_queue *q, struct request *req)
1279 {
1280 	struct scsi_device *sdev = q->queuedata;
1281 	int ret = BLKPREP_KILL;
1282 
1283 	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
1284 		ret = scsi_setup_blk_pc_cmnd(sdev, req);
1285 	return scsi_prep_return(q, req, ret);
1286 }
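
/*
 * Illustrative sketch (not part of this file) of how an upper-level
 * driver's prep_fn, registered with blk_queue_prep_rq(), typically
 * layers on the helpers above in the style of sd/sr: set up the
 * command, fill in the CDB, then let scsi_prep_return() handle the
 * BLKPREP_* outcome.  "my_prep_fn" and the elided CDB details are
 * hypothetical.
 *
 *	static int my_prep_fn(struct request_queue *q, struct request *rq)
 *	{
 *		struct scsi_device *sdev = q->queuedata;
 *		int ret = BLKPREP_KILL;
 *
 *		if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
 *			ret = scsi_setup_blk_pc_cmnd(sdev, rq);
 *		else if (rq->cmd_type == REQ_TYPE_FS)
 *			ret = scsi_setup_fs_cmnd(sdev, rq);
 *		...	on BLKPREP_OK, build the CDB in cmd->cmnd[]
 *		return scsi_prep_return(q, rq, ret);
 *	}
 */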
1287 
1288 /*
1289  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1290  * return 0.
1291  *
1292  * Called with the queue_lock held.
1293  */
1294 static inline int scsi_dev_queue_ready(struct request_queue *q,
1295 				  struct scsi_device *sdev)
1296 {
1297 	if (sdev->device_busy >= sdev->queue_depth)
1298 		return 0;
1299 	if (sdev->device_busy == 0 && sdev->device_blocked) {
1300 		/*
1301 		 * unblock after device_blocked iterates to zero
1302 		 */
1303 		if (--sdev->device_blocked == 0) {
1304 			SCSI_LOG_MLQUEUE(3,
1305 				   sdev_printk(KERN_INFO, sdev,
1306 				   "unblocking device at zero depth\n"));
1307 		} else {
1308 			blk_plug_device(q);
1309 			return 0;
1310 		}
1311 	}
1312 	if (sdev->device_blocked)
1313 		return 0;
1314 
1315 	return 1;
1316 }
1317 
1318 /*
1319  * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1320  * return 0. We must end up running the queue again whenever 0 is
1321  * returned, else IO can hang.
1322  *
1323  * Called with host_lock held.
1324  */
1325 static inline int scsi_host_queue_ready(struct request_queue *q,
1326 				   struct Scsi_Host *shost,
1327 				   struct scsi_device *sdev)
1328 {
1329 	if (scsi_host_in_recovery(shost))
1330 		return 0;
1331 	if (shost->host_busy == 0 && shost->host_blocked) {
1332 		/*
1333 		 * unblock after host_blocked iterates to zero
1334 		 */
1335 		if (--shost->host_blocked == 0) {
1336 			SCSI_LOG_MLQUEUE(3,
1337 				printk("scsi%d unblocking host at zero depth\n",
1338 					shost->host_no));
1339 		} else {
1340 			return 0;
1341 		}
1342 	}
1343 	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
1344 	    shost->host_blocked || shost->host_self_blocked) {
1345 		if (list_empty(&sdev->starved_entry))
1346 			list_add_tail(&sdev->starved_entry, &shost->starved_list);
1347 		return 0;
1348 	}
1349 
1350 	/* We're OK to process the command, so we can't be starved */
1351 	if (!list_empty(&sdev->starved_entry))
1352 		list_del_init(&sdev->starved_entry);
1353 
1354 	return 1;
1355 }
1356 
1357 /*
1358  * Kill a request for a dead device
1359  */
1360 static void scsi_kill_request(struct request *req, struct request_queue *q)
1361 {
1362 	struct scsi_cmnd *cmd = req->special;
1363 	struct scsi_device *sdev = cmd->device;
1364 	struct Scsi_Host *shost = sdev->host;
1365 
1366 	blkdev_dequeue_request(req);
1367 
1368 	if (unlikely(cmd == NULL)) {
1369 		printk(KERN_CRIT "impossible request in %s.\n",
1370 				 __FUNCTION__);
1371 		BUG();
1372 	}
1373 
1374 	scsi_init_cmd_errh(cmd);
1375 	cmd->result = DID_NO_CONNECT << 16;
1376 	atomic_inc(&cmd->device->iorequest_cnt);
1377 
1378 	/*
1379 	 * The SCSI completion path will call scsi_device_unbusy() and drop
1380 	 * the busy counts, so bump them here first.  To bump the counters,
1381 	 * we need to dance with the locks as the normal issue path does.
1382 	 */
1383 	sdev->device_busy++;
1384 	spin_unlock(sdev->request_queue->queue_lock);
1385 	spin_lock(shost->host_lock);
1386 	shost->host_busy++;
1387 	spin_unlock(shost->host_lock);
1388 	spin_lock(sdev->request_queue->queue_lock);
1389 
1390 	__scsi_done(cmd);
1391 }
1392 
1393 static void scsi_softirq_done(struct request *rq)
1394 {
1395 	struct scsi_cmnd *cmd = rq->completion_data;
1396 	unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;
1397 	int disposition;
1398 
1399 	INIT_LIST_HEAD(&cmd->eh_entry);
1400 
1401 	disposition = scsi_decide_disposition(cmd);
1402 	if (disposition != SUCCESS &&
1403 	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1404 		sdev_printk(KERN_ERR, cmd->device,
1405 			    "timing out command, waited %lus\n",
1406 			    wait_for/HZ);
1407 		disposition = SUCCESS;
1408 	}
1409 
1410 	scsi_log_completion(cmd, disposition);
1411 
1412 	switch (disposition) {
1413 		case SUCCESS:
1414 			scsi_finish_command(cmd);
1415 			break;
1416 		case NEEDS_RETRY:
1417 			scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1418 			break;
1419 		case ADD_TO_MLQUEUE:
1420 			scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1421 			break;
1422 		default:
1423 			if (!scsi_eh_scmd_add(cmd, 0))
1424 				scsi_finish_command(cmd);
1425 	}
1426 }
1427 
1428 /*
1429  * Function:    scsi_request_fn()
1430  *
1431  * Purpose:     Main strategy routine for SCSI.
1432  *
1433  * Arguments:   q       - Pointer to actual queue.
1434  *
1435  * Returns:     Nothing
1436  *
1437  * Lock status: IO request lock assumed to be held when called.
1438  */
1439 static void scsi_request_fn(struct request_queue *q)
1440 {
1441 	struct scsi_device *sdev = q->queuedata;
1442 	struct Scsi_Host *shost;
1443 	struct scsi_cmnd *cmd;
1444 	struct request *req;
1445 
1446 	if (!sdev) {
1447 		printk("scsi: killing requests for dead queue\n");
1448 		while ((req = elv_next_request(q)) != NULL)
1449 			scsi_kill_request(req, q);
1450 		return;
1451 	}
1452 
1453 	if(!get_device(&sdev->sdev_gendev))
1454 		/* We must be tearing the block queue down already */
1455 		return;
1456 
1457 	/*
1458 	 * To start with, we keep looping until the queue is empty, or until
1459 	 * the host is no longer able to accept any more requests.
1460 	 */
1461 	shost = sdev->host;
1462 	while (!blk_queue_plugged(q)) {
1463 		int rtn;
1464 		/*
1465 		 * get next queueable request.  We do this early to make sure
1466 		 * that the request is fully prepared even if we cannot
1467 		 * accept it.
1468 		 */
1469 		req = elv_next_request(q);
1470 		if (!req || !scsi_dev_queue_ready(q, sdev))
1471 			break;
1472 
1473 		if (unlikely(!scsi_device_online(sdev))) {
1474 			sdev_printk(KERN_ERR, sdev,
1475 				    "rejecting I/O to offline device\n");
1476 			scsi_kill_request(req, q);
1477 			continue;
1478 		}
1479 
1480 
1481 		/*
1482 		 * Remove the request from the request list.
1483 		 */
1484 		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1485 			blkdev_dequeue_request(req);
1486 		sdev->device_busy++;
1487 
1488 		spin_unlock(q->queue_lock);
1489 		cmd = req->special;
1490 		if (unlikely(cmd == NULL)) {
1491 			printk(KERN_CRIT "impossible request in %s.\n"
1492 					 "please mail a stack trace to "
1493 					 "linux-scsi@vger.kernel.org\n",
1494 					 __FUNCTION__);
1495 			blk_dump_rq_flags(req, "foo");
1496 			BUG();
1497 		}
1498 		spin_lock(shost->host_lock);
1499 
1500 		if (!scsi_host_queue_ready(q, shost, sdev))
1501 			goto not_ready;
1502 		if (scsi_target(sdev)->single_lun) {
1503 			if (scsi_target(sdev)->starget_sdev_user &&
1504 			    scsi_target(sdev)->starget_sdev_user != sdev)
1505 				goto not_ready;
1506 			scsi_target(sdev)->starget_sdev_user = sdev;
1507 		}
1508 		shost->host_busy++;
1509 
1510 		/*
1511 		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1512 		 *		take the lock again.
1513 		 */
1514 		spin_unlock_irq(shost->host_lock);
1515 
1516 		/*
1517 		 * Finally, initialize any error handling parameters, and set up
1518 		 * the timers for timeouts.
1519 		 */
1520 		scsi_init_cmd_errh(cmd);
1521 
1522 		/*
1523 		 * Dispatch the command to the low-level driver.
1524 		 */
1525 		rtn = scsi_dispatch_cmd(cmd);
1526 		spin_lock_irq(q->queue_lock);
1527 		if(rtn) {
1528 			/* we're refusing the command; because of
1529 			 * the way locks get dropped, we need to
1530 			 * check here if plugging is required */
1531 			if(sdev->device_busy == 0)
1532 				blk_plug_device(q);
1533 
1534 			break;
1535 		}
1536 	}
1537 
1538 	goto out;
1539 
1540  not_ready:
1541 	spin_unlock_irq(shost->host_lock);
1542 
1543 	/*
1544 	 * lock q, handle tag, requeue req, and decrement device_busy. We
1545 	 * must return with queue_lock held.
1546 	 *
1547 	 * Decrementing device_busy without checking it is OK, as all such
1548 	 * cases (host limits or settings) should run the queue at some
1549 	 * later time.
1550 	 */
1551 	spin_lock_irq(q->queue_lock);
1552 	blk_requeue_request(q, req);
1553 	sdev->device_busy--;
1554 	if(sdev->device_busy == 0)
1555 		blk_plug_device(q);
1556  out:
1557 	/* must be careful here...if we trigger the ->remove() function
1558 	 * we cannot be holding the q lock */
1559 	spin_unlock_irq(q->queue_lock);
1560 	put_device(&sdev->sdev_gendev);
1561 	spin_lock_irq(q->queue_lock);
1562 }
1563 
1564 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1565 {
1566 	struct device *host_dev;
1567 	u64 bounce_limit = 0xffffffff;
1568 
1569 	if (shost->unchecked_isa_dma)
1570 		return BLK_BOUNCE_ISA;
1571 	/*
1572 	 * Platforms with virtual-DMA translation
1573 	 * hardware have no practical limit.
1574 	 */
1575 	if (!PCI_DMA_BUS_IS_PHYS)
1576 		return BLK_BOUNCE_ANY;
1577 
1578 	host_dev = scsi_get_device(shost);
1579 	if (host_dev && host_dev->dma_mask)
1580 		bounce_limit = *host_dev->dma_mask;
1581 
1582 	return bounce_limit;
1583 }
1584 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
1585 
1586 struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1587 					 request_fn_proc *request_fn)
1588 {
1589 	struct request_queue *q;
1590 	struct device *dev = shost->shost_gendev.parent;
1591 
1592 	q = blk_init_queue(request_fn, NULL);
1593 	if (!q)
1594 		return NULL;
1595 
1596 	/*
1597 	 * this limit is imposed by hardware restrictions
1598 	 */
1599 	blk_queue_max_hw_segments(q, shost->sg_tablesize);
1600 	blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
1601 
1602 	blk_queue_max_sectors(q, shost->max_sectors);
1603 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1604 	blk_queue_segment_boundary(q, shost->dma_boundary);
1605 	dma_set_seg_boundary(dev, shost->dma_boundary);
1606 
1607 	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
1608 
1609 	/* New queue, no concurrency on queue_flags */
1610 	if (!shost->use_clustering)
1611 		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
1612 
1613 	/*
1614 	 * set a reasonable default alignment on word boundaries: the
1615 	 * host and device may alter it using
1616 	 * blk_queue_update_dma_alignment() later.
1617 	 */
1618 	blk_queue_dma_alignment(q, 0x03);
1619 
1620 	return q;
1621 }
1622 EXPORT_SYMBOL(__scsi_alloc_queue);
1623 
1624 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1625 {
1626 	struct request_queue *q;
1627 
1628 	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
1629 	if (!q)
1630 		return NULL;
1631 
1632 	blk_queue_prep_rq(q, scsi_prep_fn);
1633 	blk_queue_softirq_done(q, scsi_softirq_done);
1634 	return q;
1635 }
1636 
1637 void scsi_free_queue(struct request_queue *q)
1638 {
1639 	blk_cleanup_queue(q);
1640 }
1641 
1642 /*
1643  * Function:    scsi_block_requests()
1644  *
1645  * Purpose:     Utility function used by low-level drivers to prevent further
1646  *		commands from being queued to the device.
1647  *
1648  * Arguments:   shost       - Host in question
1649  *
1650  * Returns:     Nothing
1651  *
1652  * Lock status: No locks are assumed held.
1653  *
1654  * Notes:       There is no timer nor any other means by which the requests
1655  *		get unblocked other than the low-level driver calling
1656  *		scsi_unblock_requests().
1657  */
1658 void scsi_block_requests(struct Scsi_Host *shost)
1659 {
1660 	shost->host_self_blocked = 1;
1661 }
1662 EXPORT_SYMBOL(scsi_block_requests);
1663 
1664 /*
1665  * Function:    scsi_unblock_requests()
1666  *
1667  * Purpose:     Utility function used by low-level drivers to allow further
1668  *		commands to be queued to the device.
1669  *
1670  * Arguments:   shost       - Host in question
1671  *
1672  * Returns:     Nothing
1673  *
1674  * Lock status: No locks are assumed held.
1675  *
1676  * Notes:       There is no timer nor any other means by which the requests
1677  *		get unblocked other than the low-level driver calling
1678  *		scsi_unblock_requests().
1679  *
1680  *		This is done as an API function so that changes to the
1681  *		internals of the scsi mid-layer won't require wholesale
1682  *		changes to drivers that use this feature.
1683  */
1684 void scsi_unblock_requests(struct Scsi_Host *shost)
1685 {
1686 	shost->host_self_blocked = 0;
1687 	scsi_run_host_queues(shost);
1688 }
1689 EXPORT_SYMBOL(scsi_unblock_requests);
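
/*
 * Typical pairing (illustrative sketch): a low-level driver brackets an
 * operation that must not see new commands, such as a controller reset,
 * with these two calls.  "my_reset_hardware" and "hba" are hypothetical
 * driver-side names.
 *
 *	scsi_block_requests(shost);
 *	my_reset_hardware(hba);
 *	scsi_unblock_requests(shost);
 */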
1690 
1691 int __init scsi_init_queue(void)
1692 {
1693 	int i;
1694 
1695 	scsi_io_context_cache = kmem_cache_create("scsi_io_context",
1696 					sizeof(struct scsi_io_context),
1697 					0, 0, NULL);
1698 	if (!scsi_io_context_cache) {
1699 		printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
1700 		return -ENOMEM;
1701 	}
1702 
1703 	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
1704 					   sizeof(struct scsi_data_buffer),
1705 					   0, 0, NULL);
1706 	if (!scsi_sdb_cache) {
1707 		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
1708 		goto cleanup_io_context;
1709 	}
1710 
1711 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1712 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1713 		int size = sgp->size * sizeof(struct scatterlist);
1714 
1715 		sgp->slab = kmem_cache_create(sgp->name, size, 0,
1716 				SLAB_HWCACHE_ALIGN, NULL);
1717 		if (!sgp->slab) {
1718 			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1719 					sgp->name);
1720 			goto cleanup_sdb;
1721 		}
1722 
1723 		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
1724 						     sgp->slab);
1725 		if (!sgp->pool) {
1726 			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1727 					sgp->name);
1728 			goto cleanup_sdb;
1729 		}
1730 	}
1731 
1732 	return 0;
1733 
1734 cleanup_sdb:
1735 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1736 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1737 		if (sgp->pool)
1738 			mempool_destroy(sgp->pool);
1739 		if (sgp->slab)
1740 			kmem_cache_destroy(sgp->slab);
1741 	}
1742 	kmem_cache_destroy(scsi_sdb_cache);
1743 cleanup_io_context:
1744 	kmem_cache_destroy(scsi_io_context_cache);
1745 
1746 	return -ENOMEM;
1747 }
1748 
1749 void scsi_exit_queue(void)
1750 {
1751 	int i;
1752 
1753 	kmem_cache_destroy(scsi_io_context_cache);
1754 	kmem_cache_destroy(scsi_sdb_cache);
1755 
1756 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1757 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1758 		mempool_destroy(sgp->pool);
1759 		kmem_cache_destroy(sgp->slab);
1760 	}
1761 }
1762 
1763 /**
1764  *	scsi_mode_select - issue a mode select
1765  *	@sdev:	SCSI device to be queried
1766  *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
1767  *	@sp:	Save page bit (0 == don't save, 1 == save)
1768  *	@modepage: mode page being requested
1769  *	@buffer: request buffer (may not be smaller than eight bytes)
1770  *	@len:	length of request buffer.
1771  *	@timeout: command timeout
1772  *	@retries: number of retries before failing
1773  *	@data: returns a structure abstracting the mode header data
1774  *	@sshdr: place to put sense data (or NULL if no sense to be collected).
1775  *		must be SCSI_SENSE_BUFFERSIZE big.
1776  *
1777  *	Returns zero if successful; negative error number or scsi
1778  *	status on error
1779  *
1780  */
1781 int
1782 scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
1783 		 unsigned char *buffer, int len, int timeout, int retries,
1784 		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1785 {
1786 	unsigned char cmd[10];
1787 	unsigned char *real_buffer;
1788 	int ret;
1789 
1790 	memset(cmd, 0, sizeof(cmd));
1791 	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
1792 
1793 	if (sdev->use_10_for_ms) {
1794 		if (len > 65535)
1795 			return -EINVAL;
1796 		real_buffer = kmalloc(8 + len, GFP_KERNEL);
1797 		if (!real_buffer)
1798 			return -ENOMEM;
1799 		memcpy(real_buffer + 8, buffer, len);
1800 		len += 8;
1801 		real_buffer[0] = 0;
1802 		real_buffer[1] = 0;
1803 		real_buffer[2] = data->medium_type;
1804 		real_buffer[3] = data->device_specific;
1805 		real_buffer[4] = data->longlba ? 0x01 : 0;
1806 		real_buffer[5] = 0;
1807 		real_buffer[6] = data->block_descriptor_length >> 8;
1808 		real_buffer[7] = data->block_descriptor_length;
1809 
1810 		cmd[0] = MODE_SELECT_10;
1811 		cmd[7] = len >> 8;
1812 		cmd[8] = len;
1813 	} else {
1814 		if (len > 255 || data->block_descriptor_length > 255 ||
1815 		    data->longlba)
1816 			return -EINVAL;
1817 
1818 		real_buffer = kmalloc(4 + len, GFP_KERNEL);
1819 		if (!real_buffer)
1820 			return -ENOMEM;
1821 		memcpy(real_buffer + 4, buffer, len);
1822 		len += 4;
1823 		real_buffer[0] = 0;
1824 		real_buffer[1] = data->medium_type;
1825 		real_buffer[2] = data->device_specific;
1826 		real_buffer[3] = data->block_descriptor_length;
1827 
1828 
1829 		cmd[0] = MODE_SELECT;
1830 		cmd[4] = len;
1831 	}
1832 
1833 	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
1834 			       sshdr, timeout, retries);
1835 	kfree(real_buffer);
1836 	return ret;
1837 }
1838 EXPORT_SYMBOL_GPL(scsi_mode_select);
1839 
1840 /**
1841  *	scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
1842  *	@sdev:	SCSI device to be queried
1843  *	@dbd:	set if mode sense will allow block descriptors to be returned
1844  *	@modepage: mode page being requested
1845  *	@buffer: request buffer (may not be smaller than eight bytes)
1846  *	@len:	length of request buffer.
1847  *	@timeout: command timeout
1848  *	@retries: number of retries before failing
1849  *	@data: returns a structure abstracting the mode header data
1850  *	@sshdr: place to put sense data (or NULL if no sense to be collected).
1851  *		must be SCSI_SENSE_BUFFERSIZE big.
1852  *
1853  *	Returns zero if successful, or a non-zero result code from
1854  *	scsi_execute_req() on failure.  The header length (4 or 8, for a
1855  *	six- or ten-byte command) is returned in @data->header_length.
1856  */
1857 int
1858 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1859 		  unsigned char *buffer, int len, int timeout, int retries,
1860 		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1861 {
1862 	unsigned char cmd[12];
1863 	int use_10_for_ms;
1864 	int header_length;
1865 	int result;
1866 	struct scsi_sense_hdr my_sshdr;
1867 
1868 	memset(data, 0, sizeof(*data));
1869 	memset(&cmd[0], 0, 12);
1870 	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
1871 	cmd[2] = modepage;
1872 
1873 	/* caller might not be interested in sense, but we need it */
1874 	if (!sshdr)
1875 		sshdr = &my_sshdr;
1876 
1877  retry:
1878 	use_10_for_ms = sdev->use_10_for_ms;
1879 
1880 	if (use_10_for_ms) {
1881 		if (len < 8)
1882 			len = 8;
1883 
1884 		cmd[0] = MODE_SENSE_10;
1885 		cmd[8] = len;
1886 		header_length = 8;
1887 	} else {
1888 		if (len < 4)
1889 			len = 4;
1890 
1891 		cmd[0] = MODE_SENSE;
1892 		cmd[4] = len;
1893 		header_length = 4;
1894 	}
1895 
1896 	memset(buffer, 0, len);
1897 
1898 	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1899 				  sshdr, timeout, retries);
1900 
1901 	/* This code looks awful: what it's doing is making sure an
1902 	 * ILLEGAL REQUEST sense return identifies the actual command
1903 	 * byte as the problem.  MODE_SENSE commands can return
1904 	 * ILLEGAL REQUEST if the mode page isn't supported */
1905 
1906 	if (use_10_for_ms && !scsi_status_is_good(result) &&
1907 	    (driver_byte(result) & DRIVER_SENSE)) {
1908 		if (scsi_sense_valid(sshdr)) {
1909 			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1910 			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1911 				/*
1912 				 * Invalid command operation code
1913 				 */
1914 				sdev->use_10_for_ms = 0;
1915 				goto retry;
1916 			}
1917 		}
1918 	}
1919 
1920 	if (scsi_status_is_good(result)) {
1921 		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
1922 			     (modepage == 6 || modepage == 8))) {
1923 			/* Initio breakage? */
1924 			header_length = 0;
1925 			data->length = 13;
1926 			data->medium_type = 0;
1927 			data->device_specific = 0;
1928 			data->longlba = 0;
1929 			data->block_descriptor_length = 0;
1930 		} else if (use_10_for_ms) {
1931 			data->length = buffer[0]*256 + buffer[1] + 2;
1932 			data->medium_type = buffer[2];
1933 			data->device_specific = buffer[3];
1934 			data->longlba = buffer[4] & 0x01;
1935 			data->block_descriptor_length = buffer[6]*256
1936 				+ buffer[7];
1937 		} else {
1938 			data->length = buffer[0] + 1;
1939 			data->medium_type = buffer[1];
1940 			data->device_specific = buffer[2];
1941 			data->block_descriptor_length = buffer[3];
1942 		}
1943 		data->header_length = header_length;
1944 	}
1945 
1946 	return result;
1947 }
1948 EXPORT_SYMBOL(scsi_mode_sense);
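
/*
 * Editor's sketch (not from the original source): one plausible caller of
 * scsi_mode_sense()/scsi_mode_select() is a read-modify-write of the
 * caching mode page (0x08) to toggle the write-cache-enable bit.  The
 * helper name, the 30*HZ timeout, the retry count and the assumption that
 * WCE is bit 2 of byte 2 of that page are illustrative only; savable-page
 * (PS/SP) handling is deliberately omitted.
 */
#if 0	/* illustrative sketch, kept out of the build */
static int example_set_wce(struct scsi_device *sdev, int enable)
{
	unsigned char buffer[64];
	unsigned char *page;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int len, res;

	/* dbd=0x08 sets the DBD bit: ask the device to omit block descriptors */
	res = scsi_mode_sense(sdev, 0x08, 0x08, buffer, sizeof(buffer),
			      30 * HZ, 3, &data, &sshdr);
	if (!scsi_status_is_good(res))
		return -EIO;
	if (data.length > sizeof(buffer))
		return -EIO;	/* mode data larger than our scratch buffer */

	/* the mode page follows the header and any block descriptors */
	page = buffer + data.header_length + data.block_descriptor_length;
	len = data.length - data.header_length - data.block_descriptor_length;
	if (len < 3)
		return -EIO;

	if (enable)
		page[2] |= 0x04;	/* WCE */
	else
		page[2] &= ~0x04;
	page[0] &= ~0x80;		/* clear PS before MODE SELECT */
	data.device_specific = 0;	/* not meaningful for MODE SELECT */

	res = scsi_mode_select(sdev, 1, 0, 0x08, page, len,
			       30 * HZ, 3, &data, &sshdr);
	return scsi_status_is_good(res) ? 0 : -EIO;
}
#endif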
1949 
1950 /**
1951  *	scsi_test_unit_ready - test if unit is ready
1952  *	@sdev:	scsi device to change the state of.
1953  *	@timeout: command timeout
1954  *	@retries: number of retries before failing
1955  *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
1956  *		returning sense. Make sure that this is cleared before passing
1957  *		in.
1958  *
1959  *	Returns zero if successful, or an error if the TUR failed.  For
1960  *	removable media, a sense key of NOT_READY or UNIT_ATTENTION is
1961  *	translated to success, with the ->changed flag updated.
1962  **/
1963 int
1964 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
1965 		     struct scsi_sense_hdr *sshdr_external)
1966 {
1967 	char cmd[] = {
1968 		TEST_UNIT_READY, 0, 0, 0, 0, 0,
1969 	};
1970 	struct scsi_sense_hdr *sshdr;
1971 	int result;
1972 
1973 	if (!sshdr_external)
1974 		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
1975 	else
1976 		sshdr = sshdr_external;
1977 
1978 	/* try to eat the UNIT_ATTENTION if there are enough retries */
1979 	do {
1980 		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
1981 					  timeout, retries);
1982 	} while ((driver_byte(result) & DRIVER_SENSE) &&
1983 		 sshdr && sshdr->sense_key == UNIT_ATTENTION &&
1984 		 --retries);
1985 
1986 	if (!sshdr)
1987 		/* could not allocate sense buffer, so can't process it */
1988 		return result;
1989 
1990 	if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
1991 
1992 		if ((scsi_sense_valid(sshdr)) &&
1993 		    ((sshdr->sense_key == UNIT_ATTENTION) ||
1994 		     (sshdr->sense_key == NOT_READY))) {
1995 			sdev->changed = 1;
1996 			result = 0;
1997 		}
1998 	}
1999 	if (!sshdr_external)
2000 		kfree(sshdr);
2001 	return result;
2002 }
2003 EXPORT_SYMBOL(scsi_test_unit_ready);
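
/*
 * Editor's sketch (not from the original source): a simple media-presence
 * poll built on scsi_test_unit_ready().  For removable media the helper
 * already maps NOT_READY/UNIT_ATTENTION to success and sets sdev->changed,
 * so the caller only inspects the return value and that flag.  The helper
 * name and the 30*HZ timeout are invented.
 */
#if 0	/* illustrative sketch, kept out of the build */
static int example_media_present(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr = { };	/* must be cleared, see above */
	int res;

	res = scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr);
	if (res)
		return 0;	/* unit not ready (or command failed) */
	if (sdev->removable && sdev->changed)
		sdev_printk(KERN_INFO, sdev, "media change detected\n");
	return 1;
}
#endif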
2004 
2005 /**
2006  *	scsi_device_set_state - Take the given device through the device state model.
2007  *	@sdev:	scsi device to change the state of.
2008  *	@state:	state to change to.
2009  *
2010  *	Returns zero if successful, or an error if the requested
2011  *	transition is illegal.
2012  */
2013 int
2014 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2015 {
2016 	enum scsi_device_state oldstate = sdev->sdev_state;
2017 
2018 	if (state == oldstate)
2019 		return 0;
2020 
2021 	switch (state) {
2022 	case SDEV_CREATED:
2023 		/* There are no legal states that come back to
2024 		 * created.  This is the manually initialised start
2025 		 * state */
2026 		goto illegal;
2027 
2028 	case SDEV_RUNNING:
2029 		switch (oldstate) {
2030 		case SDEV_CREATED:
2031 		case SDEV_OFFLINE:
2032 		case SDEV_QUIESCE:
2033 		case SDEV_BLOCK:
2034 			break;
2035 		default:
2036 			goto illegal;
2037 		}
2038 		break;
2039 
2040 	case SDEV_QUIESCE:
2041 		switch (oldstate) {
2042 		case SDEV_RUNNING:
2043 		case SDEV_OFFLINE:
2044 			break;
2045 		default:
2046 			goto illegal;
2047 		}
2048 		break;
2049 
2050 	case SDEV_OFFLINE:
2051 		switch (oldstate) {
2052 		case SDEV_CREATED:
2053 		case SDEV_RUNNING:
2054 		case SDEV_QUIESCE:
2055 		case SDEV_BLOCK:
2056 			break;
2057 		default:
2058 			goto illegal;
2059 		}
2060 		break;
2061 
2062 	case SDEV_BLOCK:
2063 		switch (oldstate) {
2064 		case SDEV_CREATED:
2065 		case SDEV_RUNNING:
2066 			break;
2067 		default:
2068 			goto illegal;
2069 		}
2070 		break;
2071 
2072 	case SDEV_CANCEL:
2073 		switch (oldstate) {
2074 		case SDEV_CREATED:
2075 		case SDEV_RUNNING:
2076 		case SDEV_QUIESCE:
2077 		case SDEV_OFFLINE:
2078 		case SDEV_BLOCK:
2079 			break;
2080 		default:
2081 			goto illegal;
2082 		}
2083 		break;
2084 
2085 	case SDEV_DEL:
2086 		switch (oldstate) {
2087 		case SDEV_CREATED:
2088 		case SDEV_RUNNING:
2089 		case SDEV_OFFLINE:
2090 		case SDEV_CANCEL:
2091 			break;
2092 		default:
2093 			goto illegal;
2094 		}
2095 		break;
2096 
2097 	}
2098 	sdev->sdev_state = state;
2099 	return 0;
2100 
2101  illegal:
2102 	SCSI_LOG_ERROR_RECOVERY(1,
2103 				sdev_printk(KERN_ERR, sdev,
2104 					    "Illegal state transition %s->%s\n",
2105 					    scsi_device_state_name(oldstate),
2106 					    scsi_device_state_name(state))
2107 				);
2108 	return -EINVAL;
2109 }
2110 EXPORT_SYMBOL(scsi_device_set_state);
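
/*
 * Editor's sketch (not from the original source): because the state machine
 * above rejects any transition it does not explicitly allow, callers can
 * simply attempt the change and handle -EINVAL.  The helper name is
 * invented for illustration.
 */
#if 0	/* illustrative sketch, kept out of the build */
static void example_offline_device(struct scsi_device *sdev)
{
	/* legal from CREATED, RUNNING, QUIESCE or BLOCK; -EINVAL otherwise */
	if (scsi_device_set_state(sdev, SDEV_OFFLINE))
		sdev_printk(KERN_WARNING, sdev,
			    "refusing illegal transition to OFFLINE\n");
}
#endif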
2111 
2112 /**
2113  * 	scsi_evt_emit - emit a single SCSI device uevent
2114  *	@sdev: associated SCSI device
2115  *	@evt: event to emit
2116  *
2117  *	Send a single uevent (scsi_event) to the associated scsi_device.
2118  */
2119 static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2120 {
2121 	int idx = 0;
2122 	char *envp[3];
2123 
2124 	switch (evt->evt_type) {
2125 	case SDEV_EVT_MEDIA_CHANGE:
2126 		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2127 		break;
2128 
2129 	default:
2130 		/* do nothing */
2131 		break;
2132 	}
2133 
2134 	envp[idx++] = NULL;
2135 
2136 	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2137 }
2138 
2139 /**
2140  * 	scsi_evt_thread - send a uevent for each scsi event
2141  *	@work: work struct for scsi_device
2142  *
2143  *	Dispatch queued events to their associated scsi_device kobjects
2144  *	as uevents.
2145  */
2146 void scsi_evt_thread(struct work_struct *work)
2147 {
2148 	struct scsi_device *sdev;
2149 	LIST_HEAD(event_list);
2150 
2151 	sdev = container_of(work, struct scsi_device, event_work);
2152 
2153 	while (1) {
2154 		struct scsi_event *evt;
2155 		struct list_head *this, *tmp;
2156 		unsigned long flags;
2157 
2158 		spin_lock_irqsave(&sdev->list_lock, flags);
2159 		list_splice_init(&sdev->event_list, &event_list);
2160 		spin_unlock_irqrestore(&sdev->list_lock, flags);
2161 
2162 		if (list_empty(&event_list))
2163 			break;
2164 
2165 		list_for_each_safe(this, tmp, &event_list) {
2166 			evt = list_entry(this, struct scsi_event, node);
2167 			list_del(&evt->node);
2168 			scsi_evt_emit(sdev, evt);
2169 			kfree(evt);
2170 		}
2171 	}
2172 }
2173 
2174 /**
2175  * 	sdev_evt_send - send asserted event to uevent thread
2176  *	@sdev: scsi_device event occurred on
2177  *	@evt: event to send
2178  *
2179  *	Assert scsi device event asynchronously.
2180  */
2181 void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2182 {
2183 	unsigned long flags;
2184 
2185 #if 0
2186 	/* FIXME: currently this check eliminates all media change events
2187 	 * for polled devices.  Need to update to discriminate between AN
2188 	 * and polled events */
2189 	if (!test_bit(evt->evt_type, sdev->supported_events)) {
2190 		kfree(evt);
2191 		return;
2192 	}
2193 #endif
2194 
2195 	spin_lock_irqsave(&sdev->list_lock, flags);
2196 	list_add_tail(&evt->node, &sdev->event_list);
2197 	schedule_work(&sdev->event_work);
2198 	spin_unlock_irqrestore(&sdev->list_lock, flags);
2199 }
2200 EXPORT_SYMBOL_GPL(sdev_evt_send);
2201 
2202 /**
2203  * 	sdev_evt_alloc - allocate a new scsi event
2204  *	@evt_type: type of event to allocate
2205  *	@gfpflags: GFP flags for allocation
2206  *
2207  *	Allocates and returns a new scsi_event.
2208  */
2209 struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2210 				  gfp_t gfpflags)
2211 {
2212 	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2213 	if (!evt)
2214 		return NULL;
2215 
2216 	evt->evt_type = evt_type;
2217 	INIT_LIST_HEAD(&evt->node);
2218 
2219 	/* evt_type-specific initialization, if any */
2220 	switch (evt_type) {
2221 	case SDEV_EVT_MEDIA_CHANGE:
2222 	default:
2223 		/* do nothing */
2224 		break;
2225 	}
2226 
2227 	return evt;
2228 }
2229 EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2230 
2231 /**
2232  * 	sdev_evt_send_simple - send asserted event to uevent thread
2233  *	@sdev: scsi_device event occurred on
2234  *	@evt_type: type of event to send
2235  *	@gfpflags: GFP flags for allocation
2236  *
2237  *	Assert scsi device event asynchronously, given an event type.
2238  */
2239 void sdev_evt_send_simple(struct scsi_device *sdev,
2240 			  enum scsi_device_event evt_type, gfp_t gfpflags)
2241 {
2242 	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2243 	if (!evt) {
2244 		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2245 			    evt_type);
2246 		return;
2247 	}
2248 
2249 	sdev_evt_send(sdev, evt);
2250 }
2251 EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
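
/*
 * Editor's sketch (not from the original source): typical use of the event
 * API from a driver's completion path is a one-liner; the resulting
 * SDEV_MEDIA_CHANGE=1 uevent is emitted by scsi_evt_thread()/scsi_evt_emit()
 * above.  GFP_ATOMIC is used because completion context may not sleep; the
 * helper name is invented.
 */
#if 0	/* illustrative sketch, kept out of the build */
static void example_report_media_change(struct scsi_device *sdev)
{
	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
}
#endif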
2252 
2253 /**
2254  *	scsi_device_quiesce - Block user issued commands.
2255  *	@sdev:	scsi device to quiesce.
2256  *
2257  *	This works by trying to transition to the SDEV_QUIESCE state
2258  *	(which must be a legal transition).  When the device is in this
2259  *	state, only special requests will be accepted, all others will
2260  *	be deferred.  Since special requests may also be requeued requests,
2261  *	a successful return doesn't guarantee the device will be
2262  *	totally quiescent.
2263  *
2264  *	Must be called with user context, may sleep.
2265  *
2266  *	Returns zero if successful, or an error if not.
2267  */
2268 int
2269 scsi_device_quiesce(struct scsi_device *sdev)
2270 {
2271 	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2272 	if (err)
2273 		return err;
2274 
2275 	scsi_run_queue(sdev->request_queue);
2276 	while (sdev->device_busy) {
2277 		msleep_interruptible(200);
2278 		scsi_run_queue(sdev->request_queue);
2279 	}
2280 	return 0;
2281 }
2282 EXPORT_SYMBOL(scsi_device_quiesce);
2283 
2284 /**
2285  *	scsi_device_resume - Restart user issued commands to a quiesced device.
2286  *	@sdev:	scsi device to resume.
2287  *
2288  *	Moves the device from quiesced back to running and restarts the
2289  *	queues.
2290  *
2291  *	Must be called with user context, may sleep.
2292  */
2293 void
2294 scsi_device_resume(struct scsi_device *sdev)
2295 {
2296 	if (scsi_device_set_state(sdev, SDEV_RUNNING))
2297 		return;
2298 	scsi_run_queue(sdev->request_queue);
2299 }
2300 EXPORT_SYMBOL(scsi_device_resume);
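
/*
 * Editor's sketch (not from the original source): quiesce/resume are meant
 * to bracket work that must not compete with normal user I/O on the device.
 * scsi_device_quiesce() may sleep, so this pattern is only valid in process
 * context.  The helper and its do_maintenance callback are hypothetical.
 */
#if 0	/* illustrative sketch, kept out of the build */
static int example_with_device_quiesced(struct scsi_device *sdev,
					int (*do_maintenance)(struct scsi_device *))
{
	int err = scsi_device_quiesce(sdev);

	if (err)
		return err;
	err = do_maintenance(sdev);
	scsi_device_resume(sdev);
	return err;
}
#endif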
2301 
2302 static void
2303 device_quiesce_fn(struct scsi_device *sdev, void *data)
2304 {
2305 	scsi_device_quiesce(sdev);
2306 }
2307 
2308 void
2309 scsi_target_quiesce(struct scsi_target *starget)
2310 {
2311 	starget_for_each_device(starget, NULL, device_quiesce_fn);
2312 }
2313 EXPORT_SYMBOL(scsi_target_quiesce);
2314 
2315 static void
2316 device_resume_fn(struct scsi_device *sdev, void *data)
2317 {
2318 	scsi_device_resume(sdev);
2319 }
2320 
2321 void
2322 scsi_target_resume(struct scsi_target *starget)
2323 {
2324 	starget_for_each_device(starget, NULL, device_resume_fn);
2325 }
2326 EXPORT_SYMBOL(scsi_target_resume);
2327 
2328 /**
2329  * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
2330  * @sdev:	device to block
2331  *
2332  * Block request made by SCSI LLDs to temporarily stop all
2333  * scsi commands on the specified device.  Called from interrupt
2334  * or normal process context.
2335  *
2336  * Returns zero if successful, or an error if not.
2337  *
2338  * Notes:
2339  *	This routine transitions the device to the SDEV_BLOCK state
2340  *	(which must be a legal transition).  When the device is in this
2341  *	state, all commands are deferred until the scsi lld reenables
2342  *	the device with scsi_internal_device_unblock or device_block_tmo fires.
2343  *	This routine assumes the host_lock is held on entry.
2344  */
2345 int
2346 scsi_internal_device_block(struct scsi_device *sdev)
2347 {
2348 	struct request_queue *q = sdev->request_queue;
2349 	unsigned long flags;
2350 	int err = 0;
2351 
2352 	err = scsi_device_set_state(sdev, SDEV_BLOCK);
2353 	if (err)
2354 		return err;
2355 
2356 	/*
2357 	 * The device has transitioned to SDEV_BLOCK.  Stop the
2358 	 * block layer from calling the midlayer with this device's
2359 	 * request queue.
2360 	 */
2361 	spin_lock_irqsave(q->queue_lock, flags);
2362 	blk_stop_queue(q);
2363 	spin_unlock_irqrestore(q->queue_lock, flags);
2364 
2365 	return 0;
2366 }
2367 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2368 
2369 /**
2370  * scsi_internal_device_unblock - resume a device after a block request
2371  * @sdev:	device to resume
2372  *
2373  * Called by SCSI LLDs or the midlayer to restart the device queue
2374  * for the previously suspended scsi device.  Called from interrupt or
2375  * normal process context.
2376  *
2377  * Returns zero if successful, or an error if not.
2378  *
2379  * Notes:
2380  *	This routine transitions the device to the SDEV_RUNNING state
2381  *	(which must be a legal transition) allowing the midlayer to
2382  *	goose the queue for this device.  This routine assumes the
2383  *	host_lock is held upon entry.
2384  */
2385 int
2386 scsi_internal_device_unblock(struct scsi_device *sdev)
2387 {
2388 	struct request_queue *q = sdev->request_queue;
2389 	int err;
2390 	unsigned long flags;
2391 
2392 	/*
2393 	 * Try to transition the scsi device to SDEV_RUNNING
2394 	 * and goose the device queue if successful.
2395 	 */
2396 	err = scsi_device_set_state(sdev, SDEV_RUNNING);
2397 	if (err)
2398 		return err;
2399 
2400 	spin_lock_irqsave(q->queue_lock, flags);
2401 	blk_start_queue(q);
2402 	spin_unlock_irqrestore(q->queue_lock, flags);
2403 
2404 	return 0;
2405 }
2406 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
2407 
2408 static void
2409 device_block(struct scsi_device *sdev, void *data)
2410 {
2411 	scsi_internal_device_block(sdev);
2412 }
2413 
2414 static int
2415 target_block(struct device *dev, void *data)
2416 {
2417 	if (scsi_is_target_device(dev))
2418 		starget_for_each_device(to_scsi_target(dev), NULL,
2419 					device_block);
2420 	return 0;
2421 }
2422 
2423 void
2424 scsi_target_block(struct device *dev)
2425 {
2426 	if (scsi_is_target_device(dev))
2427 		starget_for_each_device(to_scsi_target(dev), NULL,
2428 					device_block);
2429 	else
2430 		device_for_each_child(dev, NULL, target_block);
2431 }
2432 EXPORT_SYMBOL_GPL(scsi_target_block);
2433 
2434 static void
2435 device_unblock(struct scsi_device *sdev, void *data)
2436 {
2437 	scsi_internal_device_unblock(sdev);
2438 }
2439 
2440 static int
2441 target_unblock(struct device *dev, void *data)
2442 {
2443 	if (scsi_is_target_device(dev))
2444 		starget_for_each_device(to_scsi_target(dev), NULL,
2445 					device_unblock);
2446 	return 0;
2447 }
2448 
2449 void
2450 scsi_target_unblock(struct device *dev)
2451 {
2452 	if (scsi_is_target_device(dev))
2453 		starget_for_each_device(to_scsi_target(dev), NULL,
2454 					device_unblock);
2455 	else
2456 		device_for_each_child(dev, NULL, target_unblock);
2457 }
2458 EXPORT_SYMBOL_GPL(scsi_target_unblock);
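
/*
 * Editor's sketch (not from the original source): transport classes use the
 * target-level wrappers to park and later release every device on a target
 * across a temporary loss of connectivity, roughly as below.  The function
 * name and the transport-specific wait are invented.
 */
#if 0	/* illustrative sketch, kept out of the build */
static void example_handle_link_bounce(struct scsi_target *starget)
{
	/* defer all commands to every sdev hanging off this target */
	scsi_target_block(&starget->dev);

	/* ... wait for the transport to re-establish the connection ... */

	/* let deferred and new commands flow again */
	scsi_target_unblock(&starget->dev);
}
#endif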
2459 
2460 /**
2461  * scsi_kmap_atomic_sg - find and atomically map an sg-element
2462  * @sgl:	scatter-gather list
2463  * @sg_count:	number of segments in sg
2464  * @offset:	offset in bytes into sg, on return offset into the mapped area
2465  * @len:	bytes to map, on return number of bytes mapped
2466  *
2467  * Returns the virtual address of the start of the mapped page.
2468  */
2469 void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2470 			  size_t *offset, size_t *len)
2471 {
2472 	int i;
2473 	size_t sg_len = 0, len_complete = 0;
2474 	struct scatterlist *sg;
2475 	struct page *page;
2476 
2477 	WARN_ON(!irqs_disabled());
2478 
2479 	for_each_sg(sgl, sg, sg_count, i) {
2480 		len_complete = sg_len; /* Complete sg-entries */
2481 		sg_len += sg->length;
2482 		if (sg_len > *offset)
2483 			break;
2484 	}
2485 
2486 	if (unlikely(i == sg_count)) {
2487 		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2488 			"elements %d\n",
2489 		       __FUNCTION__, sg_len, *offset, sg_count);
2490 		WARN_ON(1);
2491 		return NULL;
2492 	}
2493 
2494 	/* Offset starting from the beginning of first page in this sg-entry */
2495 	*offset = *offset - len_complete + sg->offset;
2496 
2497 	/* Assumption: contiguous pages can be accessed as "page + i" */
2498 	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2499 	*offset &= ~PAGE_MASK;
2500 
2501 	/* Bytes in this sg-entry from *offset to the end of the page */
2502 	sg_len = PAGE_SIZE - *offset;
2503 	if (*len > sg_len)
2504 		*len = sg_len;
2505 
2506 	return kmap_atomic(page, KM_BIO_SRC_IRQ);
2507 }
2508 EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2509 
2510 /**
2511  * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
2512  * @virt:	virtual address to be unmapped
2513  */
2514 void scsi_kunmap_atomic_sg(void *virt)
2515 {
2516 	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
2517 }
2518 EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
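
/*
 * Editor's sketch (not from the original source): the kmap/kunmap pair above
 * is meant for short atomic peeks into a command's data, for example from an
 * LLD's interrupt handler (interrupts must be disabled, as the WARN_ON in
 * scsi_kmap_atomic_sg() checks).  *offset and *len are updated to describe
 * what was actually mapped.  The helper name is invented.
 */
#if 0	/* illustrative sketch, kept out of the build */
static void example_peek_first_bytes(struct scsi_cmnd *cmd)
{
	size_t offset = 0, len = 4;
	unsigned char *p;

	p = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
				&offset, &len);
	if (!p)
		return;
	/* data lives at p + offset; len tells how much of it is mapped */
	print_hex_dump_bytes("scsi data: ", DUMP_PREFIX_OFFSET,
			     p + offset, len);
	scsi_kunmap_atomic_sg(p);
}
#endif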
2519