1 /* 2 * scsi_lib.c Copyright (C) 1999 Eric Youngdale 3 * 4 * SCSI queueing library. 5 * Initial versions: Eric Youngdale (eric@andante.org). 6 * Based upon conversations with large numbers 7 * of people at Linux Expo. 8 */ 9 10 #include <linux/bio.h> 11 #include <linux/bitops.h> 12 #include <linux/blkdev.h> 13 #include <linux/completion.h> 14 #include <linux/kernel.h> 15 #include <linux/export.h> 16 #include <linux/mempool.h> 17 #include <linux/slab.h> 18 #include <linux/init.h> 19 #include <linux/pci.h> 20 #include <linux/delay.h> 21 #include <linux/hardirq.h> 22 #include <linux/scatterlist.h> 23 24 #include <scsi/scsi.h> 25 #include <scsi/scsi_cmnd.h> 26 #include <scsi/scsi_dbg.h> 27 #include <scsi/scsi_device.h> 28 #include <scsi/scsi_driver.h> 29 #include <scsi/scsi_eh.h> 30 #include <scsi/scsi_host.h> 31 32 #include "scsi_priv.h" 33 #include "scsi_logging.h" 34 35 36 #define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools) 37 #define SG_MEMPOOL_SIZE 2 38 39 struct scsi_host_sg_pool { 40 size_t size; 41 char *name; 42 struct kmem_cache *slab; 43 mempool_t *pool; 44 }; 45 46 #define SP(x) { x, "sgpool-" __stringify(x) } 47 #if (SCSI_MAX_SG_SEGMENTS < 32) 48 #error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater) 49 #endif 50 static struct scsi_host_sg_pool scsi_sg_pools[] = { 51 SP(8), 52 SP(16), 53 #if (SCSI_MAX_SG_SEGMENTS > 32) 54 SP(32), 55 #if (SCSI_MAX_SG_SEGMENTS > 64) 56 SP(64), 57 #if (SCSI_MAX_SG_SEGMENTS > 128) 58 SP(128), 59 #if (SCSI_MAX_SG_SEGMENTS > 256) 60 #error SCSI_MAX_SG_SEGMENTS is too large (256 MAX) 61 #endif 62 #endif 63 #endif 64 #endif 65 SP(SCSI_MAX_SG_SEGMENTS) 66 }; 67 #undef SP 68 69 struct kmem_cache *scsi_sdb_cache; 70 71 /* 72 * When to reinvoke queueing after a resource shortage. It's 3 msecs to 73 * not change behaviour from the previous unplug mechanism, experimentation 74 * may prove this needs changing. 75 */ 76 #define SCSI_QUEUE_DELAY 3 77 78 /* 79 * Function: scsi_unprep_request() 80 * 81 * Purpose: Remove all preparation done for a request, including its 82 * associated scsi_cmnd, so that it can be requeued. 83 * 84 * Arguments: req - request to unprepare 85 * 86 * Lock status: Assumed that no locks are held upon entry. 87 * 88 * Returns: Nothing. 89 */ 90 static void scsi_unprep_request(struct request *req) 91 { 92 struct scsi_cmnd *cmd = req->special; 93 94 blk_unprep_request(req); 95 req->special = NULL; 96 97 scsi_put_command(cmd); 98 } 99 100 /** 101 * __scsi_queue_insert - private queue insertion 102 * @cmd: The SCSI command being requeued 103 * @reason: The reason for the requeue 104 * @unbusy: Whether the queue should be unbusied 105 * 106 * This is a private queue insertion. The public interface 107 * scsi_queue_insert() always assumes the queue should be unbusied 108 * because it's always called before the completion. This function is 109 * for a requeue after completion, which should only occur in this 110 * file. 111 */ 112 static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) 113 { 114 struct Scsi_Host *host = cmd->device->host; 115 struct scsi_device *device = cmd->device; 116 struct scsi_target *starget = scsi_target(device); 117 struct request_queue *q = device->request_queue; 118 unsigned long flags; 119 120 SCSI_LOG_MLQUEUE(1, 121 printk("Inserting command %p into mlqueue\n", cmd)); 122 123 /* 124 * Set the appropriate busy bit for the device/host. 125 * 126 * If the host/device isn't busy, assume that something actually 127 * completed, and that we should be able to queue a command now. 
128 * 129 * Note that the prior mid-layer assumption that any host could 130 * always queue at least one command is now broken. The mid-layer 131 * will implement a user specifiable stall (see 132 * scsi_host.max_host_blocked and scsi_device.max_device_blocked) 133 * if a command is requeued with no other commands outstanding 134 * either for the device or for the host. 135 */ 136 switch (reason) { 137 case SCSI_MLQUEUE_HOST_BUSY: 138 host->host_blocked = host->max_host_blocked; 139 break; 140 case SCSI_MLQUEUE_DEVICE_BUSY: 141 case SCSI_MLQUEUE_EH_RETRY: 142 device->device_blocked = device->max_device_blocked; 143 break; 144 case SCSI_MLQUEUE_TARGET_BUSY: 145 starget->target_blocked = starget->max_target_blocked; 146 break; 147 } 148 149 /* 150 * Decrement the counters, since these commands are no longer 151 * active on the host/device. 152 */ 153 if (unbusy) 154 scsi_device_unbusy(device); 155 156 /* 157 * Requeue this command. It will go before all other commands 158 * that are already in the queue. Schedule requeue work under 159 * lock such that the kblockd_schedule_work() call happens 160 * before blk_cleanup_queue() finishes. 161 */ 162 spin_lock_irqsave(q->queue_lock, flags); 163 blk_requeue_request(q, cmd->request); 164 kblockd_schedule_work(q, &device->requeue_work); 165 spin_unlock_irqrestore(q->queue_lock, flags); 166 } 167 168 /* 169 * Function: scsi_queue_insert() 170 * 171 * Purpose: Insert a command in the midlevel queue. 172 * 173 * Arguments: cmd - command that we are adding to queue. 174 * reason - why we are inserting command to queue. 175 * 176 * Lock status: Assumed that lock is not held upon entry. 177 * 178 * Returns: Nothing. 179 * 180 * Notes: We do this for one of two cases. Either the host is busy 181 * and it cannot accept any more commands for the time being, 182 * or the device returned QUEUE_FULL and can accept no more 183 * commands. 184 * Notes: This could be called either from an interrupt context or a 185 * normal process context. 186 */ 187 void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) 188 { 189 __scsi_queue_insert(cmd, reason, 1); 190 } 191 /** 192 * scsi_execute - insert request and wait for the result 193 * @sdev: scsi device 194 * @cmd: scsi command 195 * @data_direction: data direction 196 * @buffer: data buffer 197 * @bufflen: len of buffer 198 * @sense: optional sense buffer 199 * @timeout: request timeout in seconds 200 * @retries: number of times to retry request 201 * @flags: or into request flags; 202 * @resid: optional residual length 203 * 204 * returns the req->errors value which is the scsi_cmnd result 205 * field. 
206 */ 207 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, 208 int data_direction, void *buffer, unsigned bufflen, 209 unsigned char *sense, int timeout, int retries, int flags, 210 int *resid) 211 { 212 struct request *req; 213 int write = (data_direction == DMA_TO_DEVICE); 214 int ret = DRIVER_ERROR << 24; 215 216 req = blk_get_request(sdev->request_queue, write, __GFP_WAIT); 217 if (!req) 218 return ret; 219 220 if (bufflen && blk_rq_map_kern(sdev->request_queue, req, 221 buffer, bufflen, __GFP_WAIT)) 222 goto out; 223 224 req->cmd_len = COMMAND_SIZE(cmd[0]); 225 memcpy(req->cmd, cmd, req->cmd_len); 226 req->sense = sense; 227 req->sense_len = 0; 228 req->retries = retries; 229 req->timeout = timeout; 230 req->cmd_type = REQ_TYPE_BLOCK_PC; 231 req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT; 232 233 /* 234 * head injection *required* here otherwise quiesce won't work 235 */ 236 blk_execute_rq(req->q, NULL, req, 1); 237 238 /* 239 * Some devices (USB mass-storage in particular) may transfer 240 * garbage data together with a residue indicating that the data 241 * is invalid. Prevent the garbage from being misinterpreted 242 * and prevent security leaks by zeroing out the excess data. 243 */ 244 if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen)) 245 memset(buffer + (bufflen - req->resid_len), 0, req->resid_len); 246 247 if (resid) 248 *resid = req->resid_len; 249 ret = req->errors; 250 out: 251 blk_put_request(req); 252 253 return ret; 254 } 255 EXPORT_SYMBOL(scsi_execute); 256 257 int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd, 258 int data_direction, void *buffer, unsigned bufflen, 259 struct scsi_sense_hdr *sshdr, int timeout, int retries, 260 int *resid, int flags) 261 { 262 char *sense = NULL; 263 int result; 264 265 if (sshdr) { 266 sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO); 267 if (!sense) 268 return DRIVER_ERROR << 24; 269 } 270 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, 271 sense, timeout, retries, flags, resid); 272 if (sshdr) 273 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); 274 275 kfree(sense); 276 return result; 277 } 278 EXPORT_SYMBOL(scsi_execute_req_flags); 279 280 /* 281 * Function: scsi_init_cmd_errh() 282 * 283 * Purpose: Initialize cmd fields related to error handling. 284 * 285 * Arguments: cmd - command that is ready to be queued. 286 * 287 * Notes: This function has the job of initializing a number of 288 * fields related to error handling. Typically this will 289 * be called once for each command, as required. 290 */ 291 static void scsi_init_cmd_errh(struct scsi_cmnd *cmd) 292 { 293 cmd->serial_number = 0; 294 scsi_set_resid(cmd, 0); 295 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 296 if (cmd->cmd_len == 0) 297 cmd->cmd_len = scsi_command_size(cmd->cmnd); 298 } 299 300 void scsi_device_unbusy(struct scsi_device *sdev) 301 { 302 struct Scsi_Host *shost = sdev->host; 303 struct scsi_target *starget = scsi_target(sdev); 304 unsigned long flags; 305 306 spin_lock_irqsave(shost->host_lock, flags); 307 shost->host_busy--; 308 starget->target_busy--; 309 if (unlikely(scsi_host_in_recovery(shost) && 310 (shost->host_failed || shost->host_eh_scheduled))) 311 scsi_eh_wakeup(shost); 312 spin_unlock(shost->host_lock); 313 spin_lock(sdev->request_queue->queue_lock); 314 sdev->device_busy--; 315 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); 316 } 317 318 /* 319 * Called for single_lun devices on IO completion. 
Clear starget_sdev_user, 320 * and call blk_run_queue for all the scsi_devices on the target - 321 * including current_sdev first. 322 * 323 * Called with *no* scsi locks held. 324 */ 325 static void scsi_single_lun_run(struct scsi_device *current_sdev) 326 { 327 struct Scsi_Host *shost = current_sdev->host; 328 struct scsi_device *sdev, *tmp; 329 struct scsi_target *starget = scsi_target(current_sdev); 330 unsigned long flags; 331 332 spin_lock_irqsave(shost->host_lock, flags); 333 starget->starget_sdev_user = NULL; 334 spin_unlock_irqrestore(shost->host_lock, flags); 335 336 /* 337 * Call blk_run_queue for all LUNs on the target, starting with 338 * current_sdev. We race with others (to set starget_sdev_user), 339 * but in most cases, we will be first. Ideally, each LU on the 340 * target would get some limited time or requests on the target. 341 */ 342 blk_run_queue(current_sdev->request_queue); 343 344 spin_lock_irqsave(shost->host_lock, flags); 345 if (starget->starget_sdev_user) 346 goto out; 347 list_for_each_entry_safe(sdev, tmp, &starget->devices, 348 same_target_siblings) { 349 if (sdev == current_sdev) 350 continue; 351 if (scsi_device_get(sdev)) 352 continue; 353 354 spin_unlock_irqrestore(shost->host_lock, flags); 355 blk_run_queue(sdev->request_queue); 356 spin_lock_irqsave(shost->host_lock, flags); 357 358 scsi_device_put(sdev); 359 } 360 out: 361 spin_unlock_irqrestore(shost->host_lock, flags); 362 } 363 364 static inline int scsi_device_is_busy(struct scsi_device *sdev) 365 { 366 if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked) 367 return 1; 368 369 return 0; 370 } 371 372 static inline int scsi_target_is_busy(struct scsi_target *starget) 373 { 374 return ((starget->can_queue > 0 && 375 starget->target_busy >= starget->can_queue) || 376 starget->target_blocked); 377 } 378 379 static inline int scsi_host_is_busy(struct Scsi_Host *shost) 380 { 381 if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) || 382 shost->host_blocked || shost->host_self_blocked) 383 return 1; 384 385 return 0; 386 } 387 388 /* 389 * Function: scsi_run_queue() 390 * 391 * Purpose: Select a proper request queue to serve next 392 * 393 * Arguments: q - last request's queue 394 * 395 * Returns: Nothing 396 * 397 * Notes: The previous command was completely finished, start 398 * a new one if possible. 399 */ 400 static void scsi_run_queue(struct request_queue *q) 401 { 402 struct scsi_device *sdev = q->queuedata; 403 struct Scsi_Host *shost; 404 LIST_HEAD(starved_list); 405 unsigned long flags; 406 407 shost = sdev->host; 408 if (scsi_target(sdev)->single_lun) 409 scsi_single_lun_run(sdev); 410 411 spin_lock_irqsave(shost->host_lock, flags); 412 list_splice_init(&shost->starved_list, &starved_list); 413 414 while (!list_empty(&starved_list)) { 415 struct request_queue *slq; 416 417 /* 418 * As long as shost is accepting commands and we have 419 * starved queues, call blk_run_queue. scsi_request_fn 420 * drops the queue_lock and can add us back to the 421 * starved_list. 422 * 423 * host_lock protects the starved_list and starved_entry. 424 * scsi_request_fn must get the host_lock before checking 425 * or modifying starved_list or starved_entry. 
426 */ 427 if (scsi_host_is_busy(shost)) 428 break; 429 430 sdev = list_entry(starved_list.next, 431 struct scsi_device, starved_entry); 432 list_del_init(&sdev->starved_entry); 433 if (scsi_target_is_busy(scsi_target(sdev))) { 434 list_move_tail(&sdev->starved_entry, 435 &shost->starved_list); 436 continue; 437 } 438 439 /* 440 * Once we drop the host lock, a racing scsi_remove_device() 441 * call may remove the sdev from the starved list and destroy 442 * it and the queue. Mitigate by taking a reference to the 443 * queue and never touching the sdev again after we drop the 444 * host lock. Note: if __scsi_remove_device() invokes 445 * blk_cleanup_queue() before the queue is run from this 446 * function then blk_run_queue() will return immediately since 447 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING. 448 */ 449 slq = sdev->request_queue; 450 if (!blk_get_queue(slq)) 451 continue; 452 spin_unlock_irqrestore(shost->host_lock, flags); 453 454 blk_run_queue(slq); 455 blk_put_queue(slq); 456 457 spin_lock_irqsave(shost->host_lock, flags); 458 } 459 /* put any unprocessed entries back */ 460 list_splice(&starved_list, &shost->starved_list); 461 spin_unlock_irqrestore(shost->host_lock, flags); 462 463 blk_run_queue(q); 464 } 465 466 void scsi_requeue_run_queue(struct work_struct *work) 467 { 468 struct scsi_device *sdev; 469 struct request_queue *q; 470 471 sdev = container_of(work, struct scsi_device, requeue_work); 472 q = sdev->request_queue; 473 scsi_run_queue(q); 474 } 475 476 /* 477 * Function: scsi_requeue_command() 478 * 479 * Purpose: Handle post-processing of completed commands. 480 * 481 * Arguments: q - queue to operate on 482 * cmd - command that may need to be requeued. 483 * 484 * Returns: Nothing 485 * 486 * Notes: After command completion, there may be blocks left 487 * over which weren't finished by the previous command 488 * this can be for a number of reasons - the main one is 489 * I/O errors in the middle of the request, in which case 490 * we need to request the blocks that come after the bad 491 * sector. 492 * Notes: Upon return, cmd is a stale pointer. 493 */ 494 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd) 495 { 496 struct scsi_device *sdev = cmd->device; 497 struct request *req = cmd->request; 498 unsigned long flags; 499 500 /* 501 * We need to hold a reference on the device to avoid the queue being 502 * killed after the unlock and before scsi_run_queue is invoked which 503 * may happen because scsi_unprep_request() puts the command which 504 * releases its reference on the device. 
505 */ 506 get_device(&sdev->sdev_gendev); 507 508 spin_lock_irqsave(q->queue_lock, flags); 509 scsi_unprep_request(req); 510 blk_requeue_request(q, req); 511 spin_unlock_irqrestore(q->queue_lock, flags); 512 513 scsi_run_queue(q); 514 515 put_device(&sdev->sdev_gendev); 516 } 517 518 void scsi_next_command(struct scsi_cmnd *cmd) 519 { 520 struct scsi_device *sdev = cmd->device; 521 struct request_queue *q = sdev->request_queue; 522 523 /* need to hold a reference on the device before we let go of the cmd */ 524 get_device(&sdev->sdev_gendev); 525 526 scsi_put_command(cmd); 527 scsi_run_queue(q); 528 529 /* ok to remove device now */ 530 put_device(&sdev->sdev_gendev); 531 } 532 533 void scsi_run_host_queues(struct Scsi_Host *shost) 534 { 535 struct scsi_device *sdev; 536 537 shost_for_each_device(sdev, shost) 538 scsi_run_queue(sdev->request_queue); 539 } 540 541 static void __scsi_release_buffers(struct scsi_cmnd *, int); 542 543 /* 544 * Function: scsi_end_request() 545 * 546 * Purpose: Post-processing of completed commands (usually invoked at end 547 * of upper level post-processing and scsi_io_completion). 548 * 549 * Arguments: cmd - command that is complete. 550 * error - 0 if I/O indicates success, < 0 for I/O error. 551 * bytes - number of bytes of completed I/O 552 * requeue - indicates whether we should requeue leftovers. 553 * 554 * Lock status: Assumed that lock is not held upon entry. 555 * 556 * Returns: cmd if requeue required, NULL otherwise. 557 * 558 * Notes: This is called for block device requests in order to 559 * mark some number of sectors as complete. 560 * 561 * We are guaranteeing that the request queue will be goosed 562 * at some point during this call. 563 * Notes: If cmd was requeued, upon return it will be a stale pointer. 564 */ 565 static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error, 566 int bytes, int requeue) 567 { 568 struct request_queue *q = cmd->device->request_queue; 569 struct request *req = cmd->request; 570 571 /* 572 * If there are blocks left over at the end, set up the command 573 * to queue the remainder of them. 574 */ 575 if (blk_end_request(req, error, bytes)) { 576 /* kill remainder if no retrys */ 577 if (error && scsi_noretry_cmd(cmd)) 578 blk_end_request_all(req, error); 579 else { 580 if (requeue) { 581 /* 582 * Bleah. Leftovers again. Stick the 583 * leftovers in the front of the 584 * queue, and goose the queue again. 585 */ 586 scsi_release_buffers(cmd); 587 scsi_requeue_command(q, cmd); 588 cmd = NULL; 589 } 590 return cmd; 591 } 592 } 593 594 /* 595 * This will goose the queue request function at the end, so we don't 596 * need to worry about launching another command. 
597 */ 598 __scsi_release_buffers(cmd, 0); 599 scsi_next_command(cmd); 600 return NULL; 601 } 602 603 static inline unsigned int scsi_sgtable_index(unsigned short nents) 604 { 605 unsigned int index; 606 607 BUG_ON(nents > SCSI_MAX_SG_SEGMENTS); 608 609 if (nents <= 8) 610 index = 0; 611 else 612 index = get_count_order(nents) - 3; 613 614 return index; 615 } 616 617 static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents) 618 { 619 struct scsi_host_sg_pool *sgp; 620 621 sgp = scsi_sg_pools + scsi_sgtable_index(nents); 622 mempool_free(sgl, sgp->pool); 623 } 624 625 static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask) 626 { 627 struct scsi_host_sg_pool *sgp; 628 629 sgp = scsi_sg_pools + scsi_sgtable_index(nents); 630 return mempool_alloc(sgp->pool, gfp_mask); 631 } 632 633 static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, 634 gfp_t gfp_mask) 635 { 636 int ret; 637 638 BUG_ON(!nents); 639 640 ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS, 641 gfp_mask, scsi_sg_alloc); 642 if (unlikely(ret)) 643 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, 644 scsi_sg_free); 645 646 return ret; 647 } 648 649 static void scsi_free_sgtable(struct scsi_data_buffer *sdb) 650 { 651 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free); 652 } 653 654 static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check) 655 { 656 657 if (cmd->sdb.table.nents) 658 scsi_free_sgtable(&cmd->sdb); 659 660 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 661 662 if (do_bidi_check && scsi_bidi_cmnd(cmd)) { 663 struct scsi_data_buffer *bidi_sdb = 664 cmd->request->next_rq->special; 665 scsi_free_sgtable(bidi_sdb); 666 kmem_cache_free(scsi_sdb_cache, bidi_sdb); 667 cmd->request->next_rq->special = NULL; 668 } 669 670 if (scsi_prot_sg_count(cmd)) 671 scsi_free_sgtable(cmd->prot_sdb); 672 } 673 674 /* 675 * Function: scsi_release_buffers() 676 * 677 * Purpose: Completion processing for block device I/O requests. 678 * 679 * Arguments: cmd - command that we are bailing. 680 * 681 * Lock status: Assumed that no lock is held upon entry. 682 * 683 * Returns: Nothing 684 * 685 * Notes: In the event that an upper level driver rejects a 686 * command, we must release resources allocated during 687 * the __init_io() function. Primarily this would involve 688 * the scatter-gather table, and potentially any bounce 689 * buffers. 690 */ 691 void scsi_release_buffers(struct scsi_cmnd *cmd) 692 { 693 __scsi_release_buffers(cmd, 1); 694 } 695 EXPORT_SYMBOL(scsi_release_buffers); 696 697 /** 698 * __scsi_error_from_host_byte - translate SCSI error code into errno 699 * @cmd: SCSI command (unused) 700 * @result: scsi error code 701 * 702 * Translate SCSI error code into standard UNIX errno. 
703 * Return values: 704 * -ENOLINK temporary transport failure 705 * -EREMOTEIO permanent target failure, do not retry 706 * -EBADE permanent nexus failure, retry on other path 707 * -ENOSPC No write space available 708 * -ENODATA Medium error 709 * -EIO unspecified I/O error 710 */ 711 static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) 712 { 713 int error = 0; 714 715 switch(host_byte(result)) { 716 case DID_TRANSPORT_FAILFAST: 717 error = -ENOLINK; 718 break; 719 case DID_TARGET_FAILURE: 720 set_host_byte(cmd, DID_OK); 721 error = -EREMOTEIO; 722 break; 723 case DID_NEXUS_FAILURE: 724 set_host_byte(cmd, DID_OK); 725 error = -EBADE; 726 break; 727 case DID_ALLOC_FAILURE: 728 set_host_byte(cmd, DID_OK); 729 error = -ENOSPC; 730 break; 731 case DID_MEDIUM_ERROR: 732 set_host_byte(cmd, DID_OK); 733 error = -ENODATA; 734 break; 735 default: 736 error = -EIO; 737 break; 738 } 739 740 return error; 741 } 742 743 /* 744 * Function: scsi_io_completion() 745 * 746 * Purpose: Completion processing for block device I/O requests. 747 * 748 * Arguments: cmd - command that is finished. 749 * 750 * Lock status: Assumed that no lock is held upon entry. 751 * 752 * Returns: Nothing 753 * 754 * Notes: This function is matched in terms of capabilities to 755 * the function that created the scatter-gather list. 756 * In other words, if there are no bounce buffers 757 * (the normal case for most drivers), we don't need 758 * the logic to deal with cleaning up afterwards. 759 * 760 * We must call scsi_end_request(). This will finish off 761 * the specified number of sectors. If we are done, the 762 * command block will be released and the queue function 763 * will be goosed. If we are not done then we have to 764 * figure out what to do next: 765 * 766 * a) We can call scsi_requeue_command(). The request 767 * will be unprepared and put back on the queue. Then 768 * a new command will be created for it. This should 769 * be used if we made forward progress, or if we want 770 * to switch from READ(10) to READ(6) for example. 771 * 772 * b) We can call scsi_queue_insert(). The request will 773 * be put back on the queue and retried using the same 774 * command as before, possibly after a delay. 775 * 776 * c) We can call blk_end_request() with -EIO to fail 777 * the remainder of the request. 
778 */ 779 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) 780 { 781 int result = cmd->result; 782 struct request_queue *q = cmd->device->request_queue; 783 struct request *req = cmd->request; 784 int error = 0; 785 struct scsi_sense_hdr sshdr; 786 int sense_valid = 0; 787 int sense_deferred = 0; 788 enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY, 789 ACTION_DELAYED_RETRY} action; 790 char *description = NULL; 791 792 if (result) { 793 sense_valid = scsi_command_normalize_sense(cmd, &sshdr); 794 if (sense_valid) 795 sense_deferred = scsi_sense_is_deferred(&sshdr); 796 } 797 798 if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */ 799 if (result) { 800 if (sense_valid && req->sense) { 801 /* 802 * SG_IO wants current and deferred errors 803 */ 804 int len = 8 + cmd->sense_buffer[7]; 805 806 if (len > SCSI_SENSE_BUFFERSIZE) 807 len = SCSI_SENSE_BUFFERSIZE; 808 memcpy(req->sense, cmd->sense_buffer, len); 809 req->sense_len = len; 810 } 811 if (!sense_deferred) 812 error = __scsi_error_from_host_byte(cmd, result); 813 } 814 /* 815 * __scsi_error_from_host_byte may have reset the host_byte 816 */ 817 req->errors = cmd->result; 818 819 req->resid_len = scsi_get_resid(cmd); 820 821 if (scsi_bidi_cmnd(cmd)) { 822 /* 823 * Bidi commands Must be complete as a whole, 824 * both sides at once. 825 */ 826 req->next_rq->resid_len = scsi_in(cmd)->resid; 827 828 scsi_release_buffers(cmd); 829 blk_end_request_all(req, 0); 830 831 scsi_next_command(cmd); 832 return; 833 } 834 } 835 836 /* no bidi support for !REQ_TYPE_BLOCK_PC yet */ 837 BUG_ON(blk_bidi_rq(req)); 838 839 /* 840 * Next deal with any sectors which we were able to correctly 841 * handle. 842 */ 843 SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, " 844 "%d bytes done.\n", 845 blk_rq_sectors(req), good_bytes)); 846 847 /* 848 * Recovered errors need reporting, but they're always treated 849 * as success, so fiddle the result code here. For BLOCK_PC 850 * we already took a copy of the original into rq->errors which 851 * is what gets returned to the user 852 */ 853 if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) { 854 /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip 855 * print since caller wants ATA registers. Only occurs on 856 * SCSI ATA PASS_THROUGH commands when CK_COND=1 857 */ 858 if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d)) 859 ; 860 else if (!(req->cmd_flags & REQ_QUIET)) 861 scsi_print_sense("", cmd); 862 result = 0; 863 /* BLOCK_PC may have set error */ 864 error = 0; 865 } 866 867 /* 868 * A number of bytes were successfully read. If there 869 * are leftovers and there is some kind of error 870 * (result != 0), retry the rest. 871 */ 872 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) 873 return; 874 875 error = __scsi_error_from_host_byte(cmd, result); 876 877 if (host_byte(result) == DID_RESET) { 878 /* Third party bus reset or reset for error recovery 879 * reasons. Just retry the command and see what 880 * happens. 881 */ 882 action = ACTION_RETRY; 883 } else if (sense_valid && !sense_deferred) { 884 switch (sshdr.sense_key) { 885 case UNIT_ATTENTION: 886 if (cmd->device->removable) { 887 /* Detected disc change. Set a bit 888 * and quietly refuse further access. 889 */ 890 cmd->device->changed = 1; 891 description = "Media Changed"; 892 action = ACTION_FAIL; 893 } else { 894 /* Must have been a power glitch, or a 895 * bus reset. Could not have been a 896 * media change, so we just retry the 897 * command and see what happens. 
898 */ 899 action = ACTION_RETRY; 900 } 901 break; 902 case ILLEGAL_REQUEST: 903 /* If we had an ILLEGAL REQUEST returned, then 904 * we may have performed an unsupported 905 * command. The only thing this should be 906 * would be a ten byte read where only a six 907 * byte read was supported. Also, on a system 908 * where READ CAPACITY failed, we may have 909 * read past the end of the disk. 910 */ 911 if ((cmd->device->use_10_for_rw && 912 sshdr.asc == 0x20 && sshdr.ascq == 0x00) && 913 (cmd->cmnd[0] == READ_10 || 914 cmd->cmnd[0] == WRITE_10)) { 915 /* This will issue a new 6-byte command. */ 916 cmd->device->use_10_for_rw = 0; 917 action = ACTION_REPREP; 918 } else if (sshdr.asc == 0x10) /* DIX */ { 919 description = "Host Data Integrity Failure"; 920 action = ACTION_FAIL; 921 error = -EILSEQ; 922 /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ 923 } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { 924 switch (cmd->cmnd[0]) { 925 case UNMAP: 926 description = "Discard failure"; 927 break; 928 case WRITE_SAME: 929 case WRITE_SAME_16: 930 if (cmd->cmnd[1] & 0x8) 931 description = "Discard failure"; 932 else 933 description = 934 "Write same failure"; 935 break; 936 default: 937 description = "Invalid command failure"; 938 break; 939 } 940 action = ACTION_FAIL; 941 error = -EREMOTEIO; 942 } else 943 action = ACTION_FAIL; 944 break; 945 case ABORTED_COMMAND: 946 action = ACTION_FAIL; 947 if (sshdr.asc == 0x10) { /* DIF */ 948 description = "Target Data Integrity Failure"; 949 error = -EILSEQ; 950 } 951 break; 952 case NOT_READY: 953 /* If the device is in the process of becoming 954 * ready, or has a temporary blockage, retry. 955 */ 956 if (sshdr.asc == 0x04) { 957 switch (sshdr.ascq) { 958 case 0x01: /* becoming ready */ 959 case 0x04: /* format in progress */ 960 case 0x05: /* rebuild in progress */ 961 case 0x06: /* recalculation in progress */ 962 case 0x07: /* operation in progress */ 963 case 0x08: /* Long write in progress */ 964 case 0x09: /* self test in progress */ 965 case 0x14: /* space allocation in progress */ 966 action = ACTION_DELAYED_RETRY; 967 break; 968 default: 969 description = "Device not ready"; 970 action = ACTION_FAIL; 971 break; 972 } 973 } else { 974 description = "Device not ready"; 975 action = ACTION_FAIL; 976 } 977 break; 978 case VOLUME_OVERFLOW: 979 /* See SSC3rXX or current. */ 980 action = ACTION_FAIL; 981 break; 982 default: 983 description = "Unhandled sense code"; 984 action = ACTION_FAIL; 985 break; 986 } 987 } else { 988 description = "Unhandled error code"; 989 action = ACTION_FAIL; 990 } 991 992 switch (action) { 993 case ACTION_FAIL: 994 /* Give up and fail the remainder of the request */ 995 scsi_release_buffers(cmd); 996 if (!(req->cmd_flags & REQ_QUIET)) { 997 if (description) 998 scmd_printk(KERN_INFO, cmd, "%s\n", 999 description); 1000 scsi_print_result(cmd); 1001 if (driver_byte(result) & DRIVER_SENSE) 1002 scsi_print_sense("", cmd); 1003 scsi_print_command(cmd); 1004 } 1005 if (blk_end_request_err(req, error)) 1006 scsi_requeue_command(q, cmd); 1007 else 1008 scsi_next_command(cmd); 1009 break; 1010 case ACTION_REPREP: 1011 /* Unprep the request and put it back at the head of the queue. 1012 * A new command will be prepared and issued. 
1013 */ 1014 scsi_release_buffers(cmd); 1015 scsi_requeue_command(q, cmd); 1016 break; 1017 case ACTION_RETRY: 1018 /* Retry the same command immediately */ 1019 __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0); 1020 break; 1021 case ACTION_DELAYED_RETRY: 1022 /* Retry the same command after a delay */ 1023 __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0); 1024 break; 1025 } 1026 } 1027 1028 static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, 1029 gfp_t gfp_mask) 1030 { 1031 int count; 1032 1033 /* 1034 * If sg table allocation fails, requeue request later. 1035 */ 1036 if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments, 1037 gfp_mask))) { 1038 return BLKPREP_DEFER; 1039 } 1040 1041 req->buffer = NULL; 1042 1043 /* 1044 * Next, walk the list, and fill in the addresses and sizes of 1045 * each segment. 1046 */ 1047 count = blk_rq_map_sg(req->q, req, sdb->table.sgl); 1048 BUG_ON(count > sdb->table.nents); 1049 sdb->table.nents = count; 1050 sdb->length = blk_rq_bytes(req); 1051 return BLKPREP_OK; 1052 } 1053 1054 /* 1055 * Function: scsi_init_io() 1056 * 1057 * Purpose: SCSI I/O initialize function. 1058 * 1059 * Arguments: cmd - Command descriptor we wish to initialize 1060 * 1061 * Returns: 0 on success 1062 * BLKPREP_DEFER if the failure is retryable 1063 * BLKPREP_KILL if the failure is fatal 1064 */ 1065 int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) 1066 { 1067 struct request *rq = cmd->request; 1068 1069 int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask); 1070 if (error) 1071 goto err_exit; 1072 1073 if (blk_bidi_rq(rq)) { 1074 struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc( 1075 scsi_sdb_cache, GFP_ATOMIC); 1076 if (!bidi_sdb) { 1077 error = BLKPREP_DEFER; 1078 goto err_exit; 1079 } 1080 1081 rq->next_rq->special = bidi_sdb; 1082 error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC); 1083 if (error) 1084 goto err_exit; 1085 } 1086 1087 if (blk_integrity_rq(rq)) { 1088 struct scsi_data_buffer *prot_sdb = cmd->prot_sdb; 1089 int ivecs, count; 1090 1091 BUG_ON(prot_sdb == NULL); 1092 ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); 1093 1094 if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) { 1095 error = BLKPREP_DEFER; 1096 goto err_exit; 1097 } 1098 1099 count = blk_rq_map_integrity_sg(rq->q, rq->bio, 1100 prot_sdb->table.sgl); 1101 BUG_ON(unlikely(count > ivecs)); 1102 BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q))); 1103 1104 cmd->prot_sdb = prot_sdb; 1105 cmd->prot_sdb->table.nents = count; 1106 } 1107 1108 return BLKPREP_OK ; 1109 1110 err_exit: 1111 scsi_release_buffers(cmd); 1112 cmd->request->special = NULL; 1113 scsi_put_command(cmd); 1114 return error; 1115 } 1116 EXPORT_SYMBOL(scsi_init_io); 1117 1118 static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev, 1119 struct request *req) 1120 { 1121 struct scsi_cmnd *cmd; 1122 1123 if (!req->special) { 1124 cmd = scsi_get_command(sdev, GFP_ATOMIC); 1125 if (unlikely(!cmd)) 1126 return NULL; 1127 req->special = cmd; 1128 } else { 1129 cmd = req->special; 1130 } 1131 1132 /* pull a tag out of the request if we have one */ 1133 cmd->tag = req->tag; 1134 cmd->request = req; 1135 1136 cmd->cmnd = req->cmd; 1137 cmd->prot_op = SCSI_PROT_NORMAL; 1138 1139 return cmd; 1140 } 1141 1142 int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) 1143 { 1144 struct scsi_cmnd *cmd; 1145 int ret = scsi_prep_state_check(sdev, req); 1146 1147 if (ret != BLKPREP_OK) 1148 return ret; 1149 1150 cmd = scsi_get_cmd_from_req(sdev, 
req); 1151 if (unlikely(!cmd)) 1152 return BLKPREP_DEFER; 1153 1154 /* 1155 * BLOCK_PC requests may transfer data, in which case they must 1156 * a bio attached to them. Or they might contain a SCSI command 1157 * that does not transfer data, in which case they may optionally 1158 * submit a request without an attached bio. 1159 */ 1160 if (req->bio) { 1161 int ret; 1162 1163 BUG_ON(!req->nr_phys_segments); 1164 1165 ret = scsi_init_io(cmd, GFP_ATOMIC); 1166 if (unlikely(ret)) 1167 return ret; 1168 } else { 1169 BUG_ON(blk_rq_bytes(req)); 1170 1171 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 1172 req->buffer = NULL; 1173 } 1174 1175 cmd->cmd_len = req->cmd_len; 1176 if (!blk_rq_bytes(req)) 1177 cmd->sc_data_direction = DMA_NONE; 1178 else if (rq_data_dir(req) == WRITE) 1179 cmd->sc_data_direction = DMA_TO_DEVICE; 1180 else 1181 cmd->sc_data_direction = DMA_FROM_DEVICE; 1182 1183 cmd->transfersize = blk_rq_bytes(req); 1184 cmd->allowed = req->retries; 1185 return BLKPREP_OK; 1186 } 1187 EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd); 1188 1189 /* 1190 * Setup a REQ_TYPE_FS command. These are simple read/write request 1191 * from filesystems that still need to be translated to SCSI CDBs from 1192 * the ULD. 1193 */ 1194 int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) 1195 { 1196 struct scsi_cmnd *cmd; 1197 int ret = scsi_prep_state_check(sdev, req); 1198 1199 if (ret != BLKPREP_OK) 1200 return ret; 1201 1202 if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh 1203 && sdev->scsi_dh_data->scsi_dh->prep_fn)) { 1204 ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req); 1205 if (ret != BLKPREP_OK) 1206 return ret; 1207 } 1208 1209 /* 1210 * Filesystem requests must transfer data. 1211 */ 1212 BUG_ON(!req->nr_phys_segments); 1213 1214 cmd = scsi_get_cmd_from_req(sdev, req); 1215 if (unlikely(!cmd)) 1216 return BLKPREP_DEFER; 1217 1218 memset(cmd->cmnd, 0, BLK_MAX_CDB); 1219 return scsi_init_io(cmd, GFP_ATOMIC); 1220 } 1221 EXPORT_SYMBOL(scsi_setup_fs_cmnd); 1222 1223 int scsi_prep_state_check(struct scsi_device *sdev, struct request *req) 1224 { 1225 int ret = BLKPREP_OK; 1226 1227 /* 1228 * If the device is not in running state we will reject some 1229 * or all commands. 1230 */ 1231 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { 1232 switch (sdev->sdev_state) { 1233 case SDEV_OFFLINE: 1234 case SDEV_TRANSPORT_OFFLINE: 1235 /* 1236 * If the device is offline we refuse to process any 1237 * commands. The device must be brought online 1238 * before trying any recovery commands. 1239 */ 1240 sdev_printk(KERN_ERR, sdev, 1241 "rejecting I/O to offline device\n"); 1242 ret = BLKPREP_KILL; 1243 break; 1244 case SDEV_DEL: 1245 /* 1246 * If the device is fully deleted, we refuse to 1247 * process any commands as well. 1248 */ 1249 sdev_printk(KERN_ERR, sdev, 1250 "rejecting I/O to dead device\n"); 1251 ret = BLKPREP_KILL; 1252 break; 1253 case SDEV_QUIESCE: 1254 case SDEV_BLOCK: 1255 case SDEV_CREATED_BLOCK: 1256 /* 1257 * If the devices is blocked we defer normal commands. 1258 */ 1259 if (!(req->cmd_flags & REQ_PREEMPT)) 1260 ret = BLKPREP_DEFER; 1261 break; 1262 default: 1263 /* 1264 * For any other not fully online state we only allow 1265 * special commands. In particular any user initiated 1266 * command is not allowed. 
1267 */ 1268 if (!(req->cmd_flags & REQ_PREEMPT)) 1269 ret = BLKPREP_KILL; 1270 break; 1271 } 1272 } 1273 return ret; 1274 } 1275 EXPORT_SYMBOL(scsi_prep_state_check); 1276 1277 int scsi_prep_return(struct request_queue *q, struct request *req, int ret) 1278 { 1279 struct scsi_device *sdev = q->queuedata; 1280 1281 switch (ret) { 1282 case BLKPREP_KILL: 1283 req->errors = DID_NO_CONNECT << 16; 1284 /* release the command and kill it */ 1285 if (req->special) { 1286 struct scsi_cmnd *cmd = req->special; 1287 scsi_release_buffers(cmd); 1288 scsi_put_command(cmd); 1289 req->special = NULL; 1290 } 1291 break; 1292 case BLKPREP_DEFER: 1293 /* 1294 * If we defer, the blk_peek_request() returns NULL, but the 1295 * queue must be restarted, so we schedule a callback to happen 1296 * shortly. 1297 */ 1298 if (sdev->device_busy == 0) 1299 blk_delay_queue(q, SCSI_QUEUE_DELAY); 1300 break; 1301 default: 1302 req->cmd_flags |= REQ_DONTPREP; 1303 } 1304 1305 return ret; 1306 } 1307 EXPORT_SYMBOL(scsi_prep_return); 1308 1309 int scsi_prep_fn(struct request_queue *q, struct request *req) 1310 { 1311 struct scsi_device *sdev = q->queuedata; 1312 int ret = BLKPREP_KILL; 1313 1314 if (req->cmd_type == REQ_TYPE_BLOCK_PC) 1315 ret = scsi_setup_blk_pc_cmnd(sdev, req); 1316 return scsi_prep_return(q, req, ret); 1317 } 1318 EXPORT_SYMBOL(scsi_prep_fn); 1319 1320 /* 1321 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else 1322 * return 0. 1323 * 1324 * Called with the queue_lock held. 1325 */ 1326 static inline int scsi_dev_queue_ready(struct request_queue *q, 1327 struct scsi_device *sdev) 1328 { 1329 if (sdev->device_busy == 0 && sdev->device_blocked) { 1330 /* 1331 * unblock after device_blocked iterates to zero 1332 */ 1333 if (--sdev->device_blocked == 0) { 1334 SCSI_LOG_MLQUEUE(3, 1335 sdev_printk(KERN_INFO, sdev, 1336 "unblocking device at zero depth\n")); 1337 } else { 1338 blk_delay_queue(q, SCSI_QUEUE_DELAY); 1339 return 0; 1340 } 1341 } 1342 if (scsi_device_is_busy(sdev)) 1343 return 0; 1344 1345 return 1; 1346 } 1347 1348 1349 /* 1350 * scsi_target_queue_ready: checks if there we can send commands to target 1351 * @sdev: scsi device on starget to check. 1352 * 1353 * Called with the host lock held. 1354 */ 1355 static inline int scsi_target_queue_ready(struct Scsi_Host *shost, 1356 struct scsi_device *sdev) 1357 { 1358 struct scsi_target *starget = scsi_target(sdev); 1359 1360 if (starget->single_lun) { 1361 if (starget->starget_sdev_user && 1362 starget->starget_sdev_user != sdev) 1363 return 0; 1364 starget->starget_sdev_user = sdev; 1365 } 1366 1367 if (starget->target_busy == 0 && starget->target_blocked) { 1368 /* 1369 * unblock after target_blocked iterates to zero 1370 */ 1371 if (--starget->target_blocked == 0) { 1372 SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget, 1373 "unblocking target at zero depth\n")); 1374 } else 1375 return 0; 1376 } 1377 1378 if (scsi_target_is_busy(starget)) { 1379 list_move_tail(&sdev->starved_entry, &shost->starved_list); 1380 return 0; 1381 } 1382 1383 return 1; 1384 } 1385 1386 /* 1387 * scsi_host_queue_ready: if we can send requests to shost, return 1 else 1388 * return 0. We must end up running the queue again whenever 0 is 1389 * returned, else IO can hang. 1390 * 1391 * Called with host_lock held. 
1392 */ 1393 static inline int scsi_host_queue_ready(struct request_queue *q, 1394 struct Scsi_Host *shost, 1395 struct scsi_device *sdev) 1396 { 1397 if (scsi_host_in_recovery(shost)) 1398 return 0; 1399 if (shost->host_busy == 0 && shost->host_blocked) { 1400 /* 1401 * unblock after host_blocked iterates to zero 1402 */ 1403 if (--shost->host_blocked == 0) { 1404 SCSI_LOG_MLQUEUE(3, 1405 printk("scsi%d unblocking host at zero depth\n", 1406 shost->host_no)); 1407 } else { 1408 return 0; 1409 } 1410 } 1411 if (scsi_host_is_busy(shost)) { 1412 if (list_empty(&sdev->starved_entry)) 1413 list_add_tail(&sdev->starved_entry, &shost->starved_list); 1414 return 0; 1415 } 1416 1417 /* We're OK to process the command, so we can't be starved */ 1418 if (!list_empty(&sdev->starved_entry)) 1419 list_del_init(&sdev->starved_entry); 1420 1421 return 1; 1422 } 1423 1424 /* 1425 * Busy state exporting function for request stacking drivers. 1426 * 1427 * For efficiency, no lock is taken to check the busy state of 1428 * shost/starget/sdev, since the returned value is not guaranteed and 1429 * may be changed after request stacking drivers call the function, 1430 * regardless of taking lock or not. 1431 * 1432 * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi 1433 * needs to return 'not busy'. Otherwise, request stacking drivers 1434 * may hold requests forever. 1435 */ 1436 static int scsi_lld_busy(struct request_queue *q) 1437 { 1438 struct scsi_device *sdev = q->queuedata; 1439 struct Scsi_Host *shost; 1440 1441 if (blk_queue_dying(q)) 1442 return 0; 1443 1444 shost = sdev->host; 1445 1446 /* 1447 * Ignore host/starget busy state. 1448 * Since block layer does not have a concept of fairness across 1449 * multiple queues, congestion of host/starget needs to be handled 1450 * in SCSI layer. 1451 */ 1452 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev)) 1453 return 1; 1454 1455 return 0; 1456 } 1457 1458 /* 1459 * Kill a request for a dead device 1460 */ 1461 static void scsi_kill_request(struct request *req, struct request_queue *q) 1462 { 1463 struct scsi_cmnd *cmd = req->special; 1464 struct scsi_device *sdev; 1465 struct scsi_target *starget; 1466 struct Scsi_Host *shost; 1467 1468 blk_start_request(req); 1469 1470 scmd_printk(KERN_INFO, cmd, "killing request\n"); 1471 1472 sdev = cmd->device; 1473 starget = scsi_target(sdev); 1474 shost = sdev->host; 1475 scsi_init_cmd_errh(cmd); 1476 cmd->result = DID_NO_CONNECT << 16; 1477 atomic_inc(&cmd->device->iorequest_cnt); 1478 1479 /* 1480 * SCSI request completion path will do scsi_device_unbusy(), 1481 * bump busy counts. To bump the counters, we need to dance 1482 * with the locks as normal issue path does. 
1483 */ 1484 sdev->device_busy++; 1485 spin_unlock(sdev->request_queue->queue_lock); 1486 spin_lock(shost->host_lock); 1487 shost->host_busy++; 1488 starget->target_busy++; 1489 spin_unlock(shost->host_lock); 1490 spin_lock(sdev->request_queue->queue_lock); 1491 1492 blk_complete_request(req); 1493 } 1494 1495 static void scsi_softirq_done(struct request *rq) 1496 { 1497 struct scsi_cmnd *cmd = rq->special; 1498 unsigned long wait_for = (cmd->allowed + 1) * rq->timeout; 1499 int disposition; 1500 1501 INIT_LIST_HEAD(&cmd->eh_entry); 1502 1503 atomic_inc(&cmd->device->iodone_cnt); 1504 if (cmd->result) 1505 atomic_inc(&cmd->device->ioerr_cnt); 1506 1507 disposition = scsi_decide_disposition(cmd); 1508 if (disposition != SUCCESS && 1509 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { 1510 sdev_printk(KERN_ERR, cmd->device, 1511 "timing out command, waited %lus\n", 1512 wait_for/HZ); 1513 disposition = SUCCESS; 1514 } 1515 1516 scsi_log_completion(cmd, disposition); 1517 1518 switch (disposition) { 1519 case SUCCESS: 1520 scsi_finish_command(cmd); 1521 break; 1522 case NEEDS_RETRY: 1523 scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY); 1524 break; 1525 case ADD_TO_MLQUEUE: 1526 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); 1527 break; 1528 default: 1529 if (!scsi_eh_scmd_add(cmd, 0)) 1530 scsi_finish_command(cmd); 1531 } 1532 } 1533 1534 /* 1535 * Function: scsi_request_fn() 1536 * 1537 * Purpose: Main strategy routine for SCSI. 1538 * 1539 * Arguments: q - Pointer to actual queue. 1540 * 1541 * Returns: Nothing 1542 * 1543 * Lock status: IO request lock assumed to be held when called. 1544 */ 1545 static void scsi_request_fn(struct request_queue *q) 1546 { 1547 struct scsi_device *sdev = q->queuedata; 1548 struct Scsi_Host *shost; 1549 struct scsi_cmnd *cmd; 1550 struct request *req; 1551 1552 if(!get_device(&sdev->sdev_gendev)) 1553 /* We must be tearing the block queue down already */ 1554 return; 1555 1556 /* 1557 * To start with, we keep looping until the queue is empty, or until 1558 * the host is no longer able to accept any more requests. 1559 */ 1560 shost = sdev->host; 1561 for (;;) { 1562 int rtn; 1563 /* 1564 * get next queueable request. We do this early to make sure 1565 * that the request is fully prepared even if we cannot 1566 * accept it. 1567 */ 1568 req = blk_peek_request(q); 1569 if (!req || !scsi_dev_queue_ready(q, sdev)) 1570 break; 1571 1572 if (unlikely(!scsi_device_online(sdev))) { 1573 sdev_printk(KERN_ERR, sdev, 1574 "rejecting I/O to offline device\n"); 1575 scsi_kill_request(req, q); 1576 continue; 1577 } 1578 1579 1580 /* 1581 * Remove the request from the request list. 1582 */ 1583 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) 1584 blk_start_request(req); 1585 sdev->device_busy++; 1586 1587 spin_unlock(q->queue_lock); 1588 cmd = req->special; 1589 if (unlikely(cmd == NULL)) { 1590 printk(KERN_CRIT "impossible request in %s.\n" 1591 "please mail a stack trace to " 1592 "linux-scsi@vger.kernel.org\n", 1593 __func__); 1594 blk_dump_rq_flags(req, "foo"); 1595 BUG(); 1596 } 1597 spin_lock(shost->host_lock); 1598 1599 /* 1600 * We hit this when the driver is using a host wide 1601 * tag map. For device level tag maps the queue_depth check 1602 * in the device ready fn would prevent us from trying 1603 * to allocate a tag. Since the map is a shared host resource 1604 * we add the dev to the starved list so it eventually gets 1605 * a run when a tag is freed. 
1606 */ 1607 if (blk_queue_tagged(q) && !blk_rq_tagged(req)) { 1608 if (list_empty(&sdev->starved_entry)) 1609 list_add_tail(&sdev->starved_entry, 1610 &shost->starved_list); 1611 goto not_ready; 1612 } 1613 1614 if (!scsi_target_queue_ready(shost, sdev)) 1615 goto not_ready; 1616 1617 if (!scsi_host_queue_ready(q, shost, sdev)) 1618 goto not_ready; 1619 1620 scsi_target(sdev)->target_busy++; 1621 shost->host_busy++; 1622 1623 /* 1624 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will 1625 * take the lock again. 1626 */ 1627 spin_unlock_irq(shost->host_lock); 1628 1629 /* 1630 * Finally, initialize any error handling parameters, and set up 1631 * the timers for timeouts. 1632 */ 1633 scsi_init_cmd_errh(cmd); 1634 1635 /* 1636 * Dispatch the command to the low-level driver. 1637 */ 1638 rtn = scsi_dispatch_cmd(cmd); 1639 spin_lock_irq(q->queue_lock); 1640 if (rtn) 1641 goto out_delay; 1642 } 1643 1644 goto out; 1645 1646 not_ready: 1647 spin_unlock_irq(shost->host_lock); 1648 1649 /* 1650 * lock q, handle tag, requeue req, and decrement device_busy. We 1651 * must return with queue_lock held. 1652 * 1653 * Decrementing device_busy without checking it is OK, as all such 1654 * cases (host limits or settings) should run the queue at some 1655 * later time. 1656 */ 1657 spin_lock_irq(q->queue_lock); 1658 blk_requeue_request(q, req); 1659 sdev->device_busy--; 1660 out_delay: 1661 if (sdev->device_busy == 0) 1662 blk_delay_queue(q, SCSI_QUEUE_DELAY); 1663 out: 1664 /* must be careful here...if we trigger the ->remove() function 1665 * we cannot be holding the q lock */ 1666 spin_unlock_irq(q->queue_lock); 1667 put_device(&sdev->sdev_gendev); 1668 spin_lock_irq(q->queue_lock); 1669 } 1670 1671 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) 1672 { 1673 struct device *host_dev; 1674 u64 bounce_limit = 0xffffffff; 1675 1676 if (shost->unchecked_isa_dma) 1677 return BLK_BOUNCE_ISA; 1678 /* 1679 * Platforms with virtual-DMA translation 1680 * hardware have no practical limit. 
1681 */ 1682 if (!PCI_DMA_BUS_IS_PHYS) 1683 return BLK_BOUNCE_ANY; 1684 1685 host_dev = scsi_get_device(shost); 1686 if (host_dev && host_dev->dma_mask) 1687 bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT; 1688 1689 return bounce_limit; 1690 } 1691 EXPORT_SYMBOL(scsi_calculate_bounce_limit); 1692 1693 struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, 1694 request_fn_proc *request_fn) 1695 { 1696 struct request_queue *q; 1697 struct device *dev = shost->dma_dev; 1698 1699 q = blk_init_queue(request_fn, NULL); 1700 if (!q) 1701 return NULL; 1702 1703 /* 1704 * this limit is imposed by hardware restrictions 1705 */ 1706 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, 1707 SCSI_MAX_SG_CHAIN_SEGMENTS)); 1708 1709 if (scsi_host_prot_dma(shost)) { 1710 shost->sg_prot_tablesize = 1711 min_not_zero(shost->sg_prot_tablesize, 1712 (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS); 1713 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize); 1714 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); 1715 } 1716 1717 blk_queue_max_hw_sectors(q, shost->max_sectors); 1718 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); 1719 blk_queue_segment_boundary(q, shost->dma_boundary); 1720 dma_set_seg_boundary(dev, shost->dma_boundary); 1721 1722 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); 1723 1724 if (!shost->use_clustering) 1725 q->limits.cluster = 0; 1726 1727 /* 1728 * set a reasonable default alignment on word boundaries: the 1729 * host and device may alter it using 1730 * blk_queue_update_dma_alignment() later. 1731 */ 1732 blk_queue_dma_alignment(q, 0x03); 1733 1734 return q; 1735 } 1736 EXPORT_SYMBOL(__scsi_alloc_queue); 1737 1738 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) 1739 { 1740 struct request_queue *q; 1741 1742 q = __scsi_alloc_queue(sdev->host, scsi_request_fn); 1743 if (!q) 1744 return NULL; 1745 1746 blk_queue_prep_rq(q, scsi_prep_fn); 1747 blk_queue_softirq_done(q, scsi_softirq_done); 1748 blk_queue_rq_timed_out(q, scsi_times_out); 1749 blk_queue_lld_busy(q, scsi_lld_busy); 1750 return q; 1751 } 1752 1753 /* 1754 * Function: scsi_block_requests() 1755 * 1756 * Purpose: Utility function used by low-level drivers to prevent further 1757 * commands from being queued to the device. 1758 * 1759 * Arguments: shost - Host in question 1760 * 1761 * Returns: Nothing 1762 * 1763 * Lock status: No locks are assumed held. 1764 * 1765 * Notes: There is no timer nor any other means by which the requests 1766 * get unblocked other than the low-level driver calling 1767 * scsi_unblock_requests(). 1768 */ 1769 void scsi_block_requests(struct Scsi_Host *shost) 1770 { 1771 shost->host_self_blocked = 1; 1772 } 1773 EXPORT_SYMBOL(scsi_block_requests); 1774 1775 /* 1776 * Function: scsi_unblock_requests() 1777 * 1778 * Purpose: Utility function used by low-level drivers to allow further 1779 * commands from being queued to the device. 1780 * 1781 * Arguments: shost - Host in question 1782 * 1783 * Returns: Nothing 1784 * 1785 * Lock status: No locks are assumed held. 1786 * 1787 * Notes: There is no timer nor any other means by which the requests 1788 * get unblocked other than the low-level driver calling 1789 * scsi_unblock_requests(). 1790 * 1791 * This is done as an API function so that changes to the 1792 * internals of the scsi mid-layer won't require wholesale 1793 * changes to drivers that use this feature. 
1794 */ 1795 void scsi_unblock_requests(struct Scsi_Host *shost) 1796 { 1797 shost->host_self_blocked = 0; 1798 scsi_run_host_queues(shost); 1799 } 1800 EXPORT_SYMBOL(scsi_unblock_requests); 1801 1802 int __init scsi_init_queue(void) 1803 { 1804 int i; 1805 1806 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer", 1807 sizeof(struct scsi_data_buffer), 1808 0, 0, NULL); 1809 if (!scsi_sdb_cache) { 1810 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n"); 1811 return -ENOMEM; 1812 } 1813 1814 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1815 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 1816 int size = sgp->size * sizeof(struct scatterlist); 1817 1818 sgp->slab = kmem_cache_create(sgp->name, size, 0, 1819 SLAB_HWCACHE_ALIGN, NULL); 1820 if (!sgp->slab) { 1821 printk(KERN_ERR "SCSI: can't init sg slab %s\n", 1822 sgp->name); 1823 goto cleanup_sdb; 1824 } 1825 1826 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, 1827 sgp->slab); 1828 if (!sgp->pool) { 1829 printk(KERN_ERR "SCSI: can't init sg mempool %s\n", 1830 sgp->name); 1831 goto cleanup_sdb; 1832 } 1833 } 1834 1835 return 0; 1836 1837 cleanup_sdb: 1838 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1839 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 1840 if (sgp->pool) 1841 mempool_destroy(sgp->pool); 1842 if (sgp->slab) 1843 kmem_cache_destroy(sgp->slab); 1844 } 1845 kmem_cache_destroy(scsi_sdb_cache); 1846 1847 return -ENOMEM; 1848 } 1849 1850 void scsi_exit_queue(void) 1851 { 1852 int i; 1853 1854 kmem_cache_destroy(scsi_sdb_cache); 1855 1856 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1857 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 1858 mempool_destroy(sgp->pool); 1859 kmem_cache_destroy(sgp->slab); 1860 } 1861 } 1862 1863 /** 1864 * scsi_mode_select - issue a mode select 1865 * @sdev: SCSI device to be queried 1866 * @pf: Page format bit (1 == standard, 0 == vendor specific) 1867 * @sp: Save page bit (0 == don't save, 1 == save) 1868 * @modepage: mode page being requested 1869 * @buffer: request buffer (may not be smaller than eight bytes) 1870 * @len: length of request buffer. 1871 * @timeout: command timeout 1872 * @retries: number of retries before failing 1873 * @data: returns a structure abstracting the mode header data 1874 * @sshdr: place to put sense data (or NULL if no sense to be collected). 1875 * must be SCSI_SENSE_BUFFERSIZE big. 1876 * 1877 * Returns zero if successful; negative error number or scsi 1878 * status on error 1879 * 1880 */ 1881 int 1882 scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage, 1883 unsigned char *buffer, int len, int timeout, int retries, 1884 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 1885 { 1886 unsigned char cmd[10]; 1887 unsigned char *real_buffer; 1888 int ret; 1889 1890 memset(cmd, 0, sizeof(cmd)); 1891 cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0); 1892 1893 if (sdev->use_10_for_ms) { 1894 if (len > 65535) 1895 return -EINVAL; 1896 real_buffer = kmalloc(8 + len, GFP_KERNEL); 1897 if (!real_buffer) 1898 return -ENOMEM; 1899 memcpy(real_buffer + 8, buffer, len); 1900 len += 8; 1901 real_buffer[0] = 0; 1902 real_buffer[1] = 0; 1903 real_buffer[2] = data->medium_type; 1904 real_buffer[3] = data->device_specific; 1905 real_buffer[4] = data->longlba ? 
0x01 : 0; 1906 real_buffer[5] = 0; 1907 real_buffer[6] = data->block_descriptor_length >> 8; 1908 real_buffer[7] = data->block_descriptor_length; 1909 1910 cmd[0] = MODE_SELECT_10; 1911 cmd[7] = len >> 8; 1912 cmd[8] = len; 1913 } else { 1914 if (len > 255 || data->block_descriptor_length > 255 || 1915 data->longlba) 1916 return -EINVAL; 1917 1918 real_buffer = kmalloc(4 + len, GFP_KERNEL); 1919 if (!real_buffer) 1920 return -ENOMEM; 1921 memcpy(real_buffer + 4, buffer, len); 1922 len += 4; 1923 real_buffer[0] = 0; 1924 real_buffer[1] = data->medium_type; 1925 real_buffer[2] = data->device_specific; 1926 real_buffer[3] = data->block_descriptor_length; 1927 1928 1929 cmd[0] = MODE_SELECT; 1930 cmd[4] = len; 1931 } 1932 1933 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len, 1934 sshdr, timeout, retries, NULL); 1935 kfree(real_buffer); 1936 return ret; 1937 } 1938 EXPORT_SYMBOL_GPL(scsi_mode_select); 1939 1940 /** 1941 * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary. 1942 * @sdev: SCSI device to be queried 1943 * @dbd: set if mode sense will allow block descriptors to be returned 1944 * @modepage: mode page being requested 1945 * @buffer: request buffer (may not be smaller than eight bytes) 1946 * @len: length of request buffer. 1947 * @timeout: command timeout 1948 * @retries: number of retries before failing 1949 * @data: returns a structure abstracting the mode header data 1950 * @sshdr: place to put sense data (or NULL if no sense to be collected). 1951 * must be SCSI_SENSE_BUFFERSIZE big. 1952 * 1953 * Returns zero if unsuccessful, or the header offset (either 4 1954 * or 8 depending on whether a six or ten byte command was 1955 * issued) if successful. 1956 */ 1957 int 1958 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, 1959 unsigned char *buffer, int len, int timeout, int retries, 1960 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 1961 { 1962 unsigned char cmd[12]; 1963 int use_10_for_ms; 1964 int header_length; 1965 int result; 1966 struct scsi_sense_hdr my_sshdr; 1967 1968 memset(data, 0, sizeof(*data)); 1969 memset(&cmd[0], 0, 12); 1970 cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ 1971 cmd[2] = modepage; 1972 1973 /* caller might not be interested in sense, but we need it */ 1974 if (!sshdr) 1975 sshdr = &my_sshdr; 1976 1977 retry: 1978 use_10_for_ms = sdev->use_10_for_ms; 1979 1980 if (use_10_for_ms) { 1981 if (len < 8) 1982 len = 8; 1983 1984 cmd[0] = MODE_SENSE_10; 1985 cmd[8] = len; 1986 header_length = 8; 1987 } else { 1988 if (len < 4) 1989 len = 4; 1990 1991 cmd[0] = MODE_SENSE; 1992 cmd[4] = len; 1993 header_length = 4; 1994 } 1995 1996 memset(buffer, 0, len); 1997 1998 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, 1999 sshdr, timeout, retries, NULL); 2000 2001 /* This code looks awful: what it's doing is making sure an 2002 * ILLEGAL REQUEST sense return identifies the actual command 2003 * byte as the problem. 
2004 * ILLEGAL REQUEST if the mode page isn't supported */
2005
2006 if (use_10_for_ms && !scsi_status_is_good(result) &&
2007 (driver_byte(result) & DRIVER_SENSE)) {
2008 if (scsi_sense_valid(sshdr)) {
2009 if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
2010 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
2011 /*
2012 * Invalid command operation code
2013 */
2014 sdev->use_10_for_ms = 0;
2015 goto retry;
2016 }
2017 }
2018 }
2019
2020 if (scsi_status_is_good(result)) {
2021 if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
2022 (modepage == 6 || modepage == 8))) {
2023 /* Initio breakage? */
2024 header_length = 0;
2025 data->length = 13;
2026 data->medium_type = 0;
2027 data->device_specific = 0;
2028 data->longlba = 0;
2029 data->block_descriptor_length = 0;
2030 } else if (use_10_for_ms) {
2031 data->length = buffer[0]*256 + buffer[1] + 2;
2032 data->medium_type = buffer[2];
2033 data->device_specific = buffer[3];
2034 data->longlba = buffer[4] & 0x01;
2035 data->block_descriptor_length = buffer[6]*256
2036 + buffer[7];
2037 } else {
2038 data->length = buffer[0] + 1;
2039 data->medium_type = buffer[1];
2040 data->device_specific = buffer[2];
2041 data->block_descriptor_length = buffer[3];
2042 }
2043 data->header_length = header_length;
2044 }
2045
2046 return result;
2047 }
2048 EXPORT_SYMBOL(scsi_mode_sense);
2049
2050 /**
2051 * scsi_test_unit_ready - test if unit is ready
2052 * @sdev: scsi device to test.
2053 * @timeout: command timeout
2054 * @retries: number of retries before failing
2055 * @sshdr_external: Optional pointer to struct scsi_sense_hdr for
2056 * returning sense. Make sure that this is cleared before passing
2057 * in.
2058 *
2059 * Returns zero if successful, or a non-zero result code if the TUR
2060 * failed. For removable media, UNIT_ATTENTION sets ->changed flag.
2061 **/
2062 int
2063 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2064 struct scsi_sense_hdr *sshdr_external)
2065 {
2066 char cmd[] = {
2067 TEST_UNIT_READY, 0, 0, 0, 0, 0,
2068 };
2069 struct scsi_sense_hdr *sshdr;
2070 int result;
2071
2072 if (!sshdr_external)
2073 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
2074 else
2075 sshdr = sshdr_external;
2076
2077 /* try to eat the UNIT_ATTENTION if there are enough retries */
2078 do {
2079 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2080 timeout, retries, NULL);
2081 if (sdev->removable && scsi_sense_valid(sshdr) &&
2082 sshdr->sense_key == UNIT_ATTENTION)
2083 sdev->changed = 1;
2084 } while (scsi_sense_valid(sshdr) &&
2085 sshdr->sense_key == UNIT_ATTENTION && --retries);
2086
2087 if (!sshdr_external)
2088 kfree(sshdr);
2089 return result;
2090 }
2091 EXPORT_SYMBOL(scsi_test_unit_ready);
2092
2093 /**
2094 * scsi_device_set_state - Take the given device through the device state model.
2095 * @sdev: scsi device to change the state of.
2096 * @state: state to change to.
2097 *
2098 * Returns zero if successful, or -EINVAL if the requested
2099 * transition is illegal.
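 *
 *	Illustrative sketch (not from the original source): a caller that
 *	wants to take a device offline only when the transition is legal
 *	might do
 *
 *		if (scsi_device_set_state(sdev, SDEV_OFFLINE) == 0)
 *			sdev_printk(KERN_INFO, sdev, "set offline\n");
 *		else
 *			sdev_printk(KERN_INFO, sdev,
 *				    "offline transition rejected\n");
 *
 *	Note that this only changes sdev->sdev_state; it does not stop or
 *	start the request queue by itself.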
2100 */
2101 int
2102 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2103 {
2104 enum scsi_device_state oldstate = sdev->sdev_state;
2105
2106 if (state == oldstate)
2107 return 0;
2108
2109 switch (state) {
2110 case SDEV_CREATED:
2111 switch (oldstate) {
2112 case SDEV_CREATED_BLOCK:
2113 break;
2114 default:
2115 goto illegal;
2116 }
2117 break;
2118
2119 case SDEV_RUNNING:
2120 switch (oldstate) {
2121 case SDEV_CREATED:
2122 case SDEV_OFFLINE:
2123 case SDEV_TRANSPORT_OFFLINE:
2124 case SDEV_QUIESCE:
2125 case SDEV_BLOCK:
2126 break;
2127 default:
2128 goto illegal;
2129 }
2130 break;
2131
2132 case SDEV_QUIESCE:
2133 switch (oldstate) {
2134 case SDEV_RUNNING:
2135 case SDEV_OFFLINE:
2136 case SDEV_TRANSPORT_OFFLINE:
2137 break;
2138 default:
2139 goto illegal;
2140 }
2141 break;
2142
2143 case SDEV_OFFLINE:
2144 case SDEV_TRANSPORT_OFFLINE:
2145 switch (oldstate) {
2146 case SDEV_CREATED:
2147 case SDEV_RUNNING:
2148 case SDEV_QUIESCE:
2149 case SDEV_BLOCK:
2150 break;
2151 default:
2152 goto illegal;
2153 }
2154 break;
2155
2156 case SDEV_BLOCK:
2157 switch (oldstate) {
2158 case SDEV_RUNNING:
2159 case SDEV_CREATED_BLOCK:
2160 break;
2161 default:
2162 goto illegal;
2163 }
2164 break;
2165
2166 case SDEV_CREATED_BLOCK:
2167 switch (oldstate) {
2168 case SDEV_CREATED:
2169 break;
2170 default:
2171 goto illegal;
2172 }
2173 break;
2174
2175 case SDEV_CANCEL:
2176 switch (oldstate) {
2177 case SDEV_CREATED:
2178 case SDEV_RUNNING:
2179 case SDEV_QUIESCE:
2180 case SDEV_OFFLINE:
2181 case SDEV_TRANSPORT_OFFLINE:
2182 case SDEV_BLOCK:
2183 break;
2184 default:
2185 goto illegal;
2186 }
2187 break;
2188
2189 case SDEV_DEL:
2190 switch (oldstate) {
2191 case SDEV_CREATED:
2192 case SDEV_RUNNING:
2193 case SDEV_OFFLINE:
2194 case SDEV_TRANSPORT_OFFLINE:
2195 case SDEV_CANCEL:
2196 case SDEV_CREATED_BLOCK:
2197 break;
2198 default:
2199 goto illegal;
2200 }
2201 break;
2202
2203 }
2204 sdev->sdev_state = state;
2205 return 0;
2206
2207 illegal:
2208 SCSI_LOG_ERROR_RECOVERY(1,
2209 sdev_printk(KERN_ERR, sdev,
2210 "Illegal state transition %s->%s\n",
2211 scsi_device_state_name(oldstate),
2212 scsi_device_state_name(state))
2213 );
2214 return -EINVAL;
2215 }
2216 EXPORT_SYMBOL(scsi_device_set_state);
2217
2218 /**
2219 * scsi_evt_emit - emit a single SCSI device uevent
2220 * @sdev: associated SCSI device
2221 * @evt: event to emit
2222 *
2223 * Send a single uevent (scsi_event) to the associated scsi_device.
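 *
 *	The event is delivered as a KOBJ_CHANGE uevent on the device's
 *	sysfs kobject (sdev_gendev), carrying at most one SDEV_* environment
 *	variable that identifies the event type.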
2224 */
2225 static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2226 {
2227 int idx = 0;
2228 char *envp[3];
2229
2230 switch (evt->evt_type) {
2231 case SDEV_EVT_MEDIA_CHANGE:
2232 envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2233 break;
2234 case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2235 envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
2236 break;
2237 case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2238 envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
2239 break;
2240 case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2241 envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
2242 break;
2243 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2244 envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
2245 break;
2246 case SDEV_EVT_LUN_CHANGE_REPORTED:
2247 envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
2248 break;
2249 default:
2250 /* do nothing */
2251 break;
2252 }
2253
2254 envp[idx++] = NULL;
2255
2256 kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2257 }
2258
2259 /**
2260 * scsi_evt_thread - send a uevent for each scsi event
2261 * @work: work struct for scsi_device
2262 *
2263 * Dispatch queued events to their associated scsi_device kobjects
2264 * as uevents.
2265 */
2266 void scsi_evt_thread(struct work_struct *work)
2267 {
2268 struct scsi_device *sdev;
2269 enum scsi_device_event evt_type;
2270 LIST_HEAD(event_list);
2271
2272 sdev = container_of(work, struct scsi_device, event_work);
2273
2274 for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
2275 if (test_and_clear_bit(evt_type, sdev->pending_events))
2276 sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);
2277
2278 while (1) {
2279 struct scsi_event *evt;
2280 struct list_head *this, *tmp;
2281 unsigned long flags;
2282
2283 spin_lock_irqsave(&sdev->list_lock, flags);
2284 list_splice_init(&sdev->event_list, &event_list);
2285 spin_unlock_irqrestore(&sdev->list_lock, flags);
2286
2287 if (list_empty(&event_list))
2288 break;
2289
2290 list_for_each_safe(this, tmp, &event_list) {
2291 evt = list_entry(this, struct scsi_event, node);
2292 list_del(&evt->node);
2293 scsi_evt_emit(sdev, evt);
2294 kfree(evt);
2295 }
2296 }
2297 }
2298
2299 /**
2300 * sdev_evt_send - send asserted event to uevent thread
2301 * @sdev: scsi_device event occurred on
2302 * @evt: event to send
2303 *
2304 * Assert scsi device event asynchronously.
2305 */
2306 void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2307 {
2308 unsigned long flags;
2309
2310 #if 0
2311 /* FIXME: currently this check eliminates all media change events
2312 * for polled devices. Need to update to discriminate between AN
2313 * and polled events */
2314 if (!test_bit(evt->evt_type, sdev->supported_events)) {
2315 kfree(evt);
2316 return;
2317 }
2318 #endif
2319
2320 spin_lock_irqsave(&sdev->list_lock, flags);
2321 list_add_tail(&evt->node, &sdev->event_list);
2322 schedule_work(&sdev->event_work);
2323 spin_unlock_irqrestore(&sdev->list_lock, flags);
2324 }
2325 EXPORT_SYMBOL_GPL(sdev_evt_send);
2326
2327 /**
2328 * sdev_evt_alloc - allocate a new scsi event
2329 * @evt_type: type of event to allocate
2330 * @gfpflags: GFP flags for allocation
2331 *
2332 * Allocates and returns a new scsi_event.
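 *
 *	Illustrative sketch (not from the original source): allocating and
 *	sending an event by hand is exactly what sdev_evt_send_simple()
 *	below does:
 *
 *		struct scsi_event *evt =
 *			sdev_evt_alloc(SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
 *
 *		if (evt)
 *			sdev_evt_send(sdev, evt);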
2333 */
2334 struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2335 gfp_t gfpflags)
2336 {
2337 struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2338 if (!evt)
2339 return NULL;
2340
2341 evt->evt_type = evt_type;
2342 INIT_LIST_HEAD(&evt->node);
2343
2344 /* evt_type-specific initialization, if any */
2345 switch (evt_type) {
2346 case SDEV_EVT_MEDIA_CHANGE:
2347 case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2348 case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2349 case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2350 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2351 case SDEV_EVT_LUN_CHANGE_REPORTED:
2352 default:
2353 /* do nothing */
2354 break;
2355 }
2356
2357 return evt;
2358 }
2359 EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2360
2361 /**
2362 * sdev_evt_send_simple - send asserted event to uevent thread
2363 * @sdev: scsi_device event occurred on
2364 * @evt_type: type of event to send
2365 * @gfpflags: GFP flags for allocation
2366 *
2367 * Assert scsi device event asynchronously, given an event type.
2368 */
2369 void sdev_evt_send_simple(struct scsi_device *sdev,
2370 enum scsi_device_event evt_type, gfp_t gfpflags)
2371 {
2372 struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2373 if (!evt) {
2374 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2375 evt_type);
2376 return;
2377 }
2378
2379 sdev_evt_send(sdev, evt);
2380 }
2381 EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
2382
2383 /**
2384 * scsi_device_quiesce - Block user issued commands.
2385 * @sdev: scsi device to quiesce.
2386 *
2387 * This works by trying to transition to the SDEV_QUIESCE state
2388 * (which must be a legal transition). When the device is in this
2389 * state, only special requests will be accepted; all others will
2390 * be deferred. Since special requests may also be requeued requests,
2391 * a successful return doesn't guarantee the device will be
2392 * totally quiescent.
2393 *
2394 * Must be called with user context, may sleep.
2395 *
2396 * Returns zero if successful, or an error if the transition is not allowed.
2397 */
2398 int
2399 scsi_device_quiesce(struct scsi_device *sdev)
2400 {
2401 int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2402 if (err)
2403 return err;
2404
2405 scsi_run_queue(sdev->request_queue);
2406 while (sdev->device_busy) {
2407 msleep_interruptible(200);
2408 scsi_run_queue(sdev->request_queue);
2409 }
2410 return 0;
2411 }
2412 EXPORT_SYMBOL(scsi_device_quiesce);
2413
2414 /**
2415 * scsi_device_resume - Restart user issued commands to a quiesced device.
2416 * @sdev: scsi device to resume.
2417 *
2418 * Moves the device from quiesced back to running and restarts the
2419 * queues.
2420 *
2421 * Must be called with user context, may sleep.
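 *
 *	Illustrative sketch (not from the original source): the usual
 *	pairing with scsi_device_quiesce() is
 *
 *		if (scsi_device_quiesce(sdev) == 0) {
 *			... issue internal/special requests here ...
 *			scsi_device_resume(sdev);
 *		}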
2422 */
2423 void scsi_device_resume(struct scsi_device *sdev)
2424 {
2425 /* check if the device state was mutated prior to resume, and if
2426 * so assume the state is being managed elsewhere (for example
2427 * device deleted during suspend)
2428 */
2429 if (sdev->sdev_state != SDEV_QUIESCE ||
2430 scsi_device_set_state(sdev, SDEV_RUNNING))
2431 return;
2432 scsi_run_queue(sdev->request_queue);
2433 }
2434 EXPORT_SYMBOL(scsi_device_resume);
2435
2436 static void
2437 device_quiesce_fn(struct scsi_device *sdev, void *data)
2438 {
2439 scsi_device_quiesce(sdev);
2440 }
2441
2442 void
2443 scsi_target_quiesce(struct scsi_target *starget)
2444 {
2445 starget_for_each_device(starget, NULL, device_quiesce_fn);
2446 }
2447 EXPORT_SYMBOL(scsi_target_quiesce);
2448
2449 static void
2450 device_resume_fn(struct scsi_device *sdev, void *data)
2451 {
2452 scsi_device_resume(sdev);
2453 }
2454
2455 void
2456 scsi_target_resume(struct scsi_target *starget)
2457 {
2458 starget_for_each_device(starget, NULL, device_resume_fn);
2459 }
2460 EXPORT_SYMBOL(scsi_target_resume);
2461
2462 /**
2463 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
2464 * @sdev: device to block
2465 *
2466 * Used by SCSI LLDs to temporarily stop all SCSI commands on
2467 * the specified device. Called from interrupt or normal process
2468 * context.
2469 *
2470 * Returns zero if successful, or an error if not.
2471 *
2472 * Notes:
2473 * This routine transitions the device to the SDEV_BLOCK state
2474 * (which must be a legal transition). When the device is in this
2475 * state, all commands are deferred until the SCSI LLD re-enables
2476 * the device with scsi_internal_device_unblock().
2477 */
2478 int
2479 scsi_internal_device_block(struct scsi_device *sdev)
2480 {
2481 struct request_queue *q = sdev->request_queue;
2482 unsigned long flags;
2483 int err = 0;
2484
2485 err = scsi_device_set_state(sdev, SDEV_BLOCK);
2486 if (err) {
2487 err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2488
2489 if (err)
2490 return err;
2491 }
2492
2493 /*
2494 * The device has transitioned to SDEV_BLOCK. Stop the
2495 * block layer from calling the midlayer with this device's
2496 * request queue.
2497 */
2498 spin_lock_irqsave(q->queue_lock, flags);
2499 blk_stop_queue(q);
2500 spin_unlock_irqrestore(q->queue_lock, flags);
2501
2502 return 0;
2503 }
2504 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2505
2506 /**
2507 * scsi_internal_device_unblock - resume a device after a block request
2508 * @sdev: device to resume
2509 * @new_state: state to set the device to after unblocking
2510 *
2511 * Called by SCSI LLDs or the midlayer to restart the device queue
2512 * for the previously suspended scsi device. Called from interrupt or
2513 * normal process context.
2514 *
2515 * Returns zero if successful, or an error if not.
2516 *
2517 * Notes:
2518 * This routine transitions the device to the SDEV_RUNNING state
2519 * or to one of the offline states (which must be a legal transition)
2520 * allowing the midlayer to goose the queue for this device.
2521 */
2522 int
2523 scsi_internal_device_unblock(struct scsi_device *sdev,
2524 enum scsi_device_state new_state)
2525 {
2526 struct request_queue *q = sdev->request_queue;
2527 unsigned long flags;
2528
2529 /*
2530 * Try to transition the scsi device to SDEV_RUNNING or one of the
2531 * offlined states and goose the device queue if successful.
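 * In short: SDEV_BLOCK and SDEV_TRANSPORT_OFFLINE move straight to the
 * requested new_state, SDEV_CREATED_BLOCK falls back to SDEV_CREATED
 * unless an offline state was requested, SDEV_CANCEL and SDEV_OFFLINE
 * are left unchanged (but the queue is still restarted), and any other
 * state is rejected with -EINVAL.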
2532 */
2533 if ((sdev->sdev_state == SDEV_BLOCK) ||
2534 (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
2535 sdev->sdev_state = new_state;
2536 else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
2537 if (new_state == SDEV_TRANSPORT_OFFLINE ||
2538 new_state == SDEV_OFFLINE)
2539 sdev->sdev_state = new_state;
2540 else
2541 sdev->sdev_state = SDEV_CREATED;
2542 } else if (sdev->sdev_state != SDEV_CANCEL &&
2543 sdev->sdev_state != SDEV_OFFLINE)
2544 return -EINVAL;
2545
2546 spin_lock_irqsave(q->queue_lock, flags);
2547 blk_start_queue(q);
2548 spin_unlock_irqrestore(q->queue_lock, flags);
2549
2550 return 0;
2551 }
2552 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
2553
2554 static void
2555 device_block(struct scsi_device *sdev, void *data)
2556 {
2557 scsi_internal_device_block(sdev);
2558 }
2559
2560 static int
2561 target_block(struct device *dev, void *data)
2562 {
2563 if (scsi_is_target_device(dev))
2564 starget_for_each_device(to_scsi_target(dev), NULL,
2565 device_block);
2566 return 0;
2567 }
2568
2569 void
2570 scsi_target_block(struct device *dev)
2571 {
2572 if (scsi_is_target_device(dev))
2573 starget_for_each_device(to_scsi_target(dev), NULL,
2574 device_block);
2575 else
2576 device_for_each_child(dev, NULL, target_block);
2577 }
2578 EXPORT_SYMBOL_GPL(scsi_target_block);
2579
2580 static void
2581 device_unblock(struct scsi_device *sdev, void *data)
2582 {
2583 scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
2584 }
2585
2586 static int
2587 target_unblock(struct device *dev, void *data)
2588 {
2589 if (scsi_is_target_device(dev))
2590 starget_for_each_device(to_scsi_target(dev), data,
2591 device_unblock);
2592 return 0;
2593 }
2594
2595 void
2596 scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
2597 {
2598 if (scsi_is_target_device(dev))
2599 starget_for_each_device(to_scsi_target(dev), &new_state,
2600 device_unblock);
2601 else
2602 device_for_each_child(dev, &new_state, target_unblock);
2603 }
2604 EXPORT_SYMBOL_GPL(scsi_target_unblock);
2605
2606 /**
2607 * scsi_kmap_atomic_sg - find and atomically map an sg element
2608 * @sgl: scatter-gather list
2609 * @sg_count: number of segments in the sg list
2610 * @offset: offset in bytes into sg, on return offset into the mapped area
2611 * @len: bytes to map, on return number of bytes mapped
2612 *
2613 * Returns the virtual address of the start of the mapped page.
2614 */
2615 void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2616 size_t *offset, size_t *len)
2617 {
2618 int i;
2619 size_t sg_len = 0, len_complete = 0;
2620 struct scatterlist *sg;
2621 struct page *page;
2622
2623 WARN_ON(!irqs_disabled());
2624
2625 for_each_sg(sgl, sg, sg_count, i) {
2626 len_complete = sg_len; /* Complete sg-entries */
2627 sg_len += sg->length;
2628 if (sg_len > *offset)
2629 break;
2630 }
2631
2632 if (unlikely(i == sg_count)) {
2633 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2634 "elements %d\n",
2635 __func__, sg_len, *offset, sg_count);
2636 WARN_ON(1);
2637 return NULL;
2638 }
2639
2640 /* Offset starting from the beginning of first page in this sg-entry */
2641 *offset = *offset - len_complete + sg->offset;
2642
2643 /* Assumption: contiguous pages can be accessed as "page + i" */
2644 page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2645 *offset &= ~PAGE_MASK;
2646
2647 /* Bytes in this sg-entry from *offset to the end of the page */
2648 sg_len = PAGE_SIZE - *offset;
2649 if (*len > sg_len)
2650 *len = sg_len;
2651
2652 return kmap_atomic(page);
2653 }
2654 EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2655
2656 /**
2657 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
2658 * @virt: virtual address to be unmapped
2659 */
2660 void scsi_kunmap_atomic_sg(void *virt)
2661 {
2662 kunmap_atomic(virt);
2663 }
2664 EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
2665
2666 void sdev_disable_disk_events(struct scsi_device *sdev)
2667 {
2668 atomic_inc(&sdev->disk_events_disable_depth);
2669 }
2670 EXPORT_SYMBOL(sdev_disable_disk_events);
2671
2672 void sdev_enable_disk_events(struct scsi_device *sdev)
2673 {
2674 if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
2675 return;
2676 atomic_dec(&sdev->disk_events_disable_depth);
2677 }
2678 EXPORT_SYMBOL(sdev_enable_disk_events);
2679
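/*
 * Illustrative sketch (hypothetical helper, not part of this file): copying
 * data out of a scatter-gather list with scsi_kmap_atomic_sg() and
 * scsi_kunmap_atomic_sg(). The kmap helper expects to be called with
 * interrupts disabled and may map less than was asked for, so the copy
 * has to loop:
 *
 *	static void example_sg_copy_out(struct scatterlist *sgl, int sg_count,
 *					unsigned char *dst, size_t off,
 *					size_t count)
 *	{
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		while (count) {
 *			size_t offset = off, len = count;
 *			void *vaddr;
 *
 *			vaddr = scsi_kmap_atomic_sg(sgl, sg_count, &offset, &len);
 *			if (!vaddr)
 *				break;
 *			memcpy(dst, vaddr + offset, len);
 *			scsi_kunmap_atomic_sg(vaddr);
 *			dst += len;
 *			off += len;
 *			count -= len;
 *		}
 *		local_irq_restore(flags);
 *	}
 */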