/*
 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 * SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->cmd_flags &= ~REQ_DONTPREP;
	req->special = NULL;

	scsi_put_command(cmd);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason:  The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.
	 * The mid-layer will implement a user-specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		host->host_blocked = host->max_host_blocked;
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
		device->device_blocked = device->max_device_blocked;
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		starget->target_blocked = starget->max_target_blocked;
		break;
	}

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	return __scsi_queue_insert(cmd, reason, 1);
}
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	flags to OR into request flags
 * @resid:	optional residual length
 *
 * Returns the req->errors value, which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(req->data_len > 0 && req->data_len <= bufflen))
		memset(buffer + (bufflen - req->data_len), 0, req->data_len);

	if (resid)
		*resid = req->data_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);


int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
		     int *resid)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0, resid);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);
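/*
 * Illustrative sketch (not part of the original file): how a caller
 * might use scsi_execute_req() above to issue a simple INQUIRY and
 * report the sense key on failure.  The helper name and buffer length
 * are hypothetical; only scsi_execute_req() and scsi_sense_valid()
 * are real interfaces.
 */
#if 0
static int example_do_inquiry(struct scsi_device *sdev, void *buf,
			      unsigned int len)
{
	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, len, 0 };
	struct scsi_sense_hdr sshdr;
	int result;

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, len,
				  &sshdr, 10 * HZ, 3, NULL);
	if (result && scsi_sense_valid(&sshdr))
		sdev_printk(KERN_INFO, sdev, "INQUIRY sense key 0x%x\n",
			    sshdr.sense_key);
	return result;
}
#endif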
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	starget->target_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline int scsi_device_is_busy(struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
		return 1;

	return 0;
}

static inline int scsi_target_is_busy(struct scsi_target *starget)
{
	return ((starget->can_queue > 0 &&
		 starget->target_busy >= starget->can_queue) ||
		 starget->target_blocked);
}

static inline int scsi_host_is_busy(struct Scsi_Host *shost)
{
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked)
		return 1;

	return 0;
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	LIST_HEAD(starved_list);
	unsigned long flags;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		int flagset;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		spin_unlock(shost->host_lock);

		spin_lock(sdev->request_queue->queue_lock);
		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
				!test_bit(QUEUE_FLAG_REENTER,
					&sdev->request_queue->queue_flags);
		if (flagset)
			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
		__blk_run_queue(sdev->request_queue);
		if (flagset)
			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
		spin_unlock(sdev->request_queue->queue_lock);

		spin_lock(shost->host_lock);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	scsi_unprep_request(req);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void __scsi_release_buffers(struct scsi_cmnd *, int);

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (blk_end_request(req, error, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (error && scsi_noretry_cmd(cmd))
			blk_end_request(req, error, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_release_buffers(cmd);
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	__scsi_release_buffers(cmd, 0);
	scsi_next_command(cmd);
	return NULL;
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
			      gfp_t gfp_mask)
{
	int ret;

	BUG_ON(!nents);

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       gfp_mask, scsi_sg_alloc);
	if (unlikely(ret))
		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
				scsi_sg_free);

	return ret;
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}

static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
{

	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
		struct scsi_data_buffer *bidi_sdb =
			cmd->request->next_rq->special;
		scsi_free_sgtable(bidi_sdb);
		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
		cmd->request->next_rq->special = NULL;
	}

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb);
}
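/*
 * Illustrative note (not in the original source): scsi_sgtable_index()
 * above maps a segment count onto one of the scsi_sg_pools entries,
 * rounding up to the next power of two via get_count_order().  With
 * all pools configured, that works out to:
 *
 *	nents  1..8	-> index 0 (sgpool-8)
 *	nents  9..16	-> index 1 (sgpool-16)
 *	nents 17..32	-> index 2 (sgpool-32)
 *	nents 33..64	-> index 3 (sgpool-64)
 *	nents 65..128	-> index 4 (sgpool-128)
 */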
/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	__scsi_release_buffers(cmd, 1);
}
EXPORT_SYMBOL(scsi_release_buffers);

/*
 * Bidi commands must be completed as a whole, both sides at once.  If
 * part of the bytes were written and the lld returned scsi_in()->resid
 * and/or scsi_out()->resid, this information will be left in
 * req->data_len and req->next_rq->data_len.  The upper-layer driver can
 * decide what to do with this information.
 */
static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned int dlen = req->data_len;
	unsigned int next_dlen = req->next_rq->data_len;

	req->data_len = scsi_out(cmd)->resid;
	req->next_rq->data_len = scsi_in(cmd)->resid;

	/* The req and req->next_rq have not been completed */
	BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));

	scsi_release_buffers(cmd);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must call scsi_end_request().  This will finish off
 *		the specified number of sectors.  If we are done, the
 *		command block will be released and the queue function
 *		will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call blk_end_request() with -EIO to fail
 *		   the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	int this_count;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	char *description = NULL;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = -EIO;
		}
		if (scsi_bidi_cmnd(cmd)) {
			/* will also release_buffers */
			scsi_end_bidi_request(cmd);
			return;
		}
		req->data_len = scsi_get_resid(cmd);
	}

	BUG_ON(blk_bidi_rq(req)); /* bidi not supported for !blk_pc_request yet */

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
				      "%d bytes done.\n",
				      req->nr_sectors, good_bytes));

	/* A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
		return;
	this_count = blk_rq_bytes(req);

	error = -EIO;

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				description = "Media Changed";
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				description = "Host Data Integrity Failure";
				action = ACTION_FAIL;
				error = -EILSEQ;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) { /* DIF */
				description = "Target Data Integrity Failure";
				error = -EILSEQ;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					description = "Device not ready";
					action = ACTION_FAIL;
					break;
				}
			} else {
				description = "Device not ready";
				action = ACTION_FAIL;
			}
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			description = "Unhandled sense code";
			action = ACTION_FAIL;
			break;
		}
	} else {
		description = "Unhandled error code";
		action = ACTION_FAIL;
	}

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		scsi_release_buffers(cmd);
		if (!(req->cmd_flags & REQ_QUIET)) {
			if (description)
				scmd_printk(KERN_INFO, cmd, "%s\n",
					    description);
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
		}
		blk_end_request(req, -EIO, blk_rq_bytes(req));
		scsi_next_command(cmd);
		break;
	case ACTION_REPREP:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		scsi_release_buffers(cmd);
		scsi_requeue_command(q, cmd);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
			     gfp_t gfp_mask)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					gfp_mask))) {
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	if (blk_pc_request(req))
		sdb->length = req->data_len;
	else
		sdb->length = req->nr_sectors << 9;
	return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(cmd->request)) {
		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
			scsi_sdb_cache, GFP_ATOMIC);
		if (!bidi_sdb) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		cmd->request->next_rq->special = bidi_sdb;
		error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
					  GFP_ATOMIC);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(cmd->request)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		BUG_ON(prot_sdb == NULL);
		ivecs = blk_rq_count_integrity_sg(cmd->request);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(cmd->request,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;

err_exit:
	scsi_release_buffers(cmd);
	if (error == BLKPREP_KILL)
		scsi_put_command(cmd);
	else /* BLKPREP_DEFER */
		scsi_unprep_request(cmd->request);

	return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;

	return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd, GFP_ATOMIC);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(req->data_len);
		BUG_ON(req->data);

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
		req->buffer = NULL;
	}

	cmd->cmd_len = req->cmd_len;
	if (!req->data_len)
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = req->data_len;
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);

/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs from
 * the ULD.
 */
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	/*
	 * Filesystem requests must transfer data.
	 */
	BUG_ON(!req->nr_phys_segments);

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);

int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the elv_next_request() returns NULL, but the
		 * queue must be restarted, so we plug here if no returning
		 * command will automatically do that.
		 */
		if (sdev->device_busy == 0)
			blk_plug_device(q);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
	return scsi_prep_return(q, req, ret);
}
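/*
 * Illustrative sketch (not part of the original file): an upper-level
 * driver that translates REQ_TYPE_FS requests itself would install its
 * own prep_fn with blk_queue_prep_rq() and build the CDB after calling
 * scsi_setup_fs_cmnd(), roughly as below.  The function name and the
 * minimal READ(10)/WRITE(10) encoding are hypothetical; only
 * scsi_setup_fs_cmnd() and scsi_prep_return() come from this file, and
 * a 512-byte logical block size is assumed.
 */
#if 0
static int example_uld_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_FS) {
		ret = scsi_setup_fs_cmnd(sdev, req);
		if (ret == BLKPREP_OK) {
			struct scsi_cmnd *cmd = req->special;

			/* encode LBA and length for a 512-byte-block
			 * device; real drivers handle many more cases */
			cmd->cmnd[0] = rq_data_dir(req) == WRITE ?
						WRITE_10 : READ_10;
			cmd->cmnd[2] = (req->sector >> 24) & 0xff;
			cmd->cmnd[3] = (req->sector >> 16) & 0xff;
			cmd->cmnd[4] = (req->sector >> 8) & 0xff;
			cmd->cmnd[5] = req->sector & 0xff;
			cmd->cmnd[7] = (req->nr_sectors >> 8) & 0xff;
			cmd->cmnd[8] = req->nr_sectors & 0xff;
			cmd->cmd_len = 10;
			cmd->transfersize = 512;
			cmd->allowed = 3;	/* arbitrary retry count */
		}
	}
	return scsi_prep_return(q, req, ret);
}
#endif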
/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				   sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (scsi_device_is_busy(sdev))
		return 0;

	return 1;
}


/*
 * scsi_target_queue_ready: checks if we can send commands to the target
 * @sdev: scsi device on starget to check.
 *
 * Called with the host lock held.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);

	if (starget->single_lun) {
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev)
			return 0;
		starget->starget_sdev_user = sdev;
	}

	if (starget->target_busy == 0 && starget->target_blocked) {
		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (--starget->target_blocked == 0) {
			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
					 "unblocking target at zero depth\n"));
		} else {
			blk_plug_device(sdev->request_queue);
			return 0;
		}
	}

	if (scsi_target_is_busy(starget)) {
		if (list_empty(&sdev->starved_entry)) {
			list_add_tail(&sdev->starved_entry,
				      &shost->starved_list);
			return 0;
		}
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);
	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			return 0;
		}
	}
	if (scsi_host_is_busy(shost)) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os
 * (e.g. !sdev), scsi needs to return 'not busy'.
 * Otherwise, request stacking drivers may hold requests forever.
 */
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_target *starget;

	if (!sdev)
		return 0;

	shost = sdev->host;
	starget = scsi_target(sdev);

	if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
	    scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev = cmd->device;
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = sdev->host;

	blkdev_dequeue_request(req);

	if (unlikely(cmd == NULL)) {
		printk(KERN_CRIT "impossible request in %s.\n",
				 __func__);
		BUG();
	}

	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	starget->target_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	/*
	 * Set the serial numbers back to zero
	 */
	cmd->serial_number = 0;

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}

	if(!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = elv_next_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}


		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blkdev_dequeue_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}
		spin_lock(shost->host_lock);

		/*
		 * We hit this when the driver is using a host wide
		 * tag map. For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag. Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;

		scsi_target(sdev)->target_busy++;
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if(rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if(sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if(sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
					 request_fn_proc *request_fn)
{
	struct request_queue *q;
	struct device *dev = shost->shost_gendev.parent;

	q = blk_init_queue(request_fn, NULL);
	if (!q)
		return NULL;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_hw_segments(q, shost->sg_tablesize);
	blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);

	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

	/* New queue, no concurrency on queue_flags */
	if (!shost->use_clustering)
		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);

	/*
	 * set a reasonable default alignment on word boundaries: the
	 * host and device may alter it using
	 * blk_queue_update_dma_alignment() later.
	 */
	blk_queue_dma_alignment(q, 0x03);

	return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct request_queue *q;

	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);
	blk_queue_rq_timed_out(q, scsi_times_out);
	blk_queue_lld_busy(q, scsi_lld_busy);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);
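/*
 * Illustrative sketch (not part of the original file): a low-level
 * driver typically brackets an internal, host-wide recovery action
 * with scsi_block_requests()/scsi_unblock_requests(), e.g.:
 *
 *	scsi_block_requests(shost);
 *	...reset or reconfigure the adapter...
 *	scsi_unblock_requests(shost);
 *
 * Nothing else unblocks the host, so the driver must guarantee that
 * the second call is reached on every path.
 */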
/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);

int __init scsi_init_queue(void)
{
	int i;

	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
					   sizeof(struct scsi_data_buffer),
					   0, 0, NULL);
	if (!scsi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
			goto cleanup_sdb;
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
			goto cleanup_sdb;
		}
	}

	return 0;

cleanup_sdb:
	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		if (sgp->pool)
			mempool_destroy(sgp->pool);
		if (sgp->slab)
			kmem_cache_destroy(sgp->slab);
	}
	kmem_cache_destroy(scsi_sdb_cache);

	return -ENOMEM;
}

void scsi_exit_queue(void)
{
	int i;

	kmem_cache_destroy(scsi_sdb_cache);

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error
 *
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;


		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);

/**
 *	scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if unsuccessful, or the header offset (either 4
 *	or 8 depending on whether a six or ten byte command was
 *	issued) if successful.
 */
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		  unsigned char *buffer, int len, int timeout, int retries,
		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries, NULL);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.
	 * MODE_SENSE commands can return ILLEGAL REQUEST if the code
	 * page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if(scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if(use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);

/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to test
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *		returning sense.  Make sure that this is cleared before passing
 *		in.
 *
 *	Returns zero if successful, or an error if the TUR failed.  For
 *	removable media, a return of NOT_READY or UNIT_ATTENTION is
 *	translated to success, with the ->changed flag updated.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr_external)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr *sshdr;
	int result;

	if (!sshdr_external)
		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
	else
		sshdr = sshdr_external;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, retries, NULL);
		if (sdev->removable && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	if (!sshdr)
		/* could not allocate sense buffer, so can't process it */
		return result;

	if (sdev->removable && scsi_sense_valid(sshdr) &&
	    (sshdr->sense_key == UNIT_ATTENTION ||
	     sshdr->sense_key == NOT_READY)) {
		sdev->changed = 1;
		result = 0;
	}
	if (!sshdr_external)
		kfree(sshdr);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
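/*
 * Illustrative sketch (not part of the original file): reading a mode
 * page with scsi_mode_sense() above.  The page number, buffer size and
 * function name are arbitrary; scsi_mode_data tells the caller where
 * the block descriptors end and the page data begins.
 */
#if 0
static int example_read_caching_page(struct scsi_device *sdev)
{
	unsigned char buf[64];
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int res;

	res = scsi_mode_sense(sdev, 0 /* dbd */, 0x08 /* caching page */,
			      buf, sizeof(buf), 10 * HZ, 3, &data, &sshdr);
	if (scsi_status_is_good(res)) {
		unsigned char *page = buf + data.header_length +
				      data.block_descriptor_length;
		sdev_printk(KERN_INFO, sdev, "WCE=%d\n",
			    (page[2] & 0x04) ? 1 : 0);
	}
	return res;
}
#endif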

/**
 * scsi_device_set_state - Take the given device through the device state model.
 * @sdev: scsi device to change the state of.
 * @state: state to change to.
 *
 * Returns zero if successful or an error (-EINVAL) if the requested
 * transition is illegal.
 */
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		switch (oldstate) {
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CREATED_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_CANCEL:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s\n",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
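
/*
 * Illustrative sketch -- not part of the original file.  Assuming the device
 * starts out in SDEV_CREATED, it walks through a chain of transitions that
 * the table above allows and shows how an illegal request is reported.  The
 * function name is made up for the example; the block is disabled so it is
 * never compiled.
 */
#if 0
static void example_state_transitions(struct scsi_device *sdev)
{
	/* CREATED -> RUNNING -> QUIESCE -> RUNNING are all legal */
	WARN_ON(scsi_device_set_state(sdev, SDEV_RUNNING));
	WARN_ON(scsi_device_set_state(sdev, SDEV_QUIESCE));
	WARN_ON(scsi_device_set_state(sdev, SDEV_RUNNING));

	/* RUNNING -> CREATED is not; the state is left untouched */
	if (scsi_device_set_state(sdev, SDEV_CREATED) == -EINVAL)
		sdev_printk(KERN_INFO, sdev, "transition rejected\n");
}
#endif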

/**
 * scsi_evt_emit - emit a single SCSI device uevent
 * @sdev: associated SCSI device
 * @evt: event to emit
 *
 * Send a single uevent (scsi_event) to the associated scsi_device.
 */
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;

	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}

/**
 * scsi_evt_thread - send a uevent for each scsi event
 * @work: work struct for scsi_device
 *
 * Dispatch queued events to their associated scsi_device kobjects
 * as uevents.
 */
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}

/**
 * sdev_evt_send - send asserted event to uevent thread
 * @sdev: scsi_device event occurred on
 * @evt: event to send
 *
 * Assert scsi device event asynchronously.
 */
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

#if 0
	/* FIXME: currently this check eliminates all media change events
	 * for polled devices. Need to update to discriminate between AN
	 * and polled events */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
#endif

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);

/**
 * sdev_evt_alloc - allocate a new scsi event
 * @evt_type: type of event to allocate
 * @gfpflags: GFP flags for allocation
 *
 * Allocates and returns a new scsi_event.
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);

/**
 * sdev_evt_send_simple - send asserted event to uevent thread
 * @sdev: scsi_device event occurred on
 * @evt_type: type of event to send
 * @gfpflags: GFP flags for allocation
 *
 * Assert scsi device event asynchronously, given an event type.
 */
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
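
/*
 * Illustrative sketch -- not part of the original file.  A driver for
 * removable media that has just detected a media change (by polling or by
 * an asynchronous notification) could raise the corresponding uevent with
 * a single call.  The function name is made up for the example; the block
 * is disabled so it is never compiled.
 */
#if 0
static void example_report_media_change(struct scsi_device *sdev)
{
	/* allocates a struct scsi_event and queues it for scsi_evt_thread() */
	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
}
#endif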

/**
 * scsi_device_quiesce - Block user issued commands.
 * @sdev: scsi device to quiesce.
 *
 * This works by trying to transition to the SDEV_QUIESCE state
 * (which must be a legal transition).  When the device is in this
 * state, only special requests will be accepted, all others will
 * be deferred.  Since special requests may also be requeued requests,
 * a successful return doesn't guarantee the device will be
 * totally quiescent.
 *
 * Must be called with user context, may sleep.
 *
 * Returns zero if successful or an error if not.
 */
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

/**
 * scsi_device_resume - Restart user issued commands to a quiesced device.
 * @sdev: scsi device to resume.
 *
 * Moves the device from quiesced back to running and restarts the
 * queues.
 *
 * Must be called with user context, may sleep.
 */
void
scsi_device_resume(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);

/**
 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
 * @sdev: device to block
 *
 * Block requests made by SCSI LLDs to temporarily stop all
 * scsi commands on the specified device.  Called from interrupt
 * or normal process context.
 *
 * Returns zero if successful or an error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_device_unblock or device_block_tmo fires.
 *	This routine assumes the host_lock is held on entry.
 */
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err) {
		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

		if (err)
			return err;
	}

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);
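
/*
 * Illustrative sketch -- not part of the original file.  It pairs
 * scsi_internal_device_block() above with scsi_internal_device_unblock()
 * (defined just below): an LLD that must pause a single device, e.g. while
 * its firmware recovers, could use the two calls like this.  The function
 * name and the recovery scenario are made up for the example; the block is
 * disabled so it is never compiled.
 */
#if 0
static void example_pause_device_for_recovery(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	/* defer all commands for this device; host_lock must be held */
	spin_lock_irqsave(shost->host_lock, flags);
	scsi_internal_device_block(sdev);
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* ... firmware recovery happens elsewhere ... */

	/* let the block layer feed the midlayer again */
	spin_lock_irqsave(shost->host_lock, flags);
	scsi_internal_device_unblock(sdev);
	spin_unlock_irqrestore(shost->host_lock, flags);
}
#endif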

/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev: device to resume
 *
 * Called by SCSI LLDs or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful or an error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	(which must be a legal transition) allowing the midlayer to
 *	goose the queue for this device.  This routine assumes the
 *	host_lock is held upon entry.
 */
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int err;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING
	 * and goose the device queue if successful.
	 */
	err = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (err) {
		err = scsi_device_set_state(sdev, SDEV_CREATED);

		if (err)
			return err;
	}

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	else
		device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);

/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sgl: scatter-gather list
 * @sg_count: number of segments in sg
 * @offset: offset in bytes into sg, on return offset into the mapped area
 * @len: bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);

/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
 * @virt: virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
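
/*
 * Illustrative sketch -- not part of the original file.  It shows how a
 * caller running with interrupts disabled (for example a command completion
 * path) could peek at the first bytes of a command's scatter-gather data
 * using the pair above.  The function name is made up for the example; the
 * block is disabled so it is never compiled.
 */
#if 0
static void example_peek_sg_data(struct scsi_cmnd *cmd, void *to, size_t count)
{
	size_t offset = 0;	/* in: byte offset into the sg list */
	size_t len = count;	/* in: bytes wanted, out: bytes mapped */
	void *vaddr;

	vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
				    &offset, &len);
	if (!vaddr)
		return;

	/* offset is now relative to the mapped page; len may have shrunk */
	memcpy(to, vaddr + offset, min(len, count));

	scsi_kunmap_atomic_sg(vaddr);
}
#endif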