/*
 * Copyright (C) 1999 Eric Youngdale
 * Copyright (C) 2014 Christoph Hellwig
 *
 * SCSI queueing library.
 * Initial versions: Eric Youngdale (eric@andante.org).
 *                   Based upon conversations with large numbers
 *                   of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include <trace/events/scsi.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { .size = x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

/*
 * When to reinvoke queueing after a resource shortage. It's 3 msecs to
 * not change behaviour from the previous unplug mechanism, experimentation
 * may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY	3

static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		atomic_set(&host->host_blocked, host->max_host_blocked);
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		atomic_set(&device->device_blocked,
			   device->max_device_blocked);
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		atomic_set(&starget->target_blocked,
			   starget->max_target_blocked);
		break;
	}
}

static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = cmd->request->q;

	blk_mq_requeue_request(cmd->request);
	blk_mq_kick_requeue_list(q);
	put_device(&sdev->sdev_gendev);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
		"Inserting command %p into mlqueue\n", cmd));

	scsi_set_blocked(cmd, reason);

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue. Schedule requeue work under
	 * lock such that the kblockd_schedule_work() call happens
	 * before blk_cleanup_queue() finishes.
	 */
	cmd->result = 0;
	if (q->mq_ops) {
		scsi_mq_requeue_cmd(cmd);
		return;
	}
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	kblockd_schedule_work(&device->requeue_work);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, 1);
}
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in jiffies
 * @retries:	number of times to retry request
 * @flags:	flags to OR into the request's cmd_flags
 * @resid:	optional residual length
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
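 *
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 * a TEST UNIT READY, which transfers no data, could be issued as
 *
 *	unsigned char tur[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	int result = scsi_execute(sdev, tur, DMA_NONE, NULL, 0, NULL,
 *				  30 * HZ, 3, 0, NULL);
 *
 * where a nonzero result carries the usual status/host/driver bytes.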
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, u64 flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
	if (IS_ERR(req))
		return ret;
	blk_rq_set_block_pc(req);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

	if (resid)
		*resid = req->resid_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);

int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
			   int data_direction, void *buffer, unsigned bufflen,
			   struct scsi_sense_hdr *sshdr, int timeout, int retries,
			   int *resid, u64 flags)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, flags, resid);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req_flags);

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	atomic_dec(&shost->host_busy);
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);

	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled))) {
		spin_lock_irqsave(shost->host_lock, flags);
		scsi_eh_wakeup(shost);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}

	atomic_dec(&sdev->device_busy);
}

static void scsi_kick_queue(struct request_queue *q)
{
	if (q->mq_ops)
		blk_mq_start_hw_queues(q);
	else
		blk_run_queue(q);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	scsi_kick_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_kick_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
	if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
		return true;
	if (atomic_read(&sdev->device_blocked) > 0)
		return true;
	return false;
}

static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
	if (starget->can_queue > 0) {
		if (atomic_read(&starget->target_busy) >= starget->can_queue)
			return true;
		if (atomic_read(&starget->target_blocked) > 0)
			return true;
	}
	return false;
}

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
	if (shost->can_queue > 0 &&
	    atomic_read(&shost->host_busy) >= shost->can_queue)
		return true;
	if (atomic_read(&shost->host_blocked) > 0)
		return true;
	if (shost->host_self_blocked)
		return true;
	return false;
}

static void scsi_starved_list_run(struct Scsi_Host *shost)
{
	LIST_HEAD(starved_list);
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		struct request_queue *slq;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		/*
		 * Once we drop the host lock, a racing scsi_remove_device()
		 * call may remove the sdev from the starved list and destroy
		 * it and the queue.  Mitigate by taking a reference to the
		 * queue and never touching the sdev again after we drop the
		 * host lock.  Note: if __scsi_remove_device() invokes
		 * blk_cleanup_queue() before the queue is run from this
		 * function then blk_run_queue() will return immediately since
		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
		 */
		slq = sdev->request_queue;
		if (!blk_get_queue(slq))
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);

		scsi_kick_queue(slq);
		blk_put_queue(slq);

		spin_lock_irqsave(shost->host_lock, flags);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:   scsi_run_queue()
 *
 * Purpose:    Select a proper request queue to serve next
 *
 * Arguments:  q       - last request's queue
 *
 * Returns:    Nothing
 *
 * Notes:      The previous command was completely finished, start
 *             a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);
	if (!list_empty(&sdev->host->starved_list))
		scsi_starved_list_run(sdev->host);

	if (q->mq_ops)
		blk_mq_start_stopped_hw_queues(q, false);
	else
		blk_run_queue(q);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_unprep_request(req);
	req->special = NULL;
	scsi_put_command(cmd);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
{
	if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS)
		return;
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
{
	struct scatterlist *first_chunk = NULL;
	int ret;

	BUG_ON(!nents);

	if (mq) {
		if (nents <= SCSI_MAX_SG_SEGMENTS) {
			sdb->table.nents = nents;
			sg_init_table(sdb->table.sgl, sdb->table.nents);
			return 0;
		}
		first_chunk = sdb->table.sgl;
	}

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       first_chunk, GFP_ATOMIC, scsi_sg_alloc);
	if (unlikely(ret))
		scsi_free_sgtable(sdb, mq);
	return ret;
}

static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
	if (cmd->request->cmd_type == REQ_TYPE_FS) {
		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);

		if (drv->uninit_command)
			drv->uninit_command(cmd);
	}
}

static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb, true);
	if (cmd->request->next_rq && cmd->request->next_rq->special)
		scsi_free_sgtable(cmd->request->next_rq->special, true);
	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb, true);
}

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	scsi_mq_free_sgtables(cmd);
	scsi_uninit_cmd(cmd);

	if (shost->use_cmd_list) {
		BUG_ON(list_empty(&cmd->list));
		spin_lock_irqsave(&sdev->list_lock, flags);
		list_del_init(&cmd->list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);
	}
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Free resources allocated for a scsi_command.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb, false);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb, false);
}

static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
{
	struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;

	scsi_free_sgtable(bidi_sdb, false);
	kmem_cache_free(scsi_sdb_cache, bidi_sdb);
	cmd->request->next_rq->special = NULL;
}

static bool scsi_end_request(struct request *req, int error,
		unsigned int bytes, unsigned int bidi_bytes)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	if (blk_update_request(req, error, bytes))
		return true;

	/* Bidi request must be completed as a whole */
	if (unlikely(bidi_bytes) &&
	    blk_update_request(req->next_rq, error, bidi_bytes))
		return true;

	if (blk_queue_add_random(q))
		add_disk_randomness(req->rq_disk);

	if (req->mq_ctx) {
		/*
		 * In the MQ case the command gets freed by __blk_mq_end_request,
		 * so we have to do all cleanup that depends on it earlier.
		 *
		 * We also can't kick the queues from irq context, so we
		 * will have to defer it to a workqueue.
		 */
		scsi_mq_uninit_cmd(cmd);

		__blk_mq_end_request(req, error);

		if (scsi_target(sdev)->single_lun ||
		    !list_empty(&sdev->host->starved_list))
			kblockd_schedule_work(&sdev->requeue_work);
		else
			blk_mq_start_stopped_hw_queues(q, true);
	} else {
		unsigned long flags;

		if (bidi_bytes)
			scsi_release_bidi_buffers(cmd);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_finish_request(req, error);
		spin_unlock_irqrestore(q->queue_lock, flags);

		scsi_release_buffers(cmd);

		scsi_put_command(cmd);
		scsi_run_queue(q);
	}

	put_device(&sdev->sdev_gendev);
	return false;
}

/**
 * __scsi_error_from_host_byte - translate SCSI error code into errno
 * @cmd:	SCSI command (unused)
 * @result:	scsi error code
 *
 * Translate SCSI error code into standard UNIX errno.
 * Return values:
 * -ENOLINK	temporary transport failure
 * -EREMOTEIO	permanent target failure, do not retry
 * -EBADE	permanent nexus failure, retry on other path
 * -ENOSPC	No write space available
 * -ENODATA	Medium error
 * -EIO		unspecified I/O error
 */
static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
	int error = 0;

	switch(host_byte(result)) {
	case DID_TRANSPORT_FAILFAST:
		error = -ENOLINK;
		break;
	case DID_TARGET_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EREMOTEIO;
		break;
	case DID_NEXUS_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EBADE;
		break;
	case DID_ALLOC_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -ENOSPC;
		break;
	case DID_MEDIUM_ERROR:
		set_host_byte(cmd, DID_OK);
		error = -ENODATA;
		break;
	default:
		error = -EIO;
		break;
	}

	return error;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       We will finish off the specified number of sectors.  If we
 *		are done, the command block will be released and the queue
 *		function will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call __scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call scsi_end_request() with -EIO to fail
 *		   the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	bool sense_valid = false;
	int sense_deferred = 0, level = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	unsigned long wait_for = (cmd->allowed + 1) * req->timeout;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = __scsi_error_from_host_byte(cmd, result);
		}
		/*
		 * __scsi_error_from_host_byte may have reset the host_byte
		 */
		req->errors = cmd->result;

		req->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands must be completed as a whole,
			 * both sides at once.
			 */
			req->next_rq->resid_len = scsi_in(cmd)->resid;
			if (scsi_end_request(req, 0, blk_rq_bytes(req),
					blk_rq_bytes(req->next_rq)))
				BUG();
			return;
		}
	} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
		/*
		 * Certain non BLOCK_PC requests are commands that don't
		 * actually transfer anything (FLUSH), so cannot use
		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
		 * This sets the error explicitly for the problem case.
		 */
		error = __scsi_error_from_host_byte(cmd, result);
	}

	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
	BUG_ON(blk_bidi_rq(req));

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
		"%u sectors total, %d bytes done.\n",
		blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated
	 * as success, so fiddle the result code here.  For BLOCK_PC
	 * we already took a copy of the original into rq->errors which
	 * is what gets returned to the user
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
		 * print since caller wants ATA registers. Only occurs on
		 * SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			;
		else if (!(req->cmd_flags & REQ_QUIET))
			scsi_print_sense(cmd);
		result = 0;
		/* BLOCK_PC may have set error */
		error = 0;
	}

	/*
	 * If we finished all bytes in the request we are done now.
	 */
	if (!scsi_end_request(req, error, good_bytes, 0))
		return;

	/*
	 * Kill remainder if no retries.
	 */
	if (error && scsi_noretry_cmd(cmd)) {
		if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
			BUG();
		return;
	}

	/*
	 * If there had been no error, but we have leftover bytes in the
	 * request, just queue the command up again.
	 */
	if (result == 0)
		goto requeue;

	error = __scsi_error_from_host_byte(cmd, result);

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				action = ACTION_FAIL;
				error = -EILSEQ;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
				action = ACTION_FAIL;
				error = -EREMOTEIO;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) /* DIF */
				error = -EILSEQ;
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					action = ACTION_FAIL;
					break;
				}
			} else
				action = ACTION_FAIL;
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			action = ACTION_FAIL;
			break;
		}
	} else
		action = ACTION_FAIL;

	if (action != ACTION_FAIL &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
		action = ACTION_FAIL;

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		if (!(req->cmd_flags & REQ_QUIET)) {
			static DEFINE_RATELIMIT_STATE(_rs,
					DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

			if (unlikely(scsi_logging_level))
				level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
						       SCSI_LOG_MLCOMPLETE_BITS);

			/*
			 * if logging is enabled the failure will be printed
			 * in scsi_log_completion(), so avoid duplicate messages
			 */
			if (!level && __ratelimit(&_rs)) {
				scsi_print_result(cmd, NULL, FAILED);
				if (driver_byte(result) & DRIVER_SENSE)
					scsi_print_sense(cmd);
				scsi_print_command(cmd);
			}
		}
		if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
			return;
		/*FALLTHRU*/
	case ACTION_REPREP:
	requeue:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		if (q->mq_ops) {
			cmd->request->cmd_flags &= ~REQ_DONTPREP;
			scsi_mq_uninit_cmd(cmd);
			scsi_mq_requeue_cmd(cmd);
		} else {
			scsi_release_buffers(cmd);
			scsi_requeue_command(q, cmd);
		}
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					req->mq_ctx != NULL)))
		return BLKPREP_DEFER;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_bytes(req);
	return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *rq = cmd->request;
	bool is_mq = (rq->mq_ctx != NULL);
	int error;

	BUG_ON(!rq->nr_phys_segments);

	error = scsi_init_sgtable(rq, &cmd->sdb);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(rq)) {
		if (!rq->q->mq_ops) {
			struct scsi_data_buffer *bidi_sdb =
				kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
			if (!bidi_sdb) {
				error = BLKPREP_DEFER;
				goto err_exit;
			}

			rq->next_rq->special = bidi_sdb;
		}

		error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		if (prot_sdb == NULL) {
			/*
			 * This can happen if someone (e.g. multipath)
			 * queues a command to a device on an adapter
			 * that does not support DIX.
			 */
			WARN_ON_ONCE(1);
			error = BLKPREP_KILL;
			goto err_exit;
		}

		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));
		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;
err_exit:
	if (is_mq) {
		scsi_mq_free_sgtables(cmd);
	} else {
		scsi_release_buffers(cmd);
		cmd->request->special = NULL;
		scsi_put_command(cmd);
		put_device(&sdev->sdev_gendev);
	}
	return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		/* Bail if we can't get a reference to the device */
		if (!get_device(&sdev->sdev_gendev))
			return NULL;

		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd)) {
			put_device(&sdev->sdev_gendev);
			return NULL;
		}
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;
	cmd->prot_op = SCSI_PROT_NORMAL;

	return cmd;
}

static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must have
	 * a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret = scsi_init_io(cmd);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
	}

	cmd->cmd_len = req->cmd_len;
	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}

/*
 * Setup a REQ_TYPE_FS command.  These are simple requests from filesystems
 * that still need to be translated to SCSI CDBs from the ULD.
 */
static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
		int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_cmd_to_driver(cmd)->init_command(cmd);
}

static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	switch (req->cmd_type) {
	case REQ_TYPE_FS:
		return scsi_setup_fs_cmnd(sdev, req);
	case REQ_TYPE_BLOCK_PC:
		return scsi_setup_blk_pc_cmnd(sdev, req);
	default:
		return BLKPREP_KILL;
	}
}

static int
scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}

static int
scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			put_device(&sdev->sdev_gendev);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we schedule a callback to happen
		 * shortly.
		 */
		if (atomic_read(&sdev->device_busy) == 0)
			blk_delay_queue(q, SCSI_QUEUE_DELAY);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd;
	int ret;

	ret = scsi_prep_state_check(sdev, req);
	if (ret != BLKPREP_OK)
		goto out;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd)) {
		ret = BLKPREP_DEFER;
		goto out;
	}

	ret = scsi_setup_cmnd(sdev, req);
out:
	return scsi_prep_return(q, req, ret);
}

static void scsi_unprep_fn(struct request_queue *q, struct request *req)
{
	scsi_uninit_cmd(req->special);
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	unsigned int busy;

	busy = atomic_inc_return(&sdev->device_busy) - 1;
	if (atomic_read(&sdev->device_blocked)) {
		if (busy)
			goto out_dec;

		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (atomic_dec_return(&sdev->device_blocked) > 0) {
			/*
			 * For the MQ case we take care of this in the caller.
			 */
			if (!q->mq_ops)
				blk_delay_queue(q, SCSI_QUEUE_DELAY);
			goto out_dec;
		}
		SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
	}

	if (busy >= sdev->queue_depth)
		goto out_dec;

	return 1;
out_dec:
	atomic_dec(&sdev->device_busy);
	return 0;
}

/*
 * scsi_target_queue_ready: checks if we can send commands to the target
 * @sdev: scsi device on starget to check.
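 *
 * Returns 1 if the target can take another command, 0 otherwise; in the
 * starved case the sdev is moved to the host's starved list so the queue
 * is run again later.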
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);
	unsigned int busy;

	if (starget->single_lun) {
		spin_lock_irq(shost->host_lock);
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev) {
			spin_unlock_irq(shost->host_lock);
			return 0;
		}
		starget->starget_sdev_user = sdev;
		spin_unlock_irq(shost->host_lock);
	}

	if (starget->can_queue <= 0)
		return 1;

	busy = atomic_inc_return(&starget->target_busy) - 1;
	if (atomic_read(&starget->target_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (atomic_dec_return(&starget->target_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
				 "unblocking target at zero depth\n"));
	}

	if (busy >= starget->can_queue)
		goto starved;

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	list_move_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);
	return 0;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	unsigned int busy;

	if (scsi_host_in_recovery(shost))
		return 0;

	busy = atomic_inc_return(&shost->host_busy) - 1;
	if (atomic_read(&shost->host_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (atomic_dec_return(&shost->host_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3,
			shost_printk(KERN_INFO, shost,
				     "unblocking host at zero depth\n"));
	}

	if (shost->can_queue > 0 && busy >= shost->can_queue)
		goto starved;
	if (shost->host_self_blocked)
		goto starved;

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry)) {
		spin_lock_irq(shost->host_lock);
		if (!list_empty(&sdev->starved_entry))
			list_del_init(&sdev->starved_entry);
		spin_unlock_irq(shost->host_lock);
	}

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	if (list_empty(&sdev->starved_entry))
		list_add_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	atomic_dec(&shost->host_busy);
	return 0;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os, scsi
 * needs to return 'not busy'. Otherwise, request stacking drivers
 * may hold requests forever.
 */
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;

	if (blk_queue_dying(q))
		return 0;

	shost = sdev->host;

	/*
	 * Ignore host/starget busy state.
	 * Since block layer does not have a concept of fairness across
	 * multiple queues, congestion of host/starget needs to be handled
	 * in SCSI layer.
	 */
	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct scsi_target *starget;
	struct Scsi_Host *shost;

	blk_start_request(req);

	scmd_printk(KERN_INFO, cmd, "killing request\n");

	sdev = cmd->device;
	starget = scsi_target(sdev);
	shost = sdev->host;
	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	atomic_inc(&sdev->device_busy);
	atomic_inc(&shost->host_busy);
	if (starget->can_queue > 0)
		atomic_inc(&starget->target_busy);

	blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}

/**
 * scsi_dispatch_command - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: nonzero if the request was rejected and the device's queue
 * needs to be plugged.
 */
static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int rtn = 0;

	atomic_inc(&cmd->device->iorequest_cnt);

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		goto done;
	}

	/* Check to see if the scsi lld made this device blocked. */
	if (unlikely(scsi_device_blocked(cmd->device))) {
		/*
		 * in blocked state, the command is just put back on
		 * the device queue.  The suspend state has already
		 * blocked the queue so future requests should not
		 * occur until the device transitions out of the
		 * suspend state.
		 */
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : device blocked\n"));
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/* Store the LUN value in cmnd, if needed. */
	if (cmd->device->lun_in_cdb)
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);

	scsi_log_send(cmd);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			       "queuecommand : command too long. "
			       "cdb_size=%d host->max_cmd_len=%d\n",
			       cmd->cmd_len, cmd->device->host->max_cmd_len));
		cmd->result = (DID_ABORT << 16);
		goto done;
	}

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		goto done;

	}

	trace_scsi_dispatch_cmd_start(cmd);
	rtn = host->hostt->queuecommand(host, cmd);
	if (rtn) {
		trace_scsi_dispatch_cmd_error(cmd, rtn);
		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
			rtn = SCSI_MLQUEUE_HOST_BUSY;

		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : request rejected\n"));
	}

	return rtn;
 done:
	cmd->scsi_done(cmd);
	return 0;
}

/**
 * scsi_done - Invoke completion on finished SCSI command.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * Description: This function is the mid-level's (SCSI Core) interrupt routine,
 * which regains ownership of the SCSI command (de facto) from a LLDD, and
 * calls blk_complete_request() for further processing.
 *
 * This function is interrupt context safe.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
	trace_scsi_dispatch_cmd_done(cmd);
	blk_complete_request(cmd->request);
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	for (;;) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = blk_peek_request(q);
		if (!req)
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}

		if (!scsi_dev_queue_ready(q, sdev))
			break;

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blk_start_request(req);

		spin_unlock_irq(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}

		/*
		 * We hit this when the driver is using a host wide
		 * tag map. For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag. Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) {
			spin_lock_irq(shost->host_lock);
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			spin_unlock_irq(shost->host_lock);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto host_not_ready;

		if (sdev->simple_tags)
			cmd->flags |= SCMD_TAGGED;
		else
			cmd->flags &= ~SCMD_TAGGED;

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		cmd->scsi_done = scsi_done;
		rtn = scsi_dispatch_cmd(cmd);
		if (rtn) {
			scsi_queue_insert(cmd, rtn);
			spin_lock_irq(q->queue_lock);
			goto out_delay;
		}
		spin_lock_irq(q->queue_lock);
	}

	return;

 host_not_ready:
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
 not_ready:
	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	atomic_dec(&sdev->device_busy);
out_delay:
	if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
		blk_delay_queue(q, SCSI_QUEUE_DELAY);
}

static inline int prep_to_mq(int ret)
{
	switch (ret) {
	case BLKPREP_OK:
		return 0;
	case BLKPREP_DEFER:
		return BLK_MQ_RQ_QUEUE_BUSY;
	default:
		return BLK_MQ_RQ_QUEUE_ERROR;
	}
}

static int scsi_mq_prep_fn(struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = req->q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned char *sense_buf = cmd->sense_buffer;
	struct scatterlist *sg;

	memset(cmd, 0, sizeof(struct scsi_cmnd));

	req->special = cmd;

	cmd->request = req;
	cmd->device = sdev;
	cmd->sense_buffer = sense_buf;

	cmd->tag = req->tag;

	cmd->cmnd = req->cmd;
	cmd->prot_op = SCSI_PROT_NORMAL;

	INIT_LIST_HEAD(&cmd->list);
	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
	cmd->jiffies_at_alloc = jiffies;

	if (shost->use_cmd_list) {
		spin_lock_irq(&sdev->list_lock);
		list_add_tail(&cmd->list, &sdev->cmd_list);
		spin_unlock_irq(&sdev->list_lock);
	}

	sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
	cmd->sdb.table.sgl = sg;

	if (scsi_host_get_prot(shost)) {
		cmd->prot_sdb = (void *)sg +
			min_t(unsigned int,
			      shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) *
			sizeof(struct scatterlist);
		memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));

		cmd->prot_sdb->table.sgl =
			(struct scatterlist *)(cmd->prot_sdb + 1);
	}

	if (blk_bidi_rq(req)) {
		struct request *next_rq = req->next_rq;
		struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq);

		memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer));
		bidi_sdb->table.sgl =
			(struct scatterlist *)(bidi_sdb + 1);

		next_rq->special = bidi_sdb;
	}

	blk_mq_start_request(req);

	return scsi_setup_cmnd(sdev, req);
}

static void scsi_mq_done(struct scsi_cmnd *cmd)
{
	trace_scsi_dispatch_cmd_done(cmd);
	blk_mq_complete_request(cmd->request);
}

static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	int ret;
	int reason;

	ret = prep_to_mq(scsi_prep_state_check(sdev, req));
	if (ret)
		goto out;

	ret = BLK_MQ_RQ_QUEUE_BUSY;
	if (!get_device(&sdev->sdev_gendev))
		goto out;

	if (!scsi_dev_queue_ready(q, sdev))
		goto out_put_device;
	if (!scsi_target_queue_ready(shost, sdev))
		goto out_dec_device_busy;
	if (!scsi_host_queue_ready(q, shost, sdev))
		goto out_dec_target_busy;


	if (!(req->cmd_flags & REQ_DONTPREP)) {
		ret = prep_to_mq(scsi_mq_prep_fn(req));
		if (ret)
			goto out_dec_host_busy;
		req->cmd_flags |= REQ_DONTPREP;
	} else {
		blk_mq_start_request(req);
	}

	if (sdev->simple_tags)
		cmd->flags |= SCMD_TAGGED;
	else
		cmd->flags &= ~SCMD_TAGGED;

	scsi_init_cmd_errh(cmd);
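	/* Completion is routed through the blk-mq path via scsi_mq_done(). */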
cmd->scsi_done = scsi_mq_done; 2004 2005 reason = scsi_dispatch_cmd(cmd); 2006 if (reason) { 2007 scsi_set_blocked(cmd, reason); 2008 ret = BLK_MQ_RQ_QUEUE_BUSY; 2009 goto out_dec_host_busy; 2010 } 2011 2012 return BLK_MQ_RQ_QUEUE_OK; 2013 2014 out_dec_host_busy: 2015 atomic_dec(&shost->host_busy); 2016 out_dec_target_busy: 2017 if (scsi_target(sdev)->can_queue > 0) 2018 atomic_dec(&scsi_target(sdev)->target_busy); 2019 out_dec_device_busy: 2020 atomic_dec(&sdev->device_busy); 2021 out_put_device: 2022 put_device(&sdev->sdev_gendev); 2023 out: 2024 switch (ret) { 2025 case BLK_MQ_RQ_QUEUE_BUSY: 2026 blk_mq_stop_hw_queue(hctx); 2027 if (atomic_read(&sdev->device_busy) == 0 && 2028 !scsi_device_blocked(sdev)) 2029 blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY); 2030 break; 2031 case BLK_MQ_RQ_QUEUE_ERROR: 2032 /* 2033 * Make sure to release all allocated resources when 2034 * we hit an error, as we will never see this command 2035 * again. 2036 */ 2037 if (req->cmd_flags & REQ_DONTPREP) 2038 scsi_mq_uninit_cmd(cmd); 2039 break; 2040 default: 2041 break; 2042 } 2043 return ret; 2044 } 2045 2046 static enum blk_eh_timer_return scsi_timeout(struct request *req, 2047 bool reserved) 2048 { 2049 if (reserved) 2050 return BLK_EH_RESET_TIMER; 2051 return scsi_times_out(req); 2052 } 2053 2054 static int scsi_init_request(void *data, struct request *rq, 2055 unsigned int hctx_idx, unsigned int request_idx, 2056 unsigned int numa_node) 2057 { 2058 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); 2059 2060 cmd->sense_buffer = kzalloc_node(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL, 2061 numa_node); 2062 if (!cmd->sense_buffer) 2063 return -ENOMEM; 2064 return 0; 2065 } 2066 2067 static void scsi_exit_request(void *data, struct request *rq, 2068 unsigned int hctx_idx, unsigned int request_idx) 2069 { 2070 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); 2071 2072 kfree(cmd->sense_buffer); 2073 } 2074 2075 static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) 2076 { 2077 struct device *host_dev; 2078 u64 bounce_limit = 0xffffffff; 2079 2080 if (shost->unchecked_isa_dma) 2081 return BLK_BOUNCE_ISA; 2082 /* 2083 * Platforms with virtual-DMA translation 2084 * hardware have no practical limit.
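 *
 * Otherwise the limit is taken from the DMA mask of the device returned
 * by scsi_get_device(): dma_max_pfn() gives the highest addressable page
 * frame, which is shifted into a byte address for blk_queue_bounce_limit(),
 * with a 32-bit default when no DMA mask is set.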
2085 */ 2086 if (!PCI_DMA_BUS_IS_PHYS) 2087 return BLK_BOUNCE_ANY; 2088 2089 host_dev = scsi_get_device(shost); 2090 if (host_dev && host_dev->dma_mask) 2091 bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT; 2092 2093 return bounce_limit; 2094 } 2095 2096 static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) 2097 { 2098 struct device *dev = shost->dma_dev; 2099 2100 /* 2101 * this limit is imposed by hardware restrictions 2102 */ 2103 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, 2104 SCSI_MAX_SG_CHAIN_SEGMENTS)); 2105 2106 if (scsi_host_prot_dma(shost)) { 2107 shost->sg_prot_tablesize = 2108 min_not_zero(shost->sg_prot_tablesize, 2109 (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS); 2110 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize); 2111 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); 2112 } 2113 2114 blk_queue_max_hw_sectors(q, shost->max_sectors); 2115 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); 2116 blk_queue_segment_boundary(q, shost->dma_boundary); 2117 dma_set_seg_boundary(dev, shost->dma_boundary); 2118 2119 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); 2120 2121 if (!shost->use_clustering) 2122 q->limits.cluster = 0; 2123 2124 /* 2125 * set a reasonable default alignment on word boundaries: the 2126 * host and device may alter it using 2127 * blk_queue_update_dma_alignment() later. 2128 */ 2129 blk_queue_dma_alignment(q, 0x03); 2130 } 2131 2132 struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, 2133 request_fn_proc *request_fn) 2134 { 2135 struct request_queue *q; 2136 2137 q = blk_init_queue(request_fn, NULL); 2138 if (!q) 2139 return NULL; 2140 __scsi_init_queue(shost, q); 2141 return q; 2142 } 2143 EXPORT_SYMBOL(__scsi_alloc_queue); 2144 2145 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) 2146 { 2147 struct request_queue *q; 2148 2149 q = __scsi_alloc_queue(sdev->host, scsi_request_fn); 2150 if (!q) 2151 return NULL; 2152 2153 blk_queue_prep_rq(q, scsi_prep_fn); 2154 blk_queue_unprep_rq(q, scsi_unprep_fn); 2155 blk_queue_softirq_done(q, scsi_softirq_done); 2156 blk_queue_rq_timed_out(q, scsi_times_out); 2157 blk_queue_lld_busy(q, scsi_lld_busy); 2158 return q; 2159 } 2160 2161 static struct blk_mq_ops scsi_mq_ops = { 2162 .map_queue = blk_mq_map_queue, 2163 .queue_rq = scsi_queue_rq, 2164 .complete = scsi_softirq_done, 2165 .timeout = scsi_timeout, 2166 .init_request = scsi_init_request, 2167 .exit_request = scsi_exit_request, 2168 }; 2169 2170 struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev) 2171 { 2172 sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set); 2173 if (IS_ERR(sdev->request_queue)) 2174 return NULL; 2175 2176 sdev->request_queue->queuedata = sdev; 2177 __scsi_init_queue(sdev->host, sdev->request_queue); 2178 return sdev->request_queue; 2179 } 2180 2181 int scsi_mq_setup_tags(struct Scsi_Host *shost) 2182 { 2183 unsigned int cmd_size, sgl_size, tbl_size; 2184 2185 tbl_size = shost->sg_tablesize; 2186 if (tbl_size > SCSI_MAX_SG_SEGMENTS) 2187 tbl_size = SCSI_MAX_SG_SEGMENTS; 2188 sgl_size = tbl_size * sizeof(struct scatterlist); 2189 cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; 2190 if (scsi_host_get_prot(shost)) 2191 cmd_size += sizeof(struct scsi_data_buffer) + sgl_size; 2192 2193 memset(&shost->tag_set, 0, sizeof(shost->tag_set)); 2194 shost->tag_set.ops = &scsi_mq_ops; 2195 shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? 
: 1; 2196 shost->tag_set.queue_depth = shost->can_queue; 2197 shost->tag_set.cmd_size = cmd_size; 2198 shost->tag_set.numa_node = NUMA_NO_NODE; 2199 shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; 2200 shost->tag_set.driver_data = shost; 2201 2202 return blk_mq_alloc_tag_set(&shost->tag_set); 2203 } 2204 2205 void scsi_mq_destroy_tags(struct Scsi_Host *shost) 2206 { 2207 blk_mq_free_tag_set(&shost->tag_set); 2208 } 2209 2210 /* 2211 * Function: scsi_block_requests() 2212 * 2213 * Purpose: Utility function used by low-level drivers to prevent further 2214 * commands from being queued to the device. 2215 * 2216 * Arguments: shost - Host in question 2217 * 2218 * Returns: Nothing 2219 * 2220 * Lock status: No locks are assumed held. 2221 * 2222 * Notes: There is no timer nor any other means by which the requests 2223 * get unblocked other than the low-level driver calling 2224 * scsi_unblock_requests(). 2225 */ 2226 void scsi_block_requests(struct Scsi_Host *shost) 2227 { 2228 shost->host_self_blocked = 1; 2229 } 2230 EXPORT_SYMBOL(scsi_block_requests); 2231 2232 /* 2233 * Function: scsi_unblock_requests() 2234 * 2235 * Purpose: Utility function used by low-level drivers to allow further 2236 * commands from being queued to the device. 2237 * 2238 * Arguments: shost - Host in question 2239 * 2240 * Returns: Nothing 2241 * 2242 * Lock status: No locks are assumed held. 2243 * 2244 * Notes: There is no timer nor any other means by which the requests 2245 * get unblocked other than the low-level driver calling 2246 * scsi_unblock_requests(). 2247 * 2248 * This is done as an API function so that changes to the 2249 * internals of the scsi mid-layer won't require wholesale 2250 * changes to drivers that use this feature. 2251 */ 2252 void scsi_unblock_requests(struct Scsi_Host *shost) 2253 { 2254 shost->host_self_blocked = 0; 2255 scsi_run_host_queues(shost); 2256 } 2257 EXPORT_SYMBOL(scsi_unblock_requests); 2258 2259 int __init scsi_init_queue(void) 2260 { 2261 int i; 2262 2263 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer", 2264 sizeof(struct scsi_data_buffer), 2265 0, 0, NULL); 2266 if (!scsi_sdb_cache) { 2267 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n"); 2268 return -ENOMEM; 2269 } 2270 2271 for (i = 0; i < SG_MEMPOOL_NR; i++) { 2272 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 2273 int size = sgp->size * sizeof(struct scatterlist); 2274 2275 sgp->slab = kmem_cache_create(sgp->name, size, 0, 2276 SLAB_HWCACHE_ALIGN, NULL); 2277 if (!sgp->slab) { 2278 printk(KERN_ERR "SCSI: can't init sg slab %s\n", 2279 sgp->name); 2280 goto cleanup_sdb; 2281 } 2282 2283 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, 2284 sgp->slab); 2285 if (!sgp->pool) { 2286 printk(KERN_ERR "SCSI: can't init sg mempool %s\n", 2287 sgp->name); 2288 goto cleanup_sdb; 2289 } 2290 } 2291 2292 return 0; 2293 2294 cleanup_sdb: 2295 for (i = 0; i < SG_MEMPOOL_NR; i++) { 2296 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 2297 if (sgp->pool) 2298 mempool_destroy(sgp->pool); 2299 if (sgp->slab) 2300 kmem_cache_destroy(sgp->slab); 2301 } 2302 kmem_cache_destroy(scsi_sdb_cache); 2303 2304 return -ENOMEM; 2305 } 2306 2307 void scsi_exit_queue(void) 2308 { 2309 int i; 2310 2311 kmem_cache_destroy(scsi_sdb_cache); 2312 2313 for (i = 0; i < SG_MEMPOOL_NR; i++) { 2314 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 2315 mempool_destroy(sgp->pool); 2316 kmem_cache_destroy(sgp->slab); 2317 } 2318 } 2319 2320 /** 2321 * scsi_mode_select - issue a mode select 2322 * @sdev: SCSI device to be 
queried 2323 * @pf: Page format bit (1 == standard, 0 == vendor specific) 2324 * @sp: Save page bit (0 == don't save, 1 == save) 2325 * @modepage: mode page being requested 2326 * @buffer: request buffer (may not be smaller than eight bytes) 2327 * @len: length of request buffer. 2328 * @timeout: command timeout 2329 * @retries: number of retries before failing 2330 * @data: returns a structure abstracting the mode header data 2331 * @sshdr: place to put sense data (or NULL if no sense to be collected). 2332 * must be SCSI_SENSE_BUFFERSIZE big. 2333 * 2334 * Returns zero if successful; negative error number or scsi 2335 * status on error 2336 * 2337 */ 2338 int 2339 scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage, 2340 unsigned char *buffer, int len, int timeout, int retries, 2341 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 2342 { 2343 unsigned char cmd[10]; 2344 unsigned char *real_buffer; 2345 int ret; 2346 2347 memset(cmd, 0, sizeof(cmd)); 2348 cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0); 2349 2350 if (sdev->use_10_for_ms) { 2351 if (len > 65535) 2352 return -EINVAL; 2353 real_buffer = kmalloc(8 + len, GFP_KERNEL); 2354 if (!real_buffer) 2355 return -ENOMEM; 2356 memcpy(real_buffer + 8, buffer, len); 2357 len += 8; 2358 real_buffer[0] = 0; 2359 real_buffer[1] = 0; 2360 real_buffer[2] = data->medium_type; 2361 real_buffer[3] = data->device_specific; 2362 real_buffer[4] = data->longlba ? 0x01 : 0; 2363 real_buffer[5] = 0; 2364 real_buffer[6] = data->block_descriptor_length >> 8; 2365 real_buffer[7] = data->block_descriptor_length; 2366 2367 cmd[0] = MODE_SELECT_10; 2368 cmd[7] = len >> 8; 2369 cmd[8] = len; 2370 } else { 2371 if (len > 255 || data->block_descriptor_length > 255 || 2372 data->longlba) 2373 return -EINVAL; 2374 2375 real_buffer = kmalloc(4 + len, GFP_KERNEL); 2376 if (!real_buffer) 2377 return -ENOMEM; 2378 memcpy(real_buffer + 4, buffer, len); 2379 len += 4; 2380 real_buffer[0] = 0; 2381 real_buffer[1] = data->medium_type; 2382 real_buffer[2] = data->device_specific; 2383 real_buffer[3] = data->block_descriptor_length; 2384 2385 2386 cmd[0] = MODE_SELECT; 2387 cmd[4] = len; 2388 } 2389 2390 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len, 2391 sshdr, timeout, retries, NULL); 2392 kfree(real_buffer); 2393 return ret; 2394 } 2395 EXPORT_SYMBOL_GPL(scsi_mode_select); 2396 2397 /** 2398 * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary. 2399 * @sdev: SCSI device to be queried 2400 * @dbd: set if mode sense will allow block descriptors to be returned 2401 * @modepage: mode page being requested 2402 * @buffer: request buffer (may not be smaller than eight bytes) 2403 * @len: length of request buffer. 2404 * @timeout: command timeout 2405 * @retries: number of retries before failing 2406 * @data: returns a structure abstracting the mode header data 2407 * @sshdr: place to put sense data (or NULL if no sense to be collected). 2408 * must be SCSI_SENSE_BUFFERSIZE big. 2409 * 2410 * Returns zero if unsuccessful, or the header offset (either 4 2411 * or 8 depending on whether a six or ten byte command was 2412 * issued) if successful. 
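 *
 * A minimal call sketch (hypothetical caller; the mode page number,
 * timeout and retry count below are illustrative only):
 *
 *	unsigned char buf[64];
 *	struct scsi_mode_data data;
 *	struct scsi_sense_hdr sshdr;
 *	int res;
 *
 *	res = scsi_mode_sense(sdev, 0, 0x08, buf, sizeof(buf),
 *			      30 * HZ, 3, &data, &sshdr);
 *	if (scsi_status_is_good(res))
 *		pr_info("mode data length %u, %d byte header\n",
 *			data.length, data.header_length);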
2413 */ 2414 int 2415 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, 2416 unsigned char *buffer, int len, int timeout, int retries, 2417 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 2418 { 2419 unsigned char cmd[12]; 2420 int use_10_for_ms; 2421 int header_length; 2422 int result; 2423 struct scsi_sense_hdr my_sshdr; 2424 2425 memset(data, 0, sizeof(*data)); 2426 memset(&cmd[0], 0, 12); 2427 cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ 2428 cmd[2] = modepage; 2429 2430 /* caller might not be interested in sense, but we need it */ 2431 if (!sshdr) 2432 sshdr = &my_sshdr; 2433 2434 retry: 2435 use_10_for_ms = sdev->use_10_for_ms; 2436 2437 if (use_10_for_ms) { 2438 if (len < 8) 2439 len = 8; 2440 2441 cmd[0] = MODE_SENSE_10; 2442 cmd[8] = len; 2443 header_length = 8; 2444 } else { 2445 if (len < 4) 2446 len = 4; 2447 2448 cmd[0] = MODE_SENSE; 2449 cmd[4] = len; 2450 header_length = 4; 2451 } 2452 2453 memset(buffer, 0, len); 2454 2455 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, 2456 sshdr, timeout, retries, NULL); 2457 2458 /* This code looks awful: what it's doing is making sure an 2459 * ILLEGAL REQUEST sense return identifies the actual command 2460 * byte as the problem. MODE_SENSE commands can return 2461 * ILLEGAL REQUEST if the code page isn't supported */ 2462 2463 if (use_10_for_ms && !scsi_status_is_good(result) && 2464 (driver_byte(result) & DRIVER_SENSE)) { 2465 if (scsi_sense_valid(sshdr)) { 2466 if ((sshdr->sense_key == ILLEGAL_REQUEST) && 2467 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { 2468 /* 2469 * Invalid command operation code 2470 */ 2471 sdev->use_10_for_ms = 0; 2472 goto retry; 2473 } 2474 } 2475 } 2476 2477 if(scsi_status_is_good(result)) { 2478 if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b && 2479 (modepage == 6 || modepage == 8))) { 2480 /* Initio breakage? */ 2481 header_length = 0; 2482 data->length = 13; 2483 data->medium_type = 0; 2484 data->device_specific = 0; 2485 data->longlba = 0; 2486 data->block_descriptor_length = 0; 2487 } else if(use_10_for_ms) { 2488 data->length = buffer[0]*256 + buffer[1] + 2; 2489 data->medium_type = buffer[2]; 2490 data->device_specific = buffer[3]; 2491 data->longlba = buffer[4] & 0x01; 2492 data->block_descriptor_length = buffer[6]*256 2493 + buffer[7]; 2494 } else { 2495 data->length = buffer[0] + 1; 2496 data->medium_type = buffer[1]; 2497 data->device_specific = buffer[2]; 2498 data->block_descriptor_length = buffer[3]; 2499 } 2500 data->header_length = header_length; 2501 } 2502 2503 return result; 2504 } 2505 EXPORT_SYMBOL(scsi_mode_sense); 2506 2507 /** 2508 * scsi_test_unit_ready - test if unit is ready 2509 * @sdev: scsi device to change the state of. 2510 * @timeout: command timeout 2511 * @retries: number of retries before failing 2512 * @sshdr_external: Optional pointer to struct scsi_sense_hdr for 2513 * returning sense. Make sure that this is cleared before passing 2514 * in. 2515 * 2516 * Returns zero if unsuccessful or an error if TUR failed. For 2517 * removable media, UNIT_ATTENTION sets ->changed flag. 
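 *
 * Sketch of a typical call (timeout and retry values are illustrative;
 * the sense header argument may be NULL if the caller does not need it,
 * otherwise it must be cleared first, as done here):
 *
 *	struct scsi_sense_hdr sshdr = {};
 *	int ready;
 *
 *	ready = (scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr) == 0);
 *
 * A zero return means the TEST UNIT READY completed with GOOD status.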
2518 **/ 2519 int 2520 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, 2521 struct scsi_sense_hdr *sshdr_external) 2522 { 2523 char cmd[] = { 2524 TEST_UNIT_READY, 0, 0, 0, 0, 0, 2525 }; 2526 struct scsi_sense_hdr *sshdr; 2527 int result; 2528 2529 if (!sshdr_external) 2530 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); 2531 else 2532 sshdr = sshdr_external; 2533 2534 /* try to eat the UNIT_ATTENTION if there are enough retries */ 2535 do { 2536 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr, 2537 timeout, retries, NULL); 2538 if (sdev->removable && scsi_sense_valid(sshdr) && 2539 sshdr->sense_key == UNIT_ATTENTION) 2540 sdev->changed = 1; 2541 } while (scsi_sense_valid(sshdr) && 2542 sshdr->sense_key == UNIT_ATTENTION && --retries); 2543 2544 if (!sshdr_external) 2545 kfree(sshdr); 2546 return result; 2547 } 2548 EXPORT_SYMBOL(scsi_test_unit_ready); 2549 2550 /** 2551 * scsi_device_set_state - Take the given device through the device state model. 2552 * @sdev: scsi device to change the state of. 2553 * @state: state to change to. 2554 * 2555 * Returns zero if unsuccessful or an error if the requested 2556 * transition is illegal. 2557 */ 2558 int 2559 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) 2560 { 2561 enum scsi_device_state oldstate = sdev->sdev_state; 2562 2563 if (state == oldstate) 2564 return 0; 2565 2566 switch (state) { 2567 case SDEV_CREATED: 2568 switch (oldstate) { 2569 case SDEV_CREATED_BLOCK: 2570 break; 2571 default: 2572 goto illegal; 2573 } 2574 break; 2575 2576 case SDEV_RUNNING: 2577 switch (oldstate) { 2578 case SDEV_CREATED: 2579 case SDEV_OFFLINE: 2580 case SDEV_TRANSPORT_OFFLINE: 2581 case SDEV_QUIESCE: 2582 case SDEV_BLOCK: 2583 break; 2584 default: 2585 goto illegal; 2586 } 2587 break; 2588 2589 case SDEV_QUIESCE: 2590 switch (oldstate) { 2591 case SDEV_RUNNING: 2592 case SDEV_OFFLINE: 2593 case SDEV_TRANSPORT_OFFLINE: 2594 break; 2595 default: 2596 goto illegal; 2597 } 2598 break; 2599 2600 case SDEV_OFFLINE: 2601 case SDEV_TRANSPORT_OFFLINE: 2602 switch (oldstate) { 2603 case SDEV_CREATED: 2604 case SDEV_RUNNING: 2605 case SDEV_QUIESCE: 2606 case SDEV_BLOCK: 2607 break; 2608 default: 2609 goto illegal; 2610 } 2611 break; 2612 2613 case SDEV_BLOCK: 2614 switch (oldstate) { 2615 case SDEV_RUNNING: 2616 case SDEV_CREATED_BLOCK: 2617 break; 2618 default: 2619 goto illegal; 2620 } 2621 break; 2622 2623 case SDEV_CREATED_BLOCK: 2624 switch (oldstate) { 2625 case SDEV_CREATED: 2626 break; 2627 default: 2628 goto illegal; 2629 } 2630 break; 2631 2632 case SDEV_CANCEL: 2633 switch (oldstate) { 2634 case SDEV_CREATED: 2635 case SDEV_RUNNING: 2636 case SDEV_QUIESCE: 2637 case SDEV_OFFLINE: 2638 case SDEV_TRANSPORT_OFFLINE: 2639 case SDEV_BLOCK: 2640 break; 2641 default: 2642 goto illegal; 2643 } 2644 break; 2645 2646 case SDEV_DEL: 2647 switch (oldstate) { 2648 case SDEV_CREATED: 2649 case SDEV_RUNNING: 2650 case SDEV_OFFLINE: 2651 case SDEV_TRANSPORT_OFFLINE: 2652 case SDEV_CANCEL: 2653 case SDEV_CREATED_BLOCK: 2654 break; 2655 default: 2656 goto illegal; 2657 } 2658 break; 2659 2660 } 2661 sdev->sdev_state = state; 2662 return 0; 2663 2664 illegal: 2665 SCSI_LOG_ERROR_RECOVERY(1, 2666 sdev_printk(KERN_ERR, sdev, 2667 "Illegal state transition %s->%s", 2668 scsi_device_state_name(oldstate), 2669 scsi_device_state_name(state)) 2670 ); 2671 return -EINVAL; 2672 } 2673 EXPORT_SYMBOL(scsi_device_set_state); 2674 2675 /** 2676 * sdev_evt_emit - emit a single SCSI device uevent 2677 * @sdev: associated 
SCSI device 2678 * @evt: event to emit 2679 * 2680 * Send a single uevent (scsi_event) to the associated scsi_device. 2681 */ 2682 static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) 2683 { 2684 int idx = 0; 2685 char *envp[3]; 2686 2687 switch (evt->evt_type) { 2688 case SDEV_EVT_MEDIA_CHANGE: 2689 envp[idx++] = "SDEV_MEDIA_CHANGE=1"; 2690 break; 2691 case SDEV_EVT_INQUIRY_CHANGE_REPORTED: 2692 envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED"; 2693 break; 2694 case SDEV_EVT_CAPACITY_CHANGE_REPORTED: 2695 envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED"; 2696 break; 2697 case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: 2698 envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED"; 2699 break; 2700 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: 2701 envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED"; 2702 break; 2703 case SDEV_EVT_LUN_CHANGE_REPORTED: 2704 envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED"; 2705 break; 2706 default: 2707 /* do nothing */ 2708 break; 2709 } 2710 2711 envp[idx++] = NULL; 2712 2713 kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp); 2714 } 2715 2716 /** 2717 * sdev_evt_thread - send a uevent for each scsi event 2718 * @work: work struct for scsi_device 2719 * 2720 * Dispatch queued events to their associated scsi_device kobjects 2721 * as uevents. 2722 */ 2723 void scsi_evt_thread(struct work_struct *work) 2724 { 2725 struct scsi_device *sdev; 2726 enum scsi_device_event evt_type; 2727 LIST_HEAD(event_list); 2728 2729 sdev = container_of(work, struct scsi_device, event_work); 2730 2731 for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++) 2732 if (test_and_clear_bit(evt_type, sdev->pending_events)) 2733 sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL); 2734 2735 while (1) { 2736 struct scsi_event *evt; 2737 struct list_head *this, *tmp; 2738 unsigned long flags; 2739 2740 spin_lock_irqsave(&sdev->list_lock, flags); 2741 list_splice_init(&sdev->event_list, &event_list); 2742 spin_unlock_irqrestore(&sdev->list_lock, flags); 2743 2744 if (list_empty(&event_list)) 2745 break; 2746 2747 list_for_each_safe(this, tmp, &event_list) { 2748 evt = list_entry(this, struct scsi_event, node); 2749 list_del(&evt->node); 2750 scsi_evt_emit(sdev, evt); 2751 kfree(evt); 2752 } 2753 } 2754 } 2755 2756 /** 2757 * sdev_evt_send - send asserted event to uevent thread 2758 * @sdev: scsi_device event occurred on 2759 * @evt: event to send 2760 * 2761 * Assert scsi device event asynchronously. 2762 */ 2763 void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt) 2764 { 2765 unsigned long flags; 2766 2767 #if 0 2768 /* FIXME: currently this check eliminates all media change events 2769 * for polled devices. Need to update to discriminate between AN 2770 * and polled events */ 2771 if (!test_bit(evt->evt_type, sdev->supported_events)) { 2772 kfree(evt); 2773 return; 2774 } 2775 #endif 2776 2777 spin_lock_irqsave(&sdev->list_lock, flags); 2778 list_add_tail(&evt->node, &sdev->event_list); 2779 schedule_work(&sdev->event_work); 2780 spin_unlock_irqrestore(&sdev->list_lock, flags); 2781 } 2782 EXPORT_SYMBOL_GPL(sdev_evt_send); 2783 2784 /** 2785 * sdev_evt_alloc - allocate a new scsi event 2786 * @evt_type: type of event to allocate 2787 * @gfpflags: GFP flags for allocation 2788 * 2789 * Allocates and returns a new scsi_event. 
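 *
 * Sketch of the usual alloc/send pairing (the event is consumed by
 * sdev_evt_send(), which queues it for the device's event worker):
 *
 *	struct scsi_event *evt;
 *
 *	evt = sdev_evt_alloc(SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
 *	if (evt)
 *		sdev_evt_send(sdev, evt);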
2790 */ 2791 struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, 2792 gfp_t gfpflags) 2793 { 2794 struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags); 2795 if (!evt) 2796 return NULL; 2797 2798 evt->evt_type = evt_type; 2799 INIT_LIST_HEAD(&evt->node); 2800 2801 /* evt_type-specific initialization, if any */ 2802 switch (evt_type) { 2803 case SDEV_EVT_MEDIA_CHANGE: 2804 case SDEV_EVT_INQUIRY_CHANGE_REPORTED: 2805 case SDEV_EVT_CAPACITY_CHANGE_REPORTED: 2806 case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: 2807 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: 2808 case SDEV_EVT_LUN_CHANGE_REPORTED: 2809 default: 2810 /* do nothing */ 2811 break; 2812 } 2813 2814 return evt; 2815 } 2816 EXPORT_SYMBOL_GPL(sdev_evt_alloc); 2817 2818 /** 2819 * sdev_evt_send_simple - send asserted event to uevent thread 2820 * @sdev: scsi_device event occurred on 2821 * @evt_type: type of event to send 2822 * @gfpflags: GFP flags for allocation 2823 * 2824 * Assert scsi device event asynchronously, given an event type. 2825 */ 2826 void sdev_evt_send_simple(struct scsi_device *sdev, 2827 enum scsi_device_event evt_type, gfp_t gfpflags) 2828 { 2829 struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags); 2830 if (!evt) { 2831 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n", 2832 evt_type); 2833 return; 2834 } 2835 2836 sdev_evt_send(sdev, evt); 2837 } 2838 EXPORT_SYMBOL_GPL(sdev_evt_send_simple); 2839 2840 /** 2841 * scsi_device_quiesce - Block user issued commands. 2842 * @sdev: scsi device to quiesce. 2843 * 2844 * This works by trying to transition to the SDEV_QUIESCE state 2845 * (which must be a legal transition). When the device is in this 2846 * state, only special requests will be accepted, all others will 2847 * be deferred. Since special requests may also be requeued requests, 2848 * a successful return doesn't guarantee the device will be 2849 * totally quiescent. 2850 * 2851 * Must be called with user context, may sleep. 2852 * 2853 * Returns zero if unsuccessful or an error if not. 2854 */ 2855 int 2856 scsi_device_quiesce(struct scsi_device *sdev) 2857 { 2858 int err = scsi_device_set_state(sdev, SDEV_QUIESCE); 2859 if (err) 2860 return err; 2861 2862 scsi_run_queue(sdev->request_queue); 2863 while (atomic_read(&sdev->device_busy)) { 2864 msleep_interruptible(200); 2865 scsi_run_queue(sdev->request_queue); 2866 } 2867 return 0; 2868 } 2869 EXPORT_SYMBOL(scsi_device_quiesce); 2870 2871 /** 2872 * scsi_device_resume - Restart user issued commands to a quiesced device. 2873 * @sdev: scsi device to resume. 2874 * 2875 * Moves the device from quiesced back to running and restarts the 2876 * queues. 2877 * 2878 * Must be called with user context, may sleep. 
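 *
 * Sketch of the intended quiesce/resume pairing (hypothetical caller;
 * resume is only needed when the quiesce succeeded):
 *
 *	if (!scsi_device_quiesce(sdev)) {
 *		do_maintenance(sdev);
 *		scsi_device_resume(sdev);
 *	}
 *
 * (do_maintenance() above is a hypothetical stand-in for whatever work
 * required the device to be quiesced.)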
2879 */ 2880 void scsi_device_resume(struct scsi_device *sdev) 2881 { 2882 /* check if the device state was mutated prior to resume, and if 2883 * so assume the state is being managed elsewhere (for example 2884 * device deleted during suspend) 2885 */ 2886 if (sdev->sdev_state != SDEV_QUIESCE || 2887 scsi_device_set_state(sdev, SDEV_RUNNING)) 2888 return; 2889 scsi_run_queue(sdev->request_queue); 2890 } 2891 EXPORT_SYMBOL(scsi_device_resume); 2892 2893 static void 2894 device_quiesce_fn(struct scsi_device *sdev, void *data) 2895 { 2896 scsi_device_quiesce(sdev); 2897 } 2898 2899 void 2900 scsi_target_quiesce(struct scsi_target *starget) 2901 { 2902 starget_for_each_device(starget, NULL, device_quiesce_fn); 2903 } 2904 EXPORT_SYMBOL(scsi_target_quiesce); 2905 2906 static void 2907 device_resume_fn(struct scsi_device *sdev, void *data) 2908 { 2909 scsi_device_resume(sdev); 2910 } 2911 2912 void 2913 scsi_target_resume(struct scsi_target *starget) 2914 { 2915 starget_for_each_device(starget, NULL, device_resume_fn); 2916 } 2917 EXPORT_SYMBOL(scsi_target_resume); 2918 2919 /** 2920 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state 2921 * @sdev: device to block 2922 * 2923 * Block request made by scsi lld's to temporarily stop all 2924 * scsi commands on the specified device. Called from interrupt 2925 * or normal process context. 2926 * 2927 * Returns zero if successful or error if not 2928 * 2929 * Notes: 2930 * This routine transitions the device to the SDEV_BLOCK state 2931 * (which must be a legal transition). When the device is in this 2932 * state, all commands are deferred until the scsi lld reenables 2933 * the device with scsi_device_unblock or device_block_tmo fires. 2934 */ 2935 int 2936 scsi_internal_device_block(struct scsi_device *sdev) 2937 { 2938 struct request_queue *q = sdev->request_queue; 2939 unsigned long flags; 2940 int err = 0; 2941 2942 err = scsi_device_set_state(sdev, SDEV_BLOCK); 2943 if (err) { 2944 err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK); 2945 2946 if (err) 2947 return err; 2948 } 2949 2950 /* 2951 * The device has transitioned to SDEV_BLOCK. Stop the 2952 * block layer from calling the midlayer with this device's 2953 * request queue. 2954 */ 2955 if (q->mq_ops) { 2956 blk_mq_stop_hw_queues(q); 2957 } else { 2958 spin_lock_irqsave(q->queue_lock, flags); 2959 blk_stop_queue(q); 2960 spin_unlock_irqrestore(q->queue_lock, flags); 2961 } 2962 2963 return 0; 2964 } 2965 EXPORT_SYMBOL_GPL(scsi_internal_device_block); 2966 2967 /** 2968 * scsi_internal_device_unblock - resume a device after a block request 2969 * @sdev: device to resume 2970 * @new_state: state to set devices to after unblocking 2971 * 2972 * Called by scsi lld's or the midlayer to restart the device queue 2973 * for the previously suspended scsi device. Called from interrupt or 2974 * normal process context. 2975 * 2976 * Returns zero if successful or error if not. 2977 * 2978 * Notes: 2979 * This routine transitions the device to the SDEV_RUNNING state 2980 * or to one of the offline states (which must be a legal transition) 2981 * allowing the midlayer to goose the queue for this device. 2982 */ 2983 int 2984 scsi_internal_device_unblock(struct scsi_device *sdev, 2985 enum scsi_device_state new_state) 2986 { 2987 struct request_queue *q = sdev->request_queue; 2988 unsigned long flags; 2989 2990 /* 2991 * Try to transition the scsi device to SDEV_RUNNING or one of the 2992 * offlined states and goose the device queue if successful. 
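	 *
	 * Specifically: SDEV_BLOCK and SDEV_TRANSPORT_OFFLINE move straight
	 * to new_state; SDEV_CREATED_BLOCK returns to SDEV_CREATED unless an
	 * offline state was requested; SDEV_CANCEL and SDEV_OFFLINE are left
	 * untouched but the queue is still restarted; any other current state
	 * yields -EINVAL.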
2993 */ 2994 if ((sdev->sdev_state == SDEV_BLOCK) || 2995 (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE)) 2996 sdev->sdev_state = new_state; 2997 else if (sdev->sdev_state == SDEV_CREATED_BLOCK) { 2998 if (new_state == SDEV_TRANSPORT_OFFLINE || 2999 new_state == SDEV_OFFLINE) 3000 sdev->sdev_state = new_state; 3001 else 3002 sdev->sdev_state = SDEV_CREATED; 3003 } else if (sdev->sdev_state != SDEV_CANCEL && 3004 sdev->sdev_state != SDEV_OFFLINE) 3005 return -EINVAL; 3006 3007 if (q->mq_ops) { 3008 blk_mq_start_stopped_hw_queues(q, false); 3009 } else { 3010 spin_lock_irqsave(q->queue_lock, flags); 3011 blk_start_queue(q); 3012 spin_unlock_irqrestore(q->queue_lock, flags); 3013 } 3014 3015 return 0; 3016 } 3017 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock); 3018 3019 static void 3020 device_block(struct scsi_device *sdev, void *data) 3021 { 3022 scsi_internal_device_block(sdev); 3023 } 3024 3025 static int 3026 target_block(struct device *dev, void *data) 3027 { 3028 if (scsi_is_target_device(dev)) 3029 starget_for_each_device(to_scsi_target(dev), NULL, 3030 device_block); 3031 return 0; 3032 } 3033 3034 void 3035 scsi_target_block(struct device *dev) 3036 { 3037 if (scsi_is_target_device(dev)) 3038 starget_for_each_device(to_scsi_target(dev), NULL, 3039 device_block); 3040 else 3041 device_for_each_child(dev, NULL, target_block); 3042 } 3043 EXPORT_SYMBOL_GPL(scsi_target_block); 3044 3045 static void 3046 device_unblock(struct scsi_device *sdev, void *data) 3047 { 3048 scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data); 3049 } 3050 3051 static int 3052 target_unblock(struct device *dev, void *data) 3053 { 3054 if (scsi_is_target_device(dev)) 3055 starget_for_each_device(to_scsi_target(dev), data, 3056 device_unblock); 3057 return 0; 3058 } 3059 3060 void 3061 scsi_target_unblock(struct device *dev, enum scsi_device_state new_state) 3062 { 3063 if (scsi_is_target_device(dev)) 3064 starget_for_each_device(to_scsi_target(dev), &new_state, 3065 device_unblock); 3066 else 3067 device_for_each_child(dev, &new_state, target_unblock); 3068 } 3069 EXPORT_SYMBOL_GPL(scsi_target_unblock); 3070 3071 /** 3072 * scsi_kmap_atomic_sg - find and atomically map an sg element 3073 * @sgl: scatter-gather list 3074 * @sg_count: number of segments in sg 3075 * @offset: offset in bytes into sg, on return offset into the mapped area 3076 * @len: bytes to map, on return number of bytes mapped 3077 * 3078 * Returns virtual address of the start of the mapped page 3079 */ 3080 void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, 3081 size_t *offset, size_t *len) 3082 { 3083 int i; 3084 size_t sg_len = 0, len_complete = 0; 3085 struct scatterlist *sg; 3086 struct page *page; 3087 3088 WARN_ON(!irqs_disabled()); 3089 3090 for_each_sg(sgl, sg, sg_count, i) { 3091 len_complete = sg_len; /* Complete sg-entries */ 3092 sg_len += sg->length; 3093 if (sg_len > *offset) 3094 break; 3095 } 3096 3097 if (unlikely(i == sg_count)) { 3098 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, " 3099 "elements %d\n", 3100 __func__, sg_len, *offset, sg_count); 3101 WARN_ON(1); 3102 return NULL; 3103 } 3104 3105 /* Offset starting from the beginning of first page in this sg-entry */ 3106 *offset = *offset - len_complete + sg->offset; 3107 3108 /* Assumption: contiguous pages can be accessed as "page + i" */ 3109 page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT)); 3110 *offset &= ~PAGE_MASK; 3111 3112 /* Bytes in this sg-entry from *offset to the end of the page */ 3113 sg_len = PAGE_SIZE -
*offset; 3114 if (*len > sg_len) 3115 *len = sg_len; 3116 3117 return kmap_atomic(page); 3118 } 3119 EXPORT_SYMBOL(scsi_kmap_atomic_sg); 3120 3121 /** 3122 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg 3123 * @virt: virtual address to be unmapped 3124 */ 3125 void scsi_kunmap_atomic_sg(void *virt) 3126 { 3127 kunmap_atomic(virt); 3128 } 3129 EXPORT_SYMBOL(scsi_kunmap_atomic_sg); 3130 3131 void sdev_disable_disk_events(struct scsi_device *sdev) 3132 { 3133 atomic_inc(&sdev->disk_events_disable_depth); 3134 } 3135 EXPORT_SYMBOL(sdev_disable_disk_events); 3136 3137 void sdev_enable_disk_events(struct scsi_device *sdev) 3138 { 3139 if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0)) 3140 return; 3141 atomic_dec(&sdev->disk_events_disable_depth); 3142 } 3143 EXPORT_SYMBOL(sdev_enable_disk_events); 3144
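
/*
 * Usage sketch for scsi_kmap_atomic_sg()/scsi_kunmap_atomic_sg() above
 * (hypothetical caller; "dest", "desired_byte_offset" and
 * "desired_byte_count" are illustrative).  Both *offset and *len are
 * in/out parameters: on return *offset is relative to the returned page
 * and *len is clamped to what fits in that page.  The pair must be called
 * with interrupts disabled, and nothing may sleep in between.
 *
 *	size_t off = desired_byte_offset, len = desired_byte_count;
 *	void *vaddr;
 *
 *	vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
 *				    &off, &len);
 *	if (vaddr) {
 *		memcpy(dest, vaddr + off, len);
 *		scsi_kunmap_atomic_sg(vaddr);
 *	}
 */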