/*
 * Copyright (C) 1999 Eric Youngdale
 * Copyright (C) 2014 Christoph Hellwig
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include <trace/events/scsi.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

/*
 * When to reinvoke queueing after a resource shortage. It's 3 msecs to
 * not change behaviour from the previous unplug mechanism, experimentation
 * may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY	3

static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken. The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		atomic_set(&host->host_blocked, host->max_host_blocked);
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		atomic_set(&device->device_blocked,
			   device->max_device_blocked);
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		atomic_set(&starget->target_blocked,
			   starget->max_target_blocked);
		break;
	}
}

static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = cmd->request->q;

	blk_mq_requeue_request(cmd->request);
	blk_mq_kick_requeue_list(q);
	put_device(&sdev->sdev_gendev);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
		"Inserting command %p into mlqueue\n", cmd));

	scsi_set_blocked(cmd, reason);

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.  Schedule requeue work under
	 * lock such that the kblockd_schedule_work() call happens
	 * before blk_cleanup_queue() finishes.
	 */
	cmd->result = 0;
	if (q->mq_ops) {
		scsi_mq_requeue_cmd(cmd);
		return;
	}
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	kblockd_schedule_work(&device->requeue_work);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, 1);
}
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	flags to OR into the request flags
 * @resid:	optional residual length
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
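 *
 * A minimal usage sketch (hypothetical caller, values chosen only for
 * illustration): issue a TEST UNIT READY with no data transfer and look
 * at the returned status, e.g.
 *
 *	unsigned char tur[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	int res = scsi_execute(sdev, tur, DMA_NONE, NULL, 0, NULL,
 *			       30 * HZ, 3, 0, NULL);
 *	if (res)
 *		inspect host_byte(res) / status_byte(res) / driver_byte(res)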
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, u64 flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
	if (!req)
		return ret;
	blk_rq_set_block_pc(req);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

	if (resid)
		*resid = req->resid_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);

int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
		     int *resid, u64 flags)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, flags, resid);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req_flags);

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	atomic_dec(&shost->host_busy);
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);

	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled))) {
		spin_lock_irqsave(shost->host_lock, flags);
		scsi_eh_wakeup(shost);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}

	atomic_dec(&sdev->device_busy);
}

static void scsi_kick_queue(struct request_queue *q)
{
	if (q->mq_ops)
		blk_mq_start_hw_queues(q);
	else
		blk_run_queue(q);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
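	 * (The starget_sdev_user re-check taken under host_lock below bails
	 * out early if another LUN won that race in the meantime.)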
	 */
	scsi_kick_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_kick_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
	if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
		return true;
	if (atomic_read(&sdev->device_blocked) > 0)
		return true;
	return false;
}

static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
	if (starget->can_queue > 0) {
		if (atomic_read(&starget->target_busy) >= starget->can_queue)
			return true;
		if (atomic_read(&starget->target_blocked) > 0)
			return true;
	}
	return false;
}

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
	if (shost->can_queue > 0 &&
	    atomic_read(&shost->host_busy) >= shost->can_queue)
		return true;
	if (atomic_read(&shost->host_blocked) > 0)
		return true;
	if (shost->host_self_blocked)
		return true;
	return false;
}

static void scsi_starved_list_run(struct Scsi_Host *shost)
{
	LIST_HEAD(starved_list);
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		struct request_queue *slq;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		/*
		 * Once we drop the host lock, a racing scsi_remove_device()
		 * call may remove the sdev from the starved list and destroy
		 * it and the queue.  Mitigate by taking a reference to the
		 * queue and never touching the sdev again after we drop the
		 * host lock.  Note: if __scsi_remove_device() invokes
		 * blk_cleanup_queue() before the queue is run from this
		 * function then blk_run_queue() will return immediately since
		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
		 */
		slq = sdev->request_queue;
		if (!blk_get_queue(slq))
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);

		scsi_kick_queue(slq);
		blk_put_queue(slq);

		spin_lock_irqsave(shost->host_lock, flags);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:   scsi_run_queue()
 *
 * Purpose:    Select a proper request queue to serve next
 *
 * Arguments:  q       - last request's queue
 *
 * Returns:    Nothing
 *
 * Notes:      The previous command was completely finished, start
 *             a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);
	if (!list_empty(&sdev->host->starved_list))
		scsi_starved_list_run(sdev->host);

	if (q->mq_ops)
		blk_mq_start_stopped_hw_queues(q, false);
	else
		blk_run_queue(q);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_unprep_request(req);
	req->special = NULL;
	scsi_put_command(cmd);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	put_device(&sdev->sdev_gendev);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	scsi_put_command(cmd);
	scsi_run_queue(q);

	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
{
	if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS)
		return;
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
			      gfp_t gfp_mask, bool mq)
{
	struct scatterlist *first_chunk = NULL;
	int ret;

	BUG_ON(!nents);

	if (mq) {
		if (nents <= SCSI_MAX_SG_SEGMENTS) {
			sdb->table.nents = nents;
			sg_init_table(sdb->table.sgl, sdb->table.nents);
			return 0;
		}
		first_chunk = sdb->table.sgl;
	}

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       first_chunk, gfp_mask, scsi_sg_alloc);
	if (unlikely(ret))
		scsi_free_sgtable(sdb, mq);
	return ret;
}

static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
	if (cmd->request->cmd_type == REQ_TYPE_FS) {
		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);

		if (drv->uninit_command)
			drv->uninit_command(cmd);
	}
}

static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb, true);
	if (cmd->request->next_rq && cmd->request->next_rq->special)
		scsi_free_sgtable(cmd->request->next_rq->special, true);
	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb, true);
}

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	unsigned long flags;

	BUG_ON(list_empty(&cmd->list));

	scsi_mq_free_sgtables(cmd);
	scsi_uninit_cmd(cmd);

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_del_init(&cmd->list);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Free resources allocated for a scsi_command.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb, false);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb, false);
}

static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
{
	struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;

	scsi_free_sgtable(bidi_sdb, false);
	kmem_cache_free(scsi_sdb_cache, bidi_sdb);
	cmd->request->next_rq->special = NULL;
}

static bool scsi_end_request(struct request *req, int error,
		unsigned int bytes, unsigned int bidi_bytes)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	if (blk_update_request(req, error, bytes))
		return true;

	/* Bidi request must be completed as a whole */
	if (unlikely(bidi_bytes) &&
	    blk_update_request(req->next_rq, error, bidi_bytes))
		return true;

	if (blk_queue_add_random(q))
		add_disk_randomness(req->rq_disk);

	if (req->mq_ctx) {
		/*
		 * In the MQ case the command gets freed by __blk_mq_end_io,
		 * so we have to do all cleanup that depends on it earlier.
		 *
		 * We also can't kick the queues from irq context, so we
		 * will have to defer it to a workqueue.
		 */
		scsi_mq_uninit_cmd(cmd);

		__blk_mq_end_io(req, error);

		if (scsi_target(sdev)->single_lun ||
		    !list_empty(&sdev->host->starved_list))
			kblockd_schedule_work(&sdev->requeue_work);
		else
			blk_mq_start_stopped_hw_queues(q, true);

		put_device(&sdev->sdev_gendev);
	} else {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_finish_request(req, error);
		spin_unlock_irqrestore(q->queue_lock, flags);

		if (bidi_bytes)
			scsi_release_bidi_buffers(cmd);
		scsi_release_buffers(cmd);
		scsi_next_command(cmd);
	}

	return false;
}

/**
 * __scsi_error_from_host_byte - translate SCSI error code into errno
 * @cmd:	SCSI command (unused)
 * @result:	scsi error code
 *
 * Translate SCSI error code into standard UNIX errno.
 * Return values:
 * -ENOLINK	temporary transport failure
 * -EREMOTEIO	permanent target failure, do not retry
 * -EBADE	permanent nexus failure, retry on other path
 * -ENOSPC	No write space available
 * -ENODATA	Medium error
 * -EIO		unspecified I/O error
 */
static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
	int error = 0;

	switch(host_byte(result)) {
	case DID_TRANSPORT_FAILFAST:
		error = -ENOLINK;
		break;
	case DID_TARGET_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EREMOTEIO;
		break;
	case DID_NEXUS_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EBADE;
		break;
	case DID_ALLOC_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -ENOSPC;
		break;
	case DID_MEDIUM_ERROR:
		set_host_byte(cmd, DID_OK);
		error = -ENODATA;
		break;
	default:
		error = -EIO;
		break;
	}

	return error;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       We will finish off the specified number of sectors.  If we
 *		are done, the command block will be released and the queue
 *		function will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call __scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call scsi_end_request() with -EIO to fail
 *		   the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	unsigned long wait_for = (cmd->allowed + 1) * req->timeout;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = __scsi_error_from_host_byte(cmd, result);
		}
		/*
		 * __scsi_error_from_host_byte may have reset the host_byte
		 */
		req->errors = cmd->result;

		req->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands must be completed as a whole,
			 * both sides at once.
			 */
			req->next_rq->resid_len = scsi_in(cmd)->resid;
			if (scsi_end_request(req, 0, blk_rq_bytes(req),
					blk_rq_bytes(req->next_rq)))
				BUG();
			return;
		}
	} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
		/*
		 * Certain non BLOCK_PC requests are commands that don't
		 * actually transfer anything (FLUSH), so cannot use
		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
		 * This sets the error explicitly for the problem case.
		 */
		error = __scsi_error_from_host_byte(cmd, result);
	}

	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
	BUG_ON(blk_bidi_rq(req));

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
		"%u sectors total, %d bytes done.\n",
		blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated
	 * as success, so fiddle the result code here.  For BLOCK_PC
	 * we already took a copy of the original into rq->errors which
	 * is what gets returned to the user.
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
		 * print since caller wants ATA registers. Only occurs on
		 * SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			;
		else if (!(req->cmd_flags & REQ_QUIET))
			scsi_print_sense("", cmd);
		result = 0;
		/* BLOCK_PC may have set error */
		error = 0;
	}

	/*
	 * If we finished all bytes in the request we are done now.
	 */
	if (!scsi_end_request(req, error, good_bytes, 0))
		return;

	/*
	 * Kill remainder if no retries.
	 */
	if (error && scsi_noretry_cmd(cmd)) {
		if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
			BUG();
		return;
	}

	/*
	 * If there had been no error, but we have leftover bytes in the
	 * request, just queue the command up again.
	 */
	if (result == 0)
		goto requeue;

	error = __scsi_error_from_host_byte(cmd, result);

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				action = ACTION_FAIL;
				error = -EILSEQ;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
				action = ACTION_FAIL;
				error = -EREMOTEIO;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) /* DIF */
				error = -EILSEQ;
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					action = ACTION_FAIL;
					break;
				}
			} else
				action = ACTION_FAIL;
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			action = ACTION_FAIL;
			break;
		}
	} else
		action = ACTION_FAIL;

	if (action != ACTION_FAIL &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
		action = ACTION_FAIL;

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		if (!(req->cmd_flags & REQ_QUIET)) {
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
			scsi_print_command(cmd);
		}
		if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
			return;
		/*FALLTHRU*/
	case ACTION_REPREP:
	requeue:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		if (q->mq_ops) {
			cmd->request->cmd_flags &= ~REQ_DONTPREP;
			scsi_mq_uninit_cmd(cmd);
			scsi_mq_requeue_cmd(cmd);
		} else {
			scsi_release_buffers(cmd);
			scsi_requeue_command(q, cmd);
		}
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
			     gfp_t gfp_mask)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					gfp_mask, req->mq_ctx != NULL)))
		return BLKPREP_DEFER;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_bytes(req);
	return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct scsi_device *sdev = cmd->device;
	struct request *rq = cmd->request;
	bool is_mq = (rq->mq_ctx != NULL);
	int error;

	BUG_ON(!rq->nr_phys_segments);

	error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(rq)) {
		if (!rq->q->mq_ops) {
			struct scsi_data_buffer *bidi_sdb =
				kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
			if (!bidi_sdb) {
				error = BLKPREP_DEFER;
				goto err_exit;
			}

			rq->next_rq->special = bidi_sdb;
		}

		error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special,
					  GFP_ATOMIC);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		BUG_ON(prot_sdb == NULL);
		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask, is_mq)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));
		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;
err_exit:
	if (is_mq) {
		scsi_mq_free_sgtables(cmd);
	} else {
		scsi_release_buffers(cmd);
		cmd->request->special = NULL;
		scsi_put_command(cmd);
		put_device(&sdev->sdev_gendev);
	}
	return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		/* Bail if we can't get a reference to the device */
		if (!get_device(&sdev->sdev_gendev))
			return NULL;

		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd)) {
			put_device(&sdev->sdev_gendev);
			return NULL;
		}
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;
	cmd->prot_op = SCSI_PROT_NORMAL;

	return cmd;
}

static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret = scsi_init_io(cmd, GFP_ATOMIC);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
	}

	cmd->cmd_len = req->cmd_len;
	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}

/*
 * Setup a REQ_TYPE_FS command.  These are simple requests from filesystems
 * that still need to be translated to SCSI CDBs from the ULD.
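 * (In practice the ULD's ->init_command hook, e.g. the disk driver's,
 * builds the actual CDB in the call below.)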
 */
static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
		int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_cmd_to_driver(cmd)->init_command(cmd);
}

static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	switch (req->cmd_type) {
	case REQ_TYPE_FS:
		return scsi_setup_fs_cmnd(sdev, req);
	case REQ_TYPE_BLOCK_PC:
		return scsi_setup_blk_pc_cmnd(sdev, req);
	default:
		return BLKPREP_KILL;
	}
}

static int
scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}

static int
scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			put_device(&sdev->sdev_gendev);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we schedule a callback to happen
		 * shortly.
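		 * (SCSI_QUEUE_DELAY is in milliseconds; see its definition
		 * near the top of this file.)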
		 */
		if (atomic_read(&sdev->device_busy) == 0)
			blk_delay_queue(q, SCSI_QUEUE_DELAY);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd;
	int ret;

	ret = scsi_prep_state_check(sdev, req);
	if (ret != BLKPREP_OK)
		goto out;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd)) {
		ret = BLKPREP_DEFER;
		goto out;
	}

	ret = scsi_setup_cmnd(sdev, req);
out:
	return scsi_prep_return(q, req, ret);
}

static void scsi_unprep_fn(struct request_queue *q, struct request *req)
{
	scsi_uninit_cmd(req->special);
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	unsigned int busy;

	busy = atomic_inc_return(&sdev->device_busy) - 1;
	if (atomic_read(&sdev->device_blocked)) {
		if (busy)
			goto out_dec;

		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (atomic_dec_return(&sdev->device_blocked) > 0) {
			/*
			 * For the MQ case we take care of this in the caller.
			 */
			if (!q->mq_ops)
				blk_delay_queue(q, SCSI_QUEUE_DELAY);
			goto out_dec;
		}
		SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
	}

	if (busy >= sdev->queue_depth)
		goto out_dec;

	return 1;
out_dec:
	atomic_dec(&sdev->device_busy);
	return 0;
}

/*
 * scsi_target_queue_ready: checks if we can send commands to the target
 * @sdev: scsi device on starget to check.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);
	unsigned int busy;

	if (starget->single_lun) {
		spin_lock_irq(shost->host_lock);
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev) {
			spin_unlock_irq(shost->host_lock);
			return 0;
		}
		starget->starget_sdev_user = sdev;
		spin_unlock_irq(shost->host_lock);
	}

	if (starget->can_queue <= 0)
		return 1;

	busy = atomic_inc_return(&starget->target_busy) - 1;
	if (atomic_read(&starget->target_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (atomic_dec_return(&starget->target_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
				 "unblocking target at zero depth\n"));
	}

	if (busy >= starget->can_queue)
		goto starved;

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	list_move_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);
	return 0;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
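 *
 * Note: host_busy is bumped optimistically on entry; both failure paths
 * below (starved and out_dec) drop it again so the count stays balanced.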
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	unsigned int busy;

	if (scsi_host_in_recovery(shost))
		return 0;

	busy = atomic_inc_return(&shost->host_busy) - 1;
	if (atomic_read(&shost->host_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (atomic_dec_return(&shost->host_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3,
			shost_printk(KERN_INFO, shost,
				     "unblocking host at zero depth\n"));
	}

	if (shost->can_queue > 0 && busy >= shost->can_queue)
		goto starved;
	if (shost->host_self_blocked)
		goto starved;

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry)) {
		spin_lock_irq(shost->host_lock);
		if (!list_empty(&sdev->starved_entry))
			list_del_init(&sdev->starved_entry);
		spin_unlock_irq(shost->host_lock);
	}

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	if (list_empty(&sdev->starved_entry))
		list_add_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	atomic_dec(&shost->host_busy);
	return 0;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os, scsi
 * needs to return 'not busy'.  Otherwise, request stacking drivers
 * may hold requests forever.
 */
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;

	if (blk_queue_dying(q))
		return 0;

	shost = sdev->host;

	/*
	 * Ignore host/starget busy state.
	 * Since block layer does not have a concept of fairness across
	 * multiple queues, congestion of host/starget needs to be handled
	 * in SCSI layer.
	 */
	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct scsi_target *starget;
	struct Scsi_Host *shost;

	blk_start_request(req);

	scmd_printk(KERN_INFO, cmd, "killing request\n");

	sdev = cmd->device;
	starget = scsi_target(sdev);
	shost = sdev->host;
	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	atomic_inc(&sdev->device_busy);
	atomic_inc(&shost->host_busy);
	if (starget->can_queue > 0)
		atomic_inc(&starget->target_busy);

	blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}

/**
 * scsi_done - Invoke completion on finished SCSI command.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * Description: This function is the mid-level's (SCSI Core) interrupt routine,
 * which regains ownership of the SCSI command (de facto) from a LLDD, and
 * calls blk_complete_request() for further processing.
 *
 * This function is interrupt context safe.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
	trace_scsi_dispatch_cmd_done(cmd);
	blk_complete_request(cmd->request);
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	for (;;) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = blk_peek_request(q);
		if (!req)
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}

		if (!scsi_dev_queue_ready(q, sdev))
			break;

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blk_start_request(req);

		spin_unlock_irq(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}

		/*
		 * We hit this when the driver is using a host wide
		 * tag map. For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag. Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
			spin_lock_irq(shost->host_lock);
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			spin_unlock_irq(shost->host_lock);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto host_not_ready;

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		cmd->scsi_done = scsi_done;
		rtn = scsi_dispatch_cmd(cmd);
		if (rtn) {
			scsi_queue_insert(cmd, rtn);
			spin_lock_irq(q->queue_lock);
			goto out_delay;
		}
		spin_lock_irq(q->queue_lock);
	}

	return;

 host_not_ready:
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
 not_ready:
	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	atomic_dec(&sdev->device_busy);
out_delay:
	if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
		blk_delay_queue(q, SCSI_QUEUE_DELAY);
}

static inline int prep_to_mq(int ret)
{
	switch (ret) {
	case BLKPREP_OK:
		return 0;
	case BLKPREP_DEFER:
		return BLK_MQ_RQ_QUEUE_BUSY;
	default:
		return BLK_MQ_RQ_QUEUE_ERROR;
	}
}

static int scsi_mq_prep_fn(struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = req->q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned char *sense_buf = cmd->sense_buffer;
	struct scatterlist *sg;

	memset(cmd, 0, sizeof(struct scsi_cmnd));

	req->special = cmd;

	cmd->request = req;
	cmd->device = sdev;
	cmd->sense_buffer = sense_buf;

	cmd->tag = req->tag;

	cmd->cmnd = req->cmd;
	cmd->prot_op = SCSI_PROT_NORMAL;

	INIT_LIST_HEAD(&cmd->list);
	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
	cmd->jiffies_at_alloc = jiffies;

	/*
	 * XXX: cmd_list lookups are only used by two drivers, try to get
	 * rid of this list in common code.
	 */
	spin_lock_irq(&sdev->list_lock);
	list_add_tail(&cmd->list, &sdev->cmd_list);
	spin_unlock_irq(&sdev->list_lock);

	sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
	cmd->sdb.table.sgl = sg;

	if (scsi_host_get_prot(shost)) {
		cmd->prot_sdb = (void *)sg +
			shost->sg_tablesize * sizeof(struct scatterlist);
		memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));

		cmd->prot_sdb->table.sgl =
			(struct scatterlist *)(cmd->prot_sdb + 1);
	}

	if (blk_bidi_rq(req)) {
		struct request *next_rq = req->next_rq;
		struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq);

		memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer));
		bidi_sdb->table.sgl =
			(struct scatterlist *)(bidi_sdb + 1);

		next_rq->special = bidi_sdb;
	}

	return scsi_setup_cmnd(sdev, req);
}

static void scsi_mq_done(struct scsi_cmnd *cmd)
{
	trace_scsi_dispatch_cmd_done(cmd);
	blk_mq_complete_request(cmd->request);
}

static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
{
	struct request_queue *q = req->q;
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	int ret;
	int reason;

	ret = prep_to_mq(scsi_prep_state_check(sdev, req));
	if (ret)
		goto out;

	ret = BLK_MQ_RQ_QUEUE_BUSY;
	if (!get_device(&sdev->sdev_gendev))
		goto out;

	if (!scsi_dev_queue_ready(q, sdev))
		goto out_put_device;
	if (!scsi_target_queue_ready(shost, sdev))
		goto out_dec_device_busy;
	if (!scsi_host_queue_ready(q, shost, sdev))
		goto out_dec_target_busy;

	if (!(req->cmd_flags & REQ_DONTPREP)) {
		ret = prep_to_mq(scsi_mq_prep_fn(req));
		if (ret)
			goto out_dec_host_busy;
		req->cmd_flags |= REQ_DONTPREP;
	}

	scsi_init_cmd_errh(cmd);
	cmd->scsi_done = scsi_mq_done;

	reason = scsi_dispatch_cmd(cmd);
	if (reason) {
		scsi_set_blocked(cmd, reason);
		ret = BLK_MQ_RQ_QUEUE_BUSY;
		goto out_dec_host_busy;
	}

	return BLK_MQ_RQ_QUEUE_OK;

out_dec_host_busy:
	atomic_dec(&shost->host_busy);
out_dec_target_busy:
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
out_dec_device_busy:
	atomic_dec(&sdev->device_busy);
out_put_device:
	put_device(&sdev->sdev_gendev);
out:
	switch (ret) {
	case BLK_MQ_RQ_QUEUE_BUSY:
		blk_mq_stop_hw_queue(hctx);
		if (atomic_read(&sdev->device_busy) == 0 &&
		    !scsi_device_blocked(sdev))
			blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
		break;
	case BLK_MQ_RQ_QUEUE_ERROR:
		/*
		 * Make sure to release all allocated resources when
		 * we hit an error, as we will never see this command
		 * again.
		 */
		if (req->cmd_flags & REQ_DONTPREP)
			scsi_mq_uninit_cmd(cmd);
		break;
	default:
		break;
	}
	return ret;
}

static int scsi_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx,
		unsigned int numa_node)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->sense_buffer = kzalloc_node(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL,
			numa_node);
	if (!cmd->sense_buffer)
		return -ENOMEM;
	return 0;
}

static void scsi_exit_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	kfree(cmd->sense_buffer);
}

static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;

	return bounce_limit;
}

static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
{
	struct device *dev = shost->dma_dev;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
					SCSI_MAX_SG_CHAIN_SEGMENTS));

	if (scsi_host_prot_dma(shost)) {
		shost->sg_prot_tablesize =
			min_not_zero(shost->sg_prot_tablesize,
				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
	}

	blk_queue_max_hw_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

	if (!shost->use_clustering)
		q->limits.cluster = 0;

	/*
	 * set a reasonable default alignment on word boundaries: the
	 * host and device may alter it using
	 * blk_queue_update_dma_alignment() later.
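	 * (a mask of 0x03 below means buffers need only be 4-byte aligned)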
2007 */ 2008 blk_queue_dma_alignment(q, 0x03); 2009 } 2010 2011 struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, 2012 request_fn_proc *request_fn) 2013 { 2014 struct request_queue *q; 2015 2016 q = blk_init_queue(request_fn, NULL); 2017 if (!q) 2018 return NULL; 2019 __scsi_init_queue(shost, q); 2020 return q; 2021 } 2022 EXPORT_SYMBOL(__scsi_alloc_queue); 2023 2024 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) 2025 { 2026 struct request_queue *q; 2027 2028 q = __scsi_alloc_queue(sdev->host, scsi_request_fn); 2029 if (!q) 2030 return NULL; 2031 2032 blk_queue_prep_rq(q, scsi_prep_fn); 2033 blk_queue_unprep_rq(q, scsi_unprep_fn); 2034 blk_queue_softirq_done(q, scsi_softirq_done); 2035 blk_queue_rq_timed_out(q, scsi_times_out); 2036 blk_queue_lld_busy(q, scsi_lld_busy); 2037 return q; 2038 } 2039 2040 static struct blk_mq_ops scsi_mq_ops = { 2041 .map_queue = blk_mq_map_queue, 2042 .queue_rq = scsi_queue_rq, 2043 .complete = scsi_softirq_done, 2044 .timeout = scsi_times_out, 2045 .init_request = scsi_init_request, 2046 .exit_request = scsi_exit_request, 2047 }; 2048 2049 struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev) 2050 { 2051 sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set); 2052 if (IS_ERR(sdev->request_queue)) 2053 return NULL; 2054 2055 sdev->request_queue->queuedata = sdev; 2056 __scsi_init_queue(sdev->host, sdev->request_queue); 2057 return sdev->request_queue; 2058 } 2059 2060 int scsi_mq_setup_tags(struct Scsi_Host *shost) 2061 { 2062 unsigned int cmd_size, sgl_size, tbl_size; 2063 2064 tbl_size = shost->sg_tablesize; 2065 if (tbl_size > SCSI_MAX_SG_SEGMENTS) 2066 tbl_size = SCSI_MAX_SG_SEGMENTS; 2067 sgl_size = tbl_size * sizeof(struct scatterlist); 2068 cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; 2069 if (scsi_host_get_prot(shost)) 2070 cmd_size += sizeof(struct scsi_data_buffer) + sgl_size; 2071 2072 memset(&shost->tag_set, 0, sizeof(shost->tag_set)); 2073 shost->tag_set.ops = &scsi_mq_ops; 2074 shost->tag_set.nr_hw_queues = 1; 2075 shost->tag_set.queue_depth = shost->can_queue; 2076 shost->tag_set.cmd_size = cmd_size; 2077 shost->tag_set.numa_node = NUMA_NO_NODE; 2078 shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; 2079 shost->tag_set.driver_data = shost; 2080 2081 return blk_mq_alloc_tag_set(&shost->tag_set); 2082 } 2083 2084 void scsi_mq_destroy_tags(struct Scsi_Host *shost) 2085 { 2086 blk_mq_free_tag_set(&shost->tag_set); 2087 } 2088 2089 /* 2090 * Function: scsi_block_requests() 2091 * 2092 * Purpose: Utility function used by low-level drivers to prevent further 2093 * commands from being queued to the device. 2094 * 2095 * Arguments: shost - Host in question 2096 * 2097 * Returns: Nothing 2098 * 2099 * Lock status: No locks are assumed held. 2100 * 2101 * Notes: There is no timer nor any other means by which the requests 2102 * get unblocked other than the low-level driver calling 2103 * scsi_unblock_requests(). 2104 */ 2105 void scsi_block_requests(struct Scsi_Host *shost) 2106 { 2107 shost->host_self_blocked = 1; 2108 } 2109 EXPORT_SYMBOL(scsi_block_requests); 2110 2111 /* 2112 * Function: scsi_unblock_requests() 2113 * 2114 * Purpose: Utility function used by low-level drivers to allow further 2115 * commands to be queued to the device. 2116 * 2117 * Arguments: shost - Host in question 2118 * 2119 * Returns: Nothing 2120 * 2121 * Lock status: No locks are assumed held.
2122 * 2123 * Notes: There is no timer nor any other means by which the requests 2124 * get unblocked other than the low-level driver calling 2125 * scsi_unblock_requests(). 2126 * 2127 * This is done as an API function so that changes to the 2128 * internals of the scsi mid-layer won't require wholesale 2129 * changes to drivers that use this feature. 2130 */ 2131 void scsi_unblock_requests(struct Scsi_Host *shost) 2132 { 2133 shost->host_self_blocked = 0; 2134 scsi_run_host_queues(shost); 2135 } 2136 EXPORT_SYMBOL(scsi_unblock_requests); 2137 2138 int __init scsi_init_queue(void) 2139 { 2140 int i; 2141 2142 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer", 2143 sizeof(struct scsi_data_buffer), 2144 0, 0, NULL); 2145 if (!scsi_sdb_cache) { 2146 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n"); 2147 return -ENOMEM; 2148 } 2149 2150 for (i = 0; i < SG_MEMPOOL_NR; i++) { 2151 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 2152 int size = sgp->size * sizeof(struct scatterlist); 2153 2154 sgp->slab = kmem_cache_create(sgp->name, size, 0, 2155 SLAB_HWCACHE_ALIGN, NULL); 2156 if (!sgp->slab) { 2157 printk(KERN_ERR "SCSI: can't init sg slab %s\n", 2158 sgp->name); 2159 goto cleanup_sdb; 2160 } 2161 2162 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, 2163 sgp->slab); 2164 if (!sgp->pool) { 2165 printk(KERN_ERR "SCSI: can't init sg mempool %s\n", 2166 sgp->name); 2167 goto cleanup_sdb; 2168 } 2169 } 2170 2171 return 0; 2172 2173 cleanup_sdb: 2174 for (i = 0; i < SG_MEMPOOL_NR; i++) { 2175 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 2176 if (sgp->pool) 2177 mempool_destroy(sgp->pool); 2178 if (sgp->slab) 2179 kmem_cache_destroy(sgp->slab); 2180 } 2181 kmem_cache_destroy(scsi_sdb_cache); 2182 2183 return -ENOMEM; 2184 } 2185 2186 void scsi_exit_queue(void) 2187 { 2188 int i; 2189 2190 kmem_cache_destroy(scsi_sdb_cache); 2191 2192 for (i = 0; i < SG_MEMPOOL_NR; i++) { 2193 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 2194 mempool_destroy(sgp->pool); 2195 kmem_cache_destroy(sgp->slab); 2196 } 2197 } 2198 2199 /** 2200 * scsi_mode_select - issue a mode select 2201 * @sdev: SCSI device to be queried 2202 * @pf: Page format bit (1 == standard, 0 == vendor specific) 2203 * @sp: Save page bit (0 == don't save, 1 == save) 2204 * @modepage: mode page being requested 2205 * @buffer: request buffer (may not be smaller than eight bytes) 2206 * @len: length of request buffer. 2207 * @timeout: command timeout 2208 * @retries: number of retries before failing 2209 * @data: returns a structure abstracting the mode header data 2210 * @sshdr: place to put sense data (or NULL if no sense to be collected). 2211 * must be SCSI_SENSE_BUFFERSIZE big. 2212 * 2213 * Returns zero if successful; negative error number or scsi 2214 * status on error 2215 * 2216 */ 2217 int 2218 scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage, 2219 unsigned char *buffer, int len, int timeout, int retries, 2220 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 2221 { 2222 unsigned char cmd[10]; 2223 unsigned char *real_buffer; 2224 int ret; 2225 2226 memset(cmd, 0, sizeof(cmd)); 2227 cmd[1] = (pf ? 0x10 : 0) | (sp ? 
0x01 : 0); 2228 2229 if (sdev->use_10_for_ms) { 2230 if (len > 65535) 2231 return -EINVAL; 2232 real_buffer = kmalloc(8 + len, GFP_KERNEL); 2233 if (!real_buffer) 2234 return -ENOMEM; 2235 memcpy(real_buffer + 8, buffer, len); 2236 len += 8; 2237 real_buffer[0] = 0; 2238 real_buffer[1] = 0; 2239 real_buffer[2] = data->medium_type; 2240 real_buffer[3] = data->device_specific; 2241 real_buffer[4] = data->longlba ? 0x01 : 0; 2242 real_buffer[5] = 0; 2243 real_buffer[6] = data->block_descriptor_length >> 8; 2244 real_buffer[7] = data->block_descriptor_length; 2245 2246 cmd[0] = MODE_SELECT_10; 2247 cmd[7] = len >> 8; 2248 cmd[8] = len; 2249 } else { 2250 if (len > 255 || data->block_descriptor_length > 255 || 2251 data->longlba) 2252 return -EINVAL; 2253 2254 real_buffer = kmalloc(4 + len, GFP_KERNEL); 2255 if (!real_buffer) 2256 return -ENOMEM; 2257 memcpy(real_buffer + 4, buffer, len); 2258 len += 4; 2259 real_buffer[0] = 0; 2260 real_buffer[1] = data->medium_type; 2261 real_buffer[2] = data->device_specific; 2262 real_buffer[3] = data->block_descriptor_length; 2263 2264 2265 cmd[0] = MODE_SELECT; 2266 cmd[4] = len; 2267 } 2268 2269 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len, 2270 sshdr, timeout, retries, NULL); 2271 kfree(real_buffer); 2272 return ret; 2273 } 2274 EXPORT_SYMBOL_GPL(scsi_mode_select); 2275 2276 /** 2277 * scsi_mode_sense - issue a mode sense, falling back from ten to six bytes if necessary. 2278 * @sdev: SCSI device to be queried 2279 * @dbd: set if mode sense will allow block descriptors to be returned 2280 * @modepage: mode page being requested 2281 * @buffer: request buffer (may not be smaller than eight bytes) 2282 * @len: length of request buffer. 2283 * @timeout: command timeout 2284 * @retries: number of retries before failing 2285 * @data: returns a structure abstracting the mode header data 2286 * @sshdr: place to put sense data (or NULL if no sense to be collected). 2287 * must be SCSI_SENSE_BUFFERSIZE big. 2288 * 2289 * Returns zero if successful, or the SCSI command result on failure. 2290 * On success, @data->header_length holds the header offset (either 4 2291 * or 8 depending on whether a six or ten byte command was issued). 2292 */ 2293 int 2294 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, 2295 unsigned char *buffer, int len, int timeout, int retries, 2296 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 2297 { 2298 unsigned char cmd[12]; 2299 int use_10_for_ms; 2300 int header_length; 2301 int result; 2302 struct scsi_sense_hdr my_sshdr; 2303 2304 memset(data, 0, sizeof(*data)); 2305 memset(&cmd[0], 0, 12); 2306 cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ 2307 cmd[2] = modepage; 2308 2309 /* caller might not be interested in sense, but we need it */ 2310 if (!sshdr) 2311 sshdr = &my_sshdr; 2312 2313 retry: 2314 use_10_for_ms = sdev->use_10_for_ms; 2315 2316 if (use_10_for_ms) { 2317 if (len < 8) 2318 len = 8; 2319 2320 cmd[0] = MODE_SENSE_10; 2321 cmd[8] = len; 2322 header_length = 8; 2323 } else { 2324 if (len < 4) 2325 len = 4; 2326 2327 cmd[0] = MODE_SENSE; 2328 cmd[4] = len; 2329 header_length = 4; 2330 } 2331 2332 memset(buffer, 0, len); 2333 2334 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, 2335 sshdr, timeout, retries, NULL); 2336 2337 /* This code looks awful: what it's doing is making sure an 2338 * ILLEGAL REQUEST sense return identifies the actual command 2339 * byte as the problem.
MODE_SENSE commands can return 2340 * ILLEGAL REQUEST if the code page isn't supported */ 2341 2342 if (use_10_for_ms && !scsi_status_is_good(result) && 2343 (driver_byte(result) & DRIVER_SENSE)) { 2344 if (scsi_sense_valid(sshdr)) { 2345 if ((sshdr->sense_key == ILLEGAL_REQUEST) && 2346 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { 2347 /* 2348 * Invalid command operation code 2349 */ 2350 sdev->use_10_for_ms = 0; 2351 goto retry; 2352 } 2353 } 2354 } 2355 2356 if (scsi_status_is_good(result)) { 2357 if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b && 2358 (modepage == 6 || modepage == 8))) { 2359 /* Initio breakage? */ 2360 header_length = 0; 2361 data->length = 13; 2362 data->medium_type = 0; 2363 data->device_specific = 0; 2364 data->longlba = 0; 2365 data->block_descriptor_length = 0; 2366 } else if (use_10_for_ms) { 2367 data->length = buffer[0]*256 + buffer[1] + 2; 2368 data->medium_type = buffer[2]; 2369 data->device_specific = buffer[3]; 2370 data->longlba = buffer[4] & 0x01; 2371 data->block_descriptor_length = buffer[6]*256 2372 + buffer[7]; 2373 } else { 2374 data->length = buffer[0] + 1; 2375 data->medium_type = buffer[1]; 2376 data->device_specific = buffer[2]; 2377 data->block_descriptor_length = buffer[3]; 2378 } 2379 data->header_length = header_length; 2380 } 2381 2382 return result; 2383 } 2384 EXPORT_SYMBOL(scsi_mode_sense); 2385 2386 /** 2387 * scsi_test_unit_ready - test if unit is ready 2388 * @sdev: scsi device to test. 2389 * @timeout: command timeout 2390 * @retries: number of retries before failing 2391 * @sshdr_external: Optional pointer to struct scsi_sense_hdr for 2392 * returning sense. Make sure that this is cleared before passing 2393 * in. 2394 * 2395 * Returns zero if successful or an error if TUR failed. For 2396 * removable media, UNIT_ATTENTION sets ->changed flag. 2397 **/ 2398 int 2399 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, 2400 struct scsi_sense_hdr *sshdr_external) 2401 { 2402 char cmd[] = { 2403 TEST_UNIT_READY, 0, 0, 0, 0, 0, 2404 }; 2405 struct scsi_sense_hdr *sshdr; 2406 int result; 2407 2408 if (!sshdr_external) 2409 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); 2410 else 2411 sshdr = sshdr_external; 2412 2413 /* try to eat the UNIT_ATTENTION if there are enough retries */ 2414 do { 2415 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr, 2416 timeout, retries, NULL); 2417 if (sdev->removable && scsi_sense_valid(sshdr) && 2418 sshdr->sense_key == UNIT_ATTENTION) 2419 sdev->changed = 1; 2420 } while (scsi_sense_valid(sshdr) && 2421 sshdr->sense_key == UNIT_ATTENTION && --retries); 2422 2423 if (!sshdr_external) 2424 kfree(sshdr); 2425 return result; 2426 } 2427 EXPORT_SYMBOL(scsi_test_unit_ready); 2428 2429 /** 2430 * scsi_device_set_state - Take the given device through the device state model. 2431 * @sdev: scsi device to change the state of. 2432 * @state: state to change to. 2433 * 2434 * Returns zero if successful or an error if the requested 2435 * transition is illegal.
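 * For example, SDEV_CREATED -> SDEV_RUNNING and SDEV_RUNNING -> SDEV_BLOCK are legal transitions, while SDEV_DEL -> SDEV_RUNNING is not.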
2436 */ 2437 int 2438 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) 2439 { 2440 enum scsi_device_state oldstate = sdev->sdev_state; 2441 2442 if (state == oldstate) 2443 return 0; 2444 2445 switch (state) { 2446 case SDEV_CREATED: 2447 switch (oldstate) { 2448 case SDEV_CREATED_BLOCK: 2449 break; 2450 default: 2451 goto illegal; 2452 } 2453 break; 2454 2455 case SDEV_RUNNING: 2456 switch (oldstate) { 2457 case SDEV_CREATED: 2458 case SDEV_OFFLINE: 2459 case SDEV_TRANSPORT_OFFLINE: 2460 case SDEV_QUIESCE: 2461 case SDEV_BLOCK: 2462 break; 2463 default: 2464 goto illegal; 2465 } 2466 break; 2467 2468 case SDEV_QUIESCE: 2469 switch (oldstate) { 2470 case SDEV_RUNNING: 2471 case SDEV_OFFLINE: 2472 case SDEV_TRANSPORT_OFFLINE: 2473 break; 2474 default: 2475 goto illegal; 2476 } 2477 break; 2478 2479 case SDEV_OFFLINE: 2480 case SDEV_TRANSPORT_OFFLINE: 2481 switch (oldstate) { 2482 case SDEV_CREATED: 2483 case SDEV_RUNNING: 2484 case SDEV_QUIESCE: 2485 case SDEV_BLOCK: 2486 break; 2487 default: 2488 goto illegal; 2489 } 2490 break; 2491 2492 case SDEV_BLOCK: 2493 switch (oldstate) { 2494 case SDEV_RUNNING: 2495 case SDEV_CREATED_BLOCK: 2496 break; 2497 default: 2498 goto illegal; 2499 } 2500 break; 2501 2502 case SDEV_CREATED_BLOCK: 2503 switch (oldstate) { 2504 case SDEV_CREATED: 2505 break; 2506 default: 2507 goto illegal; 2508 } 2509 break; 2510 2511 case SDEV_CANCEL: 2512 switch (oldstate) { 2513 case SDEV_CREATED: 2514 case SDEV_RUNNING: 2515 case SDEV_QUIESCE: 2516 case SDEV_OFFLINE: 2517 case SDEV_TRANSPORT_OFFLINE: 2518 case SDEV_BLOCK: 2519 break; 2520 default: 2521 goto illegal; 2522 } 2523 break; 2524 2525 case SDEV_DEL: 2526 switch (oldstate) { 2527 case SDEV_CREATED: 2528 case SDEV_RUNNING: 2529 case SDEV_OFFLINE: 2530 case SDEV_TRANSPORT_OFFLINE: 2531 case SDEV_CANCEL: 2532 case SDEV_CREATED_BLOCK: 2533 break; 2534 default: 2535 goto illegal; 2536 } 2537 break; 2538 2539 } 2540 sdev->sdev_state = state; 2541 return 0; 2542 2543 illegal: 2544 SCSI_LOG_ERROR_RECOVERY(1, 2545 sdev_printk(KERN_ERR, sdev, 2546 "Illegal state transition %s->%s", 2547 scsi_device_state_name(oldstate), 2548 scsi_device_state_name(state)) 2549 ); 2550 return -EINVAL; 2551 } 2552 EXPORT_SYMBOL(scsi_device_set_state); 2553 2554 /** 2555 * sdev_evt_emit - emit a single SCSI device uevent 2556 * @sdev: associated SCSI device 2557 * @evt: event to emit 2558 * 2559 * Send a single uevent (scsi_event) to the associated scsi_device. 
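 * The event is delivered as a KOBJ_CHANGE uevent carrying an SDEV_* environment string that identifies the event type.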
2560 */ 2561 static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) 2562 { 2563 int idx = 0; 2564 char *envp[3]; 2565 2566 switch (evt->evt_type) { 2567 case SDEV_EVT_MEDIA_CHANGE: 2568 envp[idx++] = "SDEV_MEDIA_CHANGE=1"; 2569 break; 2570 case SDEV_EVT_INQUIRY_CHANGE_REPORTED: 2571 envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED"; 2572 break; 2573 case SDEV_EVT_CAPACITY_CHANGE_REPORTED: 2574 envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED"; 2575 break; 2576 case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: 2577 envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED"; 2578 break; 2579 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: 2580 envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED"; 2581 break; 2582 case SDEV_EVT_LUN_CHANGE_REPORTED: 2583 envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED"; 2584 break; 2585 default: 2586 /* do nothing */ 2587 break; 2588 } 2589 2590 envp[idx++] = NULL; 2591 2592 kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp); 2593 } 2594 2595 /** 2596 * sdev_evt_thread - send a uevent for each scsi event 2597 * @work: work struct for scsi_device 2598 * 2599 * Dispatch queued events to their associated scsi_device kobjects 2600 * as uevents. 2601 */ 2602 void scsi_evt_thread(struct work_struct *work) 2603 { 2604 struct scsi_device *sdev; 2605 enum scsi_device_event evt_type; 2606 LIST_HEAD(event_list); 2607 2608 sdev = container_of(work, struct scsi_device, event_work); 2609 2610 for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++) 2611 if (test_and_clear_bit(evt_type, sdev->pending_events)) 2612 sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL); 2613 2614 while (1) { 2615 struct scsi_event *evt; 2616 struct list_head *this, *tmp; 2617 unsigned long flags; 2618 2619 spin_lock_irqsave(&sdev->list_lock, flags); 2620 list_splice_init(&sdev->event_list, &event_list); 2621 spin_unlock_irqrestore(&sdev->list_lock, flags); 2622 2623 if (list_empty(&event_list)) 2624 break; 2625 2626 list_for_each_safe(this, tmp, &event_list) { 2627 evt = list_entry(this, struct scsi_event, node); 2628 list_del(&evt->node); 2629 scsi_evt_emit(sdev, evt); 2630 kfree(evt); 2631 } 2632 } 2633 } 2634 2635 /** 2636 * sdev_evt_send - send asserted event to uevent thread 2637 * @sdev: scsi_device event occurred on 2638 * @evt: event to send 2639 * 2640 * Assert scsi device event asynchronously. 2641 */ 2642 void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt) 2643 { 2644 unsigned long flags; 2645 2646 #if 0 2647 /* FIXME: currently this check eliminates all media change events 2648 * for polled devices. Need to update to discriminate between AN 2649 * and polled events */ 2650 if (!test_bit(evt->evt_type, sdev->supported_events)) { 2651 kfree(evt); 2652 return; 2653 } 2654 #endif 2655 2656 spin_lock_irqsave(&sdev->list_lock, flags); 2657 list_add_tail(&evt->node, &sdev->event_list); 2658 schedule_work(&sdev->event_work); 2659 spin_unlock_irqrestore(&sdev->list_lock, flags); 2660 } 2661 EXPORT_SYMBOL_GPL(sdev_evt_send); 2662 2663 /** 2664 * sdev_evt_alloc - allocate a new scsi event 2665 * @evt_type: type of event to allocate 2666 * @gfpflags: GFP flags for allocation 2667 * 2668 * Allocates and returns a new scsi_event. 
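 * Returns NULL if the allocation fails.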
2669 */ 2670 struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, 2671 gfp_t gfpflags) 2672 { 2673 struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags); 2674 if (!evt) 2675 return NULL; 2676 2677 evt->evt_type = evt_type; 2678 INIT_LIST_HEAD(&evt->node); 2679 2680 /* evt_type-specific initialization, if any */ 2681 switch (evt_type) { 2682 case SDEV_EVT_MEDIA_CHANGE: 2683 case SDEV_EVT_INQUIRY_CHANGE_REPORTED: 2684 case SDEV_EVT_CAPACITY_CHANGE_REPORTED: 2685 case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: 2686 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: 2687 case SDEV_EVT_LUN_CHANGE_REPORTED: 2688 default: 2689 /* do nothing */ 2690 break; 2691 } 2692 2693 return evt; 2694 } 2695 EXPORT_SYMBOL_GPL(sdev_evt_alloc); 2696 2697 /** 2698 * sdev_evt_send_simple - send asserted event to uevent thread 2699 * @sdev: scsi_device event occurred on 2700 * @evt_type: type of event to send 2701 * @gfpflags: GFP flags for allocation 2702 * 2703 * Assert scsi device event asynchronously, given an event type. 2704 */ 2705 void sdev_evt_send_simple(struct scsi_device *sdev, 2706 enum scsi_device_event evt_type, gfp_t gfpflags) 2707 { 2708 struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags); 2709 if (!evt) { 2710 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n", 2711 evt_type); 2712 return; 2713 } 2714 2715 sdev_evt_send(sdev, evt); 2716 } 2717 EXPORT_SYMBOL_GPL(sdev_evt_send_simple); 2718 2719 /** 2720 * scsi_device_quiesce - Block user issued commands. 2721 * @sdev: scsi device to quiesce. 2722 * 2723 * This works by trying to transition to the SDEV_QUIESCE state 2724 * (which must be a legal transition). When the device is in this 2725 * state, only special requests will be accepted, all others will 2726 * be deferred. Since special requests may also be requeued requests, 2727 * a successful return doesn't guarantee the device will be 2728 * totally quiescent. 2729 * 2730 * Must be called with user context, may sleep. 2731 * 2732 * Returns zero if successful or an error if not. 2733 */ 2734 int 2735 scsi_device_quiesce(struct scsi_device *sdev) 2736 { 2737 int err = scsi_device_set_state(sdev, SDEV_QUIESCE); 2738 if (err) 2739 return err; 2740 2741 scsi_run_queue(sdev->request_queue); 2742 while (atomic_read(&sdev->device_busy)) { 2743 msleep_interruptible(200); 2744 scsi_run_queue(sdev->request_queue); 2745 } 2746 return 0; 2747 } 2748 EXPORT_SYMBOL(scsi_device_quiesce); 2749 2750 /** 2751 * scsi_device_resume - Restart user issued commands to a quiesced device. 2752 * @sdev: scsi device to resume. 2753 * 2754 * Moves the device from quiesced back to running and restarts the 2755 * queues. 2756 * 2757 * Must be called with user context, may sleep.
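 * Resuming a device whose state changed while it was quiesced (for example a device deleted during suspend) is a no-op.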
2758 */ 2759 void scsi_device_resume(struct scsi_device *sdev) 2760 { 2761 /* check if the device state was mutated prior to resume, and if 2762 * so assume the state is being managed elsewhere (for example 2763 * device deleted during suspend) 2764 */ 2765 if (sdev->sdev_state != SDEV_QUIESCE || 2766 scsi_device_set_state(sdev, SDEV_RUNNING)) 2767 return; 2768 scsi_run_queue(sdev->request_queue); 2769 } 2770 EXPORT_SYMBOL(scsi_device_resume); 2771 2772 static void 2773 device_quiesce_fn(struct scsi_device *sdev, void *data) 2774 { 2775 scsi_device_quiesce(sdev); 2776 } 2777 2778 void 2779 scsi_target_quiesce(struct scsi_target *starget) 2780 { 2781 starget_for_each_device(starget, NULL, device_quiesce_fn); 2782 } 2783 EXPORT_SYMBOL(scsi_target_quiesce); 2784 2785 static void 2786 device_resume_fn(struct scsi_device *sdev, void *data) 2787 { 2788 scsi_device_resume(sdev); 2789 } 2790 2791 void 2792 scsi_target_resume(struct scsi_target *starget) 2793 { 2794 starget_for_each_device(starget, NULL, device_resume_fn); 2795 } 2796 EXPORT_SYMBOL(scsi_target_resume); 2797 2798 /** 2799 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state 2800 * @sdev: device to block 2801 * 2802 * Block request made by scsi lld's to temporarily stop all 2803 * scsi commands on the specified device. Called from interrupt 2804 * or normal process context. 2805 * 2806 * Returns zero if successful or error if not 2807 * 2808 * Notes: 2809 * This routine transitions the device to the SDEV_BLOCK state 2810 * (which must be a legal transition). When the device is in this 2811 * state, all commands are deferred until the scsi lld reenables 2812 * the device with scsi_device_unblock or device_block_tmo fires. 2813 */ 2814 int 2815 scsi_internal_device_block(struct scsi_device *sdev) 2816 { 2817 struct request_queue *q = sdev->request_queue; 2818 unsigned long flags; 2819 int err = 0; 2820 2821 err = scsi_device_set_state(sdev, SDEV_BLOCK); 2822 if (err) { 2823 err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK); 2824 2825 if (err) 2826 return err; 2827 } 2828 2829 /* 2830 * The device has transitioned to SDEV_BLOCK. Stop the 2831 * block layer from calling the midlayer with this device's 2832 * request queue. 2833 */ 2834 if (q->mq_ops) { 2835 blk_mq_stop_hw_queues(q); 2836 } else { 2837 spin_lock_irqsave(q->queue_lock, flags); 2838 blk_stop_queue(q); 2839 spin_unlock_irqrestore(q->queue_lock, flags); 2840 } 2841 2842 return 0; 2843 } 2844 EXPORT_SYMBOL_GPL(scsi_internal_device_block); 2845 2846 /** 2847 * scsi_internal_device_unblock - resume a device after a block request 2848 * @sdev: device to resume 2849 * @new_state: state to set devices to after unblocking 2850 * 2851 * Called by scsi lld's or the midlayer to restart the device queue 2852 * for the previously suspended scsi device. Called from interrupt or 2853 * normal process context. 2854 * 2855 * Returns zero if successful or error if not. 2856 * 2857 * Notes: 2858 * This routine transitions the device to the SDEV_RUNNING state 2859 * or to one of the offline states (which must be a legal transition) 2860 * allowing the midlayer to goose the queue for this device. 2861 */ 2862 int 2863 scsi_internal_device_unblock(struct scsi_device *sdev, 2864 enum scsi_device_state new_state) 2865 { 2866 struct request_queue *q = sdev->request_queue; 2867 unsigned long flags; 2868 2869 /* 2870 * Try to transition the scsi device to SDEV_RUNNING or one of the 2871 * offlined states and goose the device queue if successful. 
2872 */ 2873 if ((sdev->sdev_state == SDEV_BLOCK) || 2874 (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE)) 2875 sdev->sdev_state = new_state; 2876 else if (sdev->sdev_state == SDEV_CREATED_BLOCK) { 2877 if (new_state == SDEV_TRANSPORT_OFFLINE || 2878 new_state == SDEV_OFFLINE) 2879 sdev->sdev_state = new_state; 2880 else 2881 sdev->sdev_state = SDEV_CREATED; 2882 } else if (sdev->sdev_state != SDEV_CANCEL && 2883 sdev->sdev_state != SDEV_OFFLINE) 2884 return -EINVAL; 2885 2886 if (q->mq_ops) { 2887 blk_mq_start_stopped_hw_queues(q, false); 2888 } else { 2889 spin_lock_irqsave(q->queue_lock, flags); 2890 blk_start_queue(q); 2891 spin_unlock_irqrestore(q->queue_lock, flags); 2892 } 2893 2894 return 0; 2895 } 2896 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock); 2897 2898 static void 2899 device_block(struct scsi_device *sdev, void *data) 2900 { 2901 scsi_internal_device_block(sdev); 2902 } 2903 2904 static int 2905 target_block(struct device *dev, void *data) 2906 { 2907 if (scsi_is_target_device(dev)) 2908 starget_for_each_device(to_scsi_target(dev), NULL, 2909 device_block); 2910 return 0; 2911 } 2912 2913 void 2914 scsi_target_block(struct device *dev) 2915 { 2916 if (scsi_is_target_device(dev)) 2917 starget_for_each_device(to_scsi_target(dev), NULL, 2918 device_block); 2919 else 2920 device_for_each_child(dev, NULL, target_block); 2921 } 2922 EXPORT_SYMBOL_GPL(scsi_target_block); 2923 2924 static void 2925 device_unblock(struct scsi_device *sdev, void *data) 2926 { 2927 scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data); 2928 } 2929 2930 static int 2931 target_unblock(struct device *dev, void *data) 2932 { 2933 if (scsi_is_target_device(dev)) 2934 starget_for_each_device(to_scsi_target(dev), data, 2935 device_unblock); 2936 return 0; 2937 } 2938 2939 void 2940 scsi_target_unblock(struct device *dev, enum scsi_device_state new_state) 2941 { 2942 if (scsi_is_target_device(dev)) 2943 starget_for_each_device(to_scsi_target(dev), &new_state, 2944 device_unblock); 2945 else 2946 device_for_each_child(dev, &new_state, target_unblock); 2947 } 2948 EXPORT_SYMBOL_GPL(scsi_target_unblock); 2949 2950 /** 2951 * scsi_kmap_atomic_sg - find and atomically map an sg-element 2952 * @sgl: scatter-gather list 2953 * @sg_count: number of segments in sg 2954 * @offset: offset in bytes into sg, on return offset into the mapped area 2955 * @len: bytes to map, on return number of bytes mapped 2956 * 2957 * Returns virtual address of the start of the mapped page 2958 */ 2959 void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, 2960 size_t *offset, size_t *len) 2961 { 2962 int i; 2963 size_t sg_len = 0, len_complete = 0; 2964 struct scatterlist *sg; 2965 struct page *page; 2966 2967 WARN_ON(!irqs_disabled()); 2968 2969 for_each_sg(sgl, sg, sg_count, i) { 2970 len_complete = sg_len; /* Complete sg-entries */ 2971 sg_len += sg->length; 2972 if (sg_len > *offset) 2973 break; 2974 } 2975 2976 if (unlikely(i == sg_count)) { 2977 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, " 2978 "elements %d\n", 2979 __func__, sg_len, *offset, sg_count); 2980 WARN_ON(1); 2981 return NULL; 2982 } 2983 2984 /* Offset starting from the beginning of first page in this sg-entry */ 2985 *offset = *offset - len_complete + sg->offset; 2986 2987 /* Assumption: contiguous pages can be accessed as "page + i" */ 2988 page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT)); 2989 *offset &= ~PAGE_MASK; 2990 2991 /* Bytes in this sg-entry from *offset to the end of the page */ 2992 sg_len = PAGE_SIZE -
*offset; 2993 if (*len > sg_len) 2994 *len = sg_len; 2995 2996 return kmap_atomic(page); 2997 } 2998 EXPORT_SYMBOL(scsi_kmap_atomic_sg); 2999 3000 /** 3001 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg 3002 * @virt: virtual address to be unmapped 3003 */ 3004 void scsi_kunmap_atomic_sg(void *virt) 3005 { 3006 kunmap_atomic(virt); 3007 } 3008 EXPORT_SYMBOL(scsi_kunmap_atomic_sg); 3009 3010 void sdev_disable_disk_events(struct scsi_device *sdev) 3011 { 3012 atomic_inc(&sdev->disk_events_disable_depth); 3013 } 3014 EXPORT_SYMBOL(sdev_disable_disk_events); 3015 3016 void sdev_enable_disk_events(struct scsi_device *sdev) 3017 { 3018 if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0)) 3019 return; 3020 atomic_dec(&sdev->disk_events_disable_depth); 3021 } 3022 EXPORT_SYMBOL(sdev_enable_disk_events); 3023
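/*
 * Illustrative sketch (not built as part of this file): one way a low-level
 * driver might use scsi_kmap_atomic_sg()/scsi_kunmap_atomic_sg() above to
 * copy data out of a command's scatterlist. The helper below is hypothetical
 * and shown for illustration only; it must run with interrupts disabled, as
 * scsi_kmap_atomic_sg() expects, and assumes the sg list holds at least
 * @count bytes.
 *
 *	static void example_copy_from_sg(struct scsi_cmnd *cmd, void *dst,
 *					 size_t count)
 *	{
 *		size_t copied = 0;
 *
 *		while (copied < count) {
 *			size_t offset = copied;		// byte offset into the sg list
 *			size_t len = count - copied;	// bytes still wanted
 *			void *vaddr;
 *
 *			vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd),
 *						    scsi_sg_count(cmd),
 *						    &offset, &len);
 *			if (!vaddr)
 *				break;
 *			// on return, offset is within the mapped page and
 *			// len is how many bytes are mapped there
 *			memcpy(dst + copied, vaddr + offset, len);
 *			scsi_kunmap_atomic_sg(vaddr);
 *			copied += len;
 *		}
 *	}
 */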