/*
 * Copyright (C) 1999 Eric Youngdale
 * Copyright (C) 2014 Christoph Hellwig
 *
 * SCSI queueing library.
 * Initial versions: Eric Youngdale (eric@andante.org).
 * Based upon conversations with large numbers
 * of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_dh.h>

#include <trace/events/scsi.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { .size = x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

/*
 * When to reinvoke queueing after a resource shortage. It's 3 msecs to
 * not change behaviour from the previous unplug mechanism, experimentation
 * may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY	3

static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken. The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		atomic_set(&host->host_blocked, host->max_host_blocked);
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		atomic_set(&device->device_blocked,
			   device->max_device_blocked);
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		atomic_set(&starget->target_blocked,
			   starget->max_target_blocked);
		break;
	}
}

static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = cmd->request->q;

	blk_mq_requeue_request(cmd->request);
	blk_mq_kick_requeue_list(q);
	put_device(&sdev->sdev_gendev);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
		"Inserting command %p into mlqueue\n", cmd));

	scsi_set_blocked(cmd, reason);

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.  Schedule requeue work under
	 * lock such that the kblockd_schedule_work() call happens
	 * before blk_cleanup_queue() finishes.
	 */
	cmd->result = 0;
	if (q->mq_ops) {
		scsi_mq_requeue_cmd(cmd);
		return;
	}
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	kblockd_schedule_work(&device->requeue_work);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, 1);
}
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in jiffies
 * @retries:	number of times to retry request
 * @flags:	flags to OR into the request flags
 * @resid:	optional residual length
 *
 * Returns the req->errors value, which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, u64 flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_RECLAIM);
	if (IS_ERR(req))
		return ret;
	blk_rq_set_block_pc(req);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_RECLAIM))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

	if (resid)
		*resid = req->resid_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);

int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
		     int *resid, u64 flags)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, flags, resid);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req_flags);

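/*
 * Example (illustrative sketch only, not taken from this file): a typical
 * caller issues a simple CDB through scsi_execute_req_flags() and then
 * inspects the decoded sense data, roughly along these lines.  The CDB,
 * timeout and retry values below are hypothetical.
 *
 *	unsigned char cdb[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	struct scsi_sense_hdr sshdr;
 *	int result;
 *
 *	result = scsi_execute_req_flags(sdev, cdb, DMA_NONE, NULL, 0,
 *					&sshdr, 30 * HZ, 3, NULL, 0);
 *	if (result && scsi_sense_valid(&sshdr))
 *		scsi_print_sense_hdr(sdev, NULL, &sshdr);
 */
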
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	atomic_dec(&shost->host_busy);
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);

	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled))) {
		spin_lock_irqsave(shost->host_lock, flags);
		scsi_eh_wakeup(shost);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}

	atomic_dec(&sdev->device_busy);
}

static void scsi_kick_queue(struct request_queue *q)
{
	if (q->mq_ops)
		blk_mq_start_hw_queues(q);
	else
		blk_run_queue(q);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	scsi_kick_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_kick_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
	if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
		return true;
	if (atomic_read(&sdev->device_blocked) > 0)
		return true;
	return false;
}

static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
	if (starget->can_queue > 0) {
		if (atomic_read(&starget->target_busy) >= starget->can_queue)
			return true;
		if (atomic_read(&starget->target_blocked) > 0)
			return true;
	}
	return false;
}

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
	if (shost->can_queue > 0 &&
	    atomic_read(&shost->host_busy) >= shost->can_queue)
		return true;
	if (atomic_read(&shost->host_blocked) > 0)
		return true;
	if (shost->host_self_blocked)
		return true;
	return false;
}

static void scsi_starved_list_run(struct Scsi_Host *shost)
{
	LIST_HEAD(starved_list);
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		struct request_queue *slq;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		/*
		 * Once we drop the host lock, a racing scsi_remove_device()
		 * call may remove the sdev from the starved list and destroy
		 * it and the queue.  Mitigate by taking a reference to the
		 * queue and never touching the sdev again after we drop the
		 * host lock.  Note: if __scsi_remove_device() invokes
		 * blk_cleanup_queue() before the queue is run from this
		 * function then blk_run_queue() will return immediately since
		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
		 */
		slq = sdev->request_queue;
		if (!blk_get_queue(slq))
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);

		scsi_kick_queue(slq);
		blk_put_queue(slq);

		spin_lock_irqsave(shost->host_lock, flags);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:   scsi_run_queue()
 *
 * Purpose:    Select a proper request queue to serve next
 *
 * Arguments:  q       - last request's queue
 *
 * Returns:    Nothing
 *
 * Notes:      The previous command was completely finished, start
 *             a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);
	if (!list_empty(&sdev->host->starved_list))
		scsi_starved_list_run(sdev->host);

	if (q->mq_ops)
		blk_mq_start_stopped_hw_queues(q, false);
	else
		blk_run_queue(q);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_unprep_request(req);
	req->special = NULL;
	scsi_put_command(cmd);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
{
	if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS)
		return;
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
{
	struct scatterlist *first_chunk = NULL;
	int ret;

	BUG_ON(!nents);

	if (mq) {
		if (nents <= SCSI_MAX_SG_SEGMENTS) {
			sdb->table.nents = sdb->table.orig_nents = nents;
			sg_init_table(sdb->table.sgl, nents);
			return 0;
		}
		first_chunk = sdb->table.sgl;
	}

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       first_chunk, GFP_ATOMIC, scsi_sg_alloc);
	if (unlikely(ret))
		scsi_free_sgtable(sdb, mq);
	return ret;
}

static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
	if (cmd->request->cmd_type == REQ_TYPE_FS) {
		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);

		if (drv->uninit_command)
			drv->uninit_command(cmd);
	}
}

static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb, true);
	if (cmd->request->next_rq && cmd->request->next_rq->special)
		scsi_free_sgtable(cmd->request->next_rq->special, true);
	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb, true);
}

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	scsi_mq_free_sgtables(cmd);
	scsi_uninit_cmd(cmd);

	if (shost->use_cmd_list) {
		BUG_ON(list_empty(&cmd->list));
		spin_lock_irqsave(&sdev->list_lock, flags);
		list_del_init(&cmd->list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);
	}
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Free resources allocated for a scsi_command.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb, false);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb, false);
}

static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
{
	struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;

	scsi_free_sgtable(bidi_sdb, false);
	kmem_cache_free(scsi_sdb_cache, bidi_sdb);
	cmd->request->next_rq->special = NULL;
}

static bool scsi_end_request(struct request *req, int error,
		unsigned int bytes, unsigned int bidi_bytes)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	if (blk_update_request(req, error, bytes))
		return true;

	/* Bidi request must be completed as a whole */
	if (unlikely(bidi_bytes) &&
	    blk_update_request(req->next_rq, error, bidi_bytes))
		return true;

	if (blk_queue_add_random(q))
		add_disk_randomness(req->rq_disk);

	if (req->mq_ctx) {
		/*
		 * In the MQ case the command gets freed by __blk_mq_end_request,
		 * so we have to do all cleanup that depends on it earlier.
		 *
		 * We also can't kick the queues from irq context, so we
		 * will have to defer it to a workqueue.
		 */
		scsi_mq_uninit_cmd(cmd);

		__blk_mq_end_request(req, error);

		if (scsi_target(sdev)->single_lun ||
		    !list_empty(&sdev->host->starved_list))
			kblockd_schedule_work(&sdev->requeue_work);
		else
			blk_mq_start_stopped_hw_queues(q, true);
	} else {
		unsigned long flags;

		if (bidi_bytes)
			scsi_release_bidi_buffers(cmd);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_finish_request(req, error);
		spin_unlock_irqrestore(q->queue_lock, flags);

		scsi_release_buffers(cmd);

		scsi_put_command(cmd);
		scsi_run_queue(q);
	}

	put_device(&sdev->sdev_gendev);
	return false;
}

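/*
 * Note on the return convention of scsi_end_request() above: it returns
 * true while the request has not yet been fully completed, i.e. the caller
 * still owns the command and must decide what to do with the leftover
 * bytes; it returns false once the request has been finished, the command's
 * resources released and the sdev reference dropped.  That is why callers
 * that complete all bytes at once treat a true return as a bug, e.g.:
 *
 *	if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
 *		BUG();
 */
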
/**
 * __scsi_error_from_host_byte - translate SCSI error code into errno
 * @cmd:	SCSI command (unused)
 * @result:	scsi error code
 *
 * Translate SCSI error code into standard UNIX errno.
 * Return values:
 * -ENOLINK	temporary transport failure
 * -EREMOTEIO	permanent target failure, do not retry
 * -EBADE	permanent nexus failure, retry on other path
 * -ENOSPC	No write space available
 * -ENODATA	Medium error
 * -EIO		unspecified I/O error
 */
static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
	int error = 0;

	switch(host_byte(result)) {
	case DID_TRANSPORT_FAILFAST:
		error = -ENOLINK;
		break;
	case DID_TARGET_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EREMOTEIO;
		break;
	case DID_NEXUS_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EBADE;
		break;
	case DID_ALLOC_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -ENOSPC;
		break;
	case DID_MEDIUM_ERROR:
		set_host_byte(cmd, DID_OK);
		error = -ENODATA;
		break;
	default:
		error = -EIO;
		break;
	}

	return error;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       We will finish off the specified number of sectors.  If we
 *		are done, the command block will be released and the queue
 *		function will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call __scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call scsi_end_request() with -EIO to fail
 *		   the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	bool sense_valid = false;
	int sense_deferred = 0, level = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	unsigned long wait_for = (cmd->allowed + 1) * req->timeout;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = __scsi_error_from_host_byte(cmd, result);
		}
		/*
		 * __scsi_error_from_host_byte may have reset the host_byte
		 */
		req->errors = cmd->result;

		req->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands must be completed as a whole,
			 * both sides at once.
			 */
			req->next_rq->resid_len = scsi_in(cmd)->resid;
			if (scsi_end_request(req, 0, blk_rq_bytes(req),
					blk_rq_bytes(req->next_rq)))
				BUG();
			return;
		}
	} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
		/*
		 * Certain non BLOCK_PC requests are commands that don't
		 * actually transfer anything (FLUSH), so cannot use
		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
		 * This sets the error explicitly for the problem case.
		 */
		error = __scsi_error_from_host_byte(cmd, result);
	}

	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
	BUG_ON(blk_bidi_rq(req));

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
		"%u sectors total, %d bytes done.\n",
		blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated
	 * as success, so fiddle the result code here.  For BLOCK_PC
	 * we already took a copy of the original into rq->errors which
	 * is what gets returned to the user
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
		 * print since caller wants ATA registers. Only occurs on
		 * SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			;
		else if (!(req->cmd_flags & REQ_QUIET))
			scsi_print_sense(cmd);
		result = 0;
		/* BLOCK_PC may have set error */
		error = 0;
	}

	/*
	 * If we finished all bytes in the request we are done now.
	 */
	if (!scsi_end_request(req, error, good_bytes, 0))
		return;

	/*
	 * Kill remainder if no retries.
	 */
	if (error && scsi_noretry_cmd(cmd)) {
		if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
			BUG();
		return;
	}

	/*
	 * If there had been no error, but we have leftover bytes in the
	 * request, just queue the command up again.
	 */
	if (result == 0)
		goto requeue;

	error = __scsi_error_from_host_byte(cmd, result);

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				action = ACTION_FAIL;
				error = -EILSEQ;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
				action = ACTION_FAIL;
				error = -EREMOTEIO;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) /* DIF */
				error = -EILSEQ;
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					action = ACTION_FAIL;
					break;
				}
			} else
				action = ACTION_FAIL;
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			action = ACTION_FAIL;
			break;
		}
	} else
		action = ACTION_FAIL;

	if (action != ACTION_FAIL &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
		action = ACTION_FAIL;

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		if (!(req->cmd_flags & REQ_QUIET)) {
			static DEFINE_RATELIMIT_STATE(_rs,
					DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

			if (unlikely(scsi_logging_level))
				level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
						       SCSI_LOG_MLCOMPLETE_BITS);

			/*
			 * if logging is enabled the failure will be printed
			 * in scsi_log_completion(), so avoid duplicate messages
			 */
			if (!level && __ratelimit(&_rs)) {
				scsi_print_result(cmd, NULL, FAILED);
				if (driver_byte(result) & DRIVER_SENSE)
					scsi_print_sense(cmd);
				scsi_print_command(cmd);
			}
		}
		if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
			return;
		/*FALLTHRU*/
	case ACTION_REPREP:
	requeue:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		if (q->mq_ops) {
			cmd->request->cmd_flags &= ~REQ_DONTPREP;
			scsi_mq_uninit_cmd(cmd);
			scsi_mq_requeue_cmd(cmd);
		} else {
			scsi_release_buffers(cmd);
			scsi_requeue_command(q, cmd);
		}
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					req->mq_ctx != NULL)))
		return BLKPREP_DEFER;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_bytes(req);
	return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *rq = cmd->request;
	bool is_mq = (rq->mq_ctx != NULL);
	int error;

	BUG_ON(!rq->nr_phys_segments);

	error = scsi_init_sgtable(rq, &cmd->sdb);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(rq)) {
		if (!rq->q->mq_ops) {
			struct scsi_data_buffer *bidi_sdb =
				kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
			if (!bidi_sdb) {
				error = BLKPREP_DEFER;
				goto err_exit;
			}

			rq->next_rq->special = bidi_sdb;
		}

		error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		if (prot_sdb == NULL) {
			/*
			 * This can happen if someone (e.g. multipath)
			 * queues a command to a device on an adapter
			 * that does not support DIX.
			 */
			WARN_ON_ONCE(1);
			error = BLKPREP_KILL;
			goto err_exit;
		}

		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));
		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;
err_exit:
	if (is_mq) {
		scsi_mq_free_sgtables(cmd);
	} else {
		scsi_release_buffers(cmd);
		cmd->request->special = NULL;
		scsi_put_command(cmd);
		put_device(&sdev->sdev_gendev);
	}
	return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		/* Bail if we can't get a reference to the device */
		if (!get_device(&sdev->sdev_gendev))
			return NULL;

		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd)) {
			put_device(&sdev->sdev_gendev);
			return NULL;
		}
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;
	cmd->prot_op = SCSI_PROT_NORMAL;

	return cmd;
}

static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret = scsi_init_io(cmd);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
	}

	cmd->cmd_len = req->cmd_len;
	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}

/*
 * Setup a REQ_TYPE_FS command.  These are simple requests from filesystems
 * that still need to be translated to SCSI CDBs from the ULD.
 */
static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
		int ret = sdev->handler->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_cmd_to_driver(cmd)->init_command(cmd);
}

static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	switch (req->cmd_type) {
	case REQ_TYPE_FS:
		return scsi_setup_fs_cmnd(sdev, req);
	case REQ_TYPE_BLOCK_PC:
		return scsi_setup_blk_pc_cmnd(sdev, req);
	default:
		return BLKPREP_KILL;
	}
}

static int
scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			ret = BLKPREP_DEFER;
			break;
		case SDEV_QUIESCE:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}

static int
scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			put_device(&sdev->sdev_gendev);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we schedule a callback to happen
		 * shortly.
		 */
		if (atomic_read(&sdev->device_busy) == 0)
			blk_delay_queue(q, SCSI_QUEUE_DELAY);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd;
	int ret;

	ret = scsi_prep_state_check(sdev, req);
	if (ret != BLKPREP_OK)
		goto out;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd)) {
		ret = BLKPREP_DEFER;
		goto out;
	}

	ret = scsi_setup_cmnd(sdev, req);
out:
	return scsi_prep_return(q, req, ret);
}

static void scsi_unprep_fn(struct request_queue *q, struct request *req)
{
	scsi_uninit_cmd(req->special);
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	unsigned int busy;

	busy = atomic_inc_return(&sdev->device_busy) - 1;
	if (atomic_read(&sdev->device_blocked)) {
		if (busy)
			goto out_dec;

		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (atomic_dec_return(&sdev->device_blocked) > 0) {
			/*
			 * For the MQ case we take care of this in the caller.
			 */
			if (!q->mq_ops)
				blk_delay_queue(q, SCSI_QUEUE_DELAY);
			goto out_dec;
		}
		SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
	}

	if (busy >= sdev->queue_depth)
		goto out_dec;

	return 1;
out_dec:
	atomic_dec(&sdev->device_busy);
	return 0;
}

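/*
 * scsi_dev_queue_ready() above and scsi_target_queue_ready() /
 * scsi_host_queue_ready() below share the same pattern: optimistically
 * bump the relevant busy counter with atomic_inc_return(), then back the
 * increment out again on the out_dec/starved paths if the device, target
 * or host turns out to be blocked or already at its queue depth.
 */
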
/*
 * scsi_target_queue_ready: checks if we can send commands to the target
 * @sdev: scsi device on starget to check.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);
	unsigned int busy;

	if (starget->single_lun) {
		spin_lock_irq(shost->host_lock);
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev) {
			spin_unlock_irq(shost->host_lock);
			return 0;
		}
		starget->starget_sdev_user = sdev;
		spin_unlock_irq(shost->host_lock);
	}

	if (starget->can_queue <= 0)
		return 1;

	busy = atomic_inc_return(&starget->target_busy) - 1;
	if (atomic_read(&starget->target_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (atomic_dec_return(&starget->target_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
				 "unblocking target at zero depth\n"));
	}

	if (busy >= starget->can_queue)
		goto starved;

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	list_move_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);
	return 0;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	unsigned int busy;

	if (scsi_host_in_recovery(shost))
		return 0;

	busy = atomic_inc_return(&shost->host_busy) - 1;
	if (atomic_read(&shost->host_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (atomic_dec_return(&shost->host_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3,
			shost_printk(KERN_INFO, shost,
				     "unblocking host at zero depth\n"));
	}

	if (shost->can_queue > 0 && busy >= shost->can_queue)
		goto starved;
	if (shost->host_self_blocked)
		goto starved;

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry)) {
		spin_lock_irq(shost->host_lock);
		if (!list_empty(&sdev->starved_entry))
			list_del_init(&sdev->starved_entry);
		spin_unlock_irq(shost->host_lock);
	}

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	if (list_empty(&sdev->starved_entry))
		list_add_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	atomic_dec(&shost->host_busy);
	return 0;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
 * needs to return 'not busy'. Otherwise, request stacking drivers
 * may hold requests forever.
 */
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;

	if (blk_queue_dying(q))
		return 0;

	shost = sdev->host;

	/*
	 * Ignore host/starget busy state.
	 * Since block layer does not have a concept of fairness across
	 * multiple queues, congestion of host/starget needs to be handled
	 * in SCSI layer.
	 */
	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct scsi_target *starget;
	struct Scsi_Host *shost;

	blk_start_request(req);

	scmd_printk(KERN_INFO, cmd, "killing request\n");

	sdev = cmd->device;
	starget = scsi_target(sdev);
	shost = sdev->host;
	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * The SCSI completion path will call scsi_device_unbusy(), which
	 * decrements the busy counters, so bump them here as the normal
	 * issue path would.
	 */
	atomic_inc(&sdev->device_busy);
	atomic_inc(&shost->host_busy);
	if (starget->can_queue > 0)
		atomic_inc(&starget->target_busy);

	blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}

/**
 * scsi_dispatch_command - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: nonzero if the request was rejected and the device's queue needs
 * to be plugged.
 */
static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int rtn = 0;

	atomic_inc(&cmd->device->iorequest_cnt);

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		goto done;
	}

	/* Check to see if the scsi lld made this device blocked. */
	if (unlikely(scsi_device_blocked(cmd->device))) {
		/*
		 * in blocked state, the command is just put back on
		 * the device queue.  The suspend state has already
		 * blocked the queue so future requests should not
		 * occur until the device transitions out of the
		 * suspend state.
		 */
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : device blocked\n"));
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/* Store the LUN value in cmnd, if needed. */
	if (cmd->device->lun_in_cdb)
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);

	scsi_log_send(cmd);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			       "queuecommand : command too long. "
			       "cdb_size=%d host->max_cmd_len=%d\n",
			       cmd->cmd_len, cmd->device->host->max_cmd_len));
		cmd->result = (DID_ABORT << 16);
		goto done;
	}

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		goto done;

	}

	trace_scsi_dispatch_cmd_start(cmd);
	rtn = host->hostt->queuecommand(host, cmd);
	if (rtn) {
		trace_scsi_dispatch_cmd_error(cmd, rtn);
		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
			rtn = SCSI_MLQUEUE_HOST_BUSY;

		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : request rejected\n"));
	}

	return rtn;
 done:
	cmd->scsi_done(cmd);
	return 0;
}

/**
 * scsi_done - Invoke completion on finished SCSI command.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * Description: This function is the mid-level's (SCSI Core) interrupt routine,
 * which regains ownership of the SCSI command (de facto) from a LLDD, and
 * calls blk_complete_request() for further processing.
 *
 * This function is interrupt context safe.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
	trace_scsi_dispatch_cmd_done(cmd);
	blk_complete_request(cmd->request);
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	for (;;) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = blk_peek_request(q);
		if (!req)
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}

		if (!scsi_dev_queue_ready(q, sdev))
			break;

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blk_start_request(req);

		spin_unlock_irq(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}

		/*
		 * We hit this when the driver is using a host wide
		 * tag map.  For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag.  Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) {
			spin_lock_irq(shost->host_lock);
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			spin_unlock_irq(shost->host_lock);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto host_not_ready;

		if (sdev->simple_tags)
			cmd->flags |= SCMD_TAGGED;
		else
			cmd->flags &= ~SCMD_TAGGED;

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		cmd->scsi_done = scsi_done;
		rtn = scsi_dispatch_cmd(cmd);
		if (rtn) {
			scsi_queue_insert(cmd, rtn);
			spin_lock_irq(q->queue_lock);
			goto out_delay;
		}
		spin_lock_irq(q->queue_lock);
	}

	return;

 host_not_ready:
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
 not_ready:
	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	atomic_dec(&sdev->device_busy);
out_delay:
	if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
		blk_delay_queue(q, SCSI_QUEUE_DELAY);
}

static inline int prep_to_mq(int ret)
{
	switch (ret) {
	case BLKPREP_OK:
		return 0;
	case BLKPREP_DEFER:
		return BLK_MQ_RQ_QUEUE_BUSY;
	default:
		return BLK_MQ_RQ_QUEUE_ERROR;
	}
}

static int scsi_mq_prep_fn(struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = req->q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned char *sense_buf = cmd->sense_buffer;
	struct scatterlist *sg;

	memset(cmd, 0, sizeof(struct scsi_cmnd));

	req->special = cmd;

	cmd->request = req;
	cmd->device = sdev;
	cmd->sense_buffer = sense_buf;

	cmd->tag = req->tag;

	cmd->cmnd = req->cmd;
	cmd->prot_op = SCSI_PROT_NORMAL;

	INIT_LIST_HEAD(&cmd->list);
	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
	cmd->jiffies_at_alloc = jiffies;

	if (shost->use_cmd_list) {
		spin_lock_irq(&sdev->list_lock);
		list_add_tail(&cmd->list, &sdev->cmd_list);
		spin_unlock_irq(&sdev->list_lock);
	}

	sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
	cmd->sdb.table.sgl = sg;

	if (scsi_host_get_prot(shost)) {
		cmd->prot_sdb = (void *)sg +
			min_t(unsigned int,
			      shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) *
			sizeof(struct scatterlist);
		memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));

		cmd->prot_sdb->table.sgl =
			(struct scatterlist *)(cmd->prot_sdb + 1);
	}

	if (blk_bidi_rq(req)) {
		struct request *next_rq = req->next_rq;
		struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq);

		memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer));
		bidi_sdb->table.sgl =
			(struct scatterlist *)(bidi_sdb + 1);

		next_rq->special = bidi_sdb;
	}

	blk_mq_start_request(req);

	return scsi_setup_cmnd(sdev, req);
}

static void scsi_mq_done(struct scsi_cmnd *cmd)
{
	trace_scsi_dispatch_cmd_done(cmd);
	blk_mq_complete_request(cmd->request, cmd->request->errors);
}

static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	int ret;
	int reason;

	ret = prep_to_mq(scsi_prep_state_check(sdev, req));
	if (ret)
		goto out;

	ret = BLK_MQ_RQ_QUEUE_BUSY;
	if (!get_device(&sdev->sdev_gendev))
		goto out;

	if (!scsi_dev_queue_ready(q, sdev))
		goto out_put_device;
	if (!scsi_target_queue_ready(shost, sdev))
		goto out_dec_device_busy;
	if (!scsi_host_queue_ready(q, shost, sdev))
		goto out_dec_target_busy;


	if (!(req->cmd_flags & REQ_DONTPREP)) {
		ret = prep_to_mq(scsi_mq_prep_fn(req));
		if (ret)
			goto out_dec_host_busy;
		req->cmd_flags |= REQ_DONTPREP;
	} else {
		blk_mq_start_request(req);
	}

	if (sdev->simple_tags)
		cmd->flags |= SCMD_TAGGED;
	else
		cmd->flags &= ~SCMD_TAGGED;

scsi_init_cmd_errh(cmd); 2006 cmd->scsi_done = scsi_mq_done; 2007 2008 reason = scsi_dispatch_cmd(cmd); 2009 if (reason) { 2010 scsi_set_blocked(cmd, reason); 2011 ret = BLK_MQ_RQ_QUEUE_BUSY; 2012 goto out_dec_host_busy; 2013 } 2014 2015 return BLK_MQ_RQ_QUEUE_OK; 2016 2017 out_dec_host_busy: 2018 atomic_dec(&shost->host_busy); 2019 out_dec_target_busy: 2020 if (scsi_target(sdev)->can_queue > 0) 2021 atomic_dec(&scsi_target(sdev)->target_busy); 2022 out_dec_device_busy: 2023 atomic_dec(&sdev->device_busy); 2024 out_put_device: 2025 put_device(&sdev->sdev_gendev); 2026 out: 2027 switch (ret) { 2028 case BLK_MQ_RQ_QUEUE_BUSY: 2029 blk_mq_stop_hw_queue(hctx); 2030 if (atomic_read(&sdev->device_busy) == 0 && 2031 !scsi_device_blocked(sdev)) 2032 blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY); 2033 break; 2034 case BLK_MQ_RQ_QUEUE_ERROR: 2035 /* 2036 * Make sure to release all allocated resources when 2037 * we hit an error, as we will never see this command 2038 * again. 2039 */ 2040 if (req->cmd_flags & REQ_DONTPREP) 2041 scsi_mq_uninit_cmd(cmd); 2042 break; 2043 default: 2044 break; 2045 } 2046 return ret; 2047 } 2048 2049 static enum blk_eh_timer_return scsi_timeout(struct request *req, 2050 bool reserved) 2051 { 2052 if (reserved) 2053 return BLK_EH_RESET_TIMER; 2054 return scsi_times_out(req); 2055 } 2056 2057 static int scsi_init_request(void *data, struct request *rq, 2058 unsigned int hctx_idx, unsigned int request_idx, 2059 unsigned int numa_node) 2060 { 2061 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); 2062 2063 cmd->sense_buffer = kzalloc_node(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL, 2064 numa_node); 2065 if (!cmd->sense_buffer) 2066 return -ENOMEM; 2067 return 0; 2068 } 2069 2070 static void scsi_exit_request(void *data, struct request *rq, 2071 unsigned int hctx_idx, unsigned int request_idx) 2072 { 2073 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); 2074 2075 kfree(cmd->sense_buffer); 2076 } 2077 2078 static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) 2079 { 2080 struct device *host_dev; 2081 u64 bounce_limit = 0xffffffff; 2082 2083 if (shost->unchecked_isa_dma) 2084 return BLK_BOUNCE_ISA; 2085 /* 2086 * Platforms with virtual-DMA translation 2087 * hardware have no practical limit.
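 * PCI_DMA_BUS_IS_PHYS is zero on such platforms, so BLK_BOUNCE_ANY is
 * returned below; otherwise the limit is derived from the DMA mask of
 * the host's DMA device via dma_max_pfn().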
2088 */ 2089 if (!PCI_DMA_BUS_IS_PHYS) 2090 return BLK_BOUNCE_ANY; 2091 2092 host_dev = scsi_get_device(shost); 2093 if (host_dev && host_dev->dma_mask) 2094 bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT; 2095 2096 return bounce_limit; 2097 } 2098 2099 static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) 2100 { 2101 struct device *dev = shost->dma_dev; 2102 2103 /* 2104 * this limit is imposed by hardware restrictions 2105 */ 2106 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, 2107 SCSI_MAX_SG_CHAIN_SEGMENTS)); 2108 2109 if (scsi_host_prot_dma(shost)) { 2110 shost->sg_prot_tablesize = 2111 min_not_zero(shost->sg_prot_tablesize, 2112 (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS); 2113 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize); 2114 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); 2115 } 2116 2117 blk_queue_max_hw_sectors(q, shost->max_sectors); 2118 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); 2119 blk_queue_segment_boundary(q, shost->dma_boundary); 2120 dma_set_seg_boundary(dev, shost->dma_boundary); 2121 2122 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); 2123 2124 if (!shost->use_clustering) 2125 q->limits.cluster = 0; 2126 2127 /* 2128 * set a reasonable default alignment on word boundaries: the 2129 * host and device may alter it using 2130 * blk_queue_update_dma_alignment() later. 2131 */ 2132 blk_queue_dma_alignment(q, 0x03); 2133 } 2134 2135 struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, 2136 request_fn_proc *request_fn) 2137 { 2138 struct request_queue *q; 2139 2140 q = blk_init_queue(request_fn, NULL); 2141 if (!q) 2142 return NULL; 2143 __scsi_init_queue(shost, q); 2144 return q; 2145 } 2146 EXPORT_SYMBOL(__scsi_alloc_queue); 2147 2148 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) 2149 { 2150 struct request_queue *q; 2151 2152 q = __scsi_alloc_queue(sdev->host, scsi_request_fn); 2153 if (!q) 2154 return NULL; 2155 2156 blk_queue_prep_rq(q, scsi_prep_fn); 2157 blk_queue_unprep_rq(q, scsi_unprep_fn); 2158 blk_queue_softirq_done(q, scsi_softirq_done); 2159 blk_queue_rq_timed_out(q, scsi_times_out); 2160 blk_queue_lld_busy(q, scsi_lld_busy); 2161 return q; 2162 } 2163 2164 static struct blk_mq_ops scsi_mq_ops = { 2165 .map_queue = blk_mq_map_queue, 2166 .queue_rq = scsi_queue_rq, 2167 .complete = scsi_softirq_done, 2168 .timeout = scsi_timeout, 2169 .init_request = scsi_init_request, 2170 .exit_request = scsi_exit_request, 2171 }; 2172 2173 struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev) 2174 { 2175 sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set); 2176 if (IS_ERR(sdev->request_queue)) 2177 return NULL; 2178 2179 sdev->request_queue->queuedata = sdev; 2180 __scsi_init_queue(sdev->host, sdev->request_queue); 2181 return sdev->request_queue; 2182 } 2183 2184 int scsi_mq_setup_tags(struct Scsi_Host *shost) 2185 { 2186 unsigned int cmd_size, sgl_size, tbl_size; 2187 2188 tbl_size = shost->sg_tablesize; 2189 if (tbl_size > SCSI_MAX_SG_SEGMENTS) 2190 tbl_size = SCSI_MAX_SG_SEGMENTS; 2191 sgl_size = tbl_size * sizeof(struct scatterlist); 2192 cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; 2193 if (scsi_host_get_prot(shost)) 2194 cmd_size += sizeof(struct scsi_data_buffer) + sgl_size; 2195 2196 memset(&shost->tag_set, 0, sizeof(shost->tag_set)); 2197 shost->tag_set.ops = &scsi_mq_ops; 2198 shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? 
: 1; 2199 shost->tag_set.queue_depth = shost->can_queue; 2200 shost->tag_set.cmd_size = cmd_size; 2201 shost->tag_set.numa_node = NUMA_NO_NODE; 2202 shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; 2203 shost->tag_set.flags |= 2204 BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy); 2205 shost->tag_set.driver_data = shost; 2206 2207 return blk_mq_alloc_tag_set(&shost->tag_set); 2208 } 2209 2210 void scsi_mq_destroy_tags(struct Scsi_Host *shost) 2211 { 2212 blk_mq_free_tag_set(&shost->tag_set); 2213 } 2214 2215 /* 2216 * Function: scsi_block_requests() 2217 * 2218 * Purpose: Utility function used by low-level drivers to prevent further 2219 * commands from being queued to the device. 2220 * 2221 * Arguments: shost - Host in question 2222 * 2223 * Returns: Nothing 2224 * 2225 * Lock status: No locks are assumed held. 2226 * 2227 * Notes: There is no timer nor any other means by which the requests 2228 * get unblocked other than the low-level driver calling 2229 * scsi_unblock_requests(). 2230 */ 2231 void scsi_block_requests(struct Scsi_Host *shost) 2232 { 2233 shost->host_self_blocked = 1; 2234 } 2235 EXPORT_SYMBOL(scsi_block_requests); 2236 2237 /* 2238 * Function: scsi_unblock_requests() 2239 * 2240 * Purpose: Utility function used by low-level drivers to allow further 2241 * commands to be queued to the device. 2242 * 2243 * Arguments: shost - Host in question 2244 * 2245 * Returns: Nothing 2246 * 2247 * Lock status: No locks are assumed held. 2248 * 2249 * Notes: There is no timer nor any other means by which the requests 2250 * get unblocked other than the low-level driver calling 2251 * scsi_unblock_requests(). 2252 * 2253 * This is done as an API function so that changes to the 2254 * internals of the scsi mid-layer won't require wholesale 2255 * changes to drivers that use this feature.
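 *
 *		A low-level driver normally brackets an operation that
 *		must not see new commands with the two calls, e.g.
 *		(sketch only; the reset helper is a hypothetical driver
 *		function):
 *
 *			scsi_block_requests(shost);
 *			my_driver_reset_hardware(shost);
 *			scsi_unblock_requests(shost);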
2256 */ 2257 void scsi_unblock_requests(struct Scsi_Host *shost) 2258 { 2259 shost->host_self_blocked = 0; 2260 scsi_run_host_queues(shost); 2261 } 2262 EXPORT_SYMBOL(scsi_unblock_requests); 2263 2264 int __init scsi_init_queue(void) 2265 { 2266 int i; 2267 2268 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer", 2269 sizeof(struct scsi_data_buffer), 2270 0, 0, NULL); 2271 if (!scsi_sdb_cache) { 2272 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n"); 2273 return -ENOMEM; 2274 } 2275 2276 for (i = 0; i < SG_MEMPOOL_NR; i++) { 2277 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 2278 int size = sgp->size * sizeof(struct scatterlist); 2279 2280 sgp->slab = kmem_cache_create(sgp->name, size, 0, 2281 SLAB_HWCACHE_ALIGN, NULL); 2282 if (!sgp->slab) { 2283 printk(KERN_ERR "SCSI: can't init sg slab %s\n", 2284 sgp->name); 2285 goto cleanup_sdb; 2286 } 2287 2288 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, 2289 sgp->slab); 2290 if (!sgp->pool) { 2291 printk(KERN_ERR "SCSI: can't init sg mempool %s\n", 2292 sgp->name); 2293 goto cleanup_sdb; 2294 } 2295 } 2296 2297 return 0; 2298 2299 cleanup_sdb: 2300 for (i = 0; i < SG_MEMPOOL_NR; i++) { 2301 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 2302 if (sgp->pool) 2303 mempool_destroy(sgp->pool); 2304 if (sgp->slab) 2305 kmem_cache_destroy(sgp->slab); 2306 } 2307 kmem_cache_destroy(scsi_sdb_cache); 2308 2309 return -ENOMEM; 2310 } 2311 2312 void scsi_exit_queue(void) 2313 { 2314 int i; 2315 2316 kmem_cache_destroy(scsi_sdb_cache); 2317 2318 for (i = 0; i < SG_MEMPOOL_NR; i++) { 2319 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 2320 mempool_destroy(sgp->pool); 2321 kmem_cache_destroy(sgp->slab); 2322 } 2323 } 2324 2325 /** 2326 * scsi_mode_select - issue a mode select 2327 * @sdev: SCSI device to be queried 2328 * @pf: Page format bit (1 == standard, 0 == vendor specific) 2329 * @sp: Save page bit (0 == don't save, 1 == save) 2330 * @modepage: mode page being requested 2331 * @buffer: request buffer (may not be smaller than eight bytes) 2332 * @len: length of request buffer. 2333 * @timeout: command timeout 2334 * @retries: number of retries before failing 2335 * @data: returns a structure abstracting the mode header data 2336 * @sshdr: place to put sense data (or NULL if no sense to be collected). 2337 * must be SCSI_SENSE_BUFFERSIZE big. 2338 * 2339 * Returns zero if successful; negative error number or scsi 2340 * status on error 2341 * 2342 */ 2343 int 2344 scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage, 2345 unsigned char *buffer, int len, int timeout, int retries, 2346 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 2347 { 2348 unsigned char cmd[10]; 2349 unsigned char *real_buffer; 2350 int ret; 2351 2352 memset(cmd, 0, sizeof(cmd)); 2353 cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0); 2354 2355 if (sdev->use_10_for_ms) { 2356 if (len > 65535) 2357 return -EINVAL; 2358 real_buffer = kmalloc(8 + len, GFP_KERNEL); 2359 if (!real_buffer) 2360 return -ENOMEM; 2361 memcpy(real_buffer + 8, buffer, len); 2362 len += 8; 2363 real_buffer[0] = 0; 2364 real_buffer[1] = 0; 2365 real_buffer[2] = data->medium_type; 2366 real_buffer[3] = data->device_specific; 2367 real_buffer[4] = data->longlba ? 
0x01 : 0; 2368 real_buffer[5] = 0; 2369 real_buffer[6] = data->block_descriptor_length >> 8; 2370 real_buffer[7] = data->block_descriptor_length; 2371 2372 cmd[0] = MODE_SELECT_10; 2373 cmd[7] = len >> 8; 2374 cmd[8] = len; 2375 } else { 2376 if (len > 255 || data->block_descriptor_length > 255 || 2377 data->longlba) 2378 return -EINVAL; 2379 2380 real_buffer = kmalloc(4 + len, GFP_KERNEL); 2381 if (!real_buffer) 2382 return -ENOMEM; 2383 memcpy(real_buffer + 4, buffer, len); 2384 len += 4; 2385 real_buffer[0] = 0; 2386 real_buffer[1] = data->medium_type; 2387 real_buffer[2] = data->device_specific; 2388 real_buffer[3] = data->block_descriptor_length; 2389 2390 2391 cmd[0] = MODE_SELECT; 2392 cmd[4] = len; 2393 } 2394 2395 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len, 2396 sshdr, timeout, retries, NULL); 2397 kfree(real_buffer); 2398 return ret; 2399 } 2400 EXPORT_SYMBOL_GPL(scsi_mode_select); 2401 2402 /** 2403 * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary. 2404 * @sdev: SCSI device to be queried 2405 * @dbd: set if mode sense will allow block descriptors to be returned 2406 * @modepage: mode page being requested 2407 * @buffer: request buffer (may not be smaller than eight bytes) 2408 * @len: length of request buffer. 2409 * @timeout: command timeout 2410 * @retries: number of retries before failing 2411 * @data: returns a structure abstracting the mode header data 2412 * @sshdr: place to put sense data (or NULL if no sense to be collected). 2413 * must be SCSI_SENSE_BUFFERSIZE big. 2414 * 2415 * Returns zero if unsuccessful, or the header offset (either 4 2416 * or 8 depending on whether a six or ten byte command was 2417 * issued) if successful. 2418 */ 2419 int 2420 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, 2421 unsigned char *buffer, int len, int timeout, int retries, 2422 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 2423 { 2424 unsigned char cmd[12]; 2425 int use_10_for_ms; 2426 int header_length; 2427 int result, retry_count = retries; 2428 struct scsi_sense_hdr my_sshdr; 2429 2430 memset(data, 0, sizeof(*data)); 2431 memset(&cmd[0], 0, 12); 2432 cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ 2433 cmd[2] = modepage; 2434 2435 /* caller might not be interested in sense, but we need it */ 2436 if (!sshdr) 2437 sshdr = &my_sshdr; 2438 2439 retry: 2440 use_10_for_ms = sdev->use_10_for_ms; 2441 2442 if (use_10_for_ms) { 2443 if (len < 8) 2444 len = 8; 2445 2446 cmd[0] = MODE_SENSE_10; 2447 cmd[8] = len; 2448 header_length = 8; 2449 } else { 2450 if (len < 4) 2451 len = 4; 2452 2453 cmd[0] = MODE_SENSE; 2454 cmd[4] = len; 2455 header_length = 4; 2456 } 2457 2458 memset(buffer, 0, len); 2459 2460 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, 2461 sshdr, timeout, retries, NULL); 2462 2463 /* This code looks awful: what it's doing is making sure an 2464 * ILLEGAL REQUEST sense return identifies the actual command 2465 * byte as the problem. 
MODE_SENSE commands can return 2466 * ILLEGAL REQUEST if the code page isn't supported */ 2467 2468 if (use_10_for_ms && !scsi_status_is_good(result) && 2469 (driver_byte(result) & DRIVER_SENSE)) { 2470 if (scsi_sense_valid(sshdr)) { 2471 if ((sshdr->sense_key == ILLEGAL_REQUEST) && 2472 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { 2473 /* 2474 * Invalid command operation code 2475 */ 2476 sdev->use_10_for_ms = 0; 2477 goto retry; 2478 } 2479 } 2480 } 2481 2482 if(scsi_status_is_good(result)) { 2483 if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b && 2484 (modepage == 6 || modepage == 8))) { 2485 /* Initio breakage? */ 2486 header_length = 0; 2487 data->length = 13; 2488 data->medium_type = 0; 2489 data->device_specific = 0; 2490 data->longlba = 0; 2491 data->block_descriptor_length = 0; 2492 } else if(use_10_for_ms) { 2493 data->length = buffer[0]*256 + buffer[1] + 2; 2494 data->medium_type = buffer[2]; 2495 data->device_specific = buffer[3]; 2496 data->longlba = buffer[4] & 0x01; 2497 data->block_descriptor_length = buffer[6]*256 2498 + buffer[7]; 2499 } else { 2500 data->length = buffer[0] + 1; 2501 data->medium_type = buffer[1]; 2502 data->device_specific = buffer[2]; 2503 data->block_descriptor_length = buffer[3]; 2504 } 2505 data->header_length = header_length; 2506 } else if ((status_byte(result) == CHECK_CONDITION) && 2507 scsi_sense_valid(sshdr) && 2508 sshdr->sense_key == UNIT_ATTENTION && retry_count) { 2509 retry_count--; 2510 goto retry; 2511 } 2512 2513 return result; 2514 } 2515 EXPORT_SYMBOL(scsi_mode_sense); 2516 2517 /** 2518 * scsi_test_unit_ready - test if unit is ready 2519 * @sdev: scsi device to change the state of. 2520 * @timeout: command timeout 2521 * @retries: number of retries before failing 2522 * @sshdr_external: Optional pointer to struct scsi_sense_hdr for 2523 * returning sense. Make sure that this is cleared before passing 2524 * in. 2525 * 2526 * Returns zero if unsuccessful or an error if TUR failed. For 2527 * removable media, UNIT_ATTENTION sets ->changed flag. 2528 **/ 2529 int 2530 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, 2531 struct scsi_sense_hdr *sshdr_external) 2532 { 2533 char cmd[] = { 2534 TEST_UNIT_READY, 0, 0, 0, 0, 0, 2535 }; 2536 struct scsi_sense_hdr *sshdr; 2537 int result; 2538 2539 if (!sshdr_external) 2540 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); 2541 else 2542 sshdr = sshdr_external; 2543 2544 /* try to eat the UNIT_ATTENTION if there are enough retries */ 2545 do { 2546 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr, 2547 timeout, retries, NULL); 2548 if (sdev->removable && scsi_sense_valid(sshdr) && 2549 sshdr->sense_key == UNIT_ATTENTION) 2550 sdev->changed = 1; 2551 } while (scsi_sense_valid(sshdr) && 2552 sshdr->sense_key == UNIT_ATTENTION && --retries); 2553 2554 if (!sshdr_external) 2555 kfree(sshdr); 2556 return result; 2557 } 2558 EXPORT_SYMBOL(scsi_test_unit_ready); 2559 2560 /** 2561 * scsi_device_set_state - Take the given device through the device state model. 2562 * @sdev: scsi device to change the state of. 2563 * @state: state to change to. 2564 * 2565 * Returns zero if unsuccessful or an error if the requested 2566 * transition is illegal. 
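 *
 *	Callers check the return value and back out on an illegal
 *	transition, e.g. (as scsi_device_quiesce() below does):
 *
 *		err = scsi_device_set_state(sdev, SDEV_QUIESCE);
 *		if (err)
 *			return err;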
2567 */ 2568 int 2569 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) 2570 { 2571 enum scsi_device_state oldstate = sdev->sdev_state; 2572 2573 if (state == oldstate) 2574 return 0; 2575 2576 switch (state) { 2577 case SDEV_CREATED: 2578 switch (oldstate) { 2579 case SDEV_CREATED_BLOCK: 2580 break; 2581 default: 2582 goto illegal; 2583 } 2584 break; 2585 2586 case SDEV_RUNNING: 2587 switch (oldstate) { 2588 case SDEV_CREATED: 2589 case SDEV_OFFLINE: 2590 case SDEV_TRANSPORT_OFFLINE: 2591 case SDEV_QUIESCE: 2592 case SDEV_BLOCK: 2593 break; 2594 default: 2595 goto illegal; 2596 } 2597 break; 2598 2599 case SDEV_QUIESCE: 2600 switch (oldstate) { 2601 case SDEV_RUNNING: 2602 case SDEV_OFFLINE: 2603 case SDEV_TRANSPORT_OFFLINE: 2604 break; 2605 default: 2606 goto illegal; 2607 } 2608 break; 2609 2610 case SDEV_OFFLINE: 2611 case SDEV_TRANSPORT_OFFLINE: 2612 switch (oldstate) { 2613 case SDEV_CREATED: 2614 case SDEV_RUNNING: 2615 case SDEV_QUIESCE: 2616 case SDEV_BLOCK: 2617 break; 2618 default: 2619 goto illegal; 2620 } 2621 break; 2622 2623 case SDEV_BLOCK: 2624 switch (oldstate) { 2625 case SDEV_RUNNING: 2626 case SDEV_CREATED_BLOCK: 2627 break; 2628 default: 2629 goto illegal; 2630 } 2631 break; 2632 2633 case SDEV_CREATED_BLOCK: 2634 switch (oldstate) { 2635 case SDEV_CREATED: 2636 break; 2637 default: 2638 goto illegal; 2639 } 2640 break; 2641 2642 case SDEV_CANCEL: 2643 switch (oldstate) { 2644 case SDEV_CREATED: 2645 case SDEV_RUNNING: 2646 case SDEV_QUIESCE: 2647 case SDEV_OFFLINE: 2648 case SDEV_TRANSPORT_OFFLINE: 2649 case SDEV_BLOCK: 2650 break; 2651 default: 2652 goto illegal; 2653 } 2654 break; 2655 2656 case SDEV_DEL: 2657 switch (oldstate) { 2658 case SDEV_CREATED: 2659 case SDEV_RUNNING: 2660 case SDEV_OFFLINE: 2661 case SDEV_TRANSPORT_OFFLINE: 2662 case SDEV_CANCEL: 2663 case SDEV_CREATED_BLOCK: 2664 break; 2665 default: 2666 goto illegal; 2667 } 2668 break; 2669 2670 } 2671 sdev->sdev_state = state; 2672 return 0; 2673 2674 illegal: 2675 SCSI_LOG_ERROR_RECOVERY(1, 2676 sdev_printk(KERN_ERR, sdev, 2677 "Illegal state transition %s->%s", 2678 scsi_device_state_name(oldstate), 2679 scsi_device_state_name(state)) 2680 ); 2681 return -EINVAL; 2682 } 2683 EXPORT_SYMBOL(scsi_device_set_state); 2684 2685 /** 2686 * sdev_evt_emit - emit a single SCSI device uevent 2687 * @sdev: associated SCSI device 2688 * @evt: event to emit 2689 * 2690 * Send a single uevent (scsi_event) to the associated scsi_device. 
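 * The event type is translated into a single environment string
 * (e.g. "SDEV_MEDIA_CHANGE=1" or an "SDEV_UA=..." value) and passed
 * to kobject_uevent_env() as a KOBJ_CHANGE event.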
2691 */ 2692 static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) 2693 { 2694 int idx = 0; 2695 char *envp[3]; 2696 2697 switch (evt->evt_type) { 2698 case SDEV_EVT_MEDIA_CHANGE: 2699 envp[idx++] = "SDEV_MEDIA_CHANGE=1"; 2700 break; 2701 case SDEV_EVT_INQUIRY_CHANGE_REPORTED: 2702 envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED"; 2703 break; 2704 case SDEV_EVT_CAPACITY_CHANGE_REPORTED: 2705 envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED"; 2706 break; 2707 case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: 2708 envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED"; 2709 break; 2710 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: 2711 envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED"; 2712 break; 2713 case SDEV_EVT_LUN_CHANGE_REPORTED: 2714 envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED"; 2715 break; 2716 case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED: 2717 envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED"; 2718 break; 2719 default: 2720 /* do nothing */ 2721 break; 2722 } 2723 2724 envp[idx++] = NULL; 2725 2726 kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp); 2727 } 2728 2729 /** 2730 * sdev_evt_thread - send a uevent for each scsi event 2731 * @work: work struct for scsi_device 2732 * 2733 * Dispatch queued events to their associated scsi_device kobjects 2734 * as uevents. 2735 */ 2736 void scsi_evt_thread(struct work_struct *work) 2737 { 2738 struct scsi_device *sdev; 2739 enum scsi_device_event evt_type; 2740 LIST_HEAD(event_list); 2741 2742 sdev = container_of(work, struct scsi_device, event_work); 2743 2744 for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++) 2745 if (test_and_clear_bit(evt_type, sdev->pending_events)) 2746 sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL); 2747 2748 while (1) { 2749 struct scsi_event *evt; 2750 struct list_head *this, *tmp; 2751 unsigned long flags; 2752 2753 spin_lock_irqsave(&sdev->list_lock, flags); 2754 list_splice_init(&sdev->event_list, &event_list); 2755 spin_unlock_irqrestore(&sdev->list_lock, flags); 2756 2757 if (list_empty(&event_list)) 2758 break; 2759 2760 list_for_each_safe(this, tmp, &event_list) { 2761 evt = list_entry(this, struct scsi_event, node); 2762 list_del(&evt->node); 2763 scsi_evt_emit(sdev, evt); 2764 kfree(evt); 2765 } 2766 } 2767 } 2768 2769 /** 2770 * sdev_evt_send - send asserted event to uevent thread 2771 * @sdev: scsi_device event occurred on 2772 * @evt: event to send 2773 * 2774 * Assert scsi device event asynchronously. 2775 */ 2776 void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt) 2777 { 2778 unsigned long flags; 2779 2780 #if 0 2781 /* FIXME: currently this check eliminates all media change events 2782 * for polled devices. Need to update to discriminate between AN 2783 * and polled events */ 2784 if (!test_bit(evt->evt_type, sdev->supported_events)) { 2785 kfree(evt); 2786 return; 2787 } 2788 #endif 2789 2790 spin_lock_irqsave(&sdev->list_lock, flags); 2791 list_add_tail(&evt->node, &sdev->event_list); 2792 schedule_work(&sdev->event_work); 2793 spin_unlock_irqrestore(&sdev->list_lock, flags); 2794 } 2795 EXPORT_SYMBOL_GPL(sdev_evt_send); 2796 2797 /** 2798 * sdev_evt_alloc - allocate a new scsi event 2799 * @evt_type: type of event to allocate 2800 * @gfpflags: GFP flags for allocation 2801 * 2802 * Allocates and returns a new scsi_event. 
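 *
 * Example (sketch; sdev_evt_send_simple() below wraps this pattern):
 *
 *	struct scsi_event *evt = sdev_evt_alloc(SDEV_EVT_MEDIA_CHANGE,
 *						GFP_ATOMIC);
 *	if (evt)
 *		sdev_evt_send(sdev, evt);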
2803 */ 2804 struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, 2805 gfp_t gfpflags) 2806 { 2807 struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags); 2808 if (!evt) 2809 return NULL; 2810 2811 evt->evt_type = evt_type; 2812 INIT_LIST_HEAD(&evt->node); 2813 2814 /* evt_type-specific initialization, if any */ 2815 switch (evt_type) { 2816 case SDEV_EVT_MEDIA_CHANGE: 2817 case SDEV_EVT_INQUIRY_CHANGE_REPORTED: 2818 case SDEV_EVT_CAPACITY_CHANGE_REPORTED: 2819 case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: 2820 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: 2821 case SDEV_EVT_LUN_CHANGE_REPORTED: 2822 case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED: 2823 default: 2824 /* do nothing */ 2825 break; 2826 } 2827 2828 return evt; 2829 } 2830 EXPORT_SYMBOL_GPL(sdev_evt_alloc); 2831 2832 /** 2833 * sdev_evt_send_simple - send asserted event to uevent thread 2834 * @sdev: scsi_device event occurred on 2835 * @evt_type: type of event to send 2836 * @gfpflags: GFP flags for allocation 2837 * 2838 * Assert scsi device event asynchronously, given an event type. 2839 */ 2840 void sdev_evt_send_simple(struct scsi_device *sdev, 2841 enum scsi_device_event evt_type, gfp_t gfpflags) 2842 { 2843 struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags); 2844 if (!evt) { 2845 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n", 2846 evt_type); 2847 return; 2848 } 2849 2850 sdev_evt_send(sdev, evt); 2851 } 2852 EXPORT_SYMBOL_GPL(sdev_evt_send_simple); 2853 2854 /** 2855 * scsi_device_quiesce - Block user issued commands. 2856 * @sdev: scsi device to quiesce. 2857 * 2858 * This works by trying to transition to the SDEV_QUIESCE state 2859 * (which must be a legal transition). When the device is in this 2860 * state, only special requests will be accepted, all others will 2861 * be deferred. Since special requests may also be requeued requests, 2862 * a successful return doesn't guarantee the device will be 2863 * totally quiescent. 2864 * 2865 * Must be called with user context, may sleep. 2866 * 2867 * Returns zero if unsuccessful or an error if not. 2868 */ 2869 int 2870 scsi_device_quiesce(struct scsi_device *sdev) 2871 { 2872 int err = scsi_device_set_state(sdev, SDEV_QUIESCE); 2873 if (err) 2874 return err; 2875 2876 scsi_run_queue(sdev->request_queue); 2877 while (atomic_read(&sdev->device_busy)) { 2878 msleep_interruptible(200); 2879 scsi_run_queue(sdev->request_queue); 2880 } 2881 return 0; 2882 } 2883 EXPORT_SYMBOL(scsi_device_quiesce); 2884 2885 /** 2886 * scsi_device_resume - Restart user issued commands to a quiesced device. 2887 * @sdev: scsi device to resume. 2888 * 2889 * Moves the device from quiesced back to running and restarts the 2890 * queues. 2891 * 2892 * Must be called with user context, may sleep. 
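 *
 * Usually paired with a preceding scsi_device_quiesce(), e.g. in a
 * suspend/resume path (sketch):
 *
 *	if (scsi_device_quiesce(sdev) == 0) {
 *		... issue internal requests ...
 *		scsi_device_resume(sdev);
 *	}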
2893 */ 2894 void scsi_device_resume(struct scsi_device *sdev) 2895 { 2896 /* check if the device state was mutated prior to resume, and if 2897 * so assume the state is being managed elsewhere (for example 2898 * device deleted during suspend) 2899 */ 2900 if (sdev->sdev_state != SDEV_QUIESCE || 2901 scsi_device_set_state(sdev, SDEV_RUNNING)) 2902 return; 2903 scsi_run_queue(sdev->request_queue); 2904 } 2905 EXPORT_SYMBOL(scsi_device_resume); 2906 2907 static void 2908 device_quiesce_fn(struct scsi_device *sdev, void *data) 2909 { 2910 scsi_device_quiesce(sdev); 2911 } 2912 2913 void 2914 scsi_target_quiesce(struct scsi_target *starget) 2915 { 2916 starget_for_each_device(starget, NULL, device_quiesce_fn); 2917 } 2918 EXPORT_SYMBOL(scsi_target_quiesce); 2919 2920 static void 2921 device_resume_fn(struct scsi_device *sdev, void *data) 2922 { 2923 scsi_device_resume(sdev); 2924 } 2925 2926 void 2927 scsi_target_resume(struct scsi_target *starget) 2928 { 2929 starget_for_each_device(starget, NULL, device_resume_fn); 2930 } 2931 EXPORT_SYMBOL(scsi_target_resume); 2932 2933 /** 2934 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state 2935 * @sdev: device to block 2936 * 2937 * Block request made by scsi lld's to temporarily stop all 2938 * scsi commands on the specified device. Called from interrupt 2939 * or normal process context. 2940 * 2941 * Returns zero if successful or error if not 2942 * 2943 * Notes: 2944 * This routine transitions the device to the SDEV_BLOCK state 2945 * (which must be a legal transition). When the device is in this 2946 * state, all commands are deferred until the scsi lld reenables 2947 * the device with scsi_device_unblock or device_block_tmo fires. 2948 */ 2949 int 2950 scsi_internal_device_block(struct scsi_device *sdev) 2951 { 2952 struct request_queue *q = sdev->request_queue; 2953 unsigned long flags; 2954 int err = 0; 2955 2956 err = scsi_device_set_state(sdev, SDEV_BLOCK); 2957 if (err) { 2958 err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK); 2959 2960 if (err) 2961 return err; 2962 } 2963 2964 /* 2965 * The device has transitioned to SDEV_BLOCK. Stop the 2966 * block layer from calling the midlayer with this device's 2967 * request queue. 2968 */ 2969 if (q->mq_ops) { 2970 blk_mq_stop_hw_queues(q); 2971 } else { 2972 spin_lock_irqsave(q->queue_lock, flags); 2973 blk_stop_queue(q); 2974 spin_unlock_irqrestore(q->queue_lock, flags); 2975 } 2976 2977 return 0; 2978 } 2979 EXPORT_SYMBOL_GPL(scsi_internal_device_block); 2980 2981 /** 2982 * scsi_internal_device_unblock - resume a device after a block request 2983 * @sdev: device to resume 2984 * @new_state: state to set devices to after unblocking 2985 * 2986 * Called by scsi lld's or the midlayer to restart the device queue 2987 * for the previously suspended scsi device. Called from interrupt or 2988 * normal process context. 2989 * 2990 * Returns zero if successful or error if not. 2991 * 2992 * Notes: 2993 * This routine transitions the device to the SDEV_RUNNING state 2994 * or to one of the offline states (which must be a legal transition) 2995 * allowing the midlayer to goose the queue for this device. 2996 */ 2997 int 2998 scsi_internal_device_unblock(struct scsi_device *sdev, 2999 enum scsi_device_state new_state) 3000 { 3001 struct request_queue *q = sdev->request_queue; 3002 unsigned long flags; 3003 3004 /* 3005 * Try to transition the scsi device to SDEV_RUNNING or one of the 3006 * offlined states and goose the device queue if successful. 
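 * SDEV_BLOCK and SDEV_TRANSPORT_OFFLINE take new_state directly,
 * SDEV_CREATED_BLOCK falls back to SDEV_CREATED unless an offline
 * state was requested, and any other current state except SDEV_CANCEL
 * or SDEV_OFFLINE is rejected with -EINVAL.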
3007 */ 3008 if ((sdev->sdev_state == SDEV_BLOCK) || 3009 (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE)) 3010 sdev->sdev_state = new_state; 3011 else if (sdev->sdev_state == SDEV_CREATED_BLOCK) { 3012 if (new_state == SDEV_TRANSPORT_OFFLINE || 3013 new_state == SDEV_OFFLINE) 3014 sdev->sdev_state = new_state; 3015 else 3016 sdev->sdev_state = SDEV_CREATED; 3017 } else if (sdev->sdev_state != SDEV_CANCEL && 3018 sdev->sdev_state != SDEV_OFFLINE) 3019 return -EINVAL; 3020 3021 if (q->mq_ops) { 3022 blk_mq_start_stopped_hw_queues(q, false); 3023 } else { 3024 spin_lock_irqsave(q->queue_lock, flags); 3025 blk_start_queue(q); 3026 spin_unlock_irqrestore(q->queue_lock, flags); 3027 } 3028 3029 return 0; 3030 } 3031 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock); 3032 3033 static void 3034 device_block(struct scsi_device *sdev, void *data) 3035 { 3036 scsi_internal_device_block(sdev); 3037 } 3038 3039 static int 3040 target_block(struct device *dev, void *data) 3041 { 3042 if (scsi_is_target_device(dev)) 3043 starget_for_each_device(to_scsi_target(dev), NULL, 3044 device_block); 3045 return 0; 3046 } 3047 3048 void 3049 scsi_target_block(struct device *dev) 3050 { 3051 if (scsi_is_target_device(dev)) 3052 starget_for_each_device(to_scsi_target(dev), NULL, 3053 device_block); 3054 else 3055 device_for_each_child(dev, NULL, target_block); 3056 } 3057 EXPORT_SYMBOL_GPL(scsi_target_block); 3058 3059 static void 3060 device_unblock(struct scsi_device *sdev, void *data) 3061 { 3062 scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data); 3063 } 3064 3065 static int 3066 target_unblock(struct device *dev, void *data) 3067 { 3068 if (scsi_is_target_device(dev)) 3069 starget_for_each_device(to_scsi_target(dev), data, 3070 device_unblock); 3071 return 0; 3072 } 3073 3074 void 3075 scsi_target_unblock(struct device *dev, enum scsi_device_state new_state) 3076 { 3077 if (scsi_is_target_device(dev)) 3078 starget_for_each_device(to_scsi_target(dev), &new_state, 3079 device_unblock); 3080 else 3081 device_for_each_child(dev, &new_state, target_unblock); 3082 } 3083 EXPORT_SYMBOL_GPL(scsi_target_unblock); 3084 3085 /** 3086 * scsi_kmap_atomic_sg - find and atomically map an sg-elemnt 3087 * @sgl: scatter-gather list 3088 * @sg_count: number of segments in sg 3089 * @offset: offset in bytes into sg, on return offset into the mapped area 3090 * @len: bytes to map, on return number of bytes mapped 3091 * 3092 * Returns virtual address of the start of the mapped page 3093 */ 3094 void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, 3095 size_t *offset, size_t *len) 3096 { 3097 int i; 3098 size_t sg_len = 0, len_complete = 0; 3099 struct scatterlist *sg; 3100 struct page *page; 3101 3102 WARN_ON(!irqs_disabled()); 3103 3104 for_each_sg(sgl, sg, sg_count, i) { 3105 len_complete = sg_len; /* Complete sg-entries */ 3106 sg_len += sg->length; 3107 if (sg_len > *offset) 3108 break; 3109 } 3110 3111 if (unlikely(i == sg_count)) { 3112 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, " 3113 "elements %d\n", 3114 __func__, sg_len, *offset, sg_count); 3115 WARN_ON(1); 3116 return NULL; 3117 } 3118 3119 /* Offset starting from the beginning of first page in this sg-entry */ 3120 *offset = *offset - len_complete + sg->offset; 3121 3122 /* Assumption: contiguous pages can be accessed as "page + i" */ 3123 page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT)); 3124 *offset &= ~PAGE_MASK; 3125 3126 /* Bytes in this sg-entry from *offset to the end of the page */ 3127 sg_len = PAGE_SIZE - 
*offset; 3128 if (*len > sg_len) 3129 *len = sg_len; 3130 3131 return kmap_atomic(page); 3132 } 3133 EXPORT_SYMBOL(scsi_kmap_atomic_sg); 3134 3135 /** 3136 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg 3137 * @virt: virtual address to be unmapped 3138 */ 3139 void scsi_kunmap_atomic_sg(void *virt) 3140 { 3141 kunmap_atomic(virt); 3142 } 3143 EXPORT_SYMBOL(scsi_kunmap_atomic_sg); 3144 3145 void sdev_disable_disk_events(struct scsi_device *sdev) 3146 { 3147 atomic_inc(&sdev->disk_events_disable_depth); 3148 } 3149 EXPORT_SYMBOL(sdev_disable_disk_events); 3150 3151 void sdev_enable_disk_events(struct scsi_device *sdev) 3152 { 3153 if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0)) 3154 return; 3155 atomic_dec(&sdev->disk_events_disable_depth); 3156 } 3157 EXPORT_SYMBOL(sdev_enable_disk_events); 3158 3159 /** 3160 * scsi_vpd_lun_id - return a unique device identification 3161 * @sdev: SCSI device 3162 * @id: buffer for the identification 3163 * @id_len: length of the buffer 3164 * 3165 * Copies a unique device identification into @id based 3166 * on the information in the VPD page 0x83 of the device. 3167 * The string will be formatted as a SCSI name string. 3168 * 3169 * Returns the length of the identification or error on failure. 3170 * If the identifier is longer than the supplied buffer the actual 3171 * identifier length is returned and the buffer is not zero-padded. 3172 */ 3173 int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) 3174 { 3175 u8 cur_id_type = 0xff; 3176 u8 cur_id_size = 0; 3177 unsigned char *d, *cur_id_str; 3178 unsigned char __rcu *vpd_pg83; 3179 int id_size = -EINVAL; 3180 3181 rcu_read_lock(); 3182 vpd_pg83 = rcu_dereference(sdev->vpd_pg83); 3183 if (!vpd_pg83) { 3184 rcu_read_unlock(); 3185 return -ENXIO; 3186 } 3187 3188 /* 3189 * Look for the correct descriptor. 3190 * Order of preference for lun descriptor: 3191 * - SCSI name string 3192 * - NAA IEEE Registered Extended 3193 * - EUI-64 based 16-byte 3194 * - EUI-64 based 12-byte 3195 * - NAA IEEE Registered 3196 * - NAA IEEE Extended 3197 * as longer descriptors reduce the likelihood 3198 * of identification clashes.
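 *
 * The resulting identification is emitted as "naa.<hex>" or
 * "eui.<hex>" for the binary designators, or copied verbatim for a
 * SCSI name string descriptor.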
3199 */ 3200 3201 /* The id string must be at least 20 bytes + terminating NULL byte */ 3202 if (id_len < 21) { 3203 rcu_read_unlock(); 3204 return -EINVAL; 3205 } 3206 3207 memset(id, 0, id_len); 3208 d = vpd_pg83 + 4; 3209 while (d < vpd_pg83 + sdev->vpd_pg83_len) { 3210 /* Skip designators not referring to the LUN */ 3211 if ((d[1] & 0x30) != 0x00) 3212 goto next_desig; 3213 3214 switch (d[1] & 0xf) { 3215 case 0x2: 3216 /* EUI-64 */ 3217 if (cur_id_size > d[3]) 3218 break; 3219 /* Prefer NAA IEEE Registered Extended */ 3220 if (cur_id_type == 0x3 && 3221 cur_id_size == d[3]) 3222 break; 3223 cur_id_size = d[3]; 3224 cur_id_str = d + 4; 3225 cur_id_type = d[1] & 0xf; 3226 switch (cur_id_size) { 3227 case 8: 3228 id_size = snprintf(id, id_len, 3229 "eui.%8phN", 3230 cur_id_str); 3231 break; 3232 case 12: 3233 id_size = snprintf(id, id_len, 3234 "eui.%12phN", 3235 cur_id_str); 3236 break; 3237 case 16: 3238 id_size = snprintf(id, id_len, 3239 "eui.%16phN", 3240 cur_id_str); 3241 break; 3242 default: 3243 cur_id_size = 0; 3244 break; 3245 } 3246 break; 3247 case 0x3: 3248 /* NAA */ 3249 if (cur_id_size > d[3]) 3250 break; 3251 cur_id_size = d[3]; 3252 cur_id_str = d + 4; 3253 cur_id_type = d[1] & 0xf; 3254 switch (cur_id_size) { 3255 case 8: 3256 id_size = snprintf(id, id_len, 3257 "naa.%8phN", 3258 cur_id_str); 3259 break; 3260 case 16: 3261 id_size = snprintf(id, id_len, 3262 "naa.%16phN", 3263 cur_id_str); 3264 break; 3265 default: 3266 cur_id_size = 0; 3267 break; 3268 } 3269 break; 3270 case 0x8: 3271 /* SCSI name string */ 3272 if (cur_id_size + 4 > d[3]) 3273 break; 3274 /* Prefer others for truncated descriptor */ 3275 if (cur_id_size && d[3] > id_len) 3276 break; 3277 cur_id_size = id_size = d[3]; 3278 cur_id_str = d + 4; 3279 cur_id_type = d[1] & 0xf; 3280 if (cur_id_size >= id_len) 3281 cur_id_size = id_len - 1; 3282 memcpy(id, cur_id_str, cur_id_size); 3283 /* Decrease priority for truncated descriptor */ 3284 if (cur_id_size != id_size) 3285 cur_id_size = 6; 3286 break; 3287 default: 3288 break; 3289 } 3290 next_desig: 3291 d += d[3] + 4; 3292 } 3293 rcu_read_unlock(); 3294 3295 return id_size; 3296 } 3297 EXPORT_SYMBOL(scsi_vpd_lun_id); 3298 3299 /* 3300 * scsi_vpd_tpg_id - return a target port group identifier 3301 * @sdev: SCSI device 3302 * 3303 * Returns the Target Port Group identifier from the information 3304 * froom VPD page 0x83 of the device. 3305 * 3306 * Returns the identifier or error on failure. 3307 */ 3308 int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id) 3309 { 3310 unsigned char *d; 3311 unsigned char __rcu *vpd_pg83; 3312 int group_id = -EAGAIN, rel_port = -1; 3313 3314 rcu_read_lock(); 3315 vpd_pg83 = rcu_dereference(sdev->vpd_pg83); 3316 if (!vpd_pg83) { 3317 rcu_read_unlock(); 3318 return -ENXIO; 3319 } 3320 3321 d = sdev->vpd_pg83 + 4; 3322 while (d < sdev->vpd_pg83 + sdev->vpd_pg83_len) { 3323 switch (d[1] & 0xf) { 3324 case 0x4: 3325 /* Relative target port */ 3326 rel_port = get_unaligned_be16(&d[6]); 3327 break; 3328 case 0x5: 3329 /* Target port group */ 3330 group_id = get_unaligned_be16(&d[6]); 3331 break; 3332 default: 3333 break; 3334 } 3335 d += d[3] + 4; 3336 } 3337 rcu_read_unlock(); 3338 3339 if (group_id >= 0 && rel_id && rel_port != -1) 3340 *rel_id = rel_port; 3341 3342 return group_id; 3343 } 3344 EXPORT_SYMBOL(scsi_vpd_tpg_id); 3345
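/*
 * Example use of the VPD helpers above (an illustrative sketch only,
 * not part of the mid-layer; the buffer size and the logging are the
 * caller's choice):
 */
#if 0
static void example_log_lun_id(struct scsi_device *sdev)
{
	char id[64];
	int group_id, rel_port = -1;

	if (scsi_vpd_lun_id(sdev, id, sizeof(id)) > 0)
		sdev_printk(KERN_INFO, sdev, "unit id %s\n", id);

	group_id = scsi_vpd_tpg_id(sdev, &rel_port);
	if (group_id >= 0)
		sdev_printk(KERN_INFO, sdev, "port group %d, port %d\n",
			    group_id, rel_port);
}
#endif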