/*
 * Copyright (C) 1999 Eric Youngdale
 * Copyright (C) 2014 Christoph Hellwig
 *
 * SCSI queueing library.
 *	Initial versions: Eric Youngdale (eric@andante.org).
 *	Based upon conversations with large numbers
 *	of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h> /* __scsi_init_queue() */
#include <scsi/scsi_dh.h>

#include <trace/events/scsi.h>

#include "scsi_debugfs.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

static struct kmem_cache *scsi_sdb_cache;
static struct kmem_cache *scsi_sense_cache;
static struct kmem_cache *scsi_sense_isadma_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);

static inline struct kmem_cache *
scsi_select_sense_cache(bool unchecked_isa_dma)
{
	return unchecked_isa_dma ? scsi_sense_isadma_cache : scsi_sense_cache;
}

static void scsi_free_sense_buffer(bool unchecked_isa_dma,
				   unsigned char *sense_buffer)
{
	kmem_cache_free(scsi_select_sense_cache(unchecked_isa_dma),
			sense_buffer);
}

static unsigned char *scsi_alloc_sense_buffer(bool unchecked_isa_dma,
					      gfp_t gfp_mask, int numa_node)
{
	return kmem_cache_alloc_node(scsi_select_sense_cache(unchecked_isa_dma),
				     gfp_mask, numa_node);
}

int scsi_init_sense_cache(struct Scsi_Host *shost)
{
	struct kmem_cache *cache;
	int ret = 0;

	cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
	if (cache)
		return 0;

	mutex_lock(&scsi_sense_cache_mutex);
	if (shost->unchecked_isa_dma) {
		scsi_sense_isadma_cache =
			kmem_cache_create("scsi_sense_cache(DMA)",
				SCSI_SENSE_BUFFERSIZE, 0,
				SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
		if (!scsi_sense_isadma_cache)
			ret = -ENOMEM;
	} else {
		scsi_sense_cache =
			kmem_cache_create_usercopy("scsi_sense_cache",
				SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
				0, SCSI_SENSE_BUFFERSIZE, NULL);
		if (!scsi_sense_cache)
			ret = -ENOMEM;
	}

	mutex_unlock(&scsi_sense_cache_mutex);
	return ret;
}

/*
 * When to reinvoke queueing after a resource shortage. It's 3 msecs to
 * not change behaviour from the previous unplug mechanism, experimentation
 * may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY	3

static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.
	 * The mid-layer will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		atomic_set(&host->host_blocked, host->max_host_blocked);
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		atomic_set(&device->device_blocked,
			   device->max_device_blocked);
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		atomic_set(&starget->target_blocked,
			   starget->max_target_blocked);
		break;
	}
}

static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;

	if (cmd->request->rq_flags & RQF_DONTPREP) {
		cmd->request->rq_flags &= ~RQF_DONTPREP;
		scsi_mq_uninit_cmd(cmd);
	} else {
		WARN_ON_ONCE(true);
	}
	blk_mq_requeue_request(cmd->request, true);
	put_device(&sdev->sdev_gendev);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion. The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion. This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
{
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
		"Inserting command %p into mlqueue\n", cmd));

	scsi_set_blocked(cmd, reason);

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.  Schedule requeue work under
	 * lock such that the kblockd_schedule_work() call happens
	 * before blk_cleanup_queue() finishes.
	 */
	cmd->result = 0;
	if (q->mq_ops) {
		/*
		 * Before a SCSI command is dispatched,
		 * get_device(&sdev->sdev_gendev) is called and the host,
		 * target and device busy counters are increased.  Since
		 * requeuing a request causes these actions to be repeated and
		 * since scsi_device_unbusy() has already been called,
		 * put_device(&device->sdev_gendev) must still be called.  Call
		 * put_device() after blk_mq_requeue_request() so that removal
		 * of the SCSI device cannot start before the requeueing has
		 * happened.
		 */
		blk_mq_requeue_request(cmd->request, true);
		put_device(&device->sdev_gendev);
		return;
	}
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	kblockd_schedule_work(&device->requeue_work);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd       - command that we are adding to queue.
 *              reason    - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.
 *              Either the host is busy and it cannot accept any more
 *              commands for the time being, or the device returned
 *              QUEUE_FULL and can accept no more commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, true);
}


/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @sshdr:	optional decoded sense header
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	flags for ->cmd_flags
 * @rq_flags:	flags for ->rq_flags
 * @resid:	optional residual length
 *
 * Returns the scsi_cmnd result field if a command was executed, or a negative
 * Linux error code if we didn't get that far.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, struct scsi_sense_hdr *sshdr,
		 int timeout, int retries, u64 flags, req_flags_t rq_flags,
		 int *resid)
{
	struct request *req;
	struct scsi_request *rq;
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request_flags(sdev->request_queue,
			data_direction == DMA_TO_DEVICE ?
			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
	if (IS_ERR(req))
		return ret;
	rq = scsi_req(req);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
					buffer, bufflen, __GFP_RECLAIM))
		goto out;

	rq->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(rq->cmd, cmd, rq->cmd_len);
	rq->retries = retries;
	req->timeout = timeout;
	req->cmd_flags |= flags;
	req->rq_flags |= rq_flags | RQF_QUIET;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
		memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);

	if (resid)
		*resid = rq->resid_len;
	if (sense && rq->sense_len)
		memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
	if (sshdr)
		scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
	ret = rq->result;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

/*
 * Decrement the host_busy counter and wake up the error handler if necessary.
 * To avoid a missed wakeup of the error handler when shost->host_busy ==
 * shost->host_failed, use call_rcu() in scsi_eh_scmd_add() in combination
 * with an RCU read lock in this function: this ensures that this function in
 * its entirety either finishes before scsi_eh_scmd_add() increases the
 * host_failed counter or that it notices the shost state change made by
 * scsi_eh_scmd_add().
 */
static void scsi_dec_host_busy(struct Scsi_Host *shost)
{
	unsigned long flags;

	rcu_read_lock();
	atomic_dec(&shost->host_busy);
	if (unlikely(scsi_host_in_recovery(shost))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (shost->host_failed || shost->host_eh_scheduled)
			scsi_eh_wakeup(shost);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}
	rcu_read_unlock();
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);

	scsi_dec_host_busy(shost);

	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);

	atomic_dec(&sdev->device_busy);
}

static void scsi_kick_queue(struct request_queue *q)
{
	if (q->mq_ops)
		blk_mq_start_hw_queues(q);
	else
		blk_run_queue(q);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	scsi_kick_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_kick_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
	if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
		return true;
	if (atomic_read(&sdev->device_blocked) > 0)
		return true;
	return false;
}

static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
	if (starget->can_queue > 0) {
		if (atomic_read(&starget->target_busy) >= starget->can_queue)
			return true;
		if (atomic_read(&starget->target_blocked) > 0)
			return true;
	}
	return false;
}

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
	if (shost->can_queue > 0 &&
	    atomic_read(&shost->host_busy) >= shost->can_queue)
		return true;
	if (atomic_read(&shost->host_blocked) > 0)
		return true;
	if (shost->host_self_blocked)
		return true;
	return false;
}

static void scsi_starved_list_run(struct Scsi_Host *shost)
{
	LIST_HEAD(starved_list);
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		struct request_queue *slq;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		/*
		 * Once we drop the host lock, a racing scsi_remove_device()
		 * call may remove the sdev from the starved list and destroy
		 * it and the queue.  Mitigate by taking a reference to the
		 * queue and never touching the sdev again after we drop the
		 * host lock.  Note: if __scsi_remove_device() invokes
		 * blk_cleanup_queue() before the queue is run from this
		 * function then blk_run_queue() will return immediately since
		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
		 */
		slq = sdev->request_queue;
		if (!blk_get_queue(slq))
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);

		scsi_kick_queue(slq);
		blk_put_queue(slq);

		spin_lock_irqsave(shost->host_lock, flags);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:   scsi_run_queue()
 *
 * Purpose:    Select a proper request queue to serve next
 *
 * Arguments:  q       - last request's queue
 *
 * Returns:    Nothing
 *
 * Notes:      The previous command was completely finished, start
 *             a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);
	if (!list_empty(&sdev->host->starved_list))
		scsi_starved_list_run(sdev->host);

	if (q->mq_ops)
		blk_mq_run_hw_queues(q, false);
	else
		blk_run_queue(q);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_unprep_request(req);
	req->special = NULL;
	scsi_put_command(cmd);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
	if (!blk_rq_is_passthrough(cmd->request)) {
		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);

		if (drv->uninit_command)
			drv->uninit_command(cmd);
	}
}

static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
	struct scsi_data_buffer *sdb;

	if (cmd->sdb.table.nents)
		sg_free_table_chained(&cmd->sdb.table, true);
	if (cmd->request->next_rq) {
		sdb = cmd->request->next_rq->special;
		if (sdb)
			sg_free_table_chained(&sdb->table, true);
	}
	if (scsi_prot_sg_count(cmd))
		sg_free_table_chained(&cmd->prot_sdb->table, true);
}

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
	scsi_mq_free_sgtables(cmd);
	scsi_uninit_cmd(cmd);
	scsi_del_cmd_from_list(cmd);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Free resources allocated for a scsi_cmnd.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		sg_free_table_chained(&cmd->sdb.table, false);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (scsi_prot_sg_count(cmd))
		sg_free_table_chained(&cmd->prot_sdb->table, false);
}

static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
{
	struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;

	sg_free_table_chained(&bidi_sdb->table, false);
	kmem_cache_free(scsi_sdb_cache, bidi_sdb);
	cmd->request->next_rq->special = NULL;
}

static bool scsi_end_request(struct request *req, blk_status_t error,
		unsigned int bytes, unsigned int bidi_bytes)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	if (blk_update_request(req, error, bytes))
		return true;

	/* Bidi request must be completed as a whole */
	if (unlikely(bidi_bytes) &&
	    blk_update_request(req->next_rq, error, bidi_bytes))
		return true;

	if (blk_queue_add_random(q))
		add_disk_randomness(req->rq_disk);

	if (!blk_rq_is_scsi(req)) {
		WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
		cmd->flags &= ~SCMD_INITIALIZED;
		destroy_rcu_head(&cmd->rcu);
	}

	if (req->mq_ctx) {
		/*
		 * In the MQ case the command gets freed by __blk_mq_end_request,
		 * so we have to do all cleanup that depends on it earlier.
		 *
		 * We also can't kick the queues from irq context, so we
		 * will have to defer it to a workqueue.
		 */
		scsi_mq_uninit_cmd(cmd);

		__blk_mq_end_request(req, error);

		if (scsi_target(sdev)->single_lun ||
		    !list_empty(&sdev->host->starved_list))
			kblockd_schedule_work(&sdev->requeue_work);
		else
			blk_mq_run_hw_queues(q, true);
	} else {
		unsigned long flags;

		if (bidi_bytes)
			scsi_release_bidi_buffers(cmd);
		scsi_release_buffers(cmd);
		scsi_put_command(cmd);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_finish_request(req, error);
		spin_unlock_irqrestore(q->queue_lock, flags);

		scsi_run_queue(q);
	}

	put_device(&sdev->sdev_gendev);
	return false;
}

/**
 * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
 * @cmd:	SCSI command
 * @result:	scsi error code
 *
 * Translate a SCSI result code into a blk_status_t value. May reset the host
 * byte of @cmd->result.
 */
static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
{
	switch (host_byte(result)) {
	case DID_OK:
		/*
		 * Also check bytes other than the status byte in result
		 * to handle the case when a SCSI LLD sets result to
		 * DRIVER_SENSE << 24 without setting SAM_STAT_CHECK_CONDITION.
		 */
		if (scsi_status_is_good(result) && (result & ~0xff) == 0)
			return BLK_STS_OK;
		return BLK_STS_IOERR;
	case DID_TRANSPORT_FAILFAST:
		return BLK_STS_TRANSPORT;
	case DID_TARGET_FAILURE:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_TARGET;
	case DID_NEXUS_FAILURE:
		return BLK_STS_NEXUS;
	case DID_ALLOC_FAILURE:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_NOSPC;
	case DID_MEDIUM_ERROR:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_MEDIUM;
	default:
		return BLK_STS_IOERR;
	}
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       We will finish off the specified number of sectors.  If we
 *		are done, the command block will be released and the queue
 *		function will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call __scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call scsi_end_request() with -EIO to fail
 *		   the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	blk_status_t error = BLK_STS_OK;
	struct scsi_sense_hdr sshdr;
	bool sense_valid = false;
	int sense_deferred = 0, level = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	unsigned long wait_for = (cmd->allowed + 1) * req->timeout;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (blk_rq_is_passthrough(req)) {
		if (result) {
			if (sense_valid) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				scsi_req(req)->sense_len =
					min(8 + cmd->sense_buffer[7],
					    SCSI_SENSE_BUFFERSIZE);
			}
			if (!sense_deferred)
				error = scsi_result_to_blk_status(cmd, result);
		}
		/*
		 * scsi_result_to_blk_status may have reset the host_byte
		 */
		scsi_req(req)->result = cmd->result;
		scsi_req(req)->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands must be completed as a whole,
			 * both sides at once.
			 */
			scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
			if (scsi_end_request(req, BLK_STS_OK, blk_rq_bytes(req),
					blk_rq_bytes(req->next_rq)))
				BUG();
			return;
		}
	} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
		/*
		 * Flush commands do not transfer any data, and thus cannot use
		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
		 * This sets the error explicitly for the problem case.
		 */
		error = scsi_result_to_blk_status(cmd, result);
	}

	/* no bidi support for !blk_rq_is_passthrough yet */
	BUG_ON(blk_bidi_rq(req));

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
		"%u sectors total, %d bytes done.\n",
		blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated as
	 * success, so fiddle the result code here.  For passthrough requests
	 * we already took a copy of the original into sreq->result which
	 * is what gets returned to the user.
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
		 * print since caller wants ATA registers. Only occurs on
		 * SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			;
		else if (!(req->rq_flags & RQF_QUIET))
			scsi_print_sense(cmd);
		result = 0;
		/* for passthrough, error may be set */
		error = BLK_STS_OK;
	}
	/*
	 * Another corner case: the SCSI status byte is non-zero but 'good'.
	 * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
	 * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
	 * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
	 * intermediate statuses (both obsolete in SAM-4) as good.
	 */
	if (status_byte(result) && scsi_status_is_good(result)) {
		result = 0;
		error = BLK_STS_OK;
	}

	/*
	 * special case: failed zero length commands always need to
	 * drop down into the retry code. Otherwise, if we finished
	 * all bytes in the request we are done now.
	 */
	if (!(blk_rq_bytes(req) == 0 && error) &&
	    !scsi_end_request(req, error, good_bytes, 0))
		return;

	/*
	 * Kill remainder if no retries.
	 */
	if (error && scsi_noretry_cmd(cmd)) {
		if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
			BUG();
		return;
	}

	/*
	 * If there had been no error, but we have leftover bytes in the
	 * request, just queue the command up again.
	 */
	if (result == 0)
		goto requeue;

	error = scsi_result_to_blk_status(cmd, result);

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			     sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				action = ACTION_FAIL;
				error = BLK_STS_PROTECTION;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
				action = ACTION_FAIL;
				error = BLK_STS_TARGET;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) /* DIF */
				error = BLK_STS_PROTECTION;
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					action = ACTION_FAIL;
					break;
				}
			} else
				action = ACTION_FAIL;
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			action = ACTION_FAIL;
			break;
		}
	} else
		action = ACTION_FAIL;

	if (action != ACTION_FAIL &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
		action = ACTION_FAIL;

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		if (!(req->rq_flags & RQF_QUIET)) {
			static DEFINE_RATELIMIT_STATE(_rs,
					DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

			if (unlikely(scsi_logging_level))
				level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
						       SCSI_LOG_MLCOMPLETE_BITS);

			/*
			 * if logging is enabled the failure will be printed
			 * in scsi_log_completion(), so avoid duplicate messages
			 */
			if (!level && __ratelimit(&_rs)) {
				scsi_print_result(cmd, NULL, FAILED);
				if (driver_byte(result) & DRIVER_SENSE)
					scsi_print_sense(cmd);
				scsi_print_command(cmd);
			}
		}
		if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
			return;
		/*FALLTHRU*/
	case ACTION_REPREP:
	requeue:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		if (q->mq_ops) {
			scsi_mq_requeue_cmd(cmd);
		} else {
			scsi_release_buffers(cmd);
			scsi_requeue_command(q, cmd);
		}
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
		break;
	}
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(sg_alloc_table_chained(&sdb->table,
			blk_rq_nr_phys_segments(req), sdb->table.sgl)))
		return BLKPREP_DEFER;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_payload_bytes(req);
	return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *rq = cmd->request;
	bool is_mq = (rq->mq_ctx != NULL);
	int error = BLKPREP_KILL;

	if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
		goto err_exit;

	error = scsi_init_sgtable(rq, &cmd->sdb);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(rq)) {
		if (!rq->q->mq_ops) {
			struct scsi_data_buffer *bidi_sdb =
				kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
			if (!bidi_sdb) {
				error = BLKPREP_DEFER;
				goto err_exit;
			}

			rq->next_rq->special = bidi_sdb;
		}

		error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		if (prot_sdb == NULL) {
			/*
			 * This can happen if someone (e.g. multipath)
			 * queues a command to a device on an adapter
			 * that does not support DIX.
			 */
			WARN_ON_ONCE(1);
			error = BLKPREP_KILL;
			goto err_exit;
		}

		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
				prot_sdb->table.sgl)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));
		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;
err_exit:
	if (is_mq) {
		scsi_mq_free_sgtables(cmd);
	} else {
		scsi_release_buffers(cmd);
		cmd->request->special = NULL;
		scsi_put_command(cmd);
		put_device(&sdev->sdev_gendev);
	}
	return error;
}
EXPORT_SYMBOL(scsi_init_io);

/**
 * scsi_initialize_rq - initialize struct scsi_cmnd partially
 * @rq: Request associated with the SCSI command to be initialized.
 *
 * This function initializes the members of struct scsi_cmnd that must be
 * initialized before request processing starts and that won't be
 * reinitialized if a SCSI command is requeued.
 *
 * Called from inside blk_get_request() for pass-through requests and from
 * inside scsi_init_command() for filesystem requests.
 */
static void scsi_initialize_rq(struct request *rq)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	scsi_req_init(&cmd->req);
	init_rcu_head(&cmd->rcu);
	cmd->jiffies_at_alloc = jiffies;
	cmd->retries = 0;
}

/* Add a command to the list used by the aacraid and dpt_i2o drivers */
void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (shost->use_cmd_list) {
		spin_lock_irqsave(&sdev->list_lock, flags);
		list_add_tail(&cmd->list, &sdev->cmd_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);
	}
}

/* Remove a command from the list used by the aacraid and dpt_i2o drivers */
void scsi_del_cmd_from_list(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (shost->use_cmd_list) {
		spin_lock_irqsave(&sdev->list_lock, flags);
		BUG_ON(list_empty(&cmd->list));
		list_del_init(&cmd->list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);
	}
}

/* Called after a request has been started. */
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
	void *buf = cmd->sense_buffer;
	void *prot = cmd->prot_sdb;
	struct request *rq = blk_mq_rq_from_pdu(cmd);
	unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
	unsigned long jiffies_at_alloc;
	int retries;

	if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
		flags |= SCMD_INITIALIZED;
		scsi_initialize_rq(rq);
	}

	jiffies_at_alloc = cmd->jiffies_at_alloc;
	retries = cmd->retries;
	/* zero out the cmd, except for the embedded scsi_request */
	memset((char *)cmd + sizeof(cmd->req), 0,
		sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);

	cmd->device = dev;
	cmd->sense_buffer = buf;
	cmd->prot_sdb = prot;
	cmd->flags = flags;
	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
	cmd->jiffies_at_alloc = jiffies_at_alloc;
	cmd->retries = retries;

	scsi_add_cmd_to_list(cmd);
}

static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	/*
	 * Passthrough requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret = scsi_init_io(cmd);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
	}

	cmd->cmd_len = scsi_req(req)->cmd_len;
	cmd->cmnd = scsi_req(req)->cmd;
	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = scsi_req(req)->retries;
	return BLKPREP_OK;
}

/*
 * Setup a normal block command.  These are simple requests from filesystems
 * that still need to be translated to SCSI CDBs from the ULD.
 */
static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
		int ret = sdev->handler->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_cmd_to_driver(cmd)->init_command(cmd);
}

static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	if (blk_rq_is_scsi(req))
		return scsi_setup_scsi_cmnd(sdev, req);
	else
		return scsi_setup_fs_cmnd(sdev, req);
}

static int
scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			ret = BLKPREP_DEFER;
			break;
		case SDEV_QUIESCE:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (req && !(req->rq_flags & RQF_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (req && !(req->rq_flags & RQF_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}

static int
scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
	case BLKPREP_INVALID:
		scsi_req(req)->result = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			put_device(&sdev->sdev_gendev);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we schedule a callback to happen
		 * shortly.
		 */
		if (atomic_read(&sdev->device_busy) == 0)
			blk_delay_queue(q, SCSI_QUEUE_DELAY);
		break;
	default:
		req->rq_flags |= RQF_DONTPREP;
	}

	return ret;
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	int ret;

	ret = scsi_prep_state_check(sdev, req);
	if (ret != BLKPREP_OK)
		goto out;

	if (!req->special) {
		/* Bail if we can't get a reference to the device */
		if (unlikely(!get_device(&sdev->sdev_gendev))) {
			ret = BLKPREP_DEFER;
			goto out;
		}

		scsi_init_command(sdev, cmd);
		req->special = cmd;
	}

	cmd->tag = req->tag;
	cmd->request = req;
	cmd->prot_op = SCSI_PROT_NORMAL;

	ret = scsi_setup_cmnd(sdev, req);
out:
	return scsi_prep_return(q, req, ret);
}

static void scsi_unprep_fn(struct request_queue *q, struct request *req)
{
	scsi_uninit_cmd(blk_mq_rq_to_pdu(req));
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	unsigned int busy;

	busy = atomic_inc_return(&sdev->device_busy) - 1;
	if (atomic_read(&sdev->device_blocked)) {
		if (busy)
			goto out_dec;

		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (atomic_dec_return(&sdev->device_blocked) > 0) {
			/*
			 * For the MQ case we take care of this in the caller.
			 */
			if (!q->mq_ops)
				blk_delay_queue(q, SCSI_QUEUE_DELAY);
			goto out_dec;
		}
		SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
	}

	if (busy >= sdev->queue_depth)
		goto out_dec;

	return 1;
out_dec:
	atomic_dec(&sdev->device_busy);
	return 0;
}

/*
 * scsi_target_queue_ready: checks if we can send commands to the target
 * @sdev: scsi device on starget to check.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);
	unsigned int busy;

	if (starget->single_lun) {
		spin_lock_irq(shost->host_lock);
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev) {
			spin_unlock_irq(shost->host_lock);
			return 0;
		}
		starget->starget_sdev_user = sdev;
		spin_unlock_irq(shost->host_lock);
	}

	if (starget->can_queue <= 0)
		return 1;

	busy = atomic_inc_return(&starget->target_busy) - 1;
	if (atomic_read(&starget->target_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (atomic_dec_return(&starget->target_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
				 "unblocking target at zero depth\n"));
	}

	if (busy >= starget->can_queue)
		goto starved;

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	list_move_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);
	return 0;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	unsigned int busy;

	if (scsi_host_in_recovery(shost))
		return 0;

	busy = atomic_inc_return(&shost->host_busy) - 1;
	if (atomic_read(&shost->host_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (atomic_dec_return(&shost->host_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3,
			shost_printk(KERN_INFO, shost,
				     "unblocking host at zero depth\n"));
	}

	if (shost->can_queue > 0 && busy >= shost->can_queue)
		goto starved;
	if (shost->host_self_blocked)
		goto starved;

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry)) {
		spin_lock_irq(shost->host_lock);
		if (!list_empty(&sdev->starved_entry))
			list_del_init(&sdev->starved_entry);
		spin_unlock_irq(shost->host_lock);
	}

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	if (list_empty(&sdev->starved_entry))
		list_add_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	scsi_dec_host_busy(shost);
	return 0;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os, scsi
 * needs to return 'not busy'. Otherwise, request stacking drivers
 * may hold requests forever.
 */
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;

	if (blk_queue_dying(q))
		return 0;

	shost = sdev->host;

	/*
	 * Ignore host/starget busy state.
	 * Since block layer does not have a concept of fairness across
	 * multiple queues, congestion of host/starget needs to be handled
	 * in SCSI layer.
	 */
	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev;
	struct scsi_target *starget;
	struct Scsi_Host *shost;

	blk_start_request(req);

	scmd_printk(KERN_INFO, cmd, "killing request\n");

	sdev = cmd->device;
	starget = scsi_target(sdev);
	shost = sdev->host;
	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	atomic_inc(&sdev->device_busy);
	atomic_inc(&shost->host_busy);
	if (starget->can_queue > 0)
		atomic_inc(&starget->target_busy);

	blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		scsi_eh_scmd_add(cmd);
		break;
	}
}

/**
 * scsi_dispatch_command - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: nonzero if the request was rejected and the device's queue needs
 * to be plugged.
 */
static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int rtn = 0;

	atomic_inc(&cmd->device->iorequest_cnt);

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		goto done;
	}

	/* Check to see if the scsi lld made this device blocked. */
	if (unlikely(scsi_device_blocked(cmd->device))) {
		/*
		 * in blocked state, the command is just put back on
		 * the device queue.  The suspend state has already
		 * blocked the queue so future requests should not
		 * occur until the device transitions out of the
		 * suspend state.
		 */
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : device blocked\n"));
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/* Store the LUN value in cmnd, if needed. */
	if (cmd->device->lun_in_cdb)
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);

	scsi_log_send(cmd);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			       "queuecommand : command too long. "
			       "cdb_size=%d host->max_cmd_len=%d\n",
			       cmd->cmd_len, cmd->device->host->max_cmd_len));
		cmd->result = (DID_ABORT << 16);
		goto done;
	}

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		goto done;

	}

	trace_scsi_dispatch_cmd_start(cmd);
	rtn = host->hostt->queuecommand(host, cmd);
	if (rtn) {
		trace_scsi_dispatch_cmd_error(cmd, rtn);
		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
			rtn = SCSI_MLQUEUE_HOST_BUSY;

		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : request rejected\n"));
	}

	return rtn;
 done:
	cmd->scsi_done(cmd);
	return 0;
}

/**
 * scsi_done - Invoke completion on finished SCSI command.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * Description: This function is the mid-level's (SCSI Core) interrupt routine,
 * which regains ownership of the SCSI command (de facto) from a LLDD, and
 * calls blk_complete_request() for further processing.
 *
 * This function is interrupt context safe.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
	trace_scsi_dispatch_cmd_done(cmd);
	blk_complete_request(cmd->request);
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: request queue lock assumed to be held when called.
 *
 * Note: See sd_zbc.c sd_zbc_write_lock_zone() for write order
 * protection for ZBC disks.
 */
static void scsi_request_fn(struct request_queue *q)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	for (;;) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = blk_peek_request(q);
		if (!req)
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}

		if (!scsi_dev_queue_ready(q, sdev))
			break;

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blk_start_request(req);

		spin_unlock_irq(q->queue_lock);
		cmd = blk_mq_rq_to_pdu(req);
		if (cmd != req->special) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}

		/*
		 * We hit this when the driver is using a host wide
		 * tag map.  For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag.  Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !(req->rq_flags & RQF_QUEUED)) {
			spin_lock_irq(shost->host_lock);
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			spin_unlock_irq(shost->host_lock);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto host_not_ready;

		if (sdev->simple_tags)
			cmd->flags |= SCMD_TAGGED;
		else
			cmd->flags &= ~SCMD_TAGGED;

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		cmd->scsi_done = scsi_done;
		rtn = scsi_dispatch_cmd(cmd);
		if (rtn) {
			scsi_queue_insert(cmd, rtn);
			spin_lock_irq(q->queue_lock);
			goto out_delay;
		}
		spin_lock_irq(q->queue_lock);
	}

	return;

 host_not_ready:
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
 not_ready:
	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	atomic_dec(&sdev->device_busy);
out_delay:
	if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
		blk_delay_queue(q, SCSI_QUEUE_DELAY);
}

static inline blk_status_t prep_to_mq(int ret)
{
	switch (ret) {
	case BLKPREP_OK:
		return BLK_STS_OK;
	case BLKPREP_DEFER:
		return BLK_STS_RESOURCE;
	default:
		return BLK_STS_IOERR;
	}
}

/* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
*/ 1943 static unsigned int scsi_mq_sgl_size(struct Scsi_Host *shost) 1944 { 1945 return min_t(unsigned int, shost->sg_tablesize, SG_CHUNK_SIZE) * 1946 sizeof(struct scatterlist); 1947 } 1948 1949 static int scsi_mq_prep_fn(struct request *req) 1950 { 1951 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); 1952 struct scsi_device *sdev = req->q->queuedata; 1953 struct Scsi_Host *shost = sdev->host; 1954 struct scatterlist *sg; 1955 1956 scsi_init_command(sdev, cmd); 1957 1958 req->special = cmd; 1959 1960 cmd->request = req; 1961 1962 cmd->tag = req->tag; 1963 cmd->prot_op = SCSI_PROT_NORMAL; 1964 1965 sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; 1966 cmd->sdb.table.sgl = sg; 1967 1968 if (scsi_host_get_prot(shost)) { 1969 memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer)); 1970 1971 cmd->prot_sdb->table.sgl = 1972 (struct scatterlist *)(cmd->prot_sdb + 1); 1973 } 1974 1975 if (blk_bidi_rq(req)) { 1976 struct request *next_rq = req->next_rq; 1977 struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq); 1978 1979 memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer)); 1980 bidi_sdb->table.sgl = 1981 (struct scatterlist *)(bidi_sdb + 1); 1982 1983 next_rq->special = bidi_sdb; 1984 } 1985 1986 blk_mq_start_request(req); 1987 1988 return scsi_setup_cmnd(sdev, req); 1989 } 1990 1991 static void scsi_mq_done(struct scsi_cmnd *cmd) 1992 { 1993 trace_scsi_dispatch_cmd_done(cmd); 1994 blk_mq_complete_request(cmd->request); 1995 } 1996 1997 static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx) 1998 { 1999 struct request_queue *q = hctx->queue; 2000 struct scsi_device *sdev = q->queuedata; 2001 2002 atomic_dec(&sdev->device_busy); 2003 put_device(&sdev->sdev_gendev); 2004 } 2005 2006 static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx) 2007 { 2008 struct request_queue *q = hctx->queue; 2009 struct scsi_device *sdev = q->queuedata; 2010 2011 if (!get_device(&sdev->sdev_gendev)) 2012 goto out; 2013 if (!scsi_dev_queue_ready(q, sdev)) 2014 goto out_put_device; 2015 2016 return true; 2017 2018 out_put_device: 2019 put_device(&sdev->sdev_gendev); 2020 out: 2021 if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev)) 2022 blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY); 2023 return false; 2024 } 2025 2026 static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, 2027 const struct blk_mq_queue_data *bd) 2028 { 2029 struct request *req = bd->rq; 2030 struct request_queue *q = req->q; 2031 struct scsi_device *sdev = q->queuedata; 2032 struct Scsi_Host *shost = sdev->host; 2033 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); 2034 blk_status_t ret; 2035 int reason; 2036 2037 ret = prep_to_mq(scsi_prep_state_check(sdev, req)); 2038 if (ret != BLK_STS_OK) 2039 goto out_put_budget; 2040 2041 ret = BLK_STS_RESOURCE; 2042 if (!scsi_target_queue_ready(shost, sdev)) 2043 goto out_put_budget; 2044 if (!scsi_host_queue_ready(q, shost, sdev)) 2045 goto out_dec_target_busy; 2046 2047 if (!(req->rq_flags & RQF_DONTPREP)) { 2048 ret = prep_to_mq(scsi_mq_prep_fn(req)); 2049 if (ret != BLK_STS_OK) 2050 goto out_dec_host_busy; 2051 req->rq_flags |= RQF_DONTPREP; 2052 } else { 2053 blk_mq_start_request(req); 2054 } 2055 2056 if (sdev->simple_tags) 2057 cmd->flags |= SCMD_TAGGED; 2058 else 2059 cmd->flags &= ~SCMD_TAGGED; 2060 2061 scsi_init_cmd_errh(cmd); 2062 cmd->scsi_done = scsi_mq_done; 2063 2064 reason = scsi_dispatch_cmd(cmd); 2065 if (reason) { 2066 scsi_set_blocked(cmd, reason); 2067 ret = BLK_STS_RESOURCE; 2068 goto out_dec_host_busy; 2069 } 2070 2071 
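/* The LLD accepted the command; completion will arrive via scsi_mq_done(). */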
return BLK_STS_OK; 2072 2073 out_dec_host_busy: 2074 scsi_dec_host_busy(shost); 2075 out_dec_target_busy: 2076 if (scsi_target(sdev)->can_queue > 0) 2077 atomic_dec(&scsi_target(sdev)->target_busy); 2078 out_put_budget: 2079 scsi_mq_put_budget(hctx); 2080 switch (ret) { 2081 case BLK_STS_OK: 2082 break; 2083 case BLK_STS_RESOURCE: 2084 if (atomic_read(&sdev->device_busy) || 2085 scsi_device_blocked(sdev)) 2086 ret = BLK_STS_DEV_RESOURCE; 2087 break; 2088 default: 2089 /* 2090 * Make sure to release all allocated resources when 2091 * we hit an error, as we will never see this command 2092 * again. 2093 */ 2094 if (req->rq_flags & RQF_DONTPREP) 2095 scsi_mq_uninit_cmd(cmd); 2096 break; 2097 } 2098 return ret; 2099 } 2100 2101 static enum blk_eh_timer_return scsi_timeout(struct request *req, 2102 bool reserved) 2103 { 2104 if (reserved) 2105 return BLK_EH_RESET_TIMER; 2106 return scsi_times_out(req); 2107 } 2108 2109 static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, 2110 unsigned int hctx_idx, unsigned int numa_node) 2111 { 2112 struct Scsi_Host *shost = set->driver_data; 2113 const bool unchecked_isa_dma = shost->unchecked_isa_dma; 2114 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); 2115 struct scatterlist *sg; 2116 2117 if (unchecked_isa_dma) 2118 cmd->flags |= SCMD_UNCHECKED_ISA_DMA; 2119 cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma, 2120 GFP_KERNEL, numa_node); 2121 if (!cmd->sense_buffer) 2122 return -ENOMEM; 2123 cmd->req.sense = cmd->sense_buffer; 2124 2125 if (scsi_host_get_prot(shost)) { 2126 sg = (void *)cmd + sizeof(struct scsi_cmnd) + 2127 shost->hostt->cmd_size; 2128 cmd->prot_sdb = (void *)sg + scsi_mq_sgl_size(shost); 2129 } 2130 2131 return 0; 2132 } 2133 2134 static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq, 2135 unsigned int hctx_idx) 2136 { 2137 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); 2138 2139 scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA, 2140 cmd->sense_buffer); 2141 } 2142 2143 static int scsi_map_queues(struct blk_mq_tag_set *set) 2144 { 2145 struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set); 2146 2147 if (shost->hostt->map_queues) 2148 return shost->hostt->map_queues(shost); 2149 return blk_mq_map_queues(set); 2150 } 2151 2152 static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) 2153 { 2154 struct device *host_dev; 2155 u64 bounce_limit = 0xffffffff; 2156 2157 if (shost->unchecked_isa_dma) 2158 return BLK_BOUNCE_ISA; 2159 /* 2160 * Platforms with virtual-DMA translation 2161 * hardware have no practical limit.
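 * (!PCI_DMA_BUS_IS_PHYS means DMA addresses are remapped, e.g. by an
 * IOMMU, so any page is reachable and no bounce buffering is required.)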
2162 */ 2163 if (!PCI_DMA_BUS_IS_PHYS) 2164 return BLK_BOUNCE_ANY; 2165 2166 host_dev = scsi_get_device(shost); 2167 if (host_dev && host_dev->dma_mask) 2168 bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT; 2169 2170 return bounce_limit; 2171 } 2172 2173 void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) 2174 { 2175 struct device *dev = shost->dma_dev; 2176 2177 /* 2178 * this limit is imposed by hardware restrictions 2179 */ 2180 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, 2181 SG_MAX_SEGMENTS)); 2182 2183 if (scsi_host_prot_dma(shost)) { 2184 shost->sg_prot_tablesize = 2185 min_not_zero(shost->sg_prot_tablesize, 2186 (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS); 2187 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize); 2188 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); 2189 } 2190 2191 blk_queue_max_hw_sectors(q, shost->max_sectors); 2192 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); 2193 blk_queue_segment_boundary(q, shost->dma_boundary); 2194 dma_set_seg_boundary(dev, shost->dma_boundary); 2195 2196 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); 2197 2198 if (!shost->use_clustering) 2199 q->limits.cluster = 0; 2200 2201 /* 2202 * Set a reasonable default alignment: The larger of 32-byte (dword), 2203 * which is a common minimum for HBAs, and the minimum DMA alignment, 2204 * which is set by the platform. 2205 * 2206 * Devices that require a bigger alignment can increase it later. 2207 */ 2208 blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1); 2209 } 2210 EXPORT_SYMBOL_GPL(__scsi_init_queue); 2211 2212 static int scsi_old_init_rq(struct request_queue *q, struct request *rq, 2213 gfp_t gfp) 2214 { 2215 struct Scsi_Host *shost = q->rq_alloc_data; 2216 const bool unchecked_isa_dma = shost->unchecked_isa_dma; 2217 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); 2218 2219 memset(cmd, 0, sizeof(*cmd)); 2220 2221 if (unchecked_isa_dma) 2222 cmd->flags |= SCMD_UNCHECKED_ISA_DMA; 2223 cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma, gfp, 2224 NUMA_NO_NODE); 2225 if (!cmd->sense_buffer) 2226 goto fail; 2227 cmd->req.sense = cmd->sense_buffer; 2228 2229 if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) { 2230 cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp); 2231 if (!cmd->prot_sdb) 2232 goto fail_free_sense; 2233 } 2234 2235 return 0; 2236 2237 fail_free_sense: 2238 scsi_free_sense_buffer(unchecked_isa_dma, cmd->sense_buffer); 2239 fail: 2240 return -ENOMEM; 2241 } 2242 2243 static void scsi_old_exit_rq(struct request_queue *q, struct request *rq) 2244 { 2245 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); 2246 2247 if (cmd->prot_sdb) 2248 kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb); 2249 scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA, 2250 cmd->sense_buffer); 2251 } 2252 2253 struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev) 2254 { 2255 struct Scsi_Host *shost = sdev->host; 2256 struct request_queue *q; 2257 2258 q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL); 2259 if (!q) 2260 return NULL; 2261 q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; 2262 q->rq_alloc_data = shost; 2263 q->request_fn = scsi_request_fn; 2264 q->init_rq_fn = scsi_old_init_rq; 2265 q->exit_rq_fn = scsi_old_exit_rq; 2266 q->initialize_rq_fn = scsi_initialize_rq; 2267 2268 if (blk_init_allocated_queue(q) < 0) { 2269 blk_cleanup_queue(q); 2270 return NULL; 2271 } 2272 2273 __scsi_init_queue(shost, q); 2274 
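/*
 * Mark the queue as accepting SCSI passthrough requests and install the
 * legacy (non-blk-mq) request handling callbacks.
 */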
blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q); 2275 blk_queue_prep_rq(q, scsi_prep_fn); 2276 blk_queue_unprep_rq(q, scsi_unprep_fn); 2277 blk_queue_softirq_done(q, scsi_softirq_done); 2278 blk_queue_rq_timed_out(q, scsi_times_out); 2279 blk_queue_lld_busy(q, scsi_lld_busy); 2280 return q; 2281 } 2282 2283 static const struct blk_mq_ops scsi_mq_ops = { 2284 .get_budget = scsi_mq_get_budget, 2285 .put_budget = scsi_mq_put_budget, 2286 .queue_rq = scsi_queue_rq, 2287 .complete = scsi_softirq_done, 2288 .timeout = scsi_timeout, 2289 #ifdef CONFIG_BLK_DEBUG_FS 2290 .show_rq = scsi_show_rq, 2291 #endif 2292 .init_request = scsi_mq_init_request, 2293 .exit_request = scsi_mq_exit_request, 2294 .initialize_rq_fn = scsi_initialize_rq, 2295 .map_queues = scsi_map_queues, 2296 }; 2297 2298 struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev) 2299 { 2300 sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set); 2301 if (IS_ERR(sdev->request_queue)) 2302 return NULL; 2303 2304 sdev->request_queue->queuedata = sdev; 2305 __scsi_init_queue(sdev->host, sdev->request_queue); 2306 blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, sdev->request_queue); 2307 return sdev->request_queue; 2308 } 2309 2310 int scsi_mq_setup_tags(struct Scsi_Host *shost) 2311 { 2312 unsigned int cmd_size, sgl_size; 2313 2314 sgl_size = scsi_mq_sgl_size(shost); 2315 cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; 2316 if (scsi_host_get_prot(shost)) 2317 cmd_size += sizeof(struct scsi_data_buffer) + sgl_size; 2318 2319 memset(&shost->tag_set, 0, sizeof(shost->tag_set)); 2320 shost->tag_set.ops = &scsi_mq_ops; 2321 shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1; 2322 shost->tag_set.queue_depth = shost->can_queue; 2323 shost->tag_set.cmd_size = cmd_size; 2324 shost->tag_set.numa_node = NUMA_NO_NODE; 2325 shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; 2326 shost->tag_set.flags |= 2327 BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy); 2328 shost->tag_set.driver_data = shost; 2329 2330 return blk_mq_alloc_tag_set(&shost->tag_set); 2331 } 2332 2333 void scsi_mq_destroy_tags(struct Scsi_Host *shost) 2334 { 2335 blk_mq_free_tag_set(&shost->tag_set); 2336 } 2337 2338 /** 2339 * scsi_device_from_queue - return sdev associated with a request_queue 2340 * @q: The request queue to return the sdev from 2341 * 2342 * Return the sdev associated with a request queue or NULL if the 2343 * request_queue does not reference a SCSI device. 2344 */ 2345 struct scsi_device *scsi_device_from_queue(struct request_queue *q) 2346 { 2347 struct scsi_device *sdev = NULL; 2348 2349 if (q->mq_ops) { 2350 if (q->mq_ops == &scsi_mq_ops) 2351 sdev = q->queuedata; 2352 } else if (q->request_fn == scsi_request_fn) 2353 sdev = q->queuedata; 2354 if (!sdev || !get_device(&sdev->sdev_gendev)) 2355 sdev = NULL; 2356 2357 return sdev; 2358 } 2359 EXPORT_SYMBOL_GPL(scsi_device_from_queue); 2360 2361 /* 2362 * Function: scsi_block_requests() 2363 * 2364 * Purpose: Utility function used by low-level drivers to prevent further 2365 * commands from being queued to the device. 2366 * 2367 * Arguments: shost - Host in question 2368 * 2369 * Returns: Nothing 2370 * 2371 * Lock status: No locks are assumed held. 2372 * 2373 * Notes: There is no timer nor any other means by which the requests 2374 * get unblocked other than the low-level driver calling 2375 * scsi_unblock_requests(). 
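 *
 *		Illustrative example (a sketch, not taken from any particular
 *		LLD): a driver's adapter reset path would typically bracket
 *		the reset with these calls:
 *
 *			scsi_block_requests(shost);
 *			... reset the adapter hardware ...
 *			scsi_unblock_requests(shost);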
2376 */ 2377 void scsi_block_requests(struct Scsi_Host *shost) 2378 { 2379 shost->host_self_blocked = 1; 2380 } 2381 EXPORT_SYMBOL(scsi_block_requests); 2382 2383 /* 2384 * Function: scsi_unblock_requests() 2385 * 2386 * Purpose: Utility function used by low-level drivers to allow further 2387 * commands to be queued to the device. 2388 * 2389 * Arguments: shost - Host in question 2390 * 2391 * Returns: Nothing 2392 * 2393 * Lock status: No locks are assumed held. 2394 * 2395 * Notes: There is no timer nor any other means by which the requests 2396 * get unblocked other than the low-level driver calling 2397 * scsi_unblock_requests(). 2398 * 2399 * This is done as an API function so that changes to the 2400 * internals of the scsi mid-layer won't require wholesale 2401 * changes to drivers that use this feature. 2402 */ 2403 void scsi_unblock_requests(struct Scsi_Host *shost) 2404 { 2405 shost->host_self_blocked = 0; 2406 scsi_run_host_queues(shost); 2407 } 2408 EXPORT_SYMBOL(scsi_unblock_requests); 2409 2410 int __init scsi_init_queue(void) 2411 { 2412 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer", 2413 sizeof(struct scsi_data_buffer), 2414 0, 0, NULL); 2415 if (!scsi_sdb_cache) { 2416 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n"); 2417 return -ENOMEM; 2418 } 2419 2420 return 0; 2421 } 2422 2423 void scsi_exit_queue(void) 2424 { 2425 kmem_cache_destroy(scsi_sense_cache); 2426 kmem_cache_destroy(scsi_sense_isadma_cache); 2427 kmem_cache_destroy(scsi_sdb_cache); 2428 } 2429 2430 /** 2431 * scsi_mode_select - issue a mode select 2432 * @sdev: SCSI device to be queried 2433 * @pf: Page format bit (1 == standard, 0 == vendor specific) 2434 * @sp: Save page bit (0 == don't save, 1 == save) 2435 * @modepage: mode page being requested 2436 * @buffer: request buffer (may not be smaller than eight bytes) 2437 * @len: length of request buffer. 2438 * @timeout: command timeout 2439 * @retries: number of retries before failing 2440 * @data: returns a structure abstracting the mode header data 2441 * @sshdr: place to put sense data (or NULL if no sense to be collected). 2442 * must be SCSI_SENSE_BUFFERSIZE big. 2443 * 2444 * Returns zero if successful; negative error number or scsi 2445 * status on error 2446 * 2447 */ 2448 int 2449 scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage, 2450 unsigned char *buffer, int len, int timeout, int retries, 2451 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 2452 { 2453 unsigned char cmd[10]; 2454 unsigned char *real_buffer; 2455 int ret; 2456 2457 memset(cmd, 0, sizeof(cmd)); 2458 cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0); 2459 2460 if (sdev->use_10_for_ms) { 2461 if (len > 65535) 2462 return -EINVAL; 2463 real_buffer = kmalloc(8 + len, GFP_KERNEL); 2464 if (!real_buffer) 2465 return -ENOMEM; 2466 memcpy(real_buffer + 8, buffer, len); 2467 len += 8; 2468 real_buffer[0] = 0; 2469 real_buffer[1] = 0; 2470 real_buffer[2] = data->medium_type; 2471 real_buffer[3] = data->device_specific; 2472 real_buffer[4] = data->longlba ?
0x01 : 0; 2473 real_buffer[5] = 0; 2474 real_buffer[6] = data->block_descriptor_length >> 8; 2475 real_buffer[7] = data->block_descriptor_length; 2476 2477 cmd[0] = MODE_SELECT_10; 2478 cmd[7] = len >> 8; 2479 cmd[8] = len; 2480 } else { 2481 if (len > 255 || data->block_descriptor_length > 255 || 2482 data->longlba) 2483 return -EINVAL; 2484 2485 real_buffer = kmalloc(4 + len, GFP_KERNEL); 2486 if (!real_buffer) 2487 return -ENOMEM; 2488 memcpy(real_buffer + 4, buffer, len); 2489 len += 4; 2490 real_buffer[0] = 0; 2491 real_buffer[1] = data->medium_type; 2492 real_buffer[2] = data->device_specific; 2493 real_buffer[3] = data->block_descriptor_length; 2494 2495 2496 cmd[0] = MODE_SELECT; 2497 cmd[4] = len; 2498 } 2499 2500 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len, 2501 sshdr, timeout, retries, NULL); 2502 kfree(real_buffer); 2503 return ret; 2504 } 2505 EXPORT_SYMBOL_GPL(scsi_mode_select); 2506 2507 /** 2508 * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary. 2509 * @sdev: SCSI device to be queried 2510 * @dbd: set if mode sense will allow block descriptors to be returned 2511 * @modepage: mode page being requested 2512 * @buffer: request buffer (may not be smaller than eight bytes) 2513 * @len: length of request buffer. 2514 * @timeout: command timeout 2515 * @retries: number of retries before failing 2516 * @data: returns a structure abstracting the mode header data 2517 * @sshdr: place to put sense data (or NULL if no sense to be collected). 2518 * must be SCSI_SENSE_BUFFERSIZE big. 2519 * 2520 * Returns zero if successful, or a non-zero result on failure. On 2521 * success, the header length (either 4 or 8 bytes, depending on whether 2522 * a six or ten byte command was issued) is stored in @data->header_length. 2523 */ 2524 int 2525 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, 2526 unsigned char *buffer, int len, int timeout, int retries, 2527 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 2528 { 2529 unsigned char cmd[12]; 2530 int use_10_for_ms; 2531 int header_length; 2532 int result, retry_count = retries; 2533 struct scsi_sense_hdr my_sshdr; 2534 2535 memset(data, 0, sizeof(*data)); 2536 memset(&cmd[0], 0, 12); 2537 cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ 2538 cmd[2] = modepage; 2539 2540 /* caller might not be interested in sense, but we need it */ 2541 if (!sshdr) 2542 sshdr = &my_sshdr; 2543 2544 retry: 2545 use_10_for_ms = sdev->use_10_for_ms; 2546 2547 if (use_10_for_ms) { 2548 if (len < 8) 2549 len = 8; 2550 2551 cmd[0] = MODE_SENSE_10; 2552 cmd[8] = len; 2553 header_length = 8; 2554 } else { 2555 if (len < 4) 2556 len = 4; 2557 2558 cmd[0] = MODE_SENSE; 2559 cmd[4] = len; 2560 header_length = 4; 2561 } 2562 2563 memset(buffer, 0, len); 2564 2565 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, 2566 sshdr, timeout, retries, NULL); 2567 2568 /* This code looks awful: what it's doing is making sure an 2569 * ILLEGAL REQUEST sense return identifies the actual command 2570 * byte as the problem.
MODE_SENSE commands can return 2571 * ILLEGAL REQUEST if the code page isn't supported */ 2572 2573 if (use_10_for_ms && !scsi_status_is_good(result) && 2574 (driver_byte(result) & DRIVER_SENSE)) { 2575 if (scsi_sense_valid(sshdr)) { 2576 if ((sshdr->sense_key == ILLEGAL_REQUEST) && 2577 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { 2578 /* 2579 * Invalid command operation code 2580 */ 2581 sdev->use_10_for_ms = 0; 2582 goto retry; 2583 } 2584 } 2585 } 2586 2587 if(scsi_status_is_good(result)) { 2588 if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b && 2589 (modepage == 6 || modepage == 8))) { 2590 /* Initio breakage? */ 2591 header_length = 0; 2592 data->length = 13; 2593 data->medium_type = 0; 2594 data->device_specific = 0; 2595 data->longlba = 0; 2596 data->block_descriptor_length = 0; 2597 } else if(use_10_for_ms) { 2598 data->length = buffer[0]*256 + buffer[1] + 2; 2599 data->medium_type = buffer[2]; 2600 data->device_specific = buffer[3]; 2601 data->longlba = buffer[4] & 0x01; 2602 data->block_descriptor_length = buffer[6]*256 2603 + buffer[7]; 2604 } else { 2605 data->length = buffer[0] + 1; 2606 data->medium_type = buffer[1]; 2607 data->device_specific = buffer[2]; 2608 data->block_descriptor_length = buffer[3]; 2609 } 2610 data->header_length = header_length; 2611 } else if ((status_byte(result) == CHECK_CONDITION) && 2612 scsi_sense_valid(sshdr) && 2613 sshdr->sense_key == UNIT_ATTENTION && retry_count) { 2614 retry_count--; 2615 goto retry; 2616 } 2617 2618 return result; 2619 } 2620 EXPORT_SYMBOL(scsi_mode_sense); 2621 2622 /** 2623 * scsi_test_unit_ready - test if unit is ready 2624 * @sdev: scsi device to test. 2625 * @timeout: command timeout 2626 * @retries: number of retries before failing 2627 * @sshdr: output pointer for decoded sense information. 2628 * 2629 * Returns zero if successful, or an error if TUR failed. For 2630 * removable media, UNIT_ATTENTION sets ->changed flag. 2631 **/ 2632 int 2633 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, 2634 struct scsi_sense_hdr *sshdr) 2635 { 2636 char cmd[] = { 2637 TEST_UNIT_READY, 0, 0, 0, 0, 0, 2638 }; 2639 int result; 2640 2641 /* try to eat the UNIT_ATTENTION if there are enough retries */ 2642 do { 2643 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr, 2644 timeout, 1, NULL); 2645 if (sdev->removable && scsi_sense_valid(sshdr) && 2646 sshdr->sense_key == UNIT_ATTENTION) 2647 sdev->changed = 1; 2648 } while (scsi_sense_valid(sshdr) && 2649 sshdr->sense_key == UNIT_ATTENTION && --retries); 2650 2651 return result; 2652 } 2653 EXPORT_SYMBOL(scsi_test_unit_ready); 2654 2655 /** 2656 * scsi_device_set_state - Take the given device through the device state model. 2657 * @sdev: scsi device to change the state of. 2658 * @state: state to change to. 2659 * 2660 * Returns zero if successful or an error if the requested 2661 * transition is illegal.
2662 */ 2663 int 2664 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) 2665 { 2666 enum scsi_device_state oldstate = sdev->sdev_state; 2667 2668 if (state == oldstate) 2669 return 0; 2670 2671 switch (state) { 2672 case SDEV_CREATED: 2673 switch (oldstate) { 2674 case SDEV_CREATED_BLOCK: 2675 break; 2676 default: 2677 goto illegal; 2678 } 2679 break; 2680 2681 case SDEV_RUNNING: 2682 switch (oldstate) { 2683 case SDEV_CREATED: 2684 case SDEV_OFFLINE: 2685 case SDEV_TRANSPORT_OFFLINE: 2686 case SDEV_QUIESCE: 2687 case SDEV_BLOCK: 2688 break; 2689 default: 2690 goto illegal; 2691 } 2692 break; 2693 2694 case SDEV_QUIESCE: 2695 switch (oldstate) { 2696 case SDEV_RUNNING: 2697 case SDEV_OFFLINE: 2698 case SDEV_TRANSPORT_OFFLINE: 2699 break; 2700 default: 2701 goto illegal; 2702 } 2703 break; 2704 2705 case SDEV_OFFLINE: 2706 case SDEV_TRANSPORT_OFFLINE: 2707 switch (oldstate) { 2708 case SDEV_CREATED: 2709 case SDEV_RUNNING: 2710 case SDEV_QUIESCE: 2711 case SDEV_BLOCK: 2712 break; 2713 default: 2714 goto illegal; 2715 } 2716 break; 2717 2718 case SDEV_BLOCK: 2719 switch (oldstate) { 2720 case SDEV_RUNNING: 2721 case SDEV_CREATED_BLOCK: 2722 break; 2723 default: 2724 goto illegal; 2725 } 2726 break; 2727 2728 case SDEV_CREATED_BLOCK: 2729 switch (oldstate) { 2730 case SDEV_CREATED: 2731 break; 2732 default: 2733 goto illegal; 2734 } 2735 break; 2736 2737 case SDEV_CANCEL: 2738 switch (oldstate) { 2739 case SDEV_CREATED: 2740 case SDEV_RUNNING: 2741 case SDEV_QUIESCE: 2742 case SDEV_OFFLINE: 2743 case SDEV_TRANSPORT_OFFLINE: 2744 break; 2745 default: 2746 goto illegal; 2747 } 2748 break; 2749 2750 case SDEV_DEL: 2751 switch (oldstate) { 2752 case SDEV_CREATED: 2753 case SDEV_RUNNING: 2754 case SDEV_OFFLINE: 2755 case SDEV_TRANSPORT_OFFLINE: 2756 case SDEV_CANCEL: 2757 case SDEV_BLOCK: 2758 case SDEV_CREATED_BLOCK: 2759 break; 2760 default: 2761 goto illegal; 2762 } 2763 break; 2764 2765 } 2766 sdev->sdev_state = state; 2767 return 0; 2768 2769 illegal: 2770 SCSI_LOG_ERROR_RECOVERY(1, 2771 sdev_printk(KERN_ERR, sdev, 2772 "Illegal state transition %s->%s", 2773 scsi_device_state_name(oldstate), 2774 scsi_device_state_name(state)) 2775 ); 2776 return -EINVAL; 2777 } 2778 EXPORT_SYMBOL(scsi_device_set_state); 2779 2780 /** 2781 * sdev_evt_emit - emit a single SCSI device uevent 2782 * @sdev: associated SCSI device 2783 * @evt: event to emit 2784 * 2785 * Send a single uevent (scsi_event) to the associated scsi_device. 
2786 */ 2787 static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) 2788 { 2789 int idx = 0; 2790 char *envp[3]; 2791 2792 switch (evt->evt_type) { 2793 case SDEV_EVT_MEDIA_CHANGE: 2794 envp[idx++] = "SDEV_MEDIA_CHANGE=1"; 2795 break; 2796 case SDEV_EVT_INQUIRY_CHANGE_REPORTED: 2797 scsi_rescan_device(&sdev->sdev_gendev); 2798 envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED"; 2799 break; 2800 case SDEV_EVT_CAPACITY_CHANGE_REPORTED: 2801 envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED"; 2802 break; 2803 case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: 2804 envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED"; 2805 break; 2806 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: 2807 envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED"; 2808 break; 2809 case SDEV_EVT_LUN_CHANGE_REPORTED: 2810 envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED"; 2811 break; 2812 case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED: 2813 envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED"; 2814 break; 2815 case SDEV_EVT_POWER_ON_RESET_OCCURRED: 2816 envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED"; 2817 break; 2818 default: 2819 /* do nothing */ 2820 break; 2821 } 2822 2823 envp[idx++] = NULL; 2824 2825 kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp); 2826 } 2827 2828 /** 2829 * sdev_evt_thread - send a uevent for each scsi event 2830 * @work: work struct for scsi_device 2831 * 2832 * Dispatch queued events to their associated scsi_device kobjects 2833 * as uevents. 2834 */ 2835 void scsi_evt_thread(struct work_struct *work) 2836 { 2837 struct scsi_device *sdev; 2838 enum scsi_device_event evt_type; 2839 LIST_HEAD(event_list); 2840 2841 sdev = container_of(work, struct scsi_device, event_work); 2842 2843 for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++) 2844 if (test_and_clear_bit(evt_type, sdev->pending_events)) 2845 sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL); 2846 2847 while (1) { 2848 struct scsi_event *evt; 2849 struct list_head *this, *tmp; 2850 unsigned long flags; 2851 2852 spin_lock_irqsave(&sdev->list_lock, flags); 2853 list_splice_init(&sdev->event_list, &event_list); 2854 spin_unlock_irqrestore(&sdev->list_lock, flags); 2855 2856 if (list_empty(&event_list)) 2857 break; 2858 2859 list_for_each_safe(this, tmp, &event_list) { 2860 evt = list_entry(this, struct scsi_event, node); 2861 list_del(&evt->node); 2862 scsi_evt_emit(sdev, evt); 2863 kfree(evt); 2864 } 2865 } 2866 } 2867 2868 /** 2869 * sdev_evt_send - send asserted event to uevent thread 2870 * @sdev: scsi_device event occurred on 2871 * @evt: event to send 2872 * 2873 * Assert scsi device event asynchronously. 2874 */ 2875 void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt) 2876 { 2877 unsigned long flags; 2878 2879 #if 0 2880 /* FIXME: currently this check eliminates all media change events 2881 * for polled devices. Need to update to discriminate between AN 2882 * and polled events */ 2883 if (!test_bit(evt->evt_type, sdev->supported_events)) { 2884 kfree(evt); 2885 return; 2886 } 2887 #endif 2888 2889 spin_lock_irqsave(&sdev->list_lock, flags); 2890 list_add_tail(&evt->node, &sdev->event_list); 2891 schedule_work(&sdev->event_work); 2892 spin_unlock_irqrestore(&sdev->list_lock, flags); 2893 } 2894 EXPORT_SYMBOL_GPL(sdev_evt_send); 2895 2896 /** 2897 * sdev_evt_alloc - allocate a new scsi event 2898 * @evt_type: type of event to allocate 2899 * @gfpflags: GFP flags for allocation 2900 * 2901 * Allocates and returns a new scsi_event. 
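 *
 * The event is normally handed to sdev_evt_send(), after which the event
 * worker frees it once it has been emitted; a caller that does not queue
 * the event must kfree() it itself.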
2902 */ 2903 struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, 2904 gfp_t gfpflags) 2905 { 2906 struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags); 2907 if (!evt) 2908 return NULL; 2909 2910 evt->evt_type = evt_type; 2911 INIT_LIST_HEAD(&evt->node); 2912 2913 /* evt_type-specific initialization, if any */ 2914 switch (evt_type) { 2915 case SDEV_EVT_MEDIA_CHANGE: 2916 case SDEV_EVT_INQUIRY_CHANGE_REPORTED: 2917 case SDEV_EVT_CAPACITY_CHANGE_REPORTED: 2918 case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: 2919 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: 2920 case SDEV_EVT_LUN_CHANGE_REPORTED: 2921 case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED: 2922 case SDEV_EVT_POWER_ON_RESET_OCCURRED: 2923 default: 2924 /* do nothing */ 2925 break; 2926 } 2927 2928 return evt; 2929 } 2930 EXPORT_SYMBOL_GPL(sdev_evt_alloc); 2931 2932 /** 2933 * sdev_evt_send_simple - send asserted event to uevent thread 2934 * @sdev: scsi_device event occurred on 2935 * @evt_type: type of event to send 2936 * @gfpflags: GFP flags for allocation 2937 * 2938 * Assert scsi device event asynchronously, given an event type. 2939 */ 2940 void sdev_evt_send_simple(struct scsi_device *sdev, 2941 enum scsi_device_event evt_type, gfp_t gfpflags) 2942 { 2943 struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags); 2944 if (!evt) { 2945 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n", 2946 evt_type); 2947 return; 2948 } 2949 2950 sdev_evt_send(sdev, evt); 2951 } 2952 EXPORT_SYMBOL_GPL(sdev_evt_send_simple); 2953 2954 /** 2955 * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn() 2956 * @sdev: SCSI device to count the number of scsi_request_fn() callers for. 2957 */ 2958 static int scsi_request_fn_active(struct scsi_device *sdev) 2959 { 2960 struct request_queue *q = sdev->request_queue; 2961 int request_fn_active; 2962 2963 WARN_ON_ONCE(sdev->host->use_blk_mq); 2964 2965 spin_lock_irq(q->queue_lock); 2966 request_fn_active = q->request_fn_active; 2967 spin_unlock_irq(q->queue_lock); 2968 2969 return request_fn_active; 2970 } 2971 2972 /** 2973 * scsi_wait_for_queuecommand() - wait for ongoing queuecommand() calls 2974 * @sdev: SCSI device pointer. 2975 * 2976 * Wait until the ongoing shost->hostt->queuecommand() calls that are 2977 * invoked from scsi_request_fn() have finished. 2978 */ 2979 static void scsi_wait_for_queuecommand(struct scsi_device *sdev) 2980 { 2981 WARN_ON_ONCE(sdev->host->use_blk_mq); 2982 2983 while (scsi_request_fn_active(sdev)) 2984 msleep(20); 2985 } 2986 2987 /** 2988 * scsi_device_quiesce - Block user issued commands. 2989 * @sdev: scsi device to quiesce. 2990 * 2991 * This works by trying to transition to the SDEV_QUIESCE state 2992 * (which must be a legal transition). When the device is in this 2993 * state, only special requests will be accepted, all others will 2994 * be deferred. Since special requests may also be requeued requests, 2995 * a successful return doesn't guarantee the device will be 2996 * totally quiescent. 2997 * 2998 * Must be called with user context, may sleep. 2999 * 3000 * Returns zero if successful or an error if not. 3001 */ 3002 int 3003 scsi_device_quiesce(struct scsi_device *sdev) 3004 { 3005 struct request_queue *q = sdev->request_queue; 3006 int err; 3007 3008 /* 3009 * It is allowed to call scsi_device_quiesce() multiple times from 3010 * the same context but concurrent scsi_device_quiesce() calls are 3011 * not allowed.
3012 */ 3013 WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current); 3014 3015 blk_set_preempt_only(q); 3016 3017 blk_mq_freeze_queue(q); 3018 /* 3019 * Ensure that the effect of blk_set_preempt_only() will be visible 3020 * for percpu_ref_tryget() callers that occur after the queue 3021 * unfreeze even if the queue was already frozen before this function 3022 * was called. See also https://lwn.net/Articles/573497/. 3023 */ 3024 synchronize_rcu(); 3025 blk_mq_unfreeze_queue(q); 3026 3027 mutex_lock(&sdev->state_mutex); 3028 err = scsi_device_set_state(sdev, SDEV_QUIESCE); 3029 if (err == 0) 3030 sdev->quiesced_by = current; 3031 else 3032 blk_clear_preempt_only(q); 3033 mutex_unlock(&sdev->state_mutex); 3034 3035 return err; 3036 } 3037 EXPORT_SYMBOL(scsi_device_quiesce); 3038 3039 /** 3040 * scsi_device_resume - Restart user issued commands to a quiesced device. 3041 * @sdev: scsi device to resume. 3042 * 3043 * Moves the device from quiesced back to running and restarts the 3044 * queues. 3045 * 3046 * Must be called with user context, may sleep. 3047 */ 3048 void scsi_device_resume(struct scsi_device *sdev) 3049 { 3050 /* check if the device state was mutated prior to resume, and if 3051 * so assume the state is being managed elsewhere (for example 3052 * device deleted during suspend) 3053 */ 3054 mutex_lock(&sdev->state_mutex); 3055 WARN_ON_ONCE(!sdev->quiesced_by); 3056 sdev->quiesced_by = NULL; 3057 blk_clear_preempt_only(sdev->request_queue); 3058 if (sdev->sdev_state == SDEV_QUIESCE) 3059 scsi_device_set_state(sdev, SDEV_RUNNING); 3060 mutex_unlock(&sdev->state_mutex); 3061 } 3062 EXPORT_SYMBOL(scsi_device_resume); 3063 3064 static void 3065 device_quiesce_fn(struct scsi_device *sdev, void *data) 3066 { 3067 scsi_device_quiesce(sdev); 3068 } 3069 3070 void 3071 scsi_target_quiesce(struct scsi_target *starget) 3072 { 3073 starget_for_each_device(starget, NULL, device_quiesce_fn); 3074 } 3075 EXPORT_SYMBOL(scsi_target_quiesce); 3076 3077 static void 3078 device_resume_fn(struct scsi_device *sdev, void *data) 3079 { 3080 scsi_device_resume(sdev); 3081 } 3082 3083 void 3084 scsi_target_resume(struct scsi_target *starget) 3085 { 3086 starget_for_each_device(starget, NULL, device_resume_fn); 3087 } 3088 EXPORT_SYMBOL(scsi_target_resume); 3089 3090 /** 3091 * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state 3092 * @sdev: device to block 3093 * 3094 * Pause SCSI command processing on the specified device. Does not sleep. 3095 * 3096 * Returns zero if successful or a negative error code upon failure. 3097 * 3098 * Notes: 3099 * This routine transitions the device to the SDEV_BLOCK state (which must be 3100 * a legal transition). When the device is in this state, command processing 3101 * is paused until the device leaves the SDEV_BLOCK state. See also 3102 * scsi_internal_device_unblock_nowait(). 3103 */ 3104 int scsi_internal_device_block_nowait(struct scsi_device *sdev) 3105 { 3106 struct request_queue *q = sdev->request_queue; 3107 unsigned long flags; 3108 int err = 0; 3109 3110 err = scsi_device_set_state(sdev, SDEV_BLOCK); 3111 if (err) { 3112 err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK); 3113 3114 if (err) 3115 return err; 3116 } 3117 3118 /* 3119 * The device has transitioned to SDEV_BLOCK. Stop the 3120 * block layer from calling the midlayer with this device's 3121 * request queue. 
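 * For blk-mq this is the non-blocking variant: in-flight scsi_queue_rq()
 * calls may still be running when this function returns; use
 * scsi_internal_device_block() when the caller needs to wait for them.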
3122 */ 3123 if (q->mq_ops) { 3124 blk_mq_quiesce_queue_nowait(q); 3125 } else { 3126 spin_lock_irqsave(q->queue_lock, flags); 3127 blk_stop_queue(q); 3128 spin_unlock_irqrestore(q->queue_lock, flags); 3129 } 3130 3131 return 0; 3132 } 3133 EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait); 3134 3135 /** 3136 * scsi_internal_device_block - try to transition to the SDEV_BLOCK state 3137 * @sdev: device to block 3138 * 3139 * Pause SCSI command processing on the specified device and wait until all 3140 * ongoing scsi_request_fn() / scsi_queue_rq() calls have finished. May sleep. 3141 * 3142 * Returns zero if successful or a negative error code upon failure. 3143 * 3144 * Note: 3145 * This routine transitions the device to the SDEV_BLOCK state (which must be 3146 * a legal transition). When the device is in this state, command processing 3147 * is paused until the device leaves the SDEV_BLOCK state. See also 3148 * scsi_internal_device_unblock(). 3149 * 3150 * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after 3151 * scsi_internal_device_block() has blocked a SCSI device and also 3152 * remove the rport mutex lock and unlock calls from srp_queuecommand(). 3153 */ 3154 static int scsi_internal_device_block(struct scsi_device *sdev) 3155 { 3156 struct request_queue *q = sdev->request_queue; 3157 int err; 3158 3159 mutex_lock(&sdev->state_mutex); 3160 err = scsi_internal_device_block_nowait(sdev); 3161 if (err == 0) { 3162 if (q->mq_ops) 3163 blk_mq_quiesce_queue(q); 3164 else 3165 scsi_wait_for_queuecommand(sdev); 3166 } 3167 mutex_unlock(&sdev->state_mutex); 3168 3169 return err; 3170 } 3171 3172 void scsi_start_queue(struct scsi_device *sdev) 3173 { 3174 struct request_queue *q = sdev->request_queue; 3175 unsigned long flags; 3176 3177 if (q->mq_ops) { 3178 blk_mq_unquiesce_queue(q); 3179 } else { 3180 spin_lock_irqsave(q->queue_lock, flags); 3181 blk_start_queue(q); 3182 spin_unlock_irqrestore(q->queue_lock, flags); 3183 } 3184 } 3185 3186 /** 3187 * scsi_internal_device_unblock_nowait - resume a device after a block request 3188 * @sdev: device to resume 3189 * @new_state: state to set the device to after unblocking 3190 * 3191 * Restart the device queue for a previously suspended SCSI device. Does not 3192 * sleep. 3193 * 3194 * Returns zero if successful or a negative error code upon failure. 3195 * 3196 * Notes: 3197 * This routine transitions the device to the SDEV_RUNNING state or to one of 3198 * the offline states (which must be a legal transition) allowing the midlayer 3199 * to goose the queue for this device. 3200 */ 3201 int scsi_internal_device_unblock_nowait(struct scsi_device *sdev, 3202 enum scsi_device_state new_state) 3203 { 3204 /* 3205 * Try to transition the scsi device to SDEV_RUNNING or one of the 3206 * offlined states and goose the device queue if successful. 
3207 */ 3208 switch (sdev->sdev_state) { 3209 case SDEV_BLOCK: 3210 case SDEV_TRANSPORT_OFFLINE: 3211 sdev->sdev_state = new_state; 3212 break; 3213 case SDEV_CREATED_BLOCK: 3214 if (new_state == SDEV_TRANSPORT_OFFLINE || 3215 new_state == SDEV_OFFLINE) 3216 sdev->sdev_state = new_state; 3217 else 3218 sdev->sdev_state = SDEV_CREATED; 3219 break; 3220 case SDEV_CANCEL: 3221 case SDEV_OFFLINE: 3222 break; 3223 default: 3224 return -EINVAL; 3225 } 3226 scsi_start_queue(sdev); 3227 3228 return 0; 3229 } 3230 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait); 3231 3232 /** 3233 * scsi_internal_device_unblock - resume a device after a block request 3234 * @sdev: device to resume 3235 * @new_state: state to set the device to after unblocking 3236 * 3237 * Restart the device queue for a previously suspended SCSI device. May sleep. 3238 * 3239 * Returns zero if successful or a negative error code upon failure. 3240 * 3241 * Notes: 3242 * This routine transitions the device to the SDEV_RUNNING state or to one of 3243 * the offline states (which must be a legal transition) allowing the midlayer 3244 * to goose the queue for this device. 3245 */ 3246 static int scsi_internal_device_unblock(struct scsi_device *sdev, 3247 enum scsi_device_state new_state) 3248 { 3249 int ret; 3250 3251 mutex_lock(&sdev->state_mutex); 3252 ret = scsi_internal_device_unblock_nowait(sdev, new_state); 3253 mutex_unlock(&sdev->state_mutex); 3254 3255 return ret; 3256 } 3257 3258 static void 3259 device_block(struct scsi_device *sdev, void *data) 3260 { 3261 scsi_internal_device_block(sdev); 3262 } 3263 3264 static int 3265 target_block(struct device *dev, void *data) 3266 { 3267 if (scsi_is_target_device(dev)) 3268 starget_for_each_device(to_scsi_target(dev), NULL, 3269 device_block); 3270 return 0; 3271 } 3272 3273 void 3274 scsi_target_block(struct device *dev) 3275 { 3276 if (scsi_is_target_device(dev)) 3277 starget_for_each_device(to_scsi_target(dev), NULL, 3278 device_block); 3279 else 3280 device_for_each_child(dev, NULL, target_block); 3281 } 3282 EXPORT_SYMBOL_GPL(scsi_target_block); 3283 3284 static void 3285 device_unblock(struct scsi_device *sdev, void *data) 3286 { 3287 scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data); 3288 } 3289 3290 static int 3291 target_unblock(struct device *dev, void *data) 3292 { 3293 if (scsi_is_target_device(dev)) 3294 starget_for_each_device(to_scsi_target(dev), data, 3295 device_unblock); 3296 return 0; 3297 } 3298 3299 void 3300 scsi_target_unblock(struct device *dev, enum scsi_device_state new_state) 3301 { 3302 if (scsi_is_target_device(dev)) 3303 starget_for_each_device(to_scsi_target(dev), &new_state, 3304 device_unblock); 3305 else 3306 device_for_each_child(dev, &new_state, target_unblock); 3307 } 3308 EXPORT_SYMBOL_GPL(scsi_target_unblock); 3309 3310 /** 3311 * scsi_kmap_atomic_sg - find and atomically map an sg-elemnt 3312 * @sgl: scatter-gather list 3313 * @sg_count: number of segments in sg 3314 * @offset: offset in bytes into sg, on return offset into the mapped area 3315 * @len: bytes to map, on return number of bytes mapped 3316 * 3317 * Returns virtual address of the start of the mapped page 3318 */ 3319 void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, 3320 size_t *offset, size_t *len) 3321 { 3322 int i; 3323 size_t sg_len = 0, len_complete = 0; 3324 struct scatterlist *sg; 3325 struct page *page; 3326 3327 WARN_ON(!irqs_disabled()); 3328 3329 for_each_sg(sgl, sg, sg_count, i) { 3330 len_complete = sg_len; /* Complete sg-entries 
*/ 3331 sg_len += sg->length; 3332 if (sg_len > *offset) 3333 break; 3334 } 3335 3336 if (unlikely(i == sg_count)) { 3337 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, " 3338 "elements %d\n", 3339 __func__, sg_len, *offset, sg_count); 3340 WARN_ON(1); 3341 return NULL; 3342 } 3343 3344 /* Offset starting from the beginning of first page in this sg-entry */ 3345 *offset = *offset - len_complete + sg->offset; 3346 3347 /* Assumption: contiguous pages can be accessed as "page + i" */ 3348 page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT)); 3349 *offset &= ~PAGE_MASK; 3350 3351 /* Bytes in this sg-entry from *offset to the end of the page */ 3352 sg_len = PAGE_SIZE - *offset; 3353 if (*len > sg_len) 3354 *len = sg_len; 3355 3356 return kmap_atomic(page); 3357 } 3358 EXPORT_SYMBOL(scsi_kmap_atomic_sg); 3359 3360 /** 3361 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg 3362 * @virt: virtual address to be unmapped 3363 */ 3364 void scsi_kunmap_atomic_sg(void *virt) 3365 { 3366 kunmap_atomic(virt); 3367 } 3368 EXPORT_SYMBOL(scsi_kunmap_atomic_sg); 3369 3370 void sdev_disable_disk_events(struct scsi_device *sdev) 3371 { 3372 atomic_inc(&sdev->disk_events_disable_depth); 3373 } 3374 EXPORT_SYMBOL(sdev_disable_disk_events); 3375 3376 void sdev_enable_disk_events(struct scsi_device *sdev) 3377 { 3378 if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0)) 3379 return; 3380 atomic_dec(&sdev->disk_events_disable_depth); 3381 } 3382 EXPORT_SYMBOL(sdev_enable_disk_events); 3383 3384 /** 3385 * scsi_vpd_lun_id - return a unique device identification 3386 * @sdev: SCSI device 3387 * @id: buffer for the identification 3388 * @id_len: length of the buffer 3389 * 3390 * Copies a unique device identification into @id based 3391 * on the information in the VPD page 0x83 of the device. 3392 * The string will be formatted as a SCSI name string. 3393 * 3394 * Returns the length of the identification or error on failure. 3395 * If the identifier is longer than the supplied buffer the actual 3396 * identifier length is returned and the buffer is not zero-padded. 3397 */ 3398 int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) 3399 { 3400 u8 cur_id_type = 0xff; 3401 u8 cur_id_size = 0; 3402 const unsigned char *d, *cur_id_str; 3403 const struct scsi_vpd *vpd_pg83; 3404 int id_size = -EINVAL; 3405 3406 rcu_read_lock(); 3407 vpd_pg83 = rcu_dereference(sdev->vpd_pg83); 3408 if (!vpd_pg83) { 3409 rcu_read_unlock(); 3410 return -ENXIO; 3411 } 3412 3413 /* 3414 * Look for the correct descriptor. 3415 * Order of preference for lun descriptor: 3416 * - SCSI name string 3417 * - NAA IEEE Registered Extended 3418 * - EUI-64 based 16-byte 3419 * - EUI-64 based 12-byte 3420 * - NAA IEEE Registered 3421 * - NAA IEEE Extended 3422 * - T10 Vendor ID 3423 * as longer descriptors reduce the likelihood 3424 * of identification clashes.
3425 */ 3426 3427 /* The id string must be at least 20 bytes + terminating NULL byte */ 3428 if (id_len < 21) { 3429 rcu_read_unlock(); 3430 return -EINVAL; 3431 } 3432 3433 memset(id, 0, id_len); 3434 d = vpd_pg83->data + 4; 3435 while (d < vpd_pg83->data + vpd_pg83->len) { 3436 /* Skip designators not referring to the LUN */ 3437 if ((d[1] & 0x30) != 0x00) 3438 goto next_desig; 3439 3440 switch (d[1] & 0xf) { 3441 case 0x1: 3442 /* T10 Vendor ID */ 3443 if (cur_id_size > d[3]) 3444 break; 3445 /* Prefer anything */ 3446 if (cur_id_type > 0x01 && cur_id_type != 0xff) 3447 break; 3448 cur_id_size = d[3]; 3449 if (cur_id_size + 4 > id_len) 3450 cur_id_size = id_len - 4; 3451 cur_id_str = d + 4; 3452 cur_id_type = d[1] & 0xf; 3453 id_size = snprintf(id, id_len, "t10.%*pE", 3454 cur_id_size, cur_id_str); 3455 break; 3456 case 0x2: 3457 /* EUI-64 */ 3458 if (cur_id_size > d[3]) 3459 break; 3460 /* Prefer NAA IEEE Registered Extended */ 3461 if (cur_id_type == 0x3 && 3462 cur_id_size == d[3]) 3463 break; 3464 cur_id_size = d[3]; 3465 cur_id_str = d + 4; 3466 cur_id_type = d[1] & 0xf; 3467 switch (cur_id_size) { 3468 case 8: 3469 id_size = snprintf(id, id_len, 3470 "eui.%8phN", 3471 cur_id_str); 3472 break; 3473 case 12: 3474 id_size = snprintf(id, id_len, 3475 "eui.%12phN", 3476 cur_id_str); 3477 break; 3478 case 16: 3479 id_size = snprintf(id, id_len, 3480 "eui.%16phN", 3481 cur_id_str); 3482 break; 3483 default: 3484 cur_id_size = 0; 3485 break; 3486 } 3487 break; 3488 case 0x3: 3489 /* NAA */ 3490 if (cur_id_size > d[3]) 3491 break; 3492 cur_id_size = d[3]; 3493 cur_id_str = d + 4; 3494 cur_id_type = d[1] & 0xf; 3495 switch (cur_id_size) { 3496 case 8: 3497 id_size = snprintf(id, id_len, 3498 "naa.%8phN", 3499 cur_id_str); 3500 break; 3501 case 16: 3502 id_size = snprintf(id, id_len, 3503 "naa.%16phN", 3504 cur_id_str); 3505 break; 3506 default: 3507 cur_id_size = 0; 3508 break; 3509 } 3510 break; 3511 case 0x8: 3512 /* SCSI name string */ 3513 if (cur_id_size + 4 > d[3]) 3514 break; 3515 /* Prefer others for truncated descriptor */ 3516 if (cur_id_size && d[3] > id_len) 3517 break; 3518 cur_id_size = id_size = d[3]; 3519 cur_id_str = d + 4; 3520 cur_id_type = d[1] & 0xf; 3521 if (cur_id_size >= id_len) 3522 cur_id_size = id_len - 1; 3523 memcpy(id, cur_id_str, cur_id_size); 3524 /* Decrease priority for truncated descriptor */ 3525 if (cur_id_size != id_size) 3526 cur_id_size = 6; 3527 break; 3528 default: 3529 break; 3530 } 3531 next_desig: 3532 d += d[3] + 4; 3533 } 3534 rcu_read_unlock(); 3535 3536 return id_size; 3537 } 3538 EXPORT_SYMBOL(scsi_vpd_lun_id); 3539 3540 /* 3541 * scsi_vpd_tpg_id - return a target port group identifier 3542 * @sdev: SCSI device 3543 * 3544 * Returns the Target Port Group identifier from the information 3545 * in VPD page 0x83 of the device. 3546 * 3547 * Returns the identifier or error on failure.
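 *
 * @rel_id, when non-NULL, additionally receives the relative target port
 * identifier if one is reported alongside the port group descriptor.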
3548 */ 3549 int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id) 3550 { 3551 const unsigned char *d; 3552 const struct scsi_vpd *vpd_pg83; 3553 int group_id = -EAGAIN, rel_port = -1; 3554 3555 rcu_read_lock(); 3556 vpd_pg83 = rcu_dereference(sdev->vpd_pg83); 3557 if (!vpd_pg83) { 3558 rcu_read_unlock(); 3559 return -ENXIO; 3560 } 3561 3562 d = vpd_pg83->data + 4; 3563 while (d < vpd_pg83->data + vpd_pg83->len) { 3564 switch (d[1] & 0xf) { 3565 case 0x4: 3566 /* Relative target port */ 3567 rel_port = get_unaligned_be16(&d[6]); 3568 break; 3569 case 0x5: 3570 /* Target port group */ 3571 group_id = get_unaligned_be16(&d[6]); 3572 break; 3573 default: 3574 break; 3575 } 3576 d += d[3] + 4; 3577 } 3578 rcu_read_unlock(); 3579 3580 if (group_id >= 0 && rel_id && rel_port != -1) 3581 *rel_id = rel_port; 3582 3583 return group_id; 3584 } 3585 EXPORT_SYMBOL(scsi_vpd_tpg_id); 3586
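
#if 0
/*
 * Illustrative sketch only (not part of this file): how a caller such as a
 * device handler might combine scsi_vpd_lun_id() and scsi_vpd_tpg_id() to
 * log a device's designator and ALUA target port group. The function name
 * and the printed format are made up for the example.
 */
static void scsi_example_log_vpd_ids(struct scsi_device *sdev)
{
	char id[64];
	int rel_id = 0, group_id;

	/* Designator string, e.g. "naa.5000c500a1b2c3d4" */
	if (scsi_vpd_lun_id(sdev, id, sizeof(id)) > 0)
		sdev_printk(KERN_INFO, sdev, "designator: %s\n", id);

	/* ALUA target port group and relative target port, if reported */
	group_id = scsi_vpd_tpg_id(sdev, &rel_id);
	if (group_id >= 0)
		sdev_printk(KERN_INFO, sdev,
			    "target port group %d (relative port %d)\n",
			    group_id, rel_id);
}
#endif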