1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * scsi_scan.c 4 * 5 * Copyright (C) 2000 Eric Youngdale, 6 * Copyright (C) 2002 Patrick Mansfield 7 * 8 * The general scanning/probing algorithm is as follows, exceptions are 9 * made to it depending on device specific flags, compilation options, and 10 * global variable (boot or module load time) settings. 11 * 12 * A specific LUN is scanned via an INQUIRY command; if the LUN has a 13 * device attached, a scsi_device is allocated and setup for it. 14 * 15 * For every id of every channel on the given host: 16 * 17 * Scan LUN 0; if the target responds to LUN 0 (even if there is no 18 * device or storage attached to LUN 0): 19 * 20 * If LUN 0 has a device attached, allocate and setup a 21 * scsi_device for it. 22 * 23 * If target is SCSI-3 or up, issue a REPORT LUN, and scan 24 * all of the LUNs returned by the REPORT LUN; else, 25 * sequentially scan LUNs up until some maximum is reached, 26 * or a LUN is seen that cannot have a device attached to it. 27 */ 28 29 #include <linux/module.h> 30 #include <linux/moduleparam.h> 31 #include <linux/init.h> 32 #include <linux/blkdev.h> 33 #include <linux/delay.h> 34 #include <linux/kthread.h> 35 #include <linux/spinlock.h> 36 #include <linux/async.h> 37 #include <linux/slab.h> 38 #include <asm/unaligned.h> 39 40 #include <scsi/scsi.h> 41 #include <scsi/scsi_cmnd.h> 42 #include <scsi/scsi_device.h> 43 #include <scsi/scsi_driver.h> 44 #include <scsi/scsi_devinfo.h> 45 #include <scsi/scsi_host.h> 46 #include <scsi/scsi_transport.h> 47 #include <scsi/scsi_dh.h> 48 #include <scsi/scsi_eh.h> 49 50 #include "scsi_priv.h" 51 #include "scsi_logging.h" 52 53 #define ALLOC_FAILURE_MSG KERN_ERR "%s: Allocation failure during" \ 54 " SCSI scanning, some SCSI devices might not be configured\n" 55 56 /* 57 * Default timeout 58 */ 59 #define SCSI_TIMEOUT (2*HZ) 60 #define SCSI_REPORT_LUNS_TIMEOUT (30*HZ) 61 62 /* 63 * Prefix values for the SCSI id's (stored in sysfs name field) 64 */ 65 #define SCSI_UID_SER_NUM 'S' 66 #define SCSI_UID_UNKNOWN 'Z' 67 68 /* 69 * Return values of some of the scanning functions. 70 * 71 * SCSI_SCAN_NO_RESPONSE: no valid response received from the target, this 72 * includes allocation or general failures preventing IO from being sent. 73 * 74 * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is available 75 * on the given LUN. 76 * 77 * SCSI_SCAN_LUN_PRESENT: target responded, and a device is available on a 78 * given LUN. 79 */ 80 #define SCSI_SCAN_NO_RESPONSE 0 81 #define SCSI_SCAN_TARGET_PRESENT 1 82 #define SCSI_SCAN_LUN_PRESENT 2 83 84 static const char *scsi_null_device_strs = "nullnullnullnull"; 85 86 #define MAX_SCSI_LUNS 512 87 88 static u64 max_scsi_luns = MAX_SCSI_LUNS; 89 90 module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR); 91 MODULE_PARM_DESC(max_luns, 92 "last scsi LUN (should be between 1 and 2^64-1)"); 93 94 #ifdef CONFIG_SCSI_SCAN_ASYNC 95 #define SCSI_SCAN_TYPE_DEFAULT "async" 96 #else 97 #define SCSI_SCAN_TYPE_DEFAULT "sync" 98 #endif 99 100 static char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT; 101 102 module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), 103 S_IRUGO|S_IWUSR); 104 MODULE_PARM_DESC(scan, "sync, async, manual, or none. 
" 105 "Setting to 'manual' disables automatic scanning, but allows " 106 "for manual device scan via the 'scan' sysfs attribute."); 107 108 static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18; 109 110 module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR); 111 MODULE_PARM_DESC(inq_timeout, 112 "Timeout (in seconds) waiting for devices to answer INQUIRY." 113 " Default is 20. Some devices may need more; most need less."); 114 115 /* This lock protects only this list */ 116 static DEFINE_SPINLOCK(async_scan_lock); 117 static LIST_HEAD(scanning_hosts); 118 119 struct async_scan_data { 120 struct list_head list; 121 struct Scsi_Host *shost; 122 struct completion prev_finished; 123 }; 124 125 /* 126 * scsi_enable_async_suspend - Enable async suspend and resume 127 */ 128 void scsi_enable_async_suspend(struct device *dev) 129 { 130 /* 131 * If a user has disabled async probing a likely reason is due to a 132 * storage enclosure that does not inject staggered spin-ups. For 133 * safety, make resume synchronous as well in that case. 134 */ 135 if (strncmp(scsi_scan_type, "async", 5) != 0) 136 return; 137 /* Enable asynchronous suspend and resume. */ 138 device_enable_async_suspend(dev); 139 } 140 141 /** 142 * scsi_complete_async_scans - Wait for asynchronous scans to complete 143 * 144 * When this function returns, any host which started scanning before 145 * this function was called will have finished its scan. Hosts which 146 * started scanning after this function was called may or may not have 147 * finished. 148 */ 149 int scsi_complete_async_scans(void) 150 { 151 struct async_scan_data *data; 152 153 do { 154 if (list_empty(&scanning_hosts)) 155 return 0; 156 /* If we can't get memory immediately, that's OK. Just 157 * sleep a little. Even if we never get memory, the async 158 * scans will finish eventually. 159 */ 160 data = kmalloc(sizeof(*data), GFP_KERNEL); 161 if (!data) 162 msleep(1); 163 } while (!data); 164 165 data->shost = NULL; 166 init_completion(&data->prev_finished); 167 168 spin_lock(&async_scan_lock); 169 /* Check that there's still somebody else on the list */ 170 if (list_empty(&scanning_hosts)) 171 goto done; 172 list_add_tail(&data->list, &scanning_hosts); 173 spin_unlock(&async_scan_lock); 174 175 printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n"); 176 wait_for_completion(&data->prev_finished); 177 178 spin_lock(&async_scan_lock); 179 list_del(&data->list); 180 if (!list_empty(&scanning_hosts)) { 181 struct async_scan_data *next = list_entry(scanning_hosts.next, 182 struct async_scan_data, list); 183 complete(&next->prev_finished); 184 } 185 done: 186 spin_unlock(&async_scan_lock); 187 188 kfree(data); 189 return 0; 190 } 191 192 /** 193 * scsi_unlock_floptical - unlock device via a special MODE SENSE command 194 * @sdev: scsi device to send command to 195 * @result: area to store the result of the MODE SENSE 196 * 197 * Description: 198 * Send a vendor specific MODE SENSE (not a MODE SELECT) command. 199 * Called for BLIST_KEY devices. 
200 **/ 201 static void scsi_unlock_floptical(struct scsi_device *sdev, 202 unsigned char *result) 203 { 204 unsigned char scsi_cmd[MAX_COMMAND_SIZE]; 205 206 sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n"); 207 scsi_cmd[0] = MODE_SENSE; 208 scsi_cmd[1] = 0; 209 scsi_cmd[2] = 0x2e; 210 scsi_cmd[3] = 0; 211 scsi_cmd[4] = 0x2a; /* size */ 212 scsi_cmd[5] = 0; 213 scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, result, 0x2a, 214 SCSI_TIMEOUT, 3, NULL); 215 } 216 217 static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev, 218 unsigned int depth) 219 { 220 int new_shift = sbitmap_calculate_shift(depth); 221 bool need_alloc = !sdev->budget_map.map; 222 bool need_free = false; 223 int ret; 224 struct sbitmap sb_backup; 225 226 depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev)); 227 228 /* 229 * realloc if new shift is calculated, which is caused by setting 230 * up one new default queue depth after calling ->slave_configure 231 */ 232 if (!need_alloc && new_shift != sdev->budget_map.shift) 233 need_alloc = need_free = true; 234 235 if (!need_alloc) 236 return 0; 237 238 /* 239 * Request queue has to be frozen for reallocating budget map, 240 * and here disk isn't added yet, so freezing is pretty fast 241 */ 242 if (need_free) { 243 blk_mq_freeze_queue(sdev->request_queue); 244 sb_backup = sdev->budget_map; 245 } 246 ret = sbitmap_init_node(&sdev->budget_map, 247 scsi_device_max_queue_depth(sdev), 248 new_shift, GFP_KERNEL, 249 sdev->request_queue->node, false, true); 250 if (!ret) 251 sbitmap_resize(&sdev->budget_map, depth); 252 253 if (need_free) { 254 if (ret) 255 sdev->budget_map = sb_backup; 256 else 257 sbitmap_free(&sb_backup); 258 ret = 0; 259 blk_mq_unfreeze_queue(sdev->request_queue); 260 } 261 return ret; 262 } 263 264 /** 265 * scsi_alloc_sdev - allocate and setup a scsi_Device 266 * @starget: which target to allocate a &scsi_device for 267 * @lun: which lun 268 * @hostdata: usually NULL and set by ->slave_alloc instead 269 * 270 * Description: 271 * Allocate, initialize for io, and return a pointer to a scsi_Device. 272 * Stores the @shost, @channel, @id, and @lun in the scsi_Device, and 273 * adds scsi_Device to the appropriate list. 274 * 275 * Return value: 276 * scsi_Device pointer, or NULL on failure. 
277 **/ 278 static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, 279 u64 lun, void *hostdata) 280 { 281 unsigned int depth; 282 struct scsi_device *sdev; 283 struct request_queue *q; 284 int display_failure_msg = 1, ret; 285 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 286 287 sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, 288 GFP_KERNEL); 289 if (!sdev) 290 goto out; 291 292 sdev->vendor = scsi_null_device_strs; 293 sdev->model = scsi_null_device_strs; 294 sdev->rev = scsi_null_device_strs; 295 sdev->host = shost; 296 sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD; 297 sdev->id = starget->id; 298 sdev->lun = lun; 299 sdev->channel = starget->channel; 300 mutex_init(&sdev->state_mutex); 301 sdev->sdev_state = SDEV_CREATED; 302 INIT_LIST_HEAD(&sdev->siblings); 303 INIT_LIST_HEAD(&sdev->same_target_siblings); 304 INIT_LIST_HEAD(&sdev->starved_entry); 305 INIT_LIST_HEAD(&sdev->event_list); 306 spin_lock_init(&sdev->list_lock); 307 mutex_init(&sdev->inquiry_mutex); 308 INIT_WORK(&sdev->event_work, scsi_evt_thread); 309 INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue); 310 311 sdev->sdev_gendev.parent = get_device(&starget->dev); 312 sdev->sdev_target = starget; 313 314 /* usually NULL and set by ->slave_alloc instead */ 315 sdev->hostdata = hostdata; 316 317 /* if the device needs this changing, it may do so in the 318 * slave_configure function */ 319 sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED; 320 321 /* 322 * Some low level driver could use device->type 323 */ 324 sdev->type = -1; 325 326 /* 327 * Assume that the device will have handshaking problems, 328 * and then fix this field later if it turns out it 329 * doesn't 330 */ 331 sdev->borken = 1; 332 333 sdev->sg_reserved_size = INT_MAX; 334 335 q = blk_mq_init_queue(&sdev->host->tag_set); 336 if (IS_ERR(q)) { 337 /* release fn is set up in scsi_sysfs_device_initialise, so 338 * have to free and put manually here */ 339 put_device(&starget->dev); 340 kfree(sdev); 341 goto out; 342 } 343 kref_get(&sdev->host->tagset_refcnt); 344 sdev->request_queue = q; 345 q->queuedata = sdev; 346 __scsi_init_queue(sdev->host, q); 347 348 depth = sdev->host->cmd_per_lun ?: 1; 349 350 /* 351 * Use .can_queue as budget map's depth because we have to 352 * support adjusting queue depth from sysfs. Meantime use 353 * default device queue depth to figure out sbitmap shift 354 * since we use this queue depth most of times. 
355 */ 356 if (scsi_realloc_sdev_budget_map(sdev, depth)) { 357 put_device(&starget->dev); 358 kfree(sdev); 359 goto out; 360 } 361 362 scsi_change_queue_depth(sdev, depth); 363 364 scsi_sysfs_device_initialize(sdev); 365 366 if (shost->hostt->slave_alloc) { 367 ret = shost->hostt->slave_alloc(sdev); 368 if (ret) { 369 /* 370 * if LLDD reports slave not present, don't clutter 371 * console with alloc failure messages 372 */ 373 if (ret == -ENXIO) 374 display_failure_msg = 0; 375 goto out_device_destroy; 376 } 377 } 378 379 return sdev; 380 381 out_device_destroy: 382 __scsi_remove_device(sdev); 383 out: 384 if (display_failure_msg) 385 printk(ALLOC_FAILURE_MSG, __func__); 386 return NULL; 387 } 388 389 static void scsi_target_destroy(struct scsi_target *starget) 390 { 391 struct device *dev = &starget->dev; 392 struct Scsi_Host *shost = dev_to_shost(dev->parent); 393 unsigned long flags; 394 395 BUG_ON(starget->state == STARGET_DEL); 396 starget->state = STARGET_DEL; 397 transport_destroy_device(dev); 398 spin_lock_irqsave(shost->host_lock, flags); 399 if (shost->hostt->target_destroy) 400 shost->hostt->target_destroy(starget); 401 list_del_init(&starget->siblings); 402 spin_unlock_irqrestore(shost->host_lock, flags); 403 put_device(dev); 404 } 405 406 static void scsi_target_dev_release(struct device *dev) 407 { 408 struct device *parent = dev->parent; 409 struct scsi_target *starget = to_scsi_target(dev); 410 411 kfree(starget); 412 put_device(parent); 413 } 414 415 static struct device_type scsi_target_type = { 416 .name = "scsi_target", 417 .release = scsi_target_dev_release, 418 }; 419 420 int scsi_is_target_device(const struct device *dev) 421 { 422 return dev->type == &scsi_target_type; 423 } 424 EXPORT_SYMBOL(scsi_is_target_device); 425 426 static struct scsi_target *__scsi_find_target(struct device *parent, 427 int channel, uint id) 428 { 429 struct scsi_target *starget, *found_starget = NULL; 430 struct Scsi_Host *shost = dev_to_shost(parent); 431 /* 432 * Search for an existing target for this sdev. 433 */ 434 list_for_each_entry(starget, &shost->__targets, siblings) { 435 if (starget->id == id && 436 starget->channel == channel) { 437 found_starget = starget; 438 break; 439 } 440 } 441 if (found_starget) 442 get_device(&found_starget->dev); 443 444 return found_starget; 445 } 446 447 /** 448 * scsi_target_reap_ref_release - remove target from visibility 449 * @kref: the reap_ref in the target being released 450 * 451 * Called on last put of reap_ref, which is the indication that no device 452 * under this target is visible anymore, so render the target invisible in 453 * sysfs. Note: we have to be in user context here because the target reaps 454 * should be done in places where the scsi device visibility is being removed. 455 */ 456 static void scsi_target_reap_ref_release(struct kref *kref) 457 { 458 struct scsi_target *starget 459 = container_of(kref, struct scsi_target, reap_ref); 460 461 /* 462 * if we get here and the target is still in a CREATED state that 463 * means it was allocated but never made visible (because a scan 464 * turned up no LUNs), so don't call device_del() on it. 
465 */ 466 if ((starget->state != STARGET_CREATED) && 467 (starget->state != STARGET_CREATED_REMOVE)) { 468 transport_remove_device(&starget->dev); 469 device_del(&starget->dev); 470 } 471 scsi_target_destroy(starget); 472 } 473 474 static void scsi_target_reap_ref_put(struct scsi_target *starget) 475 { 476 kref_put(&starget->reap_ref, scsi_target_reap_ref_release); 477 } 478 479 /** 480 * scsi_alloc_target - allocate a new or find an existing target 481 * @parent: parent of the target (need not be a scsi host) 482 * @channel: target channel number (zero if no channels) 483 * @id: target id number 484 * 485 * Return an existing target if one exists, provided it hasn't already 486 * gone into STARGET_DEL state, otherwise allocate a new target. 487 * 488 * The target is returned with an incremented reference, so the caller 489 * is responsible for both reaping and doing a last put 490 */ 491 static struct scsi_target *scsi_alloc_target(struct device *parent, 492 int channel, uint id) 493 { 494 struct Scsi_Host *shost = dev_to_shost(parent); 495 struct device *dev = NULL; 496 unsigned long flags; 497 const int size = sizeof(struct scsi_target) 498 + shost->transportt->target_size; 499 struct scsi_target *starget; 500 struct scsi_target *found_target; 501 int error, ref_got; 502 503 starget = kzalloc(size, GFP_KERNEL); 504 if (!starget) { 505 printk(KERN_ERR "%s: allocation failure\n", __func__); 506 return NULL; 507 } 508 dev = &starget->dev; 509 device_initialize(dev); 510 kref_init(&starget->reap_ref); 511 dev->parent = get_device(parent); 512 dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id); 513 dev->bus = &scsi_bus_type; 514 dev->type = &scsi_target_type; 515 scsi_enable_async_suspend(dev); 516 starget->id = id; 517 starget->channel = channel; 518 starget->can_queue = 0; 519 INIT_LIST_HEAD(&starget->siblings); 520 INIT_LIST_HEAD(&starget->devices); 521 starget->state = STARGET_CREATED; 522 starget->scsi_level = SCSI_2; 523 starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED; 524 retry: 525 spin_lock_irqsave(shost->host_lock, flags); 526 527 found_target = __scsi_find_target(parent, channel, id); 528 if (found_target) 529 goto found; 530 531 list_add_tail(&starget->siblings, &shost->__targets); 532 spin_unlock_irqrestore(shost->host_lock, flags); 533 /* allocate and add */ 534 transport_setup_device(dev); 535 if (shost->hostt->target_alloc) { 536 error = shost->hostt->target_alloc(starget); 537 538 if(error) { 539 if (error != -ENXIO) 540 dev_err(dev, "target allocation failed, error %d\n", error); 541 /* don't want scsi_target_reap to do the final 542 * put because it will be under the host lock */ 543 scsi_target_destroy(starget); 544 return NULL; 545 } 546 } 547 get_device(dev); 548 549 return starget; 550 551 found: 552 /* 553 * release routine already fired if kref is zero, so if we can still 554 * take the reference, the target must be alive. If we can't, it must 555 * be dying and we need to wait for a new target 556 */ 557 ref_got = kref_get_unless_zero(&found_target->reap_ref); 558 559 spin_unlock_irqrestore(shost->host_lock, flags); 560 if (ref_got) { 561 put_device(dev); 562 return found_target; 563 } 564 /* 565 * Unfortunately, we found a dying target; need to wait until it's 566 * dead before we can get a new one. There is an anomaly here. We 567 * *should* call scsi_target_reap() to balance the kref_get() of the 568 * reap_ref above. However, since the target being released, it's 569 * already invisible and the reap_ref is irrelevant. 
If we call 570 * scsi_target_reap() we might spuriously do another device_del() on 571 * an already invisible target. 572 */ 573 put_device(&found_target->dev); 574 /* 575 * length of time is irrelevant here, we just want to yield the CPU 576 * for a tick to avoid busy waiting for the target to die. 577 */ 578 msleep(1); 579 goto retry; 580 } 581 582 /** 583 * scsi_target_reap - check to see if target is in use and destroy if not 584 * @starget: target to be checked 585 * 586 * This is used after removing a LUN or doing a last put of the target 587 * it checks atomically that nothing is using the target and removes 588 * it if so. 589 */ 590 void scsi_target_reap(struct scsi_target *starget) 591 { 592 /* 593 * serious problem if this triggers: STARGET_DEL is only set in the if 594 * the reap_ref drops to zero, so we're trying to do another final put 595 * on an already released kref 596 */ 597 BUG_ON(starget->state == STARGET_DEL); 598 scsi_target_reap_ref_put(starget); 599 } 600 601 /** 602 * scsi_sanitize_inquiry_string - remove non-graphical chars from an 603 * INQUIRY result string 604 * @s: INQUIRY result string to sanitize 605 * @len: length of the string 606 * 607 * Description: 608 * The SCSI spec says that INQUIRY vendor, product, and revision 609 * strings must consist entirely of graphic ASCII characters, 610 * padded on the right with spaces. Since not all devices obey 611 * this rule, we will replace non-graphic or non-ASCII characters 612 * with spaces. Exception: a NUL character is interpreted as a 613 * string terminator, so all the following characters are set to 614 * spaces. 615 **/ 616 void scsi_sanitize_inquiry_string(unsigned char *s, int len) 617 { 618 int terminated = 0; 619 620 for (; len > 0; (--len, ++s)) { 621 if (*s == 0) 622 terminated = 1; 623 if (terminated || *s < 0x20 || *s > 0x7e) 624 *s = ' '; 625 } 626 } 627 EXPORT_SYMBOL(scsi_sanitize_inquiry_string); 628 629 /** 630 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY 631 * @sdev: scsi_device to probe 632 * @inq_result: area to store the INQUIRY result 633 * @result_len: len of inq_result 634 * @bflags: store any bflags found here 635 * 636 * Description: 637 * Probe the lun associated with @req using a standard SCSI INQUIRY; 638 * 639 * If the INQUIRY is successful, zero is returned and the 640 * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length 641 * are copied to the scsi_device any flags value is stored in *@bflags. 642 **/ 643 static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result, 644 int result_len, blist_flags_t *bflags) 645 { 646 unsigned char scsi_cmd[MAX_COMMAND_SIZE]; 647 int first_inquiry_len, try_inquiry_len, next_inquiry_len; 648 int response_len = 0; 649 int pass, count, result, resid; 650 struct scsi_sense_hdr sshdr; 651 const struct scsi_exec_args exec_args = { 652 .sshdr = &sshdr, 653 .resid = &resid, 654 }; 655 656 *bflags = 0; 657 658 /* Perform up to 3 passes. The first pass uses a conservative 659 * transfer length of 36 unless sdev->inquiry_len specifies a 660 * different value. */ 661 first_inquiry_len = sdev->inquiry_len ? 
sdev->inquiry_len : 36; 662 try_inquiry_len = first_inquiry_len; 663 pass = 1; 664 665 next_pass: 666 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, 667 "scsi scan: INQUIRY pass %d length %d\n", 668 pass, try_inquiry_len)); 669 670 /* Each pass gets up to three chances to ignore Unit Attention */ 671 for (count = 0; count < 3; ++count) { 672 memset(scsi_cmd, 0, 6); 673 scsi_cmd[0] = INQUIRY; 674 scsi_cmd[4] = (unsigned char) try_inquiry_len; 675 676 memset(inq_result, 0, try_inquiry_len); 677 678 result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, 679 inq_result, try_inquiry_len, 680 HZ / 2 + HZ * scsi_inq_timeout, 3, 681 &exec_args); 682 683 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, 684 "scsi scan: INQUIRY %s with code 0x%x\n", 685 result ? "failed" : "successful", result)); 686 687 if (result > 0) { 688 /* 689 * not-ready to ready transition [asc/ascq=0x28/0x0] 690 * or power-on, reset [asc/ascq=0x29/0x0], continue. 691 * INQUIRY should not yield UNIT_ATTENTION 692 * but many buggy devices do so anyway. 693 */ 694 if (scsi_status_is_check_condition(result) && 695 scsi_sense_valid(&sshdr)) { 696 if ((sshdr.sense_key == UNIT_ATTENTION) && 697 ((sshdr.asc == 0x28) || 698 (sshdr.asc == 0x29)) && 699 (sshdr.ascq == 0)) 700 continue; 701 } 702 } else if (result == 0) { 703 /* 704 * if nothing was transferred, we try 705 * again. It's a workaround for some USB 706 * devices. 707 */ 708 if (resid == try_inquiry_len) 709 continue; 710 } 711 break; 712 } 713 714 if (result == 0) { 715 scsi_sanitize_inquiry_string(&inq_result[8], 8); 716 scsi_sanitize_inquiry_string(&inq_result[16], 16); 717 scsi_sanitize_inquiry_string(&inq_result[32], 4); 718 719 response_len = inq_result[4] + 5; 720 if (response_len > 255) 721 response_len = first_inquiry_len; /* sanity */ 722 723 /* 724 * Get any flags for this device. 725 * 726 * XXX add a bflags to scsi_device, and replace the 727 * corresponding bit fields in scsi_device, so bflags 728 * need not be passed as an argument. 729 */ 730 *bflags = scsi_get_device_flags(sdev, &inq_result[8], 731 &inq_result[16]); 732 733 /* When the first pass succeeds we gain information about 734 * what larger transfer lengths might work. */ 735 if (pass == 1) { 736 if (BLIST_INQUIRY_36 & *bflags) 737 next_inquiry_len = 36; 738 /* 739 * LLD specified a maximum sdev->inquiry_len 740 * but device claims it has more data. Capping 741 * the length only makes sense for legacy 742 * devices. If a device supports SPC-4 (2014) 743 * or newer, assume that it is safe to ask for 744 * as much as the device says it supports. 745 */ 746 else if (sdev->inquiry_len && 747 response_len > sdev->inquiry_len && 748 (inq_result[2] & 0x7) < 6) /* SPC-4 */ 749 next_inquiry_len = sdev->inquiry_len; 750 else 751 next_inquiry_len = response_len; 752 753 /* If more data is available perform the second pass */ 754 if (next_inquiry_len > try_inquiry_len) { 755 try_inquiry_len = next_inquiry_len; 756 pass = 2; 757 goto next_pass; 758 } 759 } 760 761 } else if (pass == 2) { 762 sdev_printk(KERN_INFO, sdev, 763 "scsi scan: %d byte inquiry failed. " 764 "Consider BLIST_INQUIRY_36 for this device\n", 765 try_inquiry_len); 766 767 /* If this pass failed, the third pass goes back and transfers 768 * the same amount as we successfully got in the first pass. */ 769 try_inquiry_len = first_inquiry_len; 770 pass = 3; 771 goto next_pass; 772 } 773 774 /* If the last transfer attempt got an error, assume the 775 * peripheral doesn't exist or is dead. 
*/ 776 if (result) 777 return -EIO; 778 779 /* Don't report any more data than the device says is valid */ 780 sdev->inquiry_len = min(try_inquiry_len, response_len); 781 782 /* 783 * XXX Abort if the response length is less than 36? If less than 784 * 32, the lookup of the device flags (above) could be invalid, 785 * and it would be possible to take an incorrect action - we do 786 * not want to hang because of a short INQUIRY. On the flip side, 787 * if the device is spun down or becoming ready (and so it gives a 788 * short INQUIRY), an abort here prevents any further use of the 789 * device, including spin up. 790 * 791 * On the whole, the best approach seems to be to assume the first 792 * 36 bytes are valid no matter what the device says. That's 793 * better than copying < 36 bytes to the inquiry-result buffer 794 * and displaying garbage for the Vendor, Product, or Revision 795 * strings. 796 */ 797 if (sdev->inquiry_len < 36) { 798 if (!sdev->host->short_inquiry) { 799 shost_printk(KERN_INFO, sdev->host, 800 "scsi scan: INQUIRY result too short (%d)," 801 " using 36\n", sdev->inquiry_len); 802 sdev->host->short_inquiry = 1; 803 } 804 sdev->inquiry_len = 36; 805 } 806 807 /* 808 * Related to the above issue: 809 * 810 * XXX Devices (disk or all?) should be sent a TEST UNIT READY, 811 * and if not ready, sent a START_STOP to start (maybe spin up) and 812 * then send the INQUIRY again, since the INQUIRY can change after 813 * a device is initialized. 814 * 815 * Ideally, start a device if explicitly asked to do so. This 816 * assumes that a device is spun up on power on, spun down on 817 * request, and then spun up on request. 818 */ 819 820 /* 821 * The scanning code needs to know the scsi_level, even if no 822 * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so 823 * non-zero LUNs can be scanned. 824 */ 825 sdev->scsi_level = inq_result[2] & 0x0f; 826 if (sdev->scsi_level >= 2 || 827 (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1)) 828 sdev->scsi_level++; 829 sdev->sdev_target->scsi_level = sdev->scsi_level; 830 831 /* 832 * If SCSI-2 or lower, and if the transport requires it, 833 * store the LUN value in CDB[1]. 834 */ 835 sdev->lun_in_cdb = 0; 836 if (sdev->scsi_level <= SCSI_2 && 837 sdev->scsi_level != SCSI_UNKNOWN && 838 !sdev->host->no_scsi2_lun_in_cdb) 839 sdev->lun_in_cdb = 1; 840 841 return 0; 842 } 843 844 /** 845 * scsi_add_lun - allocate and fully initialze a scsi_device 846 * @sdev: holds information to be stored in the new scsi_device 847 * @inq_result: holds the result of a previous INQUIRY to the LUN 848 * @bflags: black/white list flag 849 * @async: 1 if this device is being scanned asynchronously 850 * 851 * Description: 852 * Initialize the scsi_device @sdev. Optionally set fields based 853 * on values in *@bflags. 854 * 855 * Return: 856 * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device 857 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized 858 **/ 859 static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, 860 blist_flags_t *bflags, int async) 861 { 862 int ret; 863 864 /* 865 * XXX do not save the inquiry, since it can change underneath us, 866 * save just vendor/model/rev. 867 * 868 * Rather than save it and have an ioctl that retrieves the saved 869 * value, have an ioctl that executes the same INQUIRY code used 870 * in scsi_probe_lun, let user level programs doing INQUIRY 871 * scanning run at their own risk, or supply a user level program 872 * that can correctly scan. 
873 */ 874 875 /* 876 * Copy at least 36 bytes of INQUIRY data, so that we don't 877 * dereference unallocated memory when accessing the Vendor, 878 * Product, and Revision strings. Badly behaved devices may set 879 * the INQUIRY Additional Length byte to a small value, indicating 880 * these strings are invalid, but often they contain plausible data 881 * nonetheless. It doesn't matter if the device sent < 36 bytes 882 * total, since scsi_probe_lun() initializes inq_result with 0s. 883 */ 884 sdev->inquiry = kmemdup(inq_result, 885 max_t(size_t, sdev->inquiry_len, 36), 886 GFP_KERNEL); 887 if (sdev->inquiry == NULL) 888 return SCSI_SCAN_NO_RESPONSE; 889 890 sdev->vendor = (char *) (sdev->inquiry + 8); 891 sdev->model = (char *) (sdev->inquiry + 16); 892 sdev->rev = (char *) (sdev->inquiry + 32); 893 894 if (strncmp(sdev->vendor, "ATA ", 8) == 0) { 895 /* 896 * sata emulation layer device. This is a hack to work around 897 * the SATL power management specifications which state that 898 * when the SATL detects the device has gone into standby 899 * mode, it shall respond with NOT READY. 900 */ 901 sdev->allow_restart = 1; 902 } 903 904 if (*bflags & BLIST_ISROM) { 905 sdev->type = TYPE_ROM; 906 sdev->removable = 1; 907 } else { 908 sdev->type = (inq_result[0] & 0x1f); 909 sdev->removable = (inq_result[1] & 0x80) >> 7; 910 911 /* 912 * some devices may respond with wrong type for 913 * well-known logical units. Force well-known type 914 * to enumerate them correctly. 915 */ 916 if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) { 917 sdev_printk(KERN_WARNING, sdev, 918 "%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n", 919 __func__, sdev->type, (unsigned int)sdev->lun); 920 sdev->type = TYPE_WLUN; 921 } 922 923 } 924 925 if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) { 926 /* RBC and MMC devices can return SCSI-3 compliance and yet 927 * still not support REPORT LUNS, so make them act as 928 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is 929 * specifically set */ 930 if ((*bflags & BLIST_REPORTLUN2) == 0) 931 *bflags |= BLIST_NOREPORTLUN; 932 } 933 934 /* 935 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI 936 * spec says: The device server is capable of supporting the 937 * specified peripheral device type on this logical unit. However, 938 * the physical device is not currently connected to this logical 939 * unit. 940 * 941 * The above is vague, as it implies that we could treat 001 and 942 * 011 the same. Stay compatible with previous code, and create a 943 * scsi_device for a PQ of 1 944 * 945 * Don't set the device offline here; rather let the upper 946 * level drivers eval the PQ to decide whether they should 947 * attach. So remove ((inq_result[0] >> 5) & 7) == 1 check. 948 */ 949 950 sdev->inq_periph_qual = (inq_result[0] >> 5) & 7; 951 sdev->lockable = sdev->removable; 952 sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2); 953 954 if (sdev->scsi_level >= SCSI_3 || 955 (sdev->inquiry_len > 56 && inq_result[56] & 0x04)) 956 sdev->ppr = 1; 957 if (inq_result[7] & 0x60) 958 sdev->wdtr = 1; 959 if (inq_result[7] & 0x10) 960 sdev->sdtr = 1; 961 962 sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d " 963 "ANSI: %d%s\n", scsi_device_type(sdev->type), 964 sdev->vendor, sdev->model, sdev->rev, 965 sdev->inq_periph_qual, inq_result[2] & 0x07, 966 (inq_result[3] & 0x0f) == 1 ? 
" CCS" : ""); 967 968 if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) && 969 !(*bflags & BLIST_NOTQ)) { 970 sdev->tagged_supported = 1; 971 sdev->simple_tags = 1; 972 } 973 974 /* 975 * Some devices (Texel CD ROM drives) have handshaking problems 976 * when used with the Seagate controllers. borken is initialized 977 * to 1, and then set it to 0 here. 978 */ 979 if ((*bflags & BLIST_BORKEN) == 0) 980 sdev->borken = 0; 981 982 if (*bflags & BLIST_NO_ULD_ATTACH) 983 sdev->no_uld_attach = 1; 984 985 /* 986 * Apparently some really broken devices (contrary to the SCSI 987 * standards) need to be selected without asserting ATN 988 */ 989 if (*bflags & BLIST_SELECT_NO_ATN) 990 sdev->select_no_atn = 1; 991 992 /* 993 * Maximum 512 sector transfer length 994 * broken RA4x00 Compaq Disk Array 995 */ 996 if (*bflags & BLIST_MAX_512) 997 blk_queue_max_hw_sectors(sdev->request_queue, 512); 998 /* 999 * Max 1024 sector transfer length for targets that report incorrect 1000 * max/optimal lengths and relied on the old block layer safe default 1001 */ 1002 else if (*bflags & BLIST_MAX_1024) 1003 blk_queue_max_hw_sectors(sdev->request_queue, 1024); 1004 1005 /* 1006 * Some devices may not want to have a start command automatically 1007 * issued when a device is added. 1008 */ 1009 if (*bflags & BLIST_NOSTARTONADD) 1010 sdev->no_start_on_add = 1; 1011 1012 if (*bflags & BLIST_SINGLELUN) 1013 scsi_target(sdev)->single_lun = 1; 1014 1015 sdev->use_10_for_rw = 1; 1016 1017 /* some devices don't like REPORT SUPPORTED OPERATION CODES 1018 * and will simply timeout causing sd_mod init to take a very 1019 * very long time */ 1020 if (*bflags & BLIST_NO_RSOC) 1021 sdev->no_report_opcodes = 1; 1022 1023 /* set the device running here so that slave configure 1024 * may do I/O */ 1025 mutex_lock(&sdev->state_mutex); 1026 ret = scsi_device_set_state(sdev, SDEV_RUNNING); 1027 if (ret) 1028 ret = scsi_device_set_state(sdev, SDEV_BLOCK); 1029 mutex_unlock(&sdev->state_mutex); 1030 1031 if (ret) { 1032 sdev_printk(KERN_ERR, sdev, 1033 "in wrong state %s to complete scan\n", 1034 scsi_device_state_name(sdev->sdev_state)); 1035 return SCSI_SCAN_NO_RESPONSE; 1036 } 1037 1038 if (*bflags & BLIST_NOT_LOCKABLE) 1039 sdev->lockable = 0; 1040 1041 if (*bflags & BLIST_RETRY_HWERROR) 1042 sdev->retry_hwerror = 1; 1043 1044 if (*bflags & BLIST_NO_DIF) 1045 sdev->no_dif = 1; 1046 1047 if (*bflags & BLIST_UNMAP_LIMIT_WS) 1048 sdev->unmap_limit_for_ws = 1; 1049 1050 if (*bflags & BLIST_IGN_MEDIA_CHANGE) 1051 sdev->ignore_media_change = 1; 1052 1053 sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; 1054 1055 if (*bflags & BLIST_TRY_VPD_PAGES) 1056 sdev->try_vpd_pages = 1; 1057 else if (*bflags & BLIST_SKIP_VPD_PAGES) 1058 sdev->skip_vpd_pages = 1; 1059 1060 if (*bflags & BLIST_NO_VPD_SIZE) 1061 sdev->no_vpd_size = 1; 1062 1063 transport_configure_device(&sdev->sdev_gendev); 1064 1065 if (sdev->host->hostt->slave_configure) { 1066 ret = sdev->host->hostt->slave_configure(sdev); 1067 if (ret) { 1068 /* 1069 * if LLDD reports slave not present, don't clutter 1070 * console with alloc failure messages 1071 */ 1072 if (ret != -ENXIO) { 1073 sdev_printk(KERN_ERR, sdev, 1074 "failed to configure device\n"); 1075 } 1076 return SCSI_SCAN_NO_RESPONSE; 1077 } 1078 1079 /* 1080 * The queue_depth is often changed in ->slave_configure. 1081 * Set up budget map again since memory consumption of 1082 * the map depends on actual queue depth. 
1083 */ 1084 scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth); 1085 } 1086 1087 if (sdev->scsi_level >= SCSI_3) 1088 scsi_attach_vpd(sdev); 1089 1090 scsi_cdl_check(sdev); 1091 1092 sdev->max_queue_depth = sdev->queue_depth; 1093 WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth); 1094 sdev->sdev_bflags = *bflags; 1095 1096 /* 1097 * Ok, the device is now all set up, we can 1098 * register it and tell the rest of the kernel 1099 * about it. 1100 */ 1101 if (!async && scsi_sysfs_add_sdev(sdev) != 0) 1102 return SCSI_SCAN_NO_RESPONSE; 1103 1104 return SCSI_SCAN_LUN_PRESENT; 1105 } 1106 1107 #ifdef CONFIG_SCSI_LOGGING 1108 /** 1109 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace 1110 * @buf: Output buffer with at least end-first+1 bytes of space 1111 * @inq: Inquiry buffer (input) 1112 * @first: Offset of string into inq 1113 * @end: Index after last character in inq 1114 */ 1115 static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq, 1116 unsigned first, unsigned end) 1117 { 1118 unsigned term = 0, idx; 1119 1120 for (idx = 0; idx + first < end && idx + first < inq[4] + 5; idx++) { 1121 if (inq[idx+first] > ' ') { 1122 buf[idx] = inq[idx+first]; 1123 term = idx+1; 1124 } else { 1125 buf[idx] = ' '; 1126 } 1127 } 1128 buf[term] = 0; 1129 return buf; 1130 } 1131 #endif 1132 1133 /** 1134 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it 1135 * @starget: pointer to target device structure 1136 * @lun: LUN of target device 1137 * @bflagsp: store bflags here if not NULL 1138 * @sdevp: probe the LUN corresponding to this scsi_device 1139 * @rescan: if not equal to SCSI_SCAN_INITIAL skip some code only 1140 * needed on first scan 1141 * @hostdata: passed to scsi_alloc_sdev() 1142 * 1143 * Description: 1144 * Call scsi_probe_lun, if a LUN with an attached device is found, 1145 * allocate and set it up by calling scsi_add_lun. 1146 * 1147 * Return: 1148 * 1149 * - SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device 1150 * - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is 1151 * attached at the LUN 1152 * - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized 1153 **/ 1154 static int scsi_probe_and_add_lun(struct scsi_target *starget, 1155 u64 lun, blist_flags_t *bflagsp, 1156 struct scsi_device **sdevp, 1157 enum scsi_scan_mode rescan, 1158 void *hostdata) 1159 { 1160 struct scsi_device *sdev; 1161 unsigned char *result; 1162 blist_flags_t bflags; 1163 int res = SCSI_SCAN_NO_RESPONSE, result_len = 256; 1164 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1165 1166 /* 1167 * The rescan flag is used as an optimization, the first scan of a 1168 * host adapter calls into here with rescan == 0. 
1169 */ 1170 sdev = scsi_device_lookup_by_target(starget, lun); 1171 if (sdev) { 1172 if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) { 1173 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, 1174 "scsi scan: device exists on %s\n", 1175 dev_name(&sdev->sdev_gendev))); 1176 if (sdevp) 1177 *sdevp = sdev; 1178 else 1179 scsi_device_put(sdev); 1180 1181 if (bflagsp) 1182 *bflagsp = scsi_get_device_flags(sdev, 1183 sdev->vendor, 1184 sdev->model); 1185 return SCSI_SCAN_LUN_PRESENT; 1186 } 1187 scsi_device_put(sdev); 1188 } else 1189 sdev = scsi_alloc_sdev(starget, lun, hostdata); 1190 if (!sdev) 1191 goto out; 1192 1193 result = kmalloc(result_len, GFP_KERNEL); 1194 if (!result) 1195 goto out_free_sdev; 1196 1197 if (scsi_probe_lun(sdev, result, result_len, &bflags)) 1198 goto out_free_result; 1199 1200 if (bflagsp) 1201 *bflagsp = bflags; 1202 /* 1203 * result contains valid SCSI INQUIRY data. 1204 */ 1205 if ((result[0] >> 5) == 3) { 1206 /* 1207 * For a Peripheral qualifier 3 (011b), the SCSI 1208 * spec says: The device server is not capable of 1209 * supporting a physical device on this logical 1210 * unit. 1211 * 1212 * For disks, this implies that there is no 1213 * logical disk configured at sdev->lun, but there 1214 * is a target id responding. 1215 */ 1216 SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:" 1217 " peripheral qualifier of 3, device not" 1218 " added\n")) 1219 if (lun == 0) { 1220 SCSI_LOG_SCAN_BUS(1, { 1221 unsigned char vend[9]; 1222 unsigned char mod[17]; 1223 1224 sdev_printk(KERN_INFO, sdev, 1225 "scsi scan: consider passing scsi_mod." 1226 "dev_flags=%s:%s:0x240 or 0x1000240\n", 1227 scsi_inq_str(vend, result, 8, 16), 1228 scsi_inq_str(mod, result, 16, 32)); 1229 }); 1230 1231 } 1232 1233 res = SCSI_SCAN_TARGET_PRESENT; 1234 goto out_free_result; 1235 } 1236 1237 /* 1238 * Some targets may set slight variations of PQ and PDT to signal 1239 * that no LUN is present, so don't add sdev in these cases. 1240 * Two specific examples are: 1241 * 1) NetApp targets: return PQ=1, PDT=0x1f 1242 * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved" 1243 * in the UFI 1.0 spec (we cannot rely on reserved bits). 1244 * 1245 * References: 1246 * 1) SCSI SPC-3, pp. 145-146 1247 * PQ=1: "A peripheral device having the specified peripheral 1248 * device type is not connected to this logical unit. However, the 1249 * device server is capable of supporting the specified peripheral 1250 * device type on this logical unit." 1251 * PDT=0x1f: "Unknown or no device type" 1252 * 2) USB UFI 1.0, p. 
20 1253 * PDT=00h Direct-access device (floppy) 1254 * PDT=1Fh none (no FDD connected to the requested logical unit) 1255 */ 1256 if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) && 1257 (result[0] & 0x1f) == 0x1f && 1258 !scsi_is_wlun(lun)) { 1259 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, 1260 "scsi scan: peripheral device type" 1261 " of 31, no device added\n")); 1262 res = SCSI_SCAN_TARGET_PRESENT; 1263 goto out_free_result; 1264 } 1265 1266 res = scsi_add_lun(sdev, result, &bflags, shost->async_scan); 1267 if (res == SCSI_SCAN_LUN_PRESENT) { 1268 if (bflags & BLIST_KEY) { 1269 sdev->lockable = 0; 1270 scsi_unlock_floptical(sdev, result); 1271 } 1272 } 1273 1274 out_free_result: 1275 kfree(result); 1276 out_free_sdev: 1277 if (res == SCSI_SCAN_LUN_PRESENT) { 1278 if (sdevp) { 1279 if (scsi_device_get(sdev) == 0) { 1280 *sdevp = sdev; 1281 } else { 1282 __scsi_remove_device(sdev); 1283 res = SCSI_SCAN_NO_RESPONSE; 1284 } 1285 } 1286 } else 1287 __scsi_remove_device(sdev); 1288 out: 1289 return res; 1290 } 1291 1292 /** 1293 * scsi_sequential_lun_scan - sequentially scan a SCSI target 1294 * @starget: pointer to target structure to scan 1295 * @bflags: black/white list flag for LUN 0 1296 * @scsi_level: Which version of the standard does this device adhere to 1297 * @rescan: passed to scsi_probe_add_lun() 1298 * 1299 * Description: 1300 * Generally, scan from LUN 1 (LUN 0 is assumed to already have been 1301 * scanned) to some maximum lun until a LUN is found with no device 1302 * attached. Use the bflags to figure out any oddities. 1303 * 1304 * Modifies sdevscan->lun. 1305 **/ 1306 static void scsi_sequential_lun_scan(struct scsi_target *starget, 1307 blist_flags_t bflags, int scsi_level, 1308 enum scsi_scan_mode rescan) 1309 { 1310 uint max_dev_lun; 1311 u64 sparse_lun, lun; 1312 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1313 1314 SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget, 1315 "scsi scan: Sequential scan\n")); 1316 1317 max_dev_lun = min(max_scsi_luns, shost->max_lun); 1318 /* 1319 * If this device is known to support sparse multiple units, 1320 * override the other settings, and scan all of them. Normally, 1321 * SCSI-3 devices should be scanned via the REPORT LUNS. 1322 */ 1323 if (bflags & BLIST_SPARSELUN) { 1324 max_dev_lun = shost->max_lun; 1325 sparse_lun = 1; 1326 } else 1327 sparse_lun = 0; 1328 1329 /* 1330 * If less than SCSI_1_CCS, and no special lun scanning, stop 1331 * scanning; this matches 2.4 behaviour, but could just be a bug 1332 * (to continue scanning a SCSI_1_CCS device). 1333 * 1334 * This test is broken. We might not have any device on lun0 for 1335 * a sparselun device, and if that's the case then how would we 1336 * know the real scsi_level, eh? It might make sense to just not 1337 * scan any SCSI_1 device for non-0 luns, but that check would best 1338 * go into scsi_alloc_sdev() and just have it return null when asked 1339 * to alloc an sdev for lun > 0 on an already found SCSI_1 device. 1340 * 1341 if ((sdevscan->scsi_level < SCSI_1_CCS) && 1342 ((bflags & (BLIST_FORCELUN | BLIST_SPARSELUN | BLIST_MAX5LUN)) 1343 == 0)) 1344 return; 1345 */ 1346 /* 1347 * If this device is known to support multiple units, override 1348 * the other settings, and scan all of them. 
1349 */ 1350 if (bflags & BLIST_FORCELUN) 1351 max_dev_lun = shost->max_lun; 1352 /* 1353 * REGAL CDC-4X: avoid hang after LUN 4 1354 */ 1355 if (bflags & BLIST_MAX5LUN) 1356 max_dev_lun = min(5U, max_dev_lun); 1357 /* 1358 * Do not scan SCSI-2 or lower device past LUN 7, unless 1359 * BLIST_LARGELUN. 1360 */ 1361 if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN)) 1362 max_dev_lun = min(8U, max_dev_lun); 1363 else 1364 max_dev_lun = min(256U, max_dev_lun); 1365 1366 /* 1367 * We have already scanned LUN 0, so start at LUN 1. Keep scanning 1368 * until we reach the max, or no LUN is found and we are not 1369 * sparse_lun. 1370 */ 1371 for (lun = 1; lun < max_dev_lun; ++lun) 1372 if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, 1373 NULL) != SCSI_SCAN_LUN_PRESENT) && 1374 !sparse_lun) 1375 return; 1376 } 1377 1378 /** 1379 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results 1380 * @starget: which target 1381 * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN 1382 * @rescan: nonzero if we can skip code only needed on first scan 1383 * 1384 * Description: 1385 * Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command. 1386 * Scan the resulting list of LUNs by calling scsi_probe_and_add_lun. 1387 * 1388 * If BLINK_REPORTLUN2 is set, scan a target that supports more than 8 1389 * LUNs even if it's older than SCSI-3. 1390 * If BLIST_NOREPORTLUN is set, return 1 always. 1391 * If BLIST_NOLUN is set, return 0 always. 1392 * If starget->no_report_luns is set, return 1 always. 1393 * 1394 * Return: 1395 * 0: scan completed (or no memory, so further scanning is futile) 1396 * 1: could not scan with REPORT LUN 1397 **/ 1398 static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags, 1399 enum scsi_scan_mode rescan) 1400 { 1401 unsigned char scsi_cmd[MAX_COMMAND_SIZE]; 1402 unsigned int length; 1403 u64 lun; 1404 unsigned int num_luns; 1405 unsigned int retries; 1406 int result; 1407 struct scsi_lun *lunp, *lun_data; 1408 struct scsi_sense_hdr sshdr; 1409 struct scsi_device *sdev; 1410 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 1411 const struct scsi_exec_args exec_args = { 1412 .sshdr = &sshdr, 1413 }; 1414 int ret = 0; 1415 1416 /* 1417 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set. 1418 * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does 1419 * support more than 8 LUNs. 1420 * Don't attempt if the target doesn't support REPORT LUNS. 1421 */ 1422 if (bflags & BLIST_NOREPORTLUN) 1423 return 1; 1424 if (starget->scsi_level < SCSI_2 && 1425 starget->scsi_level != SCSI_UNKNOWN) 1426 return 1; 1427 if (starget->scsi_level < SCSI_3 && 1428 (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8)) 1429 return 1; 1430 if (bflags & BLIST_NOLUN) 1431 return 0; 1432 if (starget->no_report_luns) 1433 return 1; 1434 1435 if (!(sdev = scsi_device_lookup_by_target(starget, 0))) { 1436 sdev = scsi_alloc_sdev(starget, 0, NULL); 1437 if (!sdev) 1438 return 0; 1439 if (scsi_device_get(sdev)) { 1440 __scsi_remove_device(sdev); 1441 return 0; 1442 } 1443 } 1444 1445 /* 1446 * Allocate enough to hold the header (the same size as one scsi_lun) 1447 * plus the number of luns we are requesting. 511 was the default 1448 * value of the now removed max_report_luns parameter. 
1449 */ 1450 length = (511 + 1) * sizeof(struct scsi_lun); 1451 retry: 1452 lun_data = kmalloc(length, GFP_KERNEL); 1453 if (!lun_data) { 1454 printk(ALLOC_FAILURE_MSG, __func__); 1455 goto out; 1456 } 1457 1458 scsi_cmd[0] = REPORT_LUNS; 1459 1460 /* 1461 * bytes 1 - 5: reserved, set to zero. 1462 */ 1463 memset(&scsi_cmd[1], 0, 5); 1464 1465 /* 1466 * bytes 6 - 9: length of the command. 1467 */ 1468 put_unaligned_be32(length, &scsi_cmd[6]); 1469 1470 scsi_cmd[10] = 0; /* reserved */ 1471 scsi_cmd[11] = 0; /* control */ 1472 1473 /* 1474 * We can get a UNIT ATTENTION, for example a power on/reset, so 1475 * retry a few times (like sd.c does for TEST UNIT READY). 1476 * Experience shows some combinations of adapter/devices get at 1477 * least two power on/resets. 1478 * 1479 * Illegal requests (for devices that do not support REPORT LUNS) 1480 * should come through as a check condition, and will not generate 1481 * a retry. 1482 */ 1483 for (retries = 0; retries < 3; retries++) { 1484 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, 1485 "scsi scan: Sending REPORT LUNS to (try %d)\n", 1486 retries)); 1487 1488 result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, 1489 lun_data, length, 1490 SCSI_REPORT_LUNS_TIMEOUT, 3, 1491 &exec_args); 1492 1493 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, 1494 "scsi scan: REPORT LUNS" 1495 " %s (try %d) result 0x%x\n", 1496 result ? "failed" : "successful", 1497 retries, result)); 1498 if (result == 0) 1499 break; 1500 else if (scsi_sense_valid(&sshdr)) { 1501 if (sshdr.sense_key != UNIT_ATTENTION) 1502 break; 1503 } 1504 } 1505 1506 if (result) { 1507 /* 1508 * The device probably does not support a REPORT LUN command 1509 */ 1510 ret = 1; 1511 goto out_err; 1512 } 1513 1514 /* 1515 * Get the length from the first four bytes of lun_data. 1516 */ 1517 if (get_unaligned_be32(lun_data->scsi_lun) + 1518 sizeof(struct scsi_lun) > length) { 1519 length = get_unaligned_be32(lun_data->scsi_lun) + 1520 sizeof(struct scsi_lun); 1521 kfree(lun_data); 1522 goto retry; 1523 } 1524 length = get_unaligned_be32(lun_data->scsi_lun); 1525 1526 num_luns = (length / sizeof(struct scsi_lun)); 1527 1528 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, 1529 "scsi scan: REPORT LUN scan\n")); 1530 1531 /* 1532 * Scan the luns in lun_data. The entry at offset 0 is really 1533 * the header, so start at 1 and go up to and including num_luns. 1534 */ 1535 for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) { 1536 lun = scsilun_to_int(lunp); 1537 1538 if (lun > sdev->host->max_lun) { 1539 sdev_printk(KERN_WARNING, sdev, 1540 "lun%llu has a LUN larger than" 1541 " allowed by the host adapter\n", lun); 1542 } else { 1543 int res; 1544 1545 res = scsi_probe_and_add_lun(starget, 1546 lun, NULL, NULL, rescan, NULL); 1547 if (res == SCSI_SCAN_NO_RESPONSE) { 1548 /* 1549 * Got some results, but now none, abort. 
1550 */ 1551 sdev_printk(KERN_ERR, sdev, 1552 "Unexpected response" 1553 " from lun %llu while scanning, scan" 1554 " aborted\n", (unsigned long long)lun); 1555 break; 1556 } 1557 } 1558 } 1559 1560 out_err: 1561 kfree(lun_data); 1562 out: 1563 if (scsi_device_created(sdev)) 1564 /* 1565 * the sdev we used didn't appear in the report luns scan 1566 */ 1567 __scsi_remove_device(sdev); 1568 scsi_device_put(sdev); 1569 return ret; 1570 } 1571 1572 struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel, 1573 uint id, u64 lun, void *hostdata) 1574 { 1575 struct scsi_device *sdev = ERR_PTR(-ENODEV); 1576 struct device *parent = &shost->shost_gendev; 1577 struct scsi_target *starget; 1578 1579 if (strncmp(scsi_scan_type, "none", 4) == 0) 1580 return ERR_PTR(-ENODEV); 1581 1582 starget = scsi_alloc_target(parent, channel, id); 1583 if (!starget) 1584 return ERR_PTR(-ENOMEM); 1585 scsi_autopm_get_target(starget); 1586 1587 mutex_lock(&shost->scan_mutex); 1588 if (!shost->async_scan) 1589 scsi_complete_async_scans(); 1590 1591 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) { 1592 scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1593 SCSI_SCAN_RESCAN, hostdata); 1594 scsi_autopm_put_host(shost); 1595 } 1596 mutex_unlock(&shost->scan_mutex); 1597 scsi_autopm_put_target(starget); 1598 /* 1599 * paired with scsi_alloc_target(). Target will be destroyed unless 1600 * scsi_probe_and_add_lun made an underlying device visible 1601 */ 1602 scsi_target_reap(starget); 1603 put_device(&starget->dev); 1604 1605 return sdev; 1606 } 1607 EXPORT_SYMBOL(__scsi_add_device); 1608 1609 int scsi_add_device(struct Scsi_Host *host, uint channel, 1610 uint target, u64 lun) 1611 { 1612 struct scsi_device *sdev = 1613 __scsi_add_device(host, channel, target, lun, NULL); 1614 if (IS_ERR(sdev)) 1615 return PTR_ERR(sdev); 1616 1617 scsi_device_put(sdev); 1618 return 0; 1619 } 1620 EXPORT_SYMBOL(scsi_add_device); 1621 1622 int scsi_resume_device(struct scsi_device *sdev) 1623 { 1624 struct device *dev = &sdev->sdev_gendev; 1625 int ret = 0; 1626 1627 device_lock(dev); 1628 1629 /* 1630 * Bail out if the device or its queue are not running. Otherwise, 1631 * the rescan may block waiting for commands to be executed, with us 1632 * holding the device lock. This can result in a potential deadlock 1633 * in the power management core code when system resume is on-going. 1634 */ 1635 if (sdev->sdev_state != SDEV_RUNNING || 1636 blk_queue_pm_only(sdev->request_queue)) { 1637 ret = -EWOULDBLOCK; 1638 goto unlock; 1639 } 1640 1641 if (dev->driver && try_module_get(dev->driver->owner)) { 1642 struct scsi_driver *drv = to_scsi_driver(dev->driver); 1643 1644 if (drv->resume) 1645 ret = drv->resume(dev); 1646 module_put(dev->driver->owner); 1647 } 1648 1649 unlock: 1650 device_unlock(dev); 1651 1652 return ret; 1653 } 1654 EXPORT_SYMBOL(scsi_resume_device); 1655 1656 int scsi_rescan_device(struct scsi_device *sdev) 1657 { 1658 struct device *dev = &sdev->sdev_gendev; 1659 int ret = 0; 1660 1661 device_lock(dev); 1662 1663 /* 1664 * Bail out if the device or its queue are not running. Otherwise, 1665 * the rescan may block waiting for commands to be executed, with us 1666 * holding the device lock. This can result in a potential deadlock 1667 * in the power management core code when system resume is on-going. 
1668 */ 1669 if (sdev->sdev_state != SDEV_RUNNING || 1670 blk_queue_pm_only(sdev->request_queue)) { 1671 ret = -EWOULDBLOCK; 1672 goto unlock; 1673 } 1674 1675 scsi_attach_vpd(sdev); 1676 scsi_cdl_check(sdev); 1677 1678 if (sdev->handler && sdev->handler->rescan) 1679 sdev->handler->rescan(sdev); 1680 1681 if (dev->driver && try_module_get(dev->driver->owner)) { 1682 struct scsi_driver *drv = to_scsi_driver(dev->driver); 1683 1684 if (drv->rescan) 1685 drv->rescan(dev); 1686 module_put(dev->driver->owner); 1687 } 1688 1689 unlock: 1690 device_unlock(dev); 1691 1692 return ret; 1693 } 1694 EXPORT_SYMBOL(scsi_rescan_device); 1695 1696 static void __scsi_scan_target(struct device *parent, unsigned int channel, 1697 unsigned int id, u64 lun, enum scsi_scan_mode rescan) 1698 { 1699 struct Scsi_Host *shost = dev_to_shost(parent); 1700 blist_flags_t bflags = 0; 1701 int res; 1702 struct scsi_target *starget; 1703 1704 if (shost->this_id == id) 1705 /* 1706 * Don't scan the host adapter 1707 */ 1708 return; 1709 1710 starget = scsi_alloc_target(parent, channel, id); 1711 if (!starget) 1712 return; 1713 scsi_autopm_get_target(starget); 1714 1715 if (lun != SCAN_WILD_CARD) { 1716 /* 1717 * Scan for a specific host/chan/id/lun. 1718 */ 1719 scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL); 1720 goto out_reap; 1721 } 1722 1723 /* 1724 * Scan LUN 0, if there is some response, scan further. Ideally, we 1725 * would not configure LUN 0 until all LUNs are scanned. 1726 */ 1727 res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL); 1728 if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) { 1729 if (scsi_report_lun_scan(starget, bflags, rescan) != 0) 1730 /* 1731 * The REPORT LUN did not scan the target, 1732 * do a sequential scan. 1733 */ 1734 scsi_sequential_lun_scan(starget, bflags, 1735 starget->scsi_level, rescan); 1736 } 1737 1738 out_reap: 1739 scsi_autopm_put_target(starget); 1740 /* 1741 * paired with scsi_alloc_target(): determine if the target has 1742 * any children at all and if not, nuke it 1743 */ 1744 scsi_target_reap(starget); 1745 1746 put_device(&starget->dev); 1747 } 1748 1749 /** 1750 * scsi_scan_target - scan a target id, possibly including all LUNs on the target. 1751 * @parent: host to scan 1752 * @channel: channel to scan 1753 * @id: target id to scan 1754 * @lun: Specific LUN to scan or SCAN_WILD_CARD 1755 * @rescan: passed to LUN scanning routines; SCSI_SCAN_INITIAL for 1756 * no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs, 1757 * and SCSI_SCAN_MANUAL to force scanning even if 1758 * 'scan=manual' is set. 1759 * 1760 * Description: 1761 * Scan the target id on @parent, @channel, and @id. Scan at least LUN 0, 1762 * and possibly all LUNs on the target id. 1763 * 1764 * First try a REPORT LUN scan, if that does not scan the target, do a 1765 * sequential scan of LUNs on the target id. 
1766 **/ 1767 void scsi_scan_target(struct device *parent, unsigned int channel, 1768 unsigned int id, u64 lun, enum scsi_scan_mode rescan) 1769 { 1770 struct Scsi_Host *shost = dev_to_shost(parent); 1771 1772 if (strncmp(scsi_scan_type, "none", 4) == 0) 1773 return; 1774 1775 if (rescan != SCSI_SCAN_MANUAL && 1776 strncmp(scsi_scan_type, "manual", 6) == 0) 1777 return; 1778 1779 mutex_lock(&shost->scan_mutex); 1780 if (!shost->async_scan) 1781 scsi_complete_async_scans(); 1782 1783 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) { 1784 __scsi_scan_target(parent, channel, id, lun, rescan); 1785 scsi_autopm_put_host(shost); 1786 } 1787 mutex_unlock(&shost->scan_mutex); 1788 } 1789 EXPORT_SYMBOL(scsi_scan_target); 1790 1791 static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel, 1792 unsigned int id, u64 lun, 1793 enum scsi_scan_mode rescan) 1794 { 1795 uint order_id; 1796 1797 if (id == SCAN_WILD_CARD) 1798 for (id = 0; id < shost->max_id; ++id) { 1799 /* 1800 * XXX adapter drivers when possible (FCP, iSCSI) 1801 * could modify max_id to match the current max, 1802 * not the absolute max. 1803 * 1804 * XXX add a shost id iterator, so for example, 1805 * the FC ID can be the same as a target id 1806 * without a huge overhead of sparse id's. 1807 */ 1808 if (shost->reverse_ordering) 1809 /* 1810 * Scan from high to low id. 1811 */ 1812 order_id = shost->max_id - id - 1; 1813 else 1814 order_id = id; 1815 __scsi_scan_target(&shost->shost_gendev, channel, 1816 order_id, lun, rescan); 1817 } 1818 else 1819 __scsi_scan_target(&shost->shost_gendev, channel, 1820 id, lun, rescan); 1821 } 1822 1823 int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel, 1824 unsigned int id, u64 lun, 1825 enum scsi_scan_mode rescan) 1826 { 1827 SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost, 1828 "%s: <%u:%u:%llu>\n", 1829 __func__, channel, id, lun)); 1830 1831 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) || 1832 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) || 1833 ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun))) 1834 return -EINVAL; 1835 1836 mutex_lock(&shost->scan_mutex); 1837 if (!shost->async_scan) 1838 scsi_complete_async_scans(); 1839 1840 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) { 1841 if (channel == SCAN_WILD_CARD) 1842 for (channel = 0; channel <= shost->max_channel; 1843 channel++) 1844 scsi_scan_channel(shost, channel, id, lun, 1845 rescan); 1846 else 1847 scsi_scan_channel(shost, channel, id, lun, rescan); 1848 scsi_autopm_put_host(shost); 1849 } 1850 mutex_unlock(&shost->scan_mutex); 1851 1852 return 0; 1853 } 1854 1855 static void scsi_sysfs_add_devices(struct Scsi_Host *shost) 1856 { 1857 struct scsi_device *sdev; 1858 shost_for_each_device(sdev, shost) { 1859 /* target removed before the device could be added */ 1860 if (sdev->sdev_state == SDEV_DEL) 1861 continue; 1862 /* If device is already visible, skip adding it to sysfs */ 1863 if (sdev->is_visible) 1864 continue; 1865 if (!scsi_host_scan_allowed(shost) || 1866 scsi_sysfs_add_sdev(sdev) != 0) 1867 __scsi_remove_device(sdev); 1868 } 1869 } 1870 1871 /** 1872 * scsi_prep_async_scan - prepare for an async scan 1873 * @shost: the host which will be scanned 1874 * Returns: a cookie to be passed to scsi_finish_async_scan() 1875 * 1876 * Tells the midlayer this host is going to do an asynchronous scan. 
 * It reserves the host's position in the scanning list and ensures
 * that other asynchronous scans started after this one won't affect the
 * ordering of the discovered devices.
 */
static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
{
        struct async_scan_data *data = NULL;
        unsigned long flags;

        if (strncmp(scsi_scan_type, "sync", 4) == 0)
                return NULL;

        mutex_lock(&shost->scan_mutex);
        if (shost->async_scan) {
                shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
                goto err;
        }

        data = kmalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                goto err;
        data->shost = scsi_host_get(shost);
        if (!data->shost)
                goto err;
        init_completion(&data->prev_finished);

        spin_lock_irqsave(shost->host_lock, flags);
        shost->async_scan = 1;
        spin_unlock_irqrestore(shost->host_lock, flags);
        mutex_unlock(&shost->scan_mutex);

        spin_lock(&async_scan_lock);
        if (list_empty(&scanning_hosts))
                complete(&data->prev_finished);
        list_add_tail(&data->list, &scanning_hosts);
        spin_unlock(&async_scan_lock);

        return data;

 err:
        mutex_unlock(&shost->scan_mutex);
        kfree(data);
        return NULL;
}

/**
 * scsi_finish_async_scan - asynchronous scan has finished
 * @data: cookie returned from earlier call to scsi_prep_async_scan()
 *
 * All the devices currently attached to this host have been found.
 * This function announces all the devices it has found to the rest
 * of the system.
 */
static void scsi_finish_async_scan(struct async_scan_data *data)
{
        struct Scsi_Host *shost;
        unsigned long flags;

        if (!data)
                return;

        shost = data->shost;

        mutex_lock(&shost->scan_mutex);

        if (!shost->async_scan) {
                shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
                dump_stack();
                mutex_unlock(&shost->scan_mutex);
                return;
        }

        wait_for_completion(&data->prev_finished);

        scsi_sysfs_add_devices(shost);

        spin_lock_irqsave(shost->host_lock, flags);
        shost->async_scan = 0;
        spin_unlock_irqrestore(shost->host_lock, flags);

        mutex_unlock(&shost->scan_mutex);

        spin_lock(&async_scan_lock);
        list_del(&data->list);
        if (!list_empty(&scanning_hosts)) {
                struct async_scan_data *next = list_entry(scanning_hosts.next,
                                struct async_scan_data, list);
                complete(&next->prev_finished);
        }
        spin_unlock(&async_scan_lock);

        scsi_autopm_put_host(shost);
        scsi_host_put(shost);
        kfree(data);
}

static void do_scsi_scan_host(struct Scsi_Host *shost)
{
        if (shost->hostt->scan_finished) {
                unsigned long start = jiffies;
                if (shost->hostt->scan_start)
                        shost->hostt->scan_start(shost);

                while (!shost->hostt->scan_finished(shost, jiffies - start))
                        msleep(10);
        } else {
                scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
                                SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
        }
}

static void do_scan_async(void *_data, async_cookie_t c)
{
        struct async_scan_data *data = _data;
        struct Scsi_Host *shost = data->shost;

        do_scsi_scan_host(shost);
        scsi_finish_async_scan(data);
}

/**
 * scsi_scan_host - scan the given adapter
 * @shost:      adapter to scan
 **/
void scsi_scan_host(struct Scsi_Host *shost)
{
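        /*
         * Summary of the flow below: nothing is scanned when the "scan"
         * parameter is "none" or "manual", or if the host cannot be
         * runtime-resumed.  Otherwise the scan either runs asynchronously
         * (scsi_prep_async_scan() handed back a cookie and the work is
         * queued on the async subsystem) or, when async scanning is
         * disabled, synchronously right here via do_scsi_scan_host().
         */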
        struct async_scan_data *data;

        if (strncmp(scsi_scan_type, "none", 4) == 0 ||
            strncmp(scsi_scan_type, "manual", 6) == 0)
                return;
        if (scsi_autopm_get_host(shost) < 0)
                return;

        data = scsi_prep_async_scan(shost);
        if (!data) {
                do_scsi_scan_host(shost);
                scsi_autopm_put_host(shost);
                return;
        }

        /* register with the async subsystem so wait_for_device_probe()
         * will flush this work
         */
        async_schedule(do_scan_async, data);

        /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
}
EXPORT_SYMBOL(scsi_scan_host);

void scsi_forget_host(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;
        unsigned long flags;

 restart:
        spin_lock_irqsave(shost->host_lock, flags);
        list_for_each_entry(sdev, &shost->__devices, siblings) {
                if (sdev->sdev_state == SDEV_DEL)
                        continue;
                spin_unlock_irqrestore(shost->host_lock, flags);
                __scsi_remove_device(sdev);
                goto restart;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);
}
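
/*
 * Illustrative sketch (not part of this file's build): a typical low-level
 * driver drives the scanning code above by registering its host and then
 * kicking off a scan from its probe routine.  The names my_hba_probe and
 * my_template below are placeholders, not real kernel symbols:
 *
 *     static int my_hba_probe(struct device *dev)
 *     {
 *             struct Scsi_Host *shost;
 *             int err;
 *
 *             shost = scsi_host_alloc(&my_template, 0);
 *             if (!shost)
 *                     return -ENOMEM;
 *
 *             err = scsi_add_host(shost, dev);
 *             if (err) {
 *                     scsi_host_put(shost);
 *                     return err;
 *             }
 *
 *             scsi_scan_host(shost);
 *             return 0;
 *     }
 *
 * scsi_scan_host() then either scans synchronously or queues the scan on
 * the async subsystem, depending on the "scan" module parameter.
 */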