1 /* 2 * scsi_sysfs.c 3 * 4 * SCSI sysfs interface routines. 5 * 6 * Created to pull SCSI mid layer sysfs routines into one file. 7 */ 8 9 #include <linux/module.h> 10 #include <linux/slab.h> 11 #include <linux/init.h> 12 #include <linux/blkdev.h> 13 #include <linux/device.h> 14 #include <linux/pm_runtime.h> 15 16 #include <scsi/scsi.h> 17 #include <scsi/scsi_device.h> 18 #include <scsi/scsi_host.h> 19 #include <scsi/scsi_tcq.h> 20 #include <scsi/scsi_transport.h> 21 #include <scsi/scsi_driver.h> 22 23 #include "scsi_priv.h" 24 #include "scsi_logging.h" 25 26 static struct device_type scsi_dev_type; 27 28 static const struct { 29 enum scsi_device_state value; 30 char *name; 31 } sdev_states[] = { 32 { SDEV_CREATED, "created" }, 33 { SDEV_RUNNING, "running" }, 34 { SDEV_CANCEL, "cancel" }, 35 { SDEV_DEL, "deleted" }, 36 { SDEV_QUIESCE, "quiesce" }, 37 { SDEV_OFFLINE, "offline" }, 38 { SDEV_TRANSPORT_OFFLINE, "transport-offline" }, 39 { SDEV_BLOCK, "blocked" }, 40 { SDEV_CREATED_BLOCK, "created-blocked" }, 41 }; 42 43 const char *scsi_device_state_name(enum scsi_device_state state) 44 { 45 int i; 46 char *name = NULL; 47 48 for (i = 0; i < ARRAY_SIZE(sdev_states); i++) { 49 if (sdev_states[i].value == state) { 50 name = sdev_states[i].name; 51 break; 52 } 53 } 54 return name; 55 } 56 57 static const struct { 58 enum scsi_host_state value; 59 char *name; 60 } shost_states[] = { 61 { SHOST_CREATED, "created" }, 62 { SHOST_RUNNING, "running" }, 63 { SHOST_CANCEL, "cancel" }, 64 { SHOST_DEL, "deleted" }, 65 { SHOST_RECOVERY, "recovery" }, 66 { SHOST_CANCEL_RECOVERY, "cancel/recovery" }, 67 { SHOST_DEL_RECOVERY, "deleted/recovery", }, 68 }; 69 const char *scsi_host_state_name(enum scsi_host_state state) 70 { 71 int i; 72 char *name = NULL; 73 74 for (i = 0; i < ARRAY_SIZE(shost_states); i++) { 75 if (shost_states[i].value == state) { 76 name = shost_states[i].name; 77 break; 78 } 79 } 80 return name; 81 } 82 83 static int check_set(unsigned int *val, char *src) 84 { 85 char *last; 86 87 if (strncmp(src, "-", 20) == 0) { 88 *val = SCAN_WILD_CARD; 89 } else { 90 /* 91 * Doesn't check for int overflow 92 */ 93 *val = simple_strtoul(src, &last, 0); 94 if (*last != '\0') 95 return 1; 96 } 97 return 0; 98 } 99 100 static int scsi_scan(struct Scsi_Host *shost, const char *str) 101 { 102 char s1[15], s2[15], s3[15], junk; 103 unsigned int channel, id, lun; 104 int res; 105 106 res = sscanf(str, "%10s %10s %10s %c", s1, s2, s3, &junk); 107 if (res != 3) 108 return -EINVAL; 109 if (check_set(&channel, s1)) 110 return -EINVAL; 111 if (check_set(&id, s2)) 112 return -EINVAL; 113 if (check_set(&lun, s3)) 114 return -EINVAL; 115 if (shost->transportt->user_scan) 116 res = shost->transportt->user_scan(shost, channel, id, lun); 117 else 118 res = scsi_scan_host_selected(shost, channel, id, lun, 1); 119 return res; 120 } 121 122 /* 123 * shost_show_function: macro to create an attr function that can be used to 124 * show a non-bit field. 125 */ 126 #define shost_show_function(name, field, format_string) \ 127 static ssize_t \ 128 show_##name (struct device *dev, struct device_attribute *attr, \ 129 char *buf) \ 130 { \ 131 struct Scsi_Host *shost = class_to_shost(dev); \ 132 return snprintf (buf, 20, format_string, shost->field); \ 133 } 134 135 /* 136 * shost_rd_attr: macro to create a function and attribute variable for a 137 * read only field. 138 */ 139 #define shost_rd_attr2(name, field, format_string) \ 140 shost_show_function(name, field, format_string) \ 141 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL); 142 143 #define shost_rd_attr(field, format_string) \ 144 shost_rd_attr2(field, field, format_string) 145 146 /* 147 * Create the actual show/store functions and data structures. 148 */ 149 150 static ssize_t 151 store_scan(struct device *dev, struct device_attribute *attr, 152 const char *buf, size_t count) 153 { 154 struct Scsi_Host *shost = class_to_shost(dev); 155 int res; 156 157 res = scsi_scan(shost, buf); 158 if (res == 0) 159 res = count; 160 return res; 161 }; 162 static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan); 163 164 static ssize_t 165 store_shost_state(struct device *dev, struct device_attribute *attr, 166 const char *buf, size_t count) 167 { 168 int i; 169 struct Scsi_Host *shost = class_to_shost(dev); 170 enum scsi_host_state state = 0; 171 172 for (i = 0; i < ARRAY_SIZE(shost_states); i++) { 173 const int len = strlen(shost_states[i].name); 174 if (strncmp(shost_states[i].name, buf, len) == 0 && 175 buf[len] == '\n') { 176 state = shost_states[i].value; 177 break; 178 } 179 } 180 if (!state) 181 return -EINVAL; 182 183 if (scsi_host_set_state(shost, state)) 184 return -EINVAL; 185 return count; 186 } 187 188 static ssize_t 189 show_shost_state(struct device *dev, struct device_attribute *attr, char *buf) 190 { 191 struct Scsi_Host *shost = class_to_shost(dev); 192 const char *name = scsi_host_state_name(shost->shost_state); 193 194 if (!name) 195 return -EINVAL; 196 197 return snprintf(buf, 20, "%s\n", name); 198 } 199 200 /* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */ 201 struct device_attribute dev_attr_hstate = 202 __ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state); 203 204 static ssize_t 205 show_shost_mode(unsigned int mode, char *buf) 206 { 207 ssize_t len = 0; 208 209 if (mode & MODE_INITIATOR) 210 len = sprintf(buf, "%s", "Initiator"); 211 212 if (mode & MODE_TARGET) 213 len += sprintf(buf + len, "%s%s", len ? ", " : "", "Target"); 214 215 len += sprintf(buf + len, "\n"); 216 217 return len; 218 } 219 220 static ssize_t 221 show_shost_supported_mode(struct device *dev, struct device_attribute *attr, 222 char *buf) 223 { 224 struct Scsi_Host *shost = class_to_shost(dev); 225 unsigned int supported_mode = shost->hostt->supported_mode; 226 227 if (supported_mode == MODE_UNKNOWN) 228 /* by default this should be initiator */ 229 supported_mode = MODE_INITIATOR; 230 231 return show_shost_mode(supported_mode, buf); 232 } 233 234 static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL); 235 236 static ssize_t 237 show_shost_active_mode(struct device *dev, 238 struct device_attribute *attr, char *buf) 239 { 240 struct Scsi_Host *shost = class_to_shost(dev); 241 242 if (shost->active_mode == MODE_UNKNOWN) 243 return snprintf(buf, 20, "unknown\n"); 244 else 245 return show_shost_mode(shost->active_mode, buf); 246 } 247 248 static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL); 249 250 static int check_reset_type(const char *str) 251 { 252 if (sysfs_streq(str, "adapter")) 253 return SCSI_ADAPTER_RESET; 254 else if (sysfs_streq(str, "firmware")) 255 return SCSI_FIRMWARE_RESET; 256 else 257 return 0; 258 } 259 260 static ssize_t 261 store_host_reset(struct device *dev, struct device_attribute *attr, 262 const char *buf, size_t count) 263 { 264 struct Scsi_Host *shost = class_to_shost(dev); 265 struct scsi_host_template *sht = shost->hostt; 266 int ret = -EINVAL; 267 int type; 268 269 type = check_reset_type(buf); 270 if (!type) 271 goto exit_store_host_reset; 272 273 if (sht->host_reset) 274 ret = sht->host_reset(shost, type); 275 276 exit_store_host_reset: 277 if (ret == 0) 278 ret = count; 279 return ret; 280 } 281 282 static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset); 283 284 static ssize_t 285 show_shost_eh_deadline(struct device *dev, 286 struct device_attribute *attr, char *buf) 287 { 288 struct Scsi_Host *shost = class_to_shost(dev); 289 290 return sprintf(buf, "%d\n", shost->eh_deadline / HZ); 291 } 292 293 static ssize_t 294 store_shost_eh_deadline(struct device *dev, struct device_attribute *attr, 295 const char *buf, size_t count) 296 { 297 struct Scsi_Host *shost = class_to_shost(dev); 298 int ret = -EINVAL; 299 int deadline; 300 unsigned long flags; 301 302 if (shost->transportt && shost->transportt->eh_strategy_handler) 303 return ret; 304 305 if (sscanf(buf, "%d\n", &deadline) == 1) { 306 spin_lock_irqsave(shost->host_lock, flags); 307 if (scsi_host_in_recovery(shost)) 308 ret = -EBUSY; 309 else { 310 shost->eh_deadline = deadline * HZ; 311 ret = count; 312 } 313 spin_unlock_irqrestore(shost->host_lock, flags); 314 } 315 return ret; 316 } 317 318 static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline); 319 320 shost_rd_attr(unique_id, "%u\n"); 321 shost_rd_attr(host_busy, "%hu\n"); 322 shost_rd_attr(cmd_per_lun, "%hd\n"); 323 shost_rd_attr(can_queue, "%hd\n"); 324 shost_rd_attr(sg_tablesize, "%hu\n"); 325 shost_rd_attr(sg_prot_tablesize, "%hu\n"); 326 shost_rd_attr(unchecked_isa_dma, "%d\n"); 327 shost_rd_attr(prot_capabilities, "%u\n"); 328 shost_rd_attr(prot_guard_type, "%hd\n"); 329 shost_rd_attr2(proc_name, hostt->proc_name, "%s\n"); 330 331 static struct attribute *scsi_sysfs_shost_attrs[] = { 332 &dev_attr_unique_id.attr, 333 &dev_attr_host_busy.attr, 334 &dev_attr_cmd_per_lun.attr, 335 &dev_attr_can_queue.attr, 336 &dev_attr_sg_tablesize.attr, 337 &dev_attr_sg_prot_tablesize.attr, 338 &dev_attr_unchecked_isa_dma.attr, 339 &dev_attr_proc_name.attr, 340 &dev_attr_scan.attr, 341 &dev_attr_hstate.attr, 342 &dev_attr_supported_mode.attr, 343 &dev_attr_active_mode.attr, 344 &dev_attr_prot_capabilities.attr, 345 &dev_attr_prot_guard_type.attr, 346 &dev_attr_host_reset.attr, 347 &dev_attr_eh_deadline.attr, 348 NULL 349 }; 350 351 struct attribute_group scsi_shost_attr_group = { 352 .attrs = scsi_sysfs_shost_attrs, 353 }; 354 355 const struct attribute_group *scsi_sysfs_shost_attr_groups[] = { 356 &scsi_shost_attr_group, 357 NULL 358 }; 359 360 static void scsi_device_cls_release(struct device *class_dev) 361 { 362 struct scsi_device *sdev; 363 364 sdev = class_to_sdev(class_dev); 365 put_device(&sdev->sdev_gendev); 366 } 367 368 static void scsi_device_dev_release_usercontext(struct work_struct *work) 369 { 370 struct scsi_device *sdev; 371 struct device *parent; 372 struct scsi_target *starget; 373 struct list_head *this, *tmp; 374 unsigned long flags; 375 376 sdev = container_of(work, struct scsi_device, ew.work); 377 378 parent = sdev->sdev_gendev.parent; 379 starget = to_scsi_target(parent); 380 381 spin_lock_irqsave(sdev->host->host_lock, flags); 382 starget->reap_ref++; 383 list_del(&sdev->siblings); 384 list_del(&sdev->same_target_siblings); 385 list_del(&sdev->starved_entry); 386 spin_unlock_irqrestore(sdev->host->host_lock, flags); 387 388 cancel_work_sync(&sdev->event_work); 389 390 list_for_each_safe(this, tmp, &sdev->event_list) { 391 struct scsi_event *evt; 392 393 evt = list_entry(this, struct scsi_event, node); 394 list_del(&evt->node); 395 kfree(evt); 396 } 397 398 blk_put_queue(sdev->request_queue); 399 /* NULL queue means the device can't be used */ 400 sdev->request_queue = NULL; 401 402 scsi_target_reap(scsi_target(sdev)); 403 404 kfree(sdev->inquiry); 405 kfree(sdev); 406 407 if (parent) 408 put_device(parent); 409 } 410 411 static void scsi_device_dev_release(struct device *dev) 412 { 413 struct scsi_device *sdp = to_scsi_device(dev); 414 execute_in_process_context(scsi_device_dev_release_usercontext, 415 &sdp->ew); 416 } 417 418 static struct class sdev_class = { 419 .name = "scsi_device", 420 .dev_release = scsi_device_cls_release, 421 }; 422 423 /* all probing is done in the individual ->probe routines */ 424 static int scsi_bus_match(struct device *dev, struct device_driver *gendrv) 425 { 426 struct scsi_device *sdp; 427 428 if (dev->type != &scsi_dev_type) 429 return 0; 430 431 sdp = to_scsi_device(dev); 432 if (sdp->no_uld_attach) 433 return 0; 434 return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0; 435 } 436 437 static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env) 438 { 439 struct scsi_device *sdev; 440 441 if (dev->type != &scsi_dev_type) 442 return 0; 443 444 sdev = to_scsi_device(dev); 445 446 add_uevent_var(env, "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type); 447 return 0; 448 } 449 450 struct bus_type scsi_bus_type = { 451 .name = "scsi", 452 .match = scsi_bus_match, 453 .uevent = scsi_bus_uevent, 454 #ifdef CONFIG_PM 455 .pm = &scsi_bus_pm_ops, 456 #endif 457 }; 458 EXPORT_SYMBOL_GPL(scsi_bus_type); 459 460 int scsi_sysfs_register(void) 461 { 462 int error; 463 464 error = bus_register(&scsi_bus_type); 465 if (!error) { 466 error = class_register(&sdev_class); 467 if (error) 468 bus_unregister(&scsi_bus_type); 469 } 470 471 return error; 472 } 473 474 void scsi_sysfs_unregister(void) 475 { 476 class_unregister(&sdev_class); 477 bus_unregister(&scsi_bus_type); 478 } 479 480 /* 481 * sdev_show_function: macro to create an attr function that can be used to 482 * show a non-bit field. 483 */ 484 #define sdev_show_function(field, format_string) \ 485 static ssize_t \ 486 sdev_show_##field (struct device *dev, struct device_attribute *attr, \ 487 char *buf) \ 488 { \ 489 struct scsi_device *sdev; \ 490 sdev = to_scsi_device(dev); \ 491 return snprintf (buf, 20, format_string, sdev->field); \ 492 } \ 493 494 /* 495 * sdev_rd_attr: macro to create a function and attribute variable for a 496 * read only field. 497 */ 498 #define sdev_rd_attr(field, format_string) \ 499 sdev_show_function(field, format_string) \ 500 static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL); 501 502 503 /* 504 * sdev_rw_attr: create a function and attribute variable for a 505 * read/write field. 506 */ 507 #define sdev_rw_attr(field, format_string) \ 508 sdev_show_function(field, format_string) \ 509 \ 510 static ssize_t \ 511 sdev_store_##field (struct device *dev, struct device_attribute *attr, \ 512 const char *buf, size_t count) \ 513 { \ 514 struct scsi_device *sdev; \ 515 sdev = to_scsi_device(dev); \ 516 sscanf (buf, format_string, &sdev->field); \ 517 return count; \ 518 } \ 519 static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field); 520 521 /* Currently we don't export bit fields, but we might in future, 522 * so leave this code in */ 523 #if 0 524 /* 525 * sdev_rd_attr: create a function and attribute variable for a 526 * read/write bit field. 527 */ 528 #define sdev_rw_attr_bit(field) \ 529 sdev_show_function(field, "%d\n") \ 530 \ 531 static ssize_t \ 532 sdev_store_##field (struct device *dev, struct device_attribute *attr, \ 533 const char *buf, size_t count) \ 534 { \ 535 int ret; \ 536 struct scsi_device *sdev; \ 537 ret = scsi_sdev_check_buf_bit(buf); \ 538 if (ret >= 0) { \ 539 sdev = to_scsi_device(dev); \ 540 sdev->field = ret; \ 541 ret = count; \ 542 } \ 543 return ret; \ 544 } \ 545 static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field); 546 547 /* 548 * scsi_sdev_check_buf_bit: return 0 if buf is "0", return 1 if buf is "1", 549 * else return -EINVAL. 550 */ 551 static int scsi_sdev_check_buf_bit(const char *buf) 552 { 553 if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) { 554 if (buf[0] == '1') 555 return 1; 556 else if (buf[0] == '0') 557 return 0; 558 else 559 return -EINVAL; 560 } else 561 return -EINVAL; 562 } 563 #endif 564 /* 565 * Create the actual show/store functions and data structures. 566 */ 567 sdev_rd_attr (device_blocked, "%d\n"); 568 sdev_rd_attr (queue_depth, "%d\n"); 569 sdev_rd_attr (device_busy, "%d\n"); 570 sdev_rd_attr (type, "%d\n"); 571 sdev_rd_attr (scsi_level, "%d\n"); 572 sdev_rd_attr (vendor, "%.8s\n"); 573 sdev_rd_attr (model, "%.16s\n"); 574 sdev_rd_attr (rev, "%.4s\n"); 575 576 /* 577 * TODO: can we make these symlinks to the block layer ones? 578 */ 579 static ssize_t 580 sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf) 581 { 582 struct scsi_device *sdev; 583 sdev = to_scsi_device(dev); 584 return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ); 585 } 586 587 static ssize_t 588 sdev_store_timeout (struct device *dev, struct device_attribute *attr, 589 const char *buf, size_t count) 590 { 591 struct scsi_device *sdev; 592 int timeout; 593 sdev = to_scsi_device(dev); 594 sscanf (buf, "%d\n", &timeout); 595 blk_queue_rq_timeout(sdev->request_queue, timeout * HZ); 596 return count; 597 } 598 static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout); 599 600 static ssize_t 601 sdev_show_eh_timeout(struct device *dev, struct device_attribute *attr, char *buf) 602 { 603 struct scsi_device *sdev; 604 sdev = to_scsi_device(dev); 605 return snprintf(buf, 20, "%u\n", sdev->eh_timeout / HZ); 606 } 607 608 static ssize_t 609 sdev_store_eh_timeout(struct device *dev, struct device_attribute *attr, 610 const char *buf, size_t count) 611 { 612 struct scsi_device *sdev; 613 unsigned int eh_timeout; 614 int err; 615 616 if (!capable(CAP_SYS_ADMIN)) 617 return -EACCES; 618 619 sdev = to_scsi_device(dev); 620 err = kstrtouint(buf, 10, &eh_timeout); 621 if (err) 622 return err; 623 sdev->eh_timeout = eh_timeout * HZ; 624 625 return count; 626 } 627 static DEVICE_ATTR(eh_timeout, S_IRUGO | S_IWUSR, sdev_show_eh_timeout, sdev_store_eh_timeout); 628 629 static ssize_t 630 store_rescan_field (struct device *dev, struct device_attribute *attr, 631 const char *buf, size_t count) 632 { 633 scsi_rescan_device(dev); 634 return count; 635 } 636 static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field); 637 638 static void sdev_store_delete_callback(struct device *dev) 639 { 640 scsi_remove_device(to_scsi_device(dev)); 641 } 642 643 static ssize_t 644 sdev_store_delete(struct device *dev, struct device_attribute *attr, 645 const char *buf, size_t count) 646 { 647 int rc; 648 649 /* An attribute cannot be unregistered by one of its own methods, 650 * so we have to use this roundabout approach. 651 */ 652 rc = device_schedule_callback(dev, sdev_store_delete_callback); 653 if (rc) 654 count = rc; 655 return count; 656 }; 657 static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); 658 659 static ssize_t 660 store_state_field(struct device *dev, struct device_attribute *attr, 661 const char *buf, size_t count) 662 { 663 int i; 664 struct scsi_device *sdev = to_scsi_device(dev); 665 enum scsi_device_state state = 0; 666 667 for (i = 0; i < ARRAY_SIZE(sdev_states); i++) { 668 const int len = strlen(sdev_states[i].name); 669 if (strncmp(sdev_states[i].name, buf, len) == 0 && 670 buf[len] == '\n') { 671 state = sdev_states[i].value; 672 break; 673 } 674 } 675 if (!state) 676 return -EINVAL; 677 678 if (scsi_device_set_state(sdev, state)) 679 return -EINVAL; 680 return count; 681 } 682 683 static ssize_t 684 show_state_field(struct device *dev, struct device_attribute *attr, char *buf) 685 { 686 struct scsi_device *sdev = to_scsi_device(dev); 687 const char *name = scsi_device_state_name(sdev->sdev_state); 688 689 if (!name) 690 return -EINVAL; 691 692 return snprintf(buf, 20, "%s\n", name); 693 } 694 695 static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_state_field, store_state_field); 696 697 static ssize_t 698 show_queue_type_field(struct device *dev, struct device_attribute *attr, 699 char *buf) 700 { 701 struct scsi_device *sdev = to_scsi_device(dev); 702 const char *name = "none"; 703 704 if (sdev->ordered_tags) 705 name = "ordered"; 706 else if (sdev->simple_tags) 707 name = "simple"; 708 709 return snprintf(buf, 20, "%s\n", name); 710 } 711 712 static DEVICE_ATTR(queue_type, S_IRUGO, show_queue_type_field, NULL); 713 714 static ssize_t 715 show_iostat_counterbits(struct device *dev, struct device_attribute *attr, char *buf) 716 { 717 return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8); 718 } 719 720 static DEVICE_ATTR(iocounterbits, S_IRUGO, show_iostat_counterbits, NULL); 721 722 #define show_sdev_iostat(field) \ 723 static ssize_t \ 724 show_iostat_##field(struct device *dev, struct device_attribute *attr, \ 725 char *buf) \ 726 { \ 727 struct scsi_device *sdev = to_scsi_device(dev); \ 728 unsigned long long count = atomic_read(&sdev->field); \ 729 return snprintf(buf, 20, "0x%llx\n", count); \ 730 } \ 731 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL) 732 733 show_sdev_iostat(iorequest_cnt); 734 show_sdev_iostat(iodone_cnt); 735 show_sdev_iostat(ioerr_cnt); 736 737 static ssize_t 738 sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf) 739 { 740 struct scsi_device *sdev; 741 sdev = to_scsi_device(dev); 742 return snprintf (buf, 20, SCSI_DEVICE_MODALIAS_FMT "\n", sdev->type); 743 } 744 static DEVICE_ATTR(modalias, S_IRUGO, sdev_show_modalias, NULL); 745 746 #define DECLARE_EVT_SHOW(name, Cap_name) \ 747 static ssize_t \ 748 sdev_show_evt_##name(struct device *dev, struct device_attribute *attr, \ 749 char *buf) \ 750 { \ 751 struct scsi_device *sdev = to_scsi_device(dev); \ 752 int val = test_bit(SDEV_EVT_##Cap_name, sdev->supported_events);\ 753 return snprintf(buf, 20, "%d\n", val); \ 754 } 755 756 #define DECLARE_EVT_STORE(name, Cap_name) \ 757 static ssize_t \ 758 sdev_store_evt_##name(struct device *dev, struct device_attribute *attr,\ 759 const char *buf, size_t count) \ 760 { \ 761 struct scsi_device *sdev = to_scsi_device(dev); \ 762 int val = simple_strtoul(buf, NULL, 0); \ 763 if (val == 0) \ 764 clear_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \ 765 else if (val == 1) \ 766 set_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \ 767 else \ 768 return -EINVAL; \ 769 return count; \ 770 } 771 772 #define DECLARE_EVT(name, Cap_name) \ 773 DECLARE_EVT_SHOW(name, Cap_name) \ 774 DECLARE_EVT_STORE(name, Cap_name) \ 775 static DEVICE_ATTR(evt_##name, S_IRUGO, sdev_show_evt_##name, \ 776 sdev_store_evt_##name); 777 #define REF_EVT(name) &dev_attr_evt_##name.attr 778 779 DECLARE_EVT(media_change, MEDIA_CHANGE) 780 DECLARE_EVT(inquiry_change_reported, INQUIRY_CHANGE_REPORTED) 781 DECLARE_EVT(capacity_change_reported, CAPACITY_CHANGE_REPORTED) 782 DECLARE_EVT(soft_threshold_reached, SOFT_THRESHOLD_REACHED_REPORTED) 783 DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED) 784 DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED) 785 786 /* Default template for device attributes. May NOT be modified */ 787 static struct attribute *scsi_sdev_attrs[] = { 788 &dev_attr_device_blocked.attr, 789 &dev_attr_type.attr, 790 &dev_attr_scsi_level.attr, 791 &dev_attr_device_busy.attr, 792 &dev_attr_vendor.attr, 793 &dev_attr_model.attr, 794 &dev_attr_rev.attr, 795 &dev_attr_rescan.attr, 796 &dev_attr_delete.attr, 797 &dev_attr_state.attr, 798 &dev_attr_timeout.attr, 799 &dev_attr_eh_timeout.attr, 800 &dev_attr_iocounterbits.attr, 801 &dev_attr_iorequest_cnt.attr, 802 &dev_attr_iodone_cnt.attr, 803 &dev_attr_ioerr_cnt.attr, 804 &dev_attr_modalias.attr, 805 REF_EVT(media_change), 806 REF_EVT(inquiry_change_reported), 807 REF_EVT(capacity_change_reported), 808 REF_EVT(soft_threshold_reached), 809 REF_EVT(mode_parameter_change_reported), 810 REF_EVT(lun_change_reported), 811 NULL 812 }; 813 814 static struct attribute_group scsi_sdev_attr_group = { 815 .attrs = scsi_sdev_attrs, 816 }; 817 818 static const struct attribute_group *scsi_sdev_attr_groups[] = { 819 &scsi_sdev_attr_group, 820 NULL 821 }; 822 823 static ssize_t 824 sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr, 825 const char *buf, size_t count) 826 { 827 int depth, retval; 828 struct scsi_device *sdev = to_scsi_device(dev); 829 struct scsi_host_template *sht = sdev->host->hostt; 830 831 if (!sht->change_queue_depth) 832 return -EINVAL; 833 834 depth = simple_strtoul(buf, NULL, 0); 835 836 if (depth < 1) 837 return -EINVAL; 838 839 retval = sht->change_queue_depth(sdev, depth, 840 SCSI_QDEPTH_DEFAULT); 841 if (retval < 0) 842 return retval; 843 844 sdev->max_queue_depth = sdev->queue_depth; 845 846 return count; 847 } 848 849 static struct device_attribute sdev_attr_queue_depth_rw = 850 __ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth, 851 sdev_store_queue_depth_rw); 852 853 static ssize_t 854 sdev_show_queue_ramp_up_period(struct device *dev, 855 struct device_attribute *attr, 856 char *buf) 857 { 858 struct scsi_device *sdev; 859 sdev = to_scsi_device(dev); 860 return snprintf(buf, 20, "%u\n", 861 jiffies_to_msecs(sdev->queue_ramp_up_period)); 862 } 863 864 static ssize_t 865 sdev_store_queue_ramp_up_period(struct device *dev, 866 struct device_attribute *attr, 867 const char *buf, size_t count) 868 { 869 struct scsi_device *sdev = to_scsi_device(dev); 870 unsigned long period; 871 872 if (strict_strtoul(buf, 10, &period)) 873 return -EINVAL; 874 875 sdev->queue_ramp_up_period = msecs_to_jiffies(period); 876 return period; 877 } 878 879 static struct device_attribute sdev_attr_queue_ramp_up_period = 880 __ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR, 881 sdev_show_queue_ramp_up_period, 882 sdev_store_queue_ramp_up_period); 883 884 static ssize_t 885 sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr, 886 const char *buf, size_t count) 887 { 888 struct scsi_device *sdev = to_scsi_device(dev); 889 struct scsi_host_template *sht = sdev->host->hostt; 890 int tag_type = 0, retval; 891 int prev_tag_type = scsi_get_tag_type(sdev); 892 893 if (!sdev->tagged_supported || !sht->change_queue_type) 894 return -EINVAL; 895 896 if (strncmp(buf, "ordered", 7) == 0) 897 tag_type = MSG_ORDERED_TAG; 898 else if (strncmp(buf, "simple", 6) == 0) 899 tag_type = MSG_SIMPLE_TAG; 900 else if (strncmp(buf, "none", 4) != 0) 901 return -EINVAL; 902 903 if (tag_type == prev_tag_type) 904 return count; 905 906 retval = sht->change_queue_type(sdev, tag_type); 907 if (retval < 0) 908 return retval; 909 910 return count; 911 } 912 913 static int scsi_target_add(struct scsi_target *starget) 914 { 915 int error; 916 917 if (starget->state != STARGET_CREATED) 918 return 0; 919 920 error = device_add(&starget->dev); 921 if (error) { 922 dev_err(&starget->dev, "target device_add failed, error %d\n", error); 923 return error; 924 } 925 transport_add_device(&starget->dev); 926 starget->state = STARGET_RUNNING; 927 928 pm_runtime_set_active(&starget->dev); 929 pm_runtime_enable(&starget->dev); 930 device_enable_async_suspend(&starget->dev); 931 932 return 0; 933 } 934 935 static struct device_attribute sdev_attr_queue_type_rw = 936 __ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field, 937 sdev_store_queue_type_rw); 938 939 /** 940 * scsi_sysfs_add_sdev - add scsi device to sysfs 941 * @sdev: scsi_device to add 942 * 943 * Return value: 944 * 0 on Success / non-zero on Failure 945 **/ 946 int scsi_sysfs_add_sdev(struct scsi_device *sdev) 947 { 948 int error, i; 949 struct request_queue *rq = sdev->request_queue; 950 struct scsi_target *starget = sdev->sdev_target; 951 952 error = scsi_device_set_state(sdev, SDEV_RUNNING); 953 if (error) 954 return error; 955 956 error = scsi_target_add(starget); 957 if (error) 958 return error; 959 960 transport_configure_device(&starget->dev); 961 962 device_enable_async_suspend(&sdev->sdev_gendev); 963 scsi_autopm_get_target(starget); 964 pm_runtime_set_active(&sdev->sdev_gendev); 965 pm_runtime_forbid(&sdev->sdev_gendev); 966 pm_runtime_enable(&sdev->sdev_gendev); 967 scsi_autopm_put_target(starget); 968 969 /* The following call will keep sdev active indefinitely, until 970 * its driver does a corresponding scsi_autopm_pm_device(). Only 971 * drivers supporting autosuspend will do this. 972 */ 973 scsi_autopm_get_device(sdev); 974 975 error = device_add(&sdev->sdev_gendev); 976 if (error) { 977 sdev_printk(KERN_INFO, sdev, 978 "failed to add device: %d\n", error); 979 return error; 980 } 981 device_enable_async_suspend(&sdev->sdev_dev); 982 error = device_add(&sdev->sdev_dev); 983 if (error) { 984 sdev_printk(KERN_INFO, sdev, 985 "failed to add class device: %d\n", error); 986 device_del(&sdev->sdev_gendev); 987 return error; 988 } 989 transport_add_device(&sdev->sdev_gendev); 990 sdev->is_visible = 1; 991 992 /* create queue files, which may be writable, depending on the host */ 993 if (sdev->host->hostt->change_queue_depth) { 994 error = device_create_file(&sdev->sdev_gendev, 995 &sdev_attr_queue_depth_rw); 996 error = device_create_file(&sdev->sdev_gendev, 997 &sdev_attr_queue_ramp_up_period); 998 } 999 else 1000 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth); 1001 if (error) 1002 return error; 1003 1004 if (sdev->host->hostt->change_queue_type) 1005 error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw); 1006 else 1007 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type); 1008 if (error) 1009 return error; 1010 1011 error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL); 1012 1013 if (error) 1014 /* we're treating error on bsg register as non-fatal, 1015 * so pretend nothing went wrong */ 1016 sdev_printk(KERN_INFO, sdev, 1017 "Failed to register bsg queue, errno=%d\n", error); 1018 1019 /* add additional host specific attributes */ 1020 if (sdev->host->hostt->sdev_attrs) { 1021 for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) { 1022 error = device_create_file(&sdev->sdev_gendev, 1023 sdev->host->hostt->sdev_attrs[i]); 1024 if (error) 1025 return error; 1026 } 1027 } 1028 1029 return error; 1030 } 1031 1032 void __scsi_remove_device(struct scsi_device *sdev) 1033 { 1034 struct device *dev = &sdev->sdev_gendev; 1035 1036 if (sdev->is_visible) { 1037 if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0) 1038 return; 1039 1040 bsg_unregister_queue(sdev->request_queue); 1041 device_unregister(&sdev->sdev_dev); 1042 transport_remove_device(dev); 1043 device_del(dev); 1044 } else 1045 put_device(&sdev->sdev_dev); 1046 1047 /* 1048 * Stop accepting new requests and wait until all queuecommand() and 1049 * scsi_run_queue() invocations have finished before tearing down the 1050 * device. 1051 */ 1052 scsi_device_set_state(sdev, SDEV_DEL); 1053 blk_cleanup_queue(sdev->request_queue); 1054 cancel_work_sync(&sdev->requeue_work); 1055 1056 if (sdev->host->hostt->slave_destroy) 1057 sdev->host->hostt->slave_destroy(sdev); 1058 transport_destroy_device(dev); 1059 1060 put_device(dev); 1061 } 1062 1063 /** 1064 * scsi_remove_device - unregister a device from the scsi bus 1065 * @sdev: scsi_device to unregister 1066 **/ 1067 void scsi_remove_device(struct scsi_device *sdev) 1068 { 1069 struct Scsi_Host *shost = sdev->host; 1070 1071 mutex_lock(&shost->scan_mutex); 1072 __scsi_remove_device(sdev); 1073 mutex_unlock(&shost->scan_mutex); 1074 } 1075 EXPORT_SYMBOL(scsi_remove_device); 1076 1077 static void __scsi_remove_target(struct scsi_target *starget) 1078 { 1079 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1080 unsigned long flags; 1081 struct scsi_device *sdev; 1082 1083 spin_lock_irqsave(shost->host_lock, flags); 1084 restart: 1085 list_for_each_entry(sdev, &shost->__devices, siblings) { 1086 if (sdev->channel != starget->channel || 1087 sdev->id != starget->id || 1088 scsi_device_get(sdev)) 1089 continue; 1090 spin_unlock_irqrestore(shost->host_lock, flags); 1091 scsi_remove_device(sdev); 1092 scsi_device_put(sdev); 1093 spin_lock_irqsave(shost->host_lock, flags); 1094 goto restart; 1095 } 1096 spin_unlock_irqrestore(shost->host_lock, flags); 1097 } 1098 1099 /** 1100 * scsi_remove_target - try to remove a target and all its devices 1101 * @dev: generic starget or parent of generic stargets to be removed 1102 * 1103 * Note: This is slightly racy. It is possible that if the user 1104 * requests the addition of another device then the target won't be 1105 * removed. 1106 */ 1107 void scsi_remove_target(struct device *dev) 1108 { 1109 struct Scsi_Host *shost = dev_to_shost(dev->parent); 1110 struct scsi_target *starget, *last = NULL; 1111 unsigned long flags; 1112 1113 /* remove targets being careful to lookup next entry before 1114 * deleting the last 1115 */ 1116 spin_lock_irqsave(shost->host_lock, flags); 1117 list_for_each_entry(starget, &shost->__targets, siblings) { 1118 if (starget->state == STARGET_DEL) 1119 continue; 1120 if (starget->dev.parent == dev || &starget->dev == dev) { 1121 /* assuming new targets arrive at the end */ 1122 starget->reap_ref++; 1123 spin_unlock_irqrestore(shost->host_lock, flags); 1124 if (last) 1125 scsi_target_reap(last); 1126 last = starget; 1127 __scsi_remove_target(starget); 1128 spin_lock_irqsave(shost->host_lock, flags); 1129 } 1130 } 1131 spin_unlock_irqrestore(shost->host_lock, flags); 1132 1133 if (last) 1134 scsi_target_reap(last); 1135 } 1136 EXPORT_SYMBOL(scsi_remove_target); 1137 1138 int scsi_register_driver(struct device_driver *drv) 1139 { 1140 drv->bus = &scsi_bus_type; 1141 1142 return driver_register(drv); 1143 } 1144 EXPORT_SYMBOL(scsi_register_driver); 1145 1146 int scsi_register_interface(struct class_interface *intf) 1147 { 1148 intf->class = &sdev_class; 1149 1150 return class_interface_register(intf); 1151 } 1152 EXPORT_SYMBOL(scsi_register_interface); 1153 1154 /** 1155 * scsi_sysfs_add_host - add scsi host to subsystem 1156 * @shost: scsi host struct to add to subsystem 1157 **/ 1158 int scsi_sysfs_add_host(struct Scsi_Host *shost) 1159 { 1160 int error, i; 1161 1162 /* add host specific attributes */ 1163 if (shost->hostt->shost_attrs) { 1164 for (i = 0; shost->hostt->shost_attrs[i]; i++) { 1165 error = device_create_file(&shost->shost_dev, 1166 shost->hostt->shost_attrs[i]); 1167 if (error) 1168 return error; 1169 } 1170 } 1171 1172 transport_register_device(&shost->shost_gendev); 1173 transport_configure_device(&shost->shost_gendev); 1174 return 0; 1175 } 1176 1177 static struct device_type scsi_dev_type = { 1178 .name = "scsi_device", 1179 .release = scsi_device_dev_release, 1180 .groups = scsi_sdev_attr_groups, 1181 }; 1182 1183 void scsi_sysfs_device_initialize(struct scsi_device *sdev) 1184 { 1185 unsigned long flags; 1186 struct Scsi_Host *shost = sdev->host; 1187 struct scsi_target *starget = sdev->sdev_target; 1188 1189 device_initialize(&sdev->sdev_gendev); 1190 sdev->sdev_gendev.bus = &scsi_bus_type; 1191 sdev->sdev_gendev.type = &scsi_dev_type; 1192 dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%d", 1193 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); 1194 1195 device_initialize(&sdev->sdev_dev); 1196 sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev); 1197 sdev->sdev_dev.class = &sdev_class; 1198 dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%d", 1199 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); 1200 sdev->scsi_level = starget->scsi_level; 1201 transport_setup_device(&sdev->sdev_gendev); 1202 spin_lock_irqsave(shost->host_lock, flags); 1203 list_add_tail(&sdev->same_target_siblings, &starget->devices); 1204 list_add_tail(&sdev->siblings, &shost->__devices); 1205 spin_unlock_irqrestore(shost->host_lock, flags); 1206 } 1207 1208 int scsi_is_sdev_device(const struct device *dev) 1209 { 1210 return dev->type == &scsi_dev_type; 1211 } 1212 EXPORT_SYMBOL(scsi_is_sdev_device); 1213 1214 /* A blank transport template that is used in drivers that don't 1215 * yet implement Transport Attributes */ 1216 struct scsi_transport_template blank_transport_template = { { { {NULL, }, }, }, }; 1217