// SPDX-License-Identifier: GPL-2.0
/*
 *  gendisk handling
 *
 * Portions Copyright (C) 2020 Christoph Hellwig
 */

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
#include <linux/badblocks.h>

#include "blk.h"

static struct kobject *block_depr;

/* for extended dynamic devt allocation, currently only one major is used */
#define NR_EXT_DEVT		(1 << MINORBITS)
static DEFINE_IDA(ext_devt_ida);

void set_capacity(struct gendisk *disk, sector_t sectors)
{
	struct block_device *bdev = disk->part0;

	spin_lock(&bdev->bd_size_lock);
	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
	spin_unlock(&bdev->bd_size_lock);
}
EXPORT_SYMBOL(set_capacity);

/*
 * Set disk capacity and notify if the size is not currently zero and will not
 * be set to zero.  Returns true if a uevent was sent, otherwise false.
 */
bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
{
	sector_t capacity = get_capacity(disk);
	char *envp[] = { "RESIZE=1", NULL };

	set_capacity(disk, size);

	/*
	 * Only print a message and send a uevent if the gendisk is user
	 * visible and alive.  This avoids spamming the log and udev when
	 * setting the initial capacity during probing.
	 */
	if (size == capacity ||
	    (disk->flags & (GENHD_FL_UP | GENHD_FL_HIDDEN)) != GENHD_FL_UP)
		return false;

	pr_info("%s: detected capacity change from %lld to %lld\n",
		disk->disk_name, capacity, size);

	/*
	 * Historically we did not send a uevent for changes to/from an empty
	 * device.
	 */
	if (!capacity || !size)
		return false;
	kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
	return true;
}
EXPORT_SYMBOL_GPL(set_capacity_and_notify);
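/*
 * Illustrative only (hypothetical driver code, not part of this file): a
 * driver that learns of a size change from its hardware, for example from a
 * config-change interrupt, would typically call
 *
 *	set_capacity_and_notify(my_dev->disk, new_nr_sectors);
 *
 * which resizes part0 and, for a disk that is already user visible, logs the
 * change and emits a RESIZE=1 change uevent so udev and userspace can react.
 */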
/*
 * Format the device name of the indicated disk into the supplied buffer and
 * return a pointer to that same buffer for convenience.
 */
char *disk_name(struct gendisk *hd, int partno, char *buf)
{
	if (!partno)
		snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name);
	else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1]))
		snprintf(buf, BDEVNAME_SIZE, "%sp%d", hd->disk_name, partno);
	else
		snprintf(buf, BDEVNAME_SIZE, "%s%d", hd->disk_name, partno);

	return buf;
}

const char *bdevname(struct block_device *bdev, char *buf)
{
	return disk_name(bdev->bd_disk, bdev->bd_partno, buf);
}
EXPORT_SYMBOL(bdevname);
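/*
 * Naming examples for disk_name()/bdevname() above (informational): a disk
 * named "sda" yields "sda" for the whole device and "sda3" for partition 3,
 * while a disk whose name ends in a digit, such as "nvme0n1", gets a "p"
 * separator and partition 3 becomes "nvme0n1p3".
 */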
static void part_stat_read_all(struct block_device *part,
			       struct disk_stats *stat)
{
	int cpu;

	memset(stat, 0, sizeof(struct disk_stats));
	for_each_possible_cpu(cpu) {
		struct disk_stats *ptr = per_cpu_ptr(part->bd_stats, cpu);
		int group;

		for (group = 0; group < NR_STAT_GROUPS; group++) {
			stat->nsecs[group] += ptr->nsecs[group];
			stat->sectors[group] += ptr->sectors[group];
			stat->ios[group] += ptr->ios[group];
			stat->merges[group] += ptr->merges[group];
		}

		stat->io_ticks += ptr->io_ticks;
	}
}

static unsigned int part_in_flight(struct block_device *part)
{
	unsigned int inflight = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) +
			    part_stat_local_read_cpu(part, in_flight[1], cpu);
	}
	if ((int)inflight < 0)
		inflight = 0;

	return inflight;
}

static void part_in_flight_rw(struct block_device *part,
			      unsigned int inflight[2])
{
	int cpu;

	inflight[0] = 0;
	inflight[1] = 0;
	for_each_possible_cpu(cpu) {
		inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu);
		inflight[1] += part_stat_local_read_cpu(part, in_flight[1], cpu);
	}
	if ((int)inflight[0] < 0)
		inflight[0] = 0;
	if ((int)inflight[1] < 0)
		inflight[1] = 0;
}
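/*
 * Note on the clamping above: the in_flight counters are per-CPU and are
 * updated without cross-CPU synchronization, so a request started on one CPU
 * may be completed (and decremented) on another.  A snapshot summed over all
 * CPUs can therefore be transiently negative, which is reported as zero
 * instead of wrapping to a huge unsigned value.
 */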
/*
 * Can be deleted altogether. Later.
 *
 */
#define BLKDEV_MAJOR_HASH_SIZE 255
static struct blk_major_name {
	struct blk_major_name *next;
	int major;
	char name[16];
	void (*probe)(dev_t devt);
} *major_names[BLKDEV_MAJOR_HASH_SIZE];
static DEFINE_MUTEX(major_names_lock);

/* index in the above - for now: assume no multimajor ranges */
static inline int major_to_index(unsigned major)
{
	return major % BLKDEV_MAJOR_HASH_SIZE;
}

#ifdef CONFIG_PROC_FS
void blkdev_show(struct seq_file *seqf, off_t offset)
{
	struct blk_major_name *dp;

	mutex_lock(&major_names_lock);
	for (dp = major_names[major_to_index(offset)]; dp; dp = dp->next)
		if (dp->major == offset)
			seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
	mutex_unlock(&major_names_lock);
}
#endif /* CONFIG_PROC_FS */

/**
 * __register_blkdev - register a new block device
 *
 * @major: the requested major device number [1..BLKDEV_MAJOR_MAX-1]. If
 *         @major = 0, try to allocate any unused major number.
 * @name: the name of the new block device as a zero terminated string
 * @probe: callback that is called on access to any minor number of @major
 *
 * The @name must be unique within the system.
 *
 * The return value depends on the @major input parameter:
 *
 *  - if a major device number was requested in range [1..BLKDEV_MAJOR_MAX-1]
 *    then the function returns zero on success, or a negative error code
 *  - if any unused major number was requested with @major = 0 parameter
 *    then the return value is the allocated major number in range
 *    [1..BLKDEV_MAJOR_MAX-1] or a negative error code otherwise
 *
 * See Documentation/admin-guide/devices.txt for the list of allocated
 * major numbers.
 *
 * Use register_blkdev instead for any new code.
 */
int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt))
{
	struct blk_major_name **n, *p;
	int index, ret = 0;

	mutex_lock(&major_names_lock);

	/* temporary */
	if (major == 0) {
		for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
			if (major_names[index] == NULL)
				break;
		}

		if (index == 0) {
			printk("%s: failed to get major for %s\n",
			       __func__, name);
			ret = -EBUSY;
			goto out;
		}
		major = index;
		ret = major;
	}

	if (major >= BLKDEV_MAJOR_MAX) {
		pr_err("%s: major requested (%u) is greater than the maximum (%u) for %s\n",
		       __func__, major, BLKDEV_MAJOR_MAX-1, name);

		ret = -EINVAL;
		goto out;
	}

	p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
	if (p == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	p->major = major;
	p->probe = probe;
	strlcpy(p->name, name, sizeof(p->name));
	p->next = NULL;
	index = major_to_index(major);

	for (n = &major_names[index]; *n; n = &(*n)->next) {
		if ((*n)->major == major)
			break;
	}
	if (!*n)
		*n = p;
	else
		ret = -EBUSY;

	if (ret < 0) {
		printk("register_blkdev: cannot get major %u for %s\n",
		       major, name);
		kfree(p);
	}
out:
	mutex_unlock(&major_names_lock);
	return ret;
}
EXPORT_SYMBOL(__register_blkdev);

void unregister_blkdev(unsigned int major, const char *name)
{
	struct blk_major_name **n;
	struct blk_major_name *p = NULL;
	int index = major_to_index(major);

	mutex_lock(&major_names_lock);
	for (n = &major_names[index]; *n; n = &(*n)->next)
		if ((*n)->major == major)
			break;
	if (!*n || strcmp((*n)->name, name)) {
		WARN_ON(1);
	} else {
		p = *n;
		*n = p->next;
	}
	mutex_unlock(&major_names_lock);
	kfree(p);
}

EXPORT_SYMBOL(unregister_blkdev);
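/*
 * Illustrative only (hypothetical driver code, not part of this file): most
 * drivers use the register_blkdev() wrapper, which passes a NULL @probe
 * callback:
 *
 *	my_major = register_blkdev(0, "mydrv");	(0 requests a dynamic major)
 *	if (my_major < 0)
 *		return my_major;
 *	...
 *	unregister_blkdev(my_major, "mydrv");
 */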
/**
 * blk_mangle_minor - scatter minor numbers apart
 * @minor: minor number to mangle
 *
 * Scatter consecutively allocated @minor numbers apart if
 * CONFIG_DEBUG_BLOCK_EXT_DEVT is enabled.  Mangling twice gives the
 * original value.
 *
 * RETURNS:
 * Mangled value.
 *
 * CONTEXT:
 * Don't care.
 */
static int blk_mangle_minor(int minor)
{
#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
	int i;

	for (i = 0; i < MINORBITS / 2; i++) {
		int low = minor & (1 << i);
		int high = minor & (1 << (MINORBITS - 1 - i));
		int distance = MINORBITS - 1 - 2 * i;

		minor ^= low | high;	/* clear both bits */
		low <<= distance;	/* swap the positions */
		high >>= distance;
		minor |= low | high;	/* and set */
	}
#endif
	return minor;
}

int blk_alloc_ext_minor(void)
{
	int idx;

	idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL);
	if (idx < 0) {
		if (idx == -ENOSPC)
			return -EBUSY;
		return idx;
	}
	return blk_mangle_minor(idx);
}

void blk_free_ext_minor(unsigned int minor)
{
	ida_free(&ext_devt_ida, blk_mangle_minor(minor));
}
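/*
 * Example of the mangling (informational, only relevant when
 * CONFIG_DEBUG_BLOCK_EXT_DEVT is set): with MINORBITS == 20 the loop swaps
 * bit 0 with bit 19, bit 1 with bit 18, and so on, so consecutively
 * allocated minors 0, 1, 2, 3 become 0x00000, 0x80000, 0x40000, 0xc0000.
 * Because applying the same swap twice restores the original value,
 * blk_free_ext_minor() can simply mangle the minor again before freeing the
 * ida entry.
 */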
static char *bdevt_str(dev_t devt, char *buf)
{
	if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
		char tbuf[BDEVT_SIZE];
		snprintf(tbuf, BDEVT_SIZE, "%02x%02x", MAJOR(devt), MINOR(devt));
		snprintf(buf, BDEVT_SIZE, "%-9s", tbuf);
	} else
		snprintf(buf, BDEVT_SIZE, "%03x:%05x", MAJOR(devt), MINOR(devt));

	return buf;
}

void disk_uevent(struct gendisk *disk, enum kobject_action action)
{
	struct block_device *part;
	unsigned long idx;

	rcu_read_lock();
	xa_for_each(&disk->part_tbl, idx, part) {
		if (bdev_is_partition(part) && !bdev_nr_sectors(part))
			continue;
		if (!kobject_get_unless_zero(&part->bd_device.kobj))
			continue;

		rcu_read_unlock();
		kobject_uevent(bdev_kobj(part), action);
		put_device(&part->bd_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(disk_uevent);

static void disk_scan_partitions(struct gendisk *disk)
{
	struct block_device *bdev;

	if (!get_capacity(disk) || !disk_part_scan_enabled(disk))
		return;

	set_bit(GD_NEED_PART_SCAN, &disk->state);
	bdev = blkdev_get_by_dev(disk_devt(disk), FMODE_READ, NULL);
	if (!IS_ERR(bdev))
		blkdev_put(bdev, FMODE_READ);
}
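/*
 * Note: disk_scan_partitions() does not parse the partition table itself.
 * Setting GD_NEED_PART_SCAN and then opening the whole device via
 * blkdev_get_by_dev() makes the open path perform the actual rescan, which
 * keeps all partition scanning in one place.
 */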
static void register_disk(struct device *parent, struct gendisk *disk,
			  const struct attribute_group **groups)
{
	struct device *ddev = disk_to_dev(disk);
	int err;

	ddev->parent = parent;

	dev_set_name(ddev, "%s", disk->disk_name);

	/* delay uevents until we have scanned the partition table */
	dev_set_uevent_suppress(ddev, 1);

	if (groups) {
		WARN_ON(ddev->groups);
		ddev->groups = groups;
	}
	if (device_add(ddev))
		return;
	if (!sysfs_deprecated) {
		err = sysfs_create_link(block_depr, &ddev->kobj,
					kobject_name(&ddev->kobj));
		if (err) {
			device_del(ddev);
			return;
		}
	}

	/*
	 * avoid probable deadlock caused by allocating memory with
	 * GFP_KERNEL in the runtime_resume callback of all its ancestor
	 * devices
	 */
	pm_runtime_set_memalloc_noio(ddev, true);

	disk->part0->bd_holder_dir =
		kobject_create_and_add("holders", &ddev->kobj);
	disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);

	if (disk->flags & GENHD_FL_HIDDEN)
		return;

	disk_scan_partitions(disk);

	/* announce the disk and partitions after all partitions are created */
	dev_set_uevent_suppress(ddev, 0);
	disk_uevent(disk, KOBJ_ADD);

	if (disk->queue->backing_dev_info->dev) {
		err = sysfs_create_link(&ddev->kobj,
			  &disk->queue->backing_dev_info->dev->kobj,
			  "bdi");
		WARN_ON(err);
	}
}

/**
 * __device_add_disk - add disk information to kernel list
 * @parent: parent device for the disk
 * @disk: per-device partitioning information
 * @groups: Additional per-device sysfs groups
 * @register_queue: register the queue if set to true
 *
 * This function registers the partitioning information in @disk
 * with the kernel.
 *
 * FIXME: error handling
 */
static void __device_add_disk(struct device *parent, struct gendisk *disk,
			      const struct attribute_group **groups,
			      bool register_queue)
{
	int ret;

	/*
	 * The disk queue should now be all set with enough information about
	 * the device for the elevator code to pick an adequate default
	 * elevator if one is needed, that is, for devices requesting queue
	 * registration.
	 */
	if (register_queue)
		elevator_init_mq(disk->queue);

	/*
	 * If the driver provides an explicit major number it also must provide
	 * the number of minor numbers supported, and those will be used to
	 * set up the gendisk.
	 * Otherwise just allocate the device numbers for both the whole device
	 * and all partitions from the extended dev_t space.
	 */
	if (disk->major) {
		WARN_ON(!disk->minors);

		if (disk->minors > DISK_MAX_PARTS) {
			pr_err("block: can't allocate more than %d partitions\n",
			       DISK_MAX_PARTS);
			disk->minors = DISK_MAX_PARTS;
		}
	} else {
		WARN_ON(disk->minors);

		ret = blk_alloc_ext_minor();
		if (ret < 0) {
			WARN_ON(1);
			return;
		}
		disk->major = BLOCK_EXT_MAJOR;
		disk->first_minor = MINOR(ret);
		disk->flags |= GENHD_FL_EXT_DEVT;
	}

	disk->flags |= GENHD_FL_UP;

	disk_alloc_events(disk);

	if (disk->flags & GENHD_FL_HIDDEN) {
		/*
		 * Don't let hidden disks show up in /proc/partitions,
		 * and don't bother scanning for partitions either.
		 */
		disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
		disk->flags |= GENHD_FL_NO_PART_SCAN;
	} else {
		struct backing_dev_info *bdi = disk->queue->backing_dev_info;
		struct device *dev = disk_to_dev(disk);

		/* Register BDI before referencing it from bdev */
		dev->devt = MKDEV(disk->major, disk->first_minor);
		ret = bdi_register(bdi, "%u:%u",
				   disk->major, disk->first_minor);
		WARN_ON(ret);
		bdi_set_owner(bdi, dev);
		bdev_add(disk->part0, dev->devt);
	}
	register_disk(parent, disk, groups);
	if (register_queue)
		blk_register_queue(disk);

	/*
	 * Take an extra ref on queue which will be put on disk_release()
	 * so that it sticks around as long as @disk is there.
	 */
	if (blk_get_queue(disk->queue))
		set_bit(GD_QUEUE_REF, &disk->state);
	else
		WARN_ON_ONCE(1);

	disk_add_events(disk);
	blk_integrity_add(disk);
}

void device_add_disk(struct device *parent, struct gendisk *disk,
		     const struct attribute_group **groups)
{
	__device_add_disk(parent, disk, groups, true);
}
EXPORT_SYMBOL(device_add_disk);

void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk)
{
	__device_add_disk(parent, disk, NULL, false);
}
EXPORT_SYMBOL(device_add_disk_no_queue_reg);
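/*
 * Illustrative only (hypothetical driver code, not part of this file): a
 * driver typically fills in the gendisk before publishing it, roughly:
 *
 *	disk->major = MY_MAJOR;
 *	disk->first_minor = index * MY_MINORS;
 *	disk->minors = MY_MINORS;
 *	disk->fops = &my_block_device_operations;
 *	disk->private_data = my_dev;
 *	snprintf(disk->disk_name, DISK_NAME_LEN, "myblk%c", 'a' + index);
 *	set_capacity(disk, nr_sectors);
 *	device_add_disk(&my_dev->pdev->dev, disk, NULL);
 *
 * MY_MAJOR, my_block_device_operations etc. are stand-ins, not symbols that
 * exist in the tree.
 */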
/**
 * del_gendisk - remove the gendisk
 * @disk: the struct gendisk to remove
 *
 * Removes the gendisk and all its associated resources. This deletes the
 * partitions associated with the gendisk, and unregisters the associated
 * request_queue.
 *
 * This is the counterpart to the respective __device_add_disk() call.
 *
 * The final removal of the struct gendisk happens when its refcount reaches 0
 * with put_disk(), which should be called after del_gendisk(), if
 * __device_add_disk() was used.
 *
 * Drivers exist which depend on the release of the gendisk to be synchronous;
 * it must not be deferred.
 *
 * Context: can sleep
 */
void del_gendisk(struct gendisk *disk)
{
	might_sleep();

	if (WARN_ON_ONCE(!disk->queue))
		return;

	blk_integrity_del(disk);
	disk_del_events(disk);

	mutex_lock(&disk->open_mutex);
	disk->flags &= ~GENHD_FL_UP;
	blk_drop_partitions(disk);
	mutex_unlock(&disk->open_mutex);

	fsync_bdev(disk->part0);
	__invalidate_device(disk->part0, true);

	/*
	 * Unhash the bdev inode for this device so that it can't be looked
	 * up any more even if openers still hold references to it.
	 */
	remove_inode_hash(disk->part0->bd_inode);

	set_capacity(disk, 0);

	if (!(disk->flags & GENHD_FL_HIDDEN)) {
		sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");

		/*
		 * Unregister bdi before releasing device numbers (as they can
		 * get reused and we'd get clashes in sysfs).
		 */
		bdi_unregister(disk->queue->backing_dev_info);
	}

	blk_unregister_queue(disk);

	kobject_put(disk->part0->bd_holder_dir);
	kobject_put(disk->slave_dir);

	part_stat_set_all(disk->part0, 0);
	disk->part0->bd_stamp = 0;
	if (!sysfs_deprecated)
		sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
	pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
	device_del(disk_to_dev(disk));
}
EXPORT_SYMBOL(del_gendisk);
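/*
 * Illustrative only (hypothetical driver code, not part of this file):
 * removal usually mirrors registration.  A driver with a separately
 * allocated queue would do something like
 *
 *	del_gendisk(my_dev->disk);
 *	blk_cleanup_queue(my_dev->queue);
 *	put_disk(my_dev->disk);
 *
 * while a driver that used blk_alloc_disk() pairs del_gendisk() with
 * blk_cleanup_disk() (defined later in this file).
 */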
/* sysfs access to bad-blocks list. */
static ssize_t disk_badblocks_show(struct device *dev,
				   struct device_attribute *attr,
				   char *page)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (!disk->bb)
		return sprintf(page, "\n");

	return badblocks_show(disk->bb, page, 0);
}

static ssize_t disk_badblocks_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *page, size_t len)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (!disk->bb)
		return -ENXIO;

	return badblocks_store(disk->bb, page, len, 0);
}

void blk_request_module(dev_t devt)
{
	unsigned int major = MAJOR(devt);
	struct blk_major_name **n;

	mutex_lock(&major_names_lock);
	for (n = &major_names[major_to_index(major)]; *n; n = &(*n)->next) {
		if ((*n)->major == major && (*n)->probe) {
			(*n)->probe(devt);
			mutex_unlock(&major_names_lock);
			return;
		}
	}
	mutex_unlock(&major_names_lock);

	if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("block-major-%d", MAJOR(devt));
}
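/*
 * Informational: the request_module() calls above mean that a modular driver
 * is loaded on first access to one of its device nodes, provided it
 * advertises a matching alias, e.g. the "block-major-<major>-*" alias
 * generated by MODULE_ALIAS_BLOCKDEV_MAJOR().
 */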
/*
 * print a full list of all partitions - intended for places where the root
 * filesystem can't be mounted and thus to give the victim some idea of what
 * went wrong
 */
void __init printk_all_partitions(void)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct gendisk *disk = dev_to_disk(dev);
		struct block_device *part;
		char name_buf[BDEVNAME_SIZE];
		char devt_buf[BDEVT_SIZE];
		unsigned long idx;

		/*
		 * Don't show empty devices or things that have been
		 * suppressed
		 */
		if (get_capacity(disk) == 0 ||
		    (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
			continue;

		/*
		 * Note, unlike /proc/partitions, I am showing the numbers in
		 * hex - the same format as the root= option takes.
		 */
		rcu_read_lock();
		xa_for_each(&disk->part_tbl, idx, part) {
			if (!bdev_nr_sectors(part))
				continue;
			printk("%s%s %10llu %s %s",
			       bdev_is_partition(part) ? "  " : "",
			       bdevt_str(part->bd_dev, devt_buf),
			       bdev_nr_sectors(part) >> 1,
			       disk_name(disk, part->bd_partno, name_buf),
			       part->bd_meta_info ?
					part->bd_meta_info->uuid : "");
			if (bdev_is_partition(part))
				printk("\n");
			else if (dev->parent && dev->parent->driver)
				printk(" driver: %s\n",
				       dev->parent->driver->name);
			else
				printk(" (driver?)\n");
		}
		rcu_read_unlock();
	}
	class_dev_iter_exit(&iter);
}

#ifdef CONFIG_PROC_FS
/* iterator */
static void *disk_seqf_start(struct seq_file *seqf, loff_t *pos)
{
	loff_t skip = *pos;
	struct class_dev_iter *iter;
	struct device *dev;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	seqf->private = iter;
	class_dev_iter_init(iter, &block_class, NULL, &disk_type);
	do {
		dev = class_dev_iter_next(iter);
		if (!dev)
			return NULL;
	} while (skip--);

	return dev_to_disk(dev);
}

static void *disk_seqf_next(struct seq_file *seqf, void *v, loff_t *pos)
{
	struct device *dev;

	(*pos)++;
	dev = class_dev_iter_next(seqf->private);
	if (dev)
		return dev_to_disk(dev);

	return NULL;
}

static void disk_seqf_stop(struct seq_file *seqf, void *v)
{
	struct class_dev_iter *iter = seqf->private;

	/* stop is called even after start failed :-( */
	if (iter) {
		class_dev_iter_exit(iter);
		kfree(iter);
		seqf->private = NULL;
	}
}

static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
{
	void *p;

	p = disk_seqf_start(seqf, pos);
	if (!IS_ERR_OR_NULL(p) && !*pos)
		seq_puts(seqf, "major minor  #blocks  name\n\n");
	return p;
}

static int show_partition(struct seq_file *seqf, void *v)
{
	struct gendisk *sgp = v;
	struct block_device *part;
	unsigned long idx;
	char buf[BDEVNAME_SIZE];

	/* Don't show non-partitionable removable devices or empty devices */
	if (!get_capacity(sgp) || (!disk_max_parts(sgp) &&
				   (sgp->flags & GENHD_FL_REMOVABLE)))
		return 0;
	if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
		return 0;

	rcu_read_lock();
	xa_for_each(&sgp->part_tbl, idx, part) {
		if (!bdev_nr_sectors(part))
			continue;
		seq_printf(seqf, "%4d  %7d %10llu %s\n",
			   MAJOR(part->bd_dev), MINOR(part->bd_dev),
			   bdev_nr_sectors(part) >> 1,
			   disk_name(sgp, part->bd_partno, buf));
	}
	rcu_read_unlock();
	return 0;
}

static const struct seq_operations partitions_op = {
	.start	= show_partition_start,
	.next	= disk_seqf_next,
	.stop	= disk_seqf_stop,
	.show	= show_partition
};
#endif
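/*
 * Informational: the resulting /proc/partitions output looks roughly like
 * the following (sizes are in 1 KiB blocks, hence the ">> 1" above; the
 * values shown are made up):
 *
 *	major minor  #blocks  name
 *
 *	   8        0  488386584 sda
 *	   8        1     524288 sda1
 */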
static int __init genhd_device_init(void)
{
	int error;

	block_class.dev_kobj = sysfs_dev_block_kobj;
	error = class_register(&block_class);
	if (unlikely(error))
		return error;
	blk_dev_init();

	register_blkdev(BLOCK_EXT_MAJOR, "blkext");

	/* create top-level block dir */
	if (!sysfs_deprecated)
		block_depr = kobject_create_and_add("block", NULL);
	return 0;
}

subsys_initcall(genhd_device_init);

static ssize_t disk_range_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n", disk->minors);
}

static ssize_t disk_ext_range_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n", disk_max_parts(disk));
}

static ssize_t disk_removable_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n",
		       (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
}

static ssize_t disk_hidden_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n",
		       (disk->flags & GENHD_FL_HIDDEN ? 1 : 0));
}

static ssize_t disk_ro_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n", get_disk_ro(disk) ? 1 : 0);
}

ssize_t part_size_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", bdev_nr_sectors(dev_to_bdev(dev)));
}

ssize_t part_stat_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	struct block_device *bdev = dev_to_bdev(dev);
	struct request_queue *q = bdev->bd_disk->queue;
	struct disk_stats stat;
	unsigned int inflight;

	part_stat_read_all(bdev, &stat);
	if (queue_is_mq(q))
		inflight = blk_mq_in_flight(q, bdev);
	else
		inflight = part_in_flight(bdev);

	return sprintf(buf,
		"%8lu %8lu %8llu %8u "
		"%8lu %8lu %8llu %8u "
		"%8u %8u %8u "
		"%8lu %8lu %8llu %8u "
		"%8lu %8u"
		"\n",
		stat.ios[STAT_READ],
		stat.merges[STAT_READ],
		(unsigned long long)stat.sectors[STAT_READ],
		(unsigned int)div_u64(stat.nsecs[STAT_READ], NSEC_PER_MSEC),
		stat.ios[STAT_WRITE],
		stat.merges[STAT_WRITE],
		(unsigned long long)stat.sectors[STAT_WRITE],
		(unsigned int)div_u64(stat.nsecs[STAT_WRITE], NSEC_PER_MSEC),
		inflight,
		jiffies_to_msecs(stat.io_ticks),
		(unsigned int)div_u64(stat.nsecs[STAT_READ] +
				      stat.nsecs[STAT_WRITE] +
				      stat.nsecs[STAT_DISCARD] +
				      stat.nsecs[STAT_FLUSH],
				      NSEC_PER_MSEC),
		stat.ios[STAT_DISCARD],
		stat.merges[STAT_DISCARD],
		(unsigned long long)stat.sectors[STAT_DISCARD],
		(unsigned int)div_u64(stat.nsecs[STAT_DISCARD], NSEC_PER_MSEC),
		stat.ios[STAT_FLUSH],
		(unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC));
}

ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct block_device *bdev = dev_to_bdev(dev);
	struct request_queue *q = bdev->bd_disk->queue;
	unsigned int inflight[2];

	if (queue_is_mq(q))
		blk_mq_in_flight_rw(q, bdev, inflight);
	else
		part_in_flight_rw(bdev, inflight);

	return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]);
}
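/*
 * Informational: the field order written by part_stat_show() above is the
 * documented layout of /sys/block/<disk>/stat (and of each partition's stat
 * file): read ios/merges/sectors/ticks, the same four fields for writes,
 * then in_flight, io_ticks and time_in_queue, followed by the discard and
 * flush groups.  See Documentation/block/stat.rst.
 */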
static ssize_t disk_capability_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%x\n", disk->flags);
}

static ssize_t disk_alignment_offset_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue));
}

static ssize_t disk_discard_alignment_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
}

static DEVICE_ATTR(range, 0444, disk_range_show, NULL);
static DEVICE_ATTR(ext_range, 0444, disk_ext_range_show, NULL);
static DEVICE_ATTR(removable, 0444, disk_removable_show, NULL);
static DEVICE_ATTR(hidden, 0444, disk_hidden_show, NULL);
static DEVICE_ATTR(ro, 0444, disk_ro_show, NULL);
static DEVICE_ATTR(size, 0444, part_size_show, NULL);
static DEVICE_ATTR(alignment_offset, 0444, disk_alignment_offset_show, NULL);
static DEVICE_ATTR(discard_alignment, 0444, disk_discard_alignment_show, NULL);
static DEVICE_ATTR(capability, 0444, disk_capability_show, NULL);
static DEVICE_ATTR(stat, 0444, part_stat_show, NULL);
static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL);
static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store);

#ifdef CONFIG_FAIL_MAKE_REQUEST
ssize_t part_fail_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_make_it_fail);
}

ssize_t part_fail_store(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev_to_bdev(dev)->bd_make_it_fail = i;

	return count;
}

static struct device_attribute dev_attr_fail =
	__ATTR(make-it-fail, 0644, part_fail_show, part_fail_store);
#endif /* CONFIG_FAIL_MAKE_REQUEST */

#ifdef CONFIG_FAIL_IO_TIMEOUT
static struct device_attribute dev_attr_fail_timeout =
	__ATTR(io-timeout-fail, 0644, part_timeout_show, part_timeout_store);
#endif

static struct attribute *disk_attrs[] = {
	&dev_attr_range.attr,
	&dev_attr_ext_range.attr,
	&dev_attr_removable.attr,
	&dev_attr_hidden.attr,
	&dev_attr_ro.attr,
	&dev_attr_size.attr,
	&dev_attr_alignment_offset.attr,
	&dev_attr_discard_alignment.attr,
	&dev_attr_capability.attr,
	&dev_attr_stat.attr,
	&dev_attr_inflight.attr,
	&dev_attr_badblocks.attr,
	&dev_attr_events.attr,
	&dev_attr_events_async.attr,
	&dev_attr_events_poll_msecs.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
	&dev_attr_fail.attr,
#endif
#ifdef CONFIG_FAIL_IO_TIMEOUT
	&dev_attr_fail_timeout.attr,
#endif
	NULL
};

static umode_t disk_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct gendisk *disk = dev_to_disk(dev);

	if (a == &dev_attr_badblocks.attr && !disk->bb)
		return 0;
	return a->mode;
}

static struct attribute_group disk_attr_group = {
	.attrs = disk_attrs,
	.is_visible = disk_visible,
};

static const struct attribute_group *disk_attr_groups[] = {
	&disk_attr_group,
	NULL
};

/**
 * disk_release - releases all allocated resources of the gendisk
 * @dev: the device representing this disk
 *
 * This function releases all allocated resources of the gendisk.
 *
 * Drivers which used __device_add_disk() have a gendisk with a request_queue
 * assigned. Since the request_queue sits on top of the gendisk for these
 * drivers we also call blk_put_queue() for them, and we expect the
 * request_queue refcount to reach 0 at this point, and so the request_queue
 * will also be freed prior to the disk.
 *
 * Context: can sleep
 */
static void disk_release(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	might_sleep();

	if (MAJOR(dev->devt) == BLOCK_EXT_MAJOR)
		blk_free_ext_minor(MINOR(dev->devt));
	disk_release_events(disk);
	kfree(disk->random);
	xa_destroy(&disk->part_tbl);
	bdput(disk->part0);
	if (test_bit(GD_QUEUE_REF, &disk->state) && disk->queue)
		blk_put_queue(disk->queue);
	kfree(disk);
}

struct class block_class = {
	.name		= "block",
};

static char *block_devnode(struct device *dev, umode_t *mode,
			   kuid_t *uid, kgid_t *gid)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (disk->fops->devnode)
		return disk->fops->devnode(disk, mode);
	return NULL;
}

const struct device_type disk_type = {
	.name		= "disk",
	.groups		= disk_attr_groups,
	.release	= disk_release,
	.devnode	= block_devnode,
};

#ifdef CONFIG_PROC_FS
/*
 * aggregate disk stat collector.  Uses the same stats that the sysfs
 * entries do, above, but makes them available through one seq_file.
 *
 * The output looks suspiciously like /proc/partitions with a bunch of
 * extra fields.
 */
static int diskstats_show(struct seq_file *seqf, void *v)
{
	struct gendisk *gp = v;
	struct block_device *hd;
	char buf[BDEVNAME_SIZE];
	unsigned int inflight;
	struct disk_stats stat;
	unsigned long idx;

	/*
	if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next)
		seq_puts(seqf,	"major minor name"
				"     rio rmerge rsect ruse wio wmerge "
				"wsect wuse running use aveq"
				"\n\n");
	*/

	rcu_read_lock();
	xa_for_each(&gp->part_tbl, idx, hd) {
		if (bdev_is_partition(hd) && !bdev_nr_sectors(hd))
			continue;
		part_stat_read_all(hd, &stat);
		if (queue_is_mq(gp->queue))
			inflight = blk_mq_in_flight(gp->queue, hd);
		else
			inflight = part_in_flight(hd);

		seq_printf(seqf, "%4d %7d %s "
			   "%lu %lu %lu %u "
			   "%lu %lu %lu %u "
			   "%u %u %u "
			   "%lu %lu %lu %u "
			   "%lu %u"
			   "\n",
			   MAJOR(hd->bd_dev), MINOR(hd->bd_dev),
			   disk_name(gp, hd->bd_partno, buf),
			   stat.ios[STAT_READ],
			   stat.merges[STAT_READ],
			   stat.sectors[STAT_READ],
			   (unsigned int)div_u64(stat.nsecs[STAT_READ],
							NSEC_PER_MSEC),
			   stat.ios[STAT_WRITE],
			   stat.merges[STAT_WRITE],
			   stat.sectors[STAT_WRITE],
			   (unsigned int)div_u64(stat.nsecs[STAT_WRITE],
							NSEC_PER_MSEC),
			   inflight,
			   jiffies_to_msecs(stat.io_ticks),
			   (unsigned int)div_u64(stat.nsecs[STAT_READ] +
						 stat.nsecs[STAT_WRITE] +
						 stat.nsecs[STAT_DISCARD] +
						 stat.nsecs[STAT_FLUSH],
							NSEC_PER_MSEC),
			   stat.ios[STAT_DISCARD],
			   stat.merges[STAT_DISCARD],
			   stat.sectors[STAT_DISCARD],
			   (unsigned int)div_u64(stat.nsecs[STAT_DISCARD],
							NSEC_PER_MSEC),
			   stat.ios[STAT_FLUSH],
			   (unsigned int)div_u64(stat.nsecs[STAT_FLUSH],
							NSEC_PER_MSEC)
			);
	}
	rcu_read_unlock();

	return 0;
}

static const struct seq_operations diskstats_op = {
	.start	= disk_seqf_start,
	.next	= disk_seqf_next,
	.stop	= disk_seqf_stop,
	.show	= diskstats_show
};

static int __init proc_genhd_init(void)
{
	proc_create_seq("diskstats", 0, NULL, &diskstats_op);
	proc_create_seq("partitions", 0, NULL, &partitions_op);
	return 0;
}
module_init(proc_genhd_init);
#endif /* CONFIG_PROC_FS */

dev_t part_devt(struct gendisk *disk, u8 partno)
{
	struct block_device *part;
	dev_t devt = 0;

	rcu_read_lock();
	part = xa_load(&disk->part_tbl, partno);
	if (part)
		devt = part->bd_dev;
	rcu_read_unlock();

	return devt;
}

dev_t blk_lookup_devt(const char *name, int partno)
{
	dev_t devt = MKDEV(0, 0);
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct gendisk *disk = dev_to_disk(dev);

		if (strcmp(dev_name(dev), name))
			continue;

		if (partno < disk->minors) {
			/* We need to return the right devno, even
			 * if the partition doesn't exist yet.
			 */
			devt = MKDEV(MAJOR(dev->devt),
				     MINOR(dev->devt) + partno);
		} else {
			devt = part_devt(disk, partno);
			if (devt)
				break;
		}
	}
	class_dev_iter_exit(&iter);
	return devt;
}
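/*
 * Informational: blk_lookup_devt() is what lets early boot code turn a name
 * like "sda2" from the root= command line into a dev_t before any device
 * node exists.  For classic majors the partition number is simply added to
 * the whole-disk minor, while extended-devt partitions have to be looked up
 * in the partition table via part_devt().
 */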
struct gendisk *__alloc_disk_node(int minors, int node_id)
{
	struct gendisk *disk;

	disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
	if (!disk)
		return NULL;

	disk->part0 = bdev_alloc(disk, 0);
	if (!disk->part0)
		goto out_free_disk;

	disk->node_id = node_id;
	mutex_init(&disk->open_mutex);
	xa_init(&disk->part_tbl);
	if (xa_insert(&disk->part_tbl, 0, disk->part0, GFP_KERNEL))
		goto out_destroy_part_tbl;

	disk->minors = minors;
	rand_initialize_disk(disk);
	disk_to_dev(disk)->class = &block_class;
	disk_to_dev(disk)->type = &disk_type;
	device_initialize(disk_to_dev(disk));
	return disk;

out_destroy_part_tbl:
	xa_destroy(&disk->part_tbl);
	bdput(disk->part0);
out_free_disk:
	kfree(disk);
	return NULL;
}
EXPORT_SYMBOL(__alloc_disk_node);

struct gendisk *__blk_alloc_disk(int node)
{
	struct request_queue *q;
	struct gendisk *disk;

	q = blk_alloc_queue(node);
	if (!q)
		return NULL;

	disk = __alloc_disk_node(0, node);
	if (!disk) {
		blk_cleanup_queue(q);
		return NULL;
	}
	disk->queue = q;
	return disk;
}
EXPORT_SYMBOL(__blk_alloc_disk);

/**
 * put_disk - decrements the gendisk refcount
 * @disk: the struct gendisk to decrement the refcount for
 *
 * This decrements the refcount for the struct gendisk. When this reaches 0
 * we'll have disk_release() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 * atomic context.
 */
void put_disk(struct gendisk *disk)
{
	if (disk)
		put_device(disk_to_dev(disk));
}
EXPORT_SYMBOL(put_disk);

/**
 * blk_cleanup_disk - shutdown a gendisk allocated by blk_alloc_disk
 * @disk: gendisk to shutdown
 *
 * Mark the queue hanging off @disk DYING, drain all pending requests, then
 * mark the queue DEAD, destroy and put it and the gendisk structure.
 *
 * Context: can sleep
 */
void blk_cleanup_disk(struct gendisk *disk)
{
	blk_cleanup_queue(disk->queue);
	put_disk(disk);
}
EXPORT_SYMBOL(blk_cleanup_disk);

static void set_disk_ro_uevent(struct gendisk *gd, int ro)
{
	char event[] = "DISK_RO=1";
	char *envp[] = { event, NULL };

	if (!ro)
		event[8] = '0';
	kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
}

/**
 * set_disk_ro - set a gendisk read-only
 * @disk: gendisk to operate on
 * @read_only: %true to set the disk read-only, %false to set it read/write
 *
 * This function is used to indicate whether a given disk device should have
 * its read-only flag set. set_disk_ro() is typically used by device drivers
 * to indicate whether the underlying physical device is write-protected.
 */
void set_disk_ro(struct gendisk *disk, bool read_only)
{
	if (read_only) {
		if (test_and_set_bit(GD_READ_ONLY, &disk->state))
			return;
	} else {
		if (!test_and_clear_bit(GD_READ_ONLY, &disk->state))
			return;
	}
	set_disk_ro_uevent(disk, read_only);
}
EXPORT_SYMBOL(set_disk_ro);

int bdev_read_only(struct block_device *bdev)
{
	return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
}
EXPORT_SYMBOL(bdev_read_only);
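/*
 * Informational: a block device can be read-only for two independent
 * reasons, which is why bdev_read_only() checks both sources: bd_read_only
 * is a per-bdev policy bit (toggled from user space with the BLKROSET
 * ioctl), while GD_READ_ONLY reflects the whole-disk state a driver reports
 * through set_disk_ro(), typically because the medium itself is
 * write-protected.
 */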