// SPDX-License-Identifier: GPL-2.0
/*
 *  gendisk handling
 *
 * Portions Copyright (C) 2020 Christoph Hellwig
 */

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
#include <linux/badblocks.h>

#include "blk.h"

static struct kobject *block_depr;

DECLARE_RWSEM(bdev_lookup_sem);

/* for extended dynamic devt allocation, currently only one major is used */
#define NR_EXT_DEVT		(1 << MINORBITS)
static DEFINE_IDA(ext_devt_ida);

static void disk_check_events(struct disk_events *ev,
			      unsigned int *clearing_ptr);
static void disk_alloc_events(struct gendisk *disk);
static void disk_add_events(struct gendisk *disk);
static void disk_del_events(struct gendisk *disk);
static void disk_release_events(struct gendisk *disk);

void set_capacity(struct gendisk *disk, sector_t sectors)
{
	struct block_device *bdev = disk->part0;

	spin_lock(&bdev->bd_size_lock);
	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
	spin_unlock(&bdev->bd_size_lock);
}
EXPORT_SYMBOL(set_capacity);

/*
 * Set disk capacity and notify if the size is not currently zero and will not
 * be set to zero.  Returns true if a uevent was sent, otherwise false.
 */
bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
{
	sector_t capacity = get_capacity(disk);
	char *envp[] = { "RESIZE=1", NULL };

	set_capacity(disk, size);

	/*
	 * Only print a message and send a uevent if the gendisk is user visible
	 * and alive.  This avoids spamming the log and udev when setting the
	 * initial capacity during probing.
	 */
	if (size == capacity ||
	    (disk->flags & (GENHD_FL_UP | GENHD_FL_HIDDEN)) != GENHD_FL_UP)
		return false;

	pr_info("%s: detected capacity change from %lld to %lld\n",
		disk->disk_name, capacity, size);

	/*
	 * Historically we did not send a uevent for changes to/from an empty
	 * device.
	 */
	if (!capacity || !size)
		return false;
	kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
	return true;
}
EXPORT_SYMBOL_GPL(set_capacity_and_notify);
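/*
 * Illustrative usage sketch (not part of the original file): a driver that
 * learns of a new device size, in 512-byte sectors, would typically call
 * set_capacity_and_notify() and let it decide whether a RESIZE=1 uevent is
 * warranted.  The names "my_disk" and "new_sectors" below are hypothetical:
 *
 *	if (set_capacity_and_notify(my_disk, new_sectors))
 *		;	// user space was told about the resize
 *
 * No uevent is sent while probing (capacity going from or to zero) or while
 * the disk is hidden or not yet marked up.
 */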
/*
 * Format the device name of the indicated disk into the supplied buffer and
 * return a pointer to that same buffer for convenience.
 */
char *disk_name(struct gendisk *hd, int partno, char *buf)
{
	if (!partno)
		snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name);
	else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1]))
		snprintf(buf, BDEVNAME_SIZE, "%sp%d", hd->disk_name, partno);
	else
		snprintf(buf, BDEVNAME_SIZE, "%s%d", hd->disk_name, partno);

	return buf;
}

const char *bdevname(struct block_device *bdev, char *buf)
{
	return disk_name(bdev->bd_disk, bdev->bd_partno, buf);
}
EXPORT_SYMBOL(bdevname);

static void part_stat_read_all(struct block_device *part,
			       struct disk_stats *stat)
{
	int cpu;

	memset(stat, 0, sizeof(struct disk_stats));
	for_each_possible_cpu(cpu) {
		struct disk_stats *ptr = per_cpu_ptr(part->bd_stats, cpu);
		int group;

		for (group = 0; group < NR_STAT_GROUPS; group++) {
			stat->nsecs[group] += ptr->nsecs[group];
			stat->sectors[group] += ptr->sectors[group];
			stat->ios[group] += ptr->ios[group];
			stat->merges[group] += ptr->merges[group];
		}

		stat->io_ticks += ptr->io_ticks;
	}
}

static unsigned int part_in_flight(struct block_device *part)
{
	unsigned int inflight = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) +
			    part_stat_local_read_cpu(part, in_flight[1], cpu);
	}
	if ((int)inflight < 0)
		inflight = 0;

	return inflight;
}

static void part_in_flight_rw(struct block_device *part,
			      unsigned int inflight[2])
{
	int cpu;

	inflight[0] = 0;
	inflight[1] = 0;
	for_each_possible_cpu(cpu) {
		inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu);
		inflight[1] += part_stat_local_read_cpu(part, in_flight[1], cpu);
	}
	if ((int)inflight[0] < 0)
		inflight[0] = 0;
	if ((int)inflight[1] < 0)
		inflight[1] = 0;
}

struct block_device *__disk_get_part(struct gendisk *disk, int partno)
{
	struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl);

	if (unlikely(partno < 0 || partno >= ptbl->len))
		return NULL;
	return rcu_dereference(ptbl->part[partno]);
}

/**
 * disk_part_iter_init - initialize partition iterator
 * @piter: iterator to initialize
 * @disk: disk to iterate over
 * @flags: DISK_PITER_* flags
 *
 * Initialize @piter so that it iterates over partitions of @disk.
 *
 * CONTEXT:
 * Don't care.
 */
void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk,
			 unsigned int flags)
{
	struct disk_part_tbl *ptbl;

	rcu_read_lock();
	ptbl = rcu_dereference(disk->part_tbl);

	piter->disk = disk;
	piter->part = NULL;

	if (flags & DISK_PITER_REVERSE)
		piter->idx = ptbl->len - 1;
	else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0))
		piter->idx = 0;
	else
		piter->idx = 1;

	piter->flags = flags;

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(disk_part_iter_init);
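/*
 * Illustrative usage sketch (not part of the original file): the init/next/
 * exit triple is the canonical way to walk a disk's partitions.  Each
 * disk_part_iter_next() call returns a partition with a reference held, so
 * the final disk_part_iter_exit() drops the last one:
 *
 *	struct disk_part_iter piter;
 *	struct block_device *part;
 *
 *	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
 *	while ((part = disk_part_iter_next(&piter)))
 *		pr_debug("partno %d\n", part->bd_partno);
 *	disk_part_iter_exit(&piter);
 */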
/**
 * disk_part_iter_next - proceed iterator to the next partition and return it
 * @piter: iterator of interest
 *
 * Proceed @piter to the next partition and return it.
 *
 * CONTEXT:
 * Don't care.
 */
struct block_device *disk_part_iter_next(struct disk_part_iter *piter)
{
	struct disk_part_tbl *ptbl;
	int inc, end;

	/* put the last partition */
	disk_part_iter_exit(piter);

	/* get part_tbl */
	rcu_read_lock();
	ptbl = rcu_dereference(piter->disk->part_tbl);

	/* determine iteration parameters */
	if (piter->flags & DISK_PITER_REVERSE) {
		inc = -1;
		if (piter->flags & (DISK_PITER_INCL_PART0 |
				    DISK_PITER_INCL_EMPTY_PART0))
			end = -1;
		else
			end = 0;
	} else {
		inc = 1;
		end = ptbl->len;
	}

	/* iterate to the next partition */
	for (; piter->idx != end; piter->idx += inc) {
		struct block_device *part;

		part = rcu_dereference(ptbl->part[piter->idx]);
		if (!part)
			continue;
		if (!bdev_nr_sectors(part) &&
		    !(piter->flags & DISK_PITER_INCL_EMPTY) &&
		    !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
		      piter->idx == 0))
			continue;

		piter->part = bdgrab(part);
		if (!piter->part)
			continue;
		piter->idx += inc;
		break;
	}

	rcu_read_unlock();

	return piter->part;
}
EXPORT_SYMBOL_GPL(disk_part_iter_next);

/**
 * disk_part_iter_exit - finish up partition iteration
 * @piter: iter of interest
 *
 * Called when iteration is over.  Cleans up @piter.
 *
 * CONTEXT:
 * Don't care.
 */
void disk_part_iter_exit(struct disk_part_iter *piter)
{
	if (piter->part)
		bdput(piter->part);
	piter->part = NULL;
}
EXPORT_SYMBOL_GPL(disk_part_iter_exit);

static inline int sector_in_part(struct block_device *part, sector_t sector)
{
	return part->bd_start_sect <= sector &&
		sector < part->bd_start_sect + bdev_nr_sectors(part);
}

/**
 * disk_map_sector_rcu - map sector to partition
 * @disk: gendisk of interest
 * @sector: sector to map
 *
 * Find out which partition @sector maps to on @disk.  This is
 * primarily used for stats accounting.
 *
 * CONTEXT:
 * RCU read locked.
 *
 * RETURNS:
 * Found partition on success, part0 is returned if no partition matches
 * or the matched partition is being deleted.
 */
struct block_device *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
{
	struct disk_part_tbl *ptbl;
	struct block_device *part;
	int i;

	rcu_read_lock();
	ptbl = rcu_dereference(disk->part_tbl);

	part = rcu_dereference(ptbl->last_lookup);
	if (part && sector_in_part(part, sector))
		goto out_unlock;

	for (i = 1; i < ptbl->len; i++) {
		part = rcu_dereference(ptbl->part[i]);
		if (part && sector_in_part(part, sector)) {
			rcu_assign_pointer(ptbl->last_lookup, part);
			goto out_unlock;
		}
	}

	part = disk->part0;
out_unlock:
	rcu_read_unlock();
	return part;
}
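/*
 * Illustrative usage sketch (not part of the original file): I/O accounting
 * maps the starting sector of a bio to the partition it falls in, e.g.:
 *
 *	part = disk_map_sector_rcu(disk, bio->bi_iter.bi_sector);
 *	// ...charge the I/O to "part"'s per-cpu counters...
 *
 * The last_lookup cache above makes the common case - many consecutive bios
 * hitting the same partition - a single pointer comparison.
 */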
/**
 * disk_has_partitions
 * @disk: gendisk of interest
 *
 * Walk through the partition table and check if valid partition exists.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * True if the gendisk has at least one valid non-zero size partition.
 * Otherwise false.
 */
bool disk_has_partitions(struct gendisk *disk)
{
	struct disk_part_tbl *ptbl;
	int i;
	bool ret = false;

	rcu_read_lock();
	ptbl = rcu_dereference(disk->part_tbl);

	/* Iterate partitions skipping the whole device at index 0 */
	for (i = 1; i < ptbl->len; i++) {
		if (rcu_dereference(ptbl->part[i])) {
			ret = true;
			break;
		}
	}

	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(disk_has_partitions);

/*
 * Can be deleted altogether. Later.
 *
 */
#define BLKDEV_MAJOR_HASH_SIZE 255
static struct blk_major_name {
	struct blk_major_name *next;
	int major;
	char name[16];
	void (*probe)(dev_t devt);
} *major_names[BLKDEV_MAJOR_HASH_SIZE];
static DEFINE_MUTEX(major_names_lock);

/* index in the above - for now: assume no multimajor ranges */
static inline int major_to_index(unsigned major)
{
	return major % BLKDEV_MAJOR_HASH_SIZE;
}

#ifdef CONFIG_PROC_FS
void blkdev_show(struct seq_file *seqf, off_t offset)
{
	struct blk_major_name *dp;

	mutex_lock(&major_names_lock);
	for (dp = major_names[major_to_index(offset)]; dp; dp = dp->next)
		if (dp->major == offset)
			seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
	mutex_unlock(&major_names_lock);
}
#endif /* CONFIG_PROC_FS */

/**
 * __register_blkdev - register a new block device
 *
 * @major: the requested major device number [1..BLKDEV_MAJOR_MAX-1]. If
 *         @major = 0, try to allocate any unused major number.
 * @name: the name of the new block device as a zero terminated string
 * @probe: callback that is called on access to any minor number of @major
 *
 * The @name must be unique within the system.
 *
 * The return value depends on the @major input parameter:
 *
 *  - if a major device number was requested in range [1..BLKDEV_MAJOR_MAX-1]
 *    then the function returns zero on success, or a negative error code
 *  - if any unused major number was requested with @major = 0 parameter
 *    then the return value is the allocated major number in range
 *    [1..BLKDEV_MAJOR_MAX-1] or a negative error code otherwise
 *
 * See Documentation/admin-guide/devices.txt for the list of allocated
 * major numbers.
 *
 * Use register_blkdev instead for any new code.
 */
int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt))
{
	struct blk_major_name **n, *p;
	int index, ret = 0;

	mutex_lock(&major_names_lock);

	/* temporary */
	if (major == 0) {
		for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
			if (major_names[index] == NULL)
				break;
		}

		if (index == 0) {
			printk("%s: failed to get major for %s\n",
			       __func__, name);
			ret = -EBUSY;
			goto out;
		}
		major = index;
		ret = major;
	}

	if (major >= BLKDEV_MAJOR_MAX) {
		pr_err("%s: major requested (%u) is greater than the maximum (%u) for %s\n",
		       __func__, major, BLKDEV_MAJOR_MAX-1, name);

		ret = -EINVAL;
		goto out;
	}

	p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
	if (p == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	p->major = major;
	p->probe = probe;
	strlcpy(p->name, name, sizeof(p->name));
	p->next = NULL;
	index = major_to_index(major);

	for (n = &major_names[index]; *n; n = &(*n)->next) {
		if ((*n)->major == major)
			break;
	}
	if (!*n)
		*n = p;
	else
		ret = -EBUSY;

	if (ret < 0) {
		printk("register_blkdev: cannot get major %u for %s\n",
		       major, name);
		kfree(p);
	}
out:
	mutex_unlock(&major_names_lock);
	return ret;
}
EXPORT_SYMBOL(__register_blkdev);

void unregister_blkdev(unsigned int major, const char *name)
{
	struct blk_major_name **n;
	struct blk_major_name *p = NULL;
	int index = major_to_index(major);

	mutex_lock(&major_names_lock);
	for (n = &major_names[index]; *n; n = &(*n)->next)
		if ((*n)->major == major)
			break;
	if (!*n || strcmp((*n)->name, name)) {
		WARN_ON(1);
	} else {
		p = *n;
		*n = p->next;
	}
	mutex_unlock(&major_names_lock);
	kfree(p);
}

EXPORT_SYMBOL(unregister_blkdev);
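/*
 * Illustrative usage sketch (not part of the original file): a simple driver
 * that does not need a probe callback pairs the calls like this, with
 * "mydrv" as a hypothetical driver name:
 *
 *	int major = register_blkdev(0, "mydrv");	// 0: pick a free major
 *	if (major < 0)
 *		return major;
 *	// ...
 *	unregister_blkdev(major, "mydrv");
 *
 * register_blkdev() is the probe-less wrapper around __register_blkdev().
 */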
/**
 * blk_mangle_minor - scatter minor numbers apart
 * @minor: minor number to mangle
 *
 * Scatter consecutively allocated @minor number apart if MANGLE_DEVT
 * is enabled.  Mangling twice gives the original value.
 *
 * RETURNS:
 * Mangled value.
 *
 * CONTEXT:
 * Don't care.
 */
static int blk_mangle_minor(int minor)
{
#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
	int i;

	for (i = 0; i < MINORBITS / 2; i++) {
		int low = minor & (1 << i);
		int high = minor & (1 << (MINORBITS - 1 - i));
		int distance = MINORBITS - 1 - 2 * i;

		minor ^= low | high;	/* clear both bits */
		low <<= distance;	/* swap the positions */
		high >>= distance;
		minor |= low | high;	/* and set */
	}
#endif
	return minor;
}

/**
 * blk_alloc_devt - allocate a dev_t for a block device
 * @bdev: block device to allocate dev_t for
 * @devt: out parameter for resulting dev_t
 *
 * Allocate a dev_t for block device.
 *
 * RETURNS:
 * 0 on success, allocated dev_t is returned in *@devt.  -errno on
 * failure.
 *
 * CONTEXT:
 * Might sleep.
 */
int blk_alloc_devt(struct block_device *bdev, dev_t *devt)
{
	struct gendisk *disk = bdev->bd_disk;
	int idx;

	/* in consecutive minor range? */
	if (bdev->bd_partno < disk->minors) {
		*devt = MKDEV(disk->major, disk->first_minor + bdev->bd_partno);
		return 0;
	}

	idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL);
	if (idx < 0)
		return idx == -ENOSPC ? -EBUSY : idx;

	*devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
	return 0;
}

/**
 * blk_free_devt - free a dev_t
 * @devt: dev_t to free
 *
 * Free @devt which was allocated using blk_alloc_devt().
 *
 * CONTEXT:
 * Might sleep.
 */
void blk_free_devt(dev_t devt)
{
	if (MAJOR(devt) == BLOCK_EXT_MAJOR)
		ida_free(&ext_devt_ida, blk_mangle_minor(MINOR(devt)));
}

static char *bdevt_str(dev_t devt, char *buf)
{
	if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
		char tbuf[BDEVT_SIZE];
		snprintf(tbuf, BDEVT_SIZE, "%02x%02x", MAJOR(devt), MINOR(devt));
		snprintf(buf, BDEVT_SIZE, "%-9s", tbuf);
	} else
		snprintf(buf, BDEVT_SIZE, "%03x:%05x", MAJOR(devt), MINOR(devt));

	return buf;
}

static void disk_scan_partitions(struct gendisk *disk)
{
	struct block_device *bdev;

	if (!get_capacity(disk) || !disk_part_scan_enabled(disk))
		return;

	set_bit(GD_NEED_PART_SCAN, &disk->state);
	bdev = blkdev_get_by_dev(disk_devt(disk), FMODE_READ, NULL);
	if (!IS_ERR(bdev))
		blkdev_put(bdev, FMODE_READ);
}
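/*
 * Illustrative note (not part of the original file): the same rescan trigger
 * is available to drivers that detect a media change.  Setting the flag and
 * cycling an opener makes the next blkdev_get() re-read the partition table:
 *
 *	set_bit(GD_NEED_PART_SCAN, &disk->state);
 *	bdev = blkdev_get_by_dev(disk_devt(disk), FMODE_READ, NULL);
 *	if (!IS_ERR(bdev))
 *		blkdev_put(bdev, FMODE_READ);
 */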
static void register_disk(struct device *parent, struct gendisk *disk,
			  const struct attribute_group **groups)
{
	struct device *ddev = disk_to_dev(disk);
	struct disk_part_iter piter;
	struct block_device *part;
	int err;

	ddev->parent = parent;

	dev_set_name(ddev, "%s", disk->disk_name);

	/* delay uevents, until we scanned partition table */
	dev_set_uevent_suppress(ddev, 1);

	if (groups) {
		WARN_ON(ddev->groups);
		ddev->groups = groups;
	}
	if (device_add(ddev))
		return;
	if (!sysfs_deprecated) {
		err = sysfs_create_link(block_depr, &ddev->kobj,
					kobject_name(&ddev->kobj));
		if (err) {
			device_del(ddev);
			return;
		}
	}

	/*
	 * avoid probable deadlock caused by allocating memory with
	 * GFP_KERNEL in the runtime_resume callback of all its ancestor
	 * devices
	 */
	pm_runtime_set_memalloc_noio(ddev, true);

	disk->part0->bd_holder_dir =
		kobject_create_and_add("holders", &ddev->kobj);
	disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);

	if (disk->flags & GENHD_FL_HIDDEN) {
		dev_set_uevent_suppress(ddev, 0);
		return;
	}

	disk_scan_partitions(disk);

	/* announce disk after possible partitions are created */
	dev_set_uevent_suppress(ddev, 0);
	kobject_uevent(&ddev->kobj, KOBJ_ADD);

	/* announce possible partitions */
	disk_part_iter_init(&piter, disk, 0);
	while ((part = disk_part_iter_next(&piter)))
		kobject_uevent(bdev_kobj(part), KOBJ_ADD);
	disk_part_iter_exit(&piter);

	if (disk->queue->backing_dev_info->dev) {
		err = sysfs_create_link(&ddev->kobj,
			  &disk->queue->backing_dev_info->dev->kobj,
			  "bdi");
		WARN_ON(err);
	}
}

/**
 * __device_add_disk - add disk information to kernel list
 * @parent: parent device for the disk
 * @disk: per-device partitioning information
 * @groups: Additional per-device sysfs groups
 * @register_queue: register the queue if set to true
 *
 * This function registers the partitioning information in @disk
 * with the kernel.
 *
 * FIXME: error handling
 */
static void __device_add_disk(struct device *parent, struct gendisk *disk,
			      const struct attribute_group **groups,
			      bool register_queue)
{
	dev_t devt;
	int retval;

	/*
	 * The disk queue should now be all set with enough information about
	 * the device for the elevator code to pick an adequate default
	 * elevator if one is needed, that is, for devices requesting queue
	 * registration.
	 */
	if (register_queue)
		elevator_init_mq(disk->queue);

	/* minors == 0 indicates to use ext devt from part0 and should
	 * be accompanied with EXT_DEVT flag.  Make sure all
	 * parameters make sense.
	 */
	WARN_ON(disk->minors && !(disk->major || disk->first_minor));
	WARN_ON(!disk->minors &&
		!(disk->flags & (GENHD_FL_EXT_DEVT | GENHD_FL_HIDDEN)));

	disk->flags |= GENHD_FL_UP;

	retval = blk_alloc_devt(disk->part0, &devt);
	if (retval) {
		WARN_ON(1);
		return;
	}
	disk->major = MAJOR(devt);
	disk->first_minor = MINOR(devt);

	disk_alloc_events(disk);

	if (disk->flags & GENHD_FL_HIDDEN) {
		/*
		 * Don't let hidden disks show up in /proc/partitions,
		 * and don't bother scanning for partitions either.
		 */
		disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
		disk->flags |= GENHD_FL_NO_PART_SCAN;
	} else {
		struct backing_dev_info *bdi = disk->queue->backing_dev_info;
		struct device *dev = disk_to_dev(disk);
		int ret;

		/* Register BDI before referencing it from bdev */
		dev->devt = devt;
		ret = bdi_register(bdi, "%u:%u", MAJOR(devt), MINOR(devt));
		WARN_ON(ret);
		bdi_set_owner(bdi, dev);
		bdev_add(disk->part0, devt);
	}
	register_disk(parent, disk, groups);
	if (register_queue)
		blk_register_queue(disk);

	/*
	 * Take an extra ref on queue which will be put on disk_release()
	 * so that it sticks around as long as @disk is there.
	 */
	WARN_ON_ONCE(!blk_get_queue(disk->queue));

	disk_add_events(disk);
	blk_integrity_add(disk);
}

void device_add_disk(struct device *parent, struct gendisk *disk,
		     const struct attribute_group **groups)

{
	__device_add_disk(parent, disk, groups, true);
}
EXPORT_SYMBOL(device_add_disk);

void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk)
{
	__device_add_disk(parent, disk, NULL, false);
}
EXPORT_SYMBOL(device_add_disk_no_queue_reg);
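/*
 * Illustrative lifecycle sketch (not part of the original file): a typical
 * bio-based driver pairs the add/del calls roughly as follows; all names
 * other than the exported helpers are hypothetical:
 *
 *	disk = alloc_disk(1);			// see __alloc_disk_node()
 *	disk->queue = blk_alloc_queue(NUMA_NO_NODE);
 *	// fill in major/first_minor, disk_name, fops, capacity...
 *	device_add_disk(parent, disk, NULL);	// disk goes live here
 *	...
 *	del_gendisk(disk);			// tear down partitions/sysfs
 *	blk_cleanup_queue(disk->queue);
 *	put_disk(disk);				// drop the final reference
 */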
static void invalidate_partition(struct block_device *bdev)
{
	fsync_bdev(bdev);
	__invalidate_device(bdev, true);

	/*
	 * Unhash the bdev inode for this device so that it can't be looked
	 * up any more even if openers still hold references to it.
	 */
	remove_inode_hash(bdev->bd_inode);
}

/**
 * del_gendisk - remove the gendisk
 * @disk: the struct gendisk to remove
 *
 * Removes the gendisk and all its associated resources. This deletes the
 * partitions associated with the gendisk, and unregisters the associated
 * request_queue.
 *
 * This is the counterpart to the respective __device_add_disk() call.
 *
 * The final removal of the struct gendisk happens when its refcount reaches 0
 * with put_disk(), which should be called after del_gendisk(), if
 * __device_add_disk() was used.
 *
 * Drivers exist which depend on the release of the gendisk to be synchronous,
 * it should not be deferred.
 *
 * Context: can sleep
 */
void del_gendisk(struct gendisk *disk)
{
	struct disk_part_iter piter;
	struct block_device *part;

	might_sleep();

	if (WARN_ON_ONCE(!disk->queue))
		return;

	blk_integrity_del(disk);
	disk_del_events(disk);

	/*
	 * Block lookups of the disk until all bdevs are unhashed and the
	 * disk is marked as dead (GENHD_FL_UP cleared).
	 */
	down_write(&bdev_lookup_sem);

	/* invalidate stuff */
	disk_part_iter_init(&piter, disk,
			     DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
	while ((part = disk_part_iter_next(&piter))) {
		invalidate_partition(part);
		delete_partition(part);
	}
	disk_part_iter_exit(&piter);

	invalidate_partition(disk->part0);
	set_capacity(disk, 0);
	disk->flags &= ~GENHD_FL_UP;
	up_write(&bdev_lookup_sem);

	if (!(disk->flags & GENHD_FL_HIDDEN)) {
		sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");

		/*
		 * Unregister bdi before releasing device numbers (as they can
		 * get reused and we'd get clashes in sysfs).
		 */
		bdi_unregister(disk->queue->backing_dev_info);
	}

	blk_unregister_queue(disk);

	kobject_put(disk->part0->bd_holder_dir);
	kobject_put(disk->slave_dir);

	part_stat_set_all(disk->part0, 0);
	disk->part0->bd_stamp = 0;
	if (!sysfs_deprecated)
		sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
	pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
	device_del(disk_to_dev(disk));
}
EXPORT_SYMBOL(del_gendisk);

/* sysfs access to bad-blocks list. */
static ssize_t disk_badblocks_show(struct device *dev,
					struct device_attribute *attr,
					char *page)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (!disk->bb)
		return sprintf(page, "\n");

	return badblocks_show(disk->bb, page, 0);
}

static ssize_t disk_badblocks_store(struct device *dev,
					struct device_attribute *attr,
					const char *page, size_t len)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (!disk->bb)
		return -ENXIO;

	return badblocks_store(disk->bb, page, len, 0);
}

void blk_request_module(dev_t devt)
{
	unsigned int major = MAJOR(devt);
	struct blk_major_name **n;

	mutex_lock(&major_names_lock);
	for (n = &major_names[major_to_index(major)]; *n; n = &(*n)->next) {
		if ((*n)->major == major && (*n)->probe) {
			(*n)->probe(devt);
			mutex_unlock(&major_names_lock);
			return;
		}
	}
	mutex_unlock(&major_names_lock);

	if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("block-major-%d", MAJOR(devt));
}
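/*
 * Illustrative note (not part of the original file): for the request_module()
 * fallback above to find a driver, the module declares a matching alias,
 * usually via MODULE_ALIAS_BLOCKDEV_MAJOR().  A driver owning major 8
 * (hypothetical here) would carry:
 *
 *	MODULE_ALIAS("block-major-8-*");
 *
 * so that opening an unclaimed dev_t with that major loads it on demand.
 */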
/**
 * bdget_disk - do bdget() by gendisk and partition number
 * @disk: gendisk of interest
 * @partno: partition number
 *
 * Find partition @partno from @disk, do bdget() on it.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * Resulting block_device on success, NULL on failure.
 */
struct block_device *bdget_disk(struct gendisk *disk, int partno)
{
	struct block_device *bdev = NULL;

	rcu_read_lock();
	bdev = __disk_get_part(disk, partno);
	if (bdev && !bdgrab(bdev))
		bdev = NULL;
	rcu_read_unlock();

	return bdev;
}

/*
 * print a full list of all partitions - intended for places where the root
 * filesystem can't be mounted and thus to give the victim some idea of what
 * went wrong
 */
void __init printk_all_partitions(void)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct gendisk *disk = dev_to_disk(dev);
		struct disk_part_iter piter;
		struct block_device *part;
		char name_buf[BDEVNAME_SIZE];
		char devt_buf[BDEVT_SIZE];

		/*
		 * Don't show empty devices or things that have been
		 * suppressed
		 */
		if (get_capacity(disk) == 0 ||
		    (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
			continue;

		/*
		 * Note, unlike /proc/partitions, I am showing the
		 * numbers in hex - the same format as the root=
		 * option takes.
		 */
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter))) {
			bool is_part0 = part == disk->part0;

			printk("%s%s %10llu %s %s", is_part0 ? "" : "  ",
			       bdevt_str(part->bd_dev, devt_buf),
			       bdev_nr_sectors(part) >> 1,
			       disk_name(disk, part->bd_partno, name_buf),
			       part->bd_meta_info ?
					part->bd_meta_info->uuid : "");
			if (is_part0) {
				if (dev->parent && dev->parent->driver)
					printk(" driver: %s\n",
					      dev->parent->driver->name);
				else
					printk(" (driver?)\n");
			} else
				printk("\n");
		}
		disk_part_iter_exit(&piter);
	}
	class_dev_iter_exit(&iter);
}

#ifdef CONFIG_PROC_FS
/* iterator */
static void *disk_seqf_start(struct seq_file *seqf, loff_t *pos)
{
	loff_t skip = *pos;
	struct class_dev_iter *iter;
	struct device *dev;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	seqf->private = iter;
	class_dev_iter_init(iter, &block_class, NULL, &disk_type);
	do {
		dev = class_dev_iter_next(iter);
		if (!dev)
			return NULL;
	} while (skip--);

	return dev_to_disk(dev);
}

static void *disk_seqf_next(struct seq_file *seqf, void *v, loff_t *pos)
{
	struct device *dev;

	(*pos)++;
	dev = class_dev_iter_next(seqf->private);
	if (dev)
		return dev_to_disk(dev);

	return NULL;
}

static void disk_seqf_stop(struct seq_file *seqf, void *v)
{
	struct class_dev_iter *iter = seqf->private;

	/* stop is called even after start failed :-( */
	if (iter) {
		class_dev_iter_exit(iter);
		kfree(iter);
		seqf->private = NULL;
	}
}

static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
{
	void *p;

	p = disk_seqf_start(seqf, pos);
	if (!IS_ERR_OR_NULL(p) && !*pos)
		seq_puts(seqf, "major minor  #blocks  name\n\n");
	return p;
}
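/*
 * Illustrative note (not part of the original file): with the header above
 * and the per-row format in show_partition() below, /proc/partitions comes
 * out looking like (example values):
 *
 *	major minor  #blocks  name
 *
 *	   8        0  976762584 sda
 *	   8        1     524288 sda1
 */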
static int show_partition(struct seq_file *seqf, void *v)
{
	struct gendisk *sgp = v;
	struct disk_part_iter piter;
	struct block_device *part;
	char buf[BDEVNAME_SIZE];

	/* Don't show non-partitionable removable devices or empty devices */
	if (!get_capacity(sgp) || (!disk_max_parts(sgp) &&
				   (sgp->flags & GENHD_FL_REMOVABLE)))
		return 0;
	if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
		return 0;

	/* show the full disk and all non-0 size partitions of it */
	disk_part_iter_init(&piter, sgp, DISK_PITER_INCL_PART0);
	while ((part = disk_part_iter_next(&piter)))
		seq_printf(seqf, "%4d  %7d %10llu %s\n",
			   MAJOR(part->bd_dev), MINOR(part->bd_dev),
			   bdev_nr_sectors(part) >> 1,
			   disk_name(sgp, part->bd_partno, buf));
	disk_part_iter_exit(&piter);

	return 0;
}

static const struct seq_operations partitions_op = {
	.start	= show_partition_start,
	.next	= disk_seqf_next,
	.stop	= disk_seqf_stop,
	.show	= show_partition
};
#endif

static int __init genhd_device_init(void)
{
	int error;

	block_class.dev_kobj = sysfs_dev_block_kobj;
	error = class_register(&block_class);
	if (unlikely(error))
		return error;
	blk_dev_init();

	register_blkdev(BLOCK_EXT_MAJOR, "blkext");

	/* create top-level block dir */
	if (!sysfs_deprecated)
		block_depr = kobject_create_and_add("block", NULL);
	return 0;
}

subsys_initcall(genhd_device_init);

static ssize_t disk_range_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n", disk->minors);
}

static ssize_t disk_ext_range_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n", disk_max_parts(disk));
}

static ssize_t disk_removable_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n",
		       (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
}

static ssize_t disk_hidden_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n",
		       (disk->flags & GENHD_FL_HIDDEN ? 1 : 0));
}

static ssize_t disk_ro_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n", get_disk_ro(disk) ? 1 : 0);
}
ssize_t part_size_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", bdev_nr_sectors(dev_to_bdev(dev)));
}

ssize_t part_stat_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	struct block_device *bdev = dev_to_bdev(dev);
	struct request_queue *q = bdev->bd_disk->queue;
	struct disk_stats stat;
	unsigned int inflight;

	part_stat_read_all(bdev, &stat);
	if (queue_is_mq(q))
		inflight = blk_mq_in_flight(q, bdev);
	else
		inflight = part_in_flight(bdev);

	return sprintf(buf,
		"%8lu %8lu %8llu %8u "
		"%8lu %8lu %8llu %8u "
		"%8u %8u %8u "
		"%8lu %8lu %8llu %8u "
		"%8lu %8u"
		"\n",
		stat.ios[STAT_READ],
		stat.merges[STAT_READ],
		(unsigned long long)stat.sectors[STAT_READ],
		(unsigned int)div_u64(stat.nsecs[STAT_READ], NSEC_PER_MSEC),
		stat.ios[STAT_WRITE],
		stat.merges[STAT_WRITE],
		(unsigned long long)stat.sectors[STAT_WRITE],
		(unsigned int)div_u64(stat.nsecs[STAT_WRITE], NSEC_PER_MSEC),
		inflight,
		jiffies_to_msecs(stat.io_ticks),
		(unsigned int)div_u64(stat.nsecs[STAT_READ] +
				      stat.nsecs[STAT_WRITE] +
				      stat.nsecs[STAT_DISCARD] +
				      stat.nsecs[STAT_FLUSH],
				      NSEC_PER_MSEC),
		stat.ios[STAT_DISCARD],
		stat.merges[STAT_DISCARD],
		(unsigned long long)stat.sectors[STAT_DISCARD],
		(unsigned int)div_u64(stat.nsecs[STAT_DISCARD], NSEC_PER_MSEC),
		stat.ios[STAT_FLUSH],
		(unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC));
}
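/*
 * Illustrative note (not part of the original file): the 17 fields emitted
 * above are, in order (see Documentation/block/stat.rst):
 *
 *	read ios, read merges, read sectors, read msecs,
 *	write ios, write merges, write sectors, write msecs,
 *	in_flight, io_ticks (ms), time_in_queue (ms),
 *	discard ios, discard merges, discard sectors, discard msecs,
 *	flush ios, flush msecs
 *
 * The same layout backs both /sys/block/<disk>/stat and the per-partition
 * stat files.
 */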
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct block_device *bdev = dev_to_bdev(dev);
	struct request_queue *q = bdev->bd_disk->queue;
	unsigned int inflight[2];

	if (queue_is_mq(q))
		blk_mq_in_flight_rw(q, bdev, inflight);
	else
		part_in_flight_rw(bdev, inflight);

	return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]);
}

static ssize_t disk_capability_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%x\n", disk->flags);
}

static ssize_t disk_alignment_offset_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue));
}

static ssize_t disk_discard_alignment_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
}

static DEVICE_ATTR(range, 0444, disk_range_show, NULL);
static DEVICE_ATTR(ext_range, 0444, disk_ext_range_show, NULL);
static DEVICE_ATTR(removable, 0444, disk_removable_show, NULL);
static DEVICE_ATTR(hidden, 0444, disk_hidden_show, NULL);
static DEVICE_ATTR(ro, 0444, disk_ro_show, NULL);
static DEVICE_ATTR(size, 0444, part_size_show, NULL);
static DEVICE_ATTR(alignment_offset, 0444, disk_alignment_offset_show, NULL);
static DEVICE_ATTR(discard_alignment, 0444, disk_discard_alignment_show, NULL);
static DEVICE_ATTR(capability, 0444, disk_capability_show, NULL);
static DEVICE_ATTR(stat, 0444, part_stat_show, NULL);
static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL);
static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store);

#ifdef CONFIG_FAIL_MAKE_REQUEST
ssize_t part_fail_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_make_it_fail);
}

ssize_t part_fail_store(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev_to_bdev(dev)->bd_make_it_fail = i;

	return count;
}

static struct device_attribute dev_attr_fail =
	__ATTR(make-it-fail, 0644, part_fail_show, part_fail_store);
#endif /* CONFIG_FAIL_MAKE_REQUEST */

#ifdef CONFIG_FAIL_IO_TIMEOUT
static struct device_attribute dev_attr_fail_timeout =
	__ATTR(io-timeout-fail, 0644, part_timeout_show, part_timeout_store);
#endif

static struct attribute *disk_attrs[] = {
	&dev_attr_range.attr,
	&dev_attr_ext_range.attr,
	&dev_attr_removable.attr,
	&dev_attr_hidden.attr,
	&dev_attr_ro.attr,
	&dev_attr_size.attr,
	&dev_attr_alignment_offset.attr,
	&dev_attr_discard_alignment.attr,
	&dev_attr_capability.attr,
	&dev_attr_stat.attr,
	&dev_attr_inflight.attr,
	&dev_attr_badblocks.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
	&dev_attr_fail.attr,
#endif
#ifdef CONFIG_FAIL_IO_TIMEOUT
	&dev_attr_fail_timeout.attr,
#endif
	NULL
};

static umode_t disk_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct gendisk *disk = dev_to_disk(dev);

	if (a == &dev_attr_badblocks.attr && !disk->bb)
		return 0;
	return a->mode;
}

static struct attribute_group disk_attr_group = {
	.attrs = disk_attrs,
	.is_visible = disk_visible,
};

static const struct attribute_group *disk_attr_groups[] = {
	&disk_attr_group,
	NULL
};

/**
 * disk_replace_part_tbl - replace disk->part_tbl in RCU-safe way
 * @disk: disk to replace part_tbl for
 * @new_ptbl: new part_tbl to install
 *
 * Replace disk->part_tbl with @new_ptbl in RCU-safe way.  The
 * original ptbl is freed using RCU callback.
 *
 * LOCKING:
 * Matching bd_mutex locked or the caller is the only user of @disk.
 */
static void disk_replace_part_tbl(struct gendisk *disk,
				  struct disk_part_tbl *new_ptbl)
{
	struct disk_part_tbl *old_ptbl =
		rcu_dereference_protected(disk->part_tbl, 1);

	rcu_assign_pointer(disk->part_tbl, new_ptbl);

	if (old_ptbl) {
		rcu_assign_pointer(old_ptbl->last_lookup, NULL);
		kfree_rcu(old_ptbl, rcu_head);
	}
}
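/*
 * Illustrative note (not part of the original file): this is the classic RCU
 * publish/retire pattern.  Readers such as disk_map_sector_rcu() run under
 * rcu_read_lock() and see either the old or the new table, never a torn one:
 *
 *	rcu_read_lock();
 *	ptbl = rcu_dereference(disk->part_tbl);	// old or new, both valid
 *	...
 *	rcu_read_unlock();
 *
 * kfree_rcu() above defers the actual free until all readers that could
 * still hold the old pointer have finished.
 */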
/**
 * disk_expand_part_tbl - expand disk->part_tbl
 * @disk: disk to expand part_tbl for
 * @partno: expand such that this partno can fit in
 *
 * Expand disk->part_tbl such that @partno can fit in.  disk->part_tbl
 * uses RCU to allow unlocked dereferencing for stats and other stuff.
 *
 * LOCKING:
 * Matching bd_mutex locked or the caller is the only user of @disk.
 * Might sleep.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int disk_expand_part_tbl(struct gendisk *disk, int partno)
{
	struct disk_part_tbl *old_ptbl =
		rcu_dereference_protected(disk->part_tbl, 1);
	struct disk_part_tbl *new_ptbl;
	int len = old_ptbl ? old_ptbl->len : 0;
	int i, target;

	/*
	 * check for int overflow, since we can get here from blkpg_ioctl()
	 * with a user passed 'partno'.
	 */
	target = partno + 1;
	if (target < 0)
		return -EINVAL;

	/* disk_max_parts() is zero during initialization, ignore if so */
	if (disk_max_parts(disk) && target > disk_max_parts(disk))
		return -EINVAL;

	if (target <= len)
		return 0;

	new_ptbl = kzalloc_node(struct_size(new_ptbl, part, target), GFP_KERNEL,
				disk->node_id);
	if (!new_ptbl)
		return -ENOMEM;

	new_ptbl->len = target;

	for (i = 0; i < len; i++)
		rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]);

	disk_replace_part_tbl(disk, new_ptbl);
	return 0;
}

/**
 * disk_release - releases all allocated resources of the gendisk
 * @dev: the device representing this disk
 *
 * This function releases all allocated resources of the gendisk.
 *
 * Drivers which used __device_add_disk() have a gendisk with a request_queue
 * assigned. Since the request_queue sits on top of the gendisk for these
 * drivers we also call blk_put_queue() for them, and we expect the
 * request_queue refcount to reach 0 at this point, and so the request_queue
 * will also be freed prior to the disk.
 *
 * Context: can sleep
 */
static void disk_release(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	might_sleep();

	blk_free_devt(dev->devt);
	disk_release_events(disk);
	kfree(disk->random);
	disk_replace_part_tbl(disk, NULL);
	bdput(disk->part0);
	if (disk->queue)
		blk_put_queue(disk->queue);
	kfree(disk);
}

struct class block_class = {
	.name		= "block",
};

static char *block_devnode(struct device *dev, umode_t *mode,
			   kuid_t *uid, kgid_t *gid)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (disk->fops->devnode)
		return disk->fops->devnode(disk, mode);
	return NULL;
}

const struct device_type disk_type = {
	.name		= "disk",
	.groups		= disk_attr_groups,
	.release	= disk_release,
	.devnode	= block_devnode,
};

#ifdef CONFIG_PROC_FS
/*
 * aggregate disk stat collector.  Uses the same stats that the sysfs
 * entries do, above, but makes them available through one seq_file.
 *
 * The output looks suspiciously like /proc/partitions with a bunch of
 * extra fields.
 */
static int diskstats_show(struct seq_file *seqf, void *v)
{
	struct gendisk *gp = v;
	struct disk_part_iter piter;
	struct block_device *hd;
	char buf[BDEVNAME_SIZE];
	unsigned int inflight;
	struct disk_stats stat;

	/*
	if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next)
		seq_puts(seqf,	"major minor name"
				"     rio rmerge rsect ruse wio wmerge "
				"wsect wuse running use aveq"
				"\n\n");
	*/

	disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0);
	while ((hd = disk_part_iter_next(&piter))) {
		part_stat_read_all(hd, &stat);
		if (queue_is_mq(gp->queue))
			inflight = blk_mq_in_flight(gp->queue, hd);
		else
			inflight = part_in_flight(hd);

		seq_printf(seqf, "%4d %7d %s "
			   "%lu %lu %lu %u "
			   "%lu %lu %lu %u "
			   "%u %u %u "
			   "%lu %lu %lu %u "
			   "%lu %u"
			   "\n",
			   MAJOR(hd->bd_dev), MINOR(hd->bd_dev),
			   disk_name(gp, hd->bd_partno, buf),
			   stat.ios[STAT_READ],
			   stat.merges[STAT_READ],
			   stat.sectors[STAT_READ],
			   (unsigned int)div_u64(stat.nsecs[STAT_READ],
							NSEC_PER_MSEC),
			   stat.ios[STAT_WRITE],
			   stat.merges[STAT_WRITE],
			   stat.sectors[STAT_WRITE],
			   (unsigned int)div_u64(stat.nsecs[STAT_WRITE],
							NSEC_PER_MSEC),
			   inflight,
			   jiffies_to_msecs(stat.io_ticks),
			   (unsigned int)div_u64(stat.nsecs[STAT_READ] +
						 stat.nsecs[STAT_WRITE] +
						 stat.nsecs[STAT_DISCARD] +
						 stat.nsecs[STAT_FLUSH],
							NSEC_PER_MSEC),
			   stat.ios[STAT_DISCARD],
			   stat.merges[STAT_DISCARD],
			   stat.sectors[STAT_DISCARD],
			   (unsigned int)div_u64(stat.nsecs[STAT_DISCARD],
							NSEC_PER_MSEC),
			   stat.ios[STAT_FLUSH],
			   (unsigned int)div_u64(stat.nsecs[STAT_FLUSH],
							NSEC_PER_MSEC)
			);
	}
	disk_part_iter_exit(&piter);

	return 0;
}

static const struct seq_operations diskstats_op = {
	.start	= disk_seqf_start,
	.next	= disk_seqf_next,
	.stop	= disk_seqf_stop,
	.show	= diskstats_show
};

static int __init proc_genhd_init(void)
{
	proc_create_seq("diskstats", 0, NULL, &diskstats_op);
	proc_create_seq("partitions", 0, NULL, &partitions_op);
	return 0;
}
module_init(proc_genhd_init);
#endif /* CONFIG_PROC_FS */
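/*
 * Illustrative note (not part of the original file): a /proc/diskstats row
 * produced by diskstats_show() is the same 17 stat fields as the sysfs stat
 * attribute, prefixed by major, minor and name, e.g. (example values):
 *
 *	   8       0 sda 4173 1428 297502 3295 2374 5110 106818 4191 0 5963 8023 0 0 0 0 132 536
 */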
dev_t blk_lookup_devt(const char *name, int partno)
{
	dev_t devt = MKDEV(0, 0);
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct gendisk *disk = dev_to_disk(dev);
		struct block_device *part;

		if (strcmp(dev_name(dev), name))
			continue;

		if (partno < disk->minors) {
			/* We need to return the right devno, even
			 * if the partition doesn't exist yet.
			 */
			devt = MKDEV(MAJOR(dev->devt),
				     MINOR(dev->devt) + partno);
			break;
		}
		part = bdget_disk(disk, partno);
		if (part) {
			devt = part->bd_dev;
			bdput(part);
			break;
		}
	}
	class_dev_iter_exit(&iter);
	return devt;
}

struct gendisk *__alloc_disk_node(int minors, int node_id)
{
	struct gendisk *disk;
	struct disk_part_tbl *ptbl;

	if (minors > DISK_MAX_PARTS) {
		printk(KERN_ERR
			"block: can't allocate more than %d partitions\n",
			DISK_MAX_PARTS);
		minors = DISK_MAX_PARTS;
	}

	disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
	if (!disk)
		return NULL;

	disk->part0 = bdev_alloc(disk, 0);
	if (!disk->part0)
		goto out_free_disk;

	disk->node_id = node_id;
	if (disk_expand_part_tbl(disk, 0))
		goto out_bdput;

	ptbl = rcu_dereference_protected(disk->part_tbl, 1);
	rcu_assign_pointer(ptbl->part[0], disk->part0);

	disk->minors = minors;
	rand_initialize_disk(disk);
	disk_to_dev(disk)->class = &block_class;
	disk_to_dev(disk)->type = &disk_type;
	device_initialize(disk_to_dev(disk));
	return disk;

out_bdput:
	bdput(disk->part0);
out_free_disk:
	kfree(disk);
	return NULL;
}
EXPORT_SYMBOL(__alloc_disk_node);
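/*
 * Illustrative usage sketch (not part of the original file): drivers normally
 * reach this through the alloc_disk()/alloc_disk_node() wrappers from
 * <linux/genhd.h>, asking for the number of minors they want to span:
 *
 *	struct gendisk *disk = alloc_disk(16);	// whole disk + 15 partitions
 *	if (!disk)
 *		return -ENOMEM;
 *
 * Minors beyond this range can still be provided via the extended-devt path
 * (GENHD_FL_EXT_DEVT), see blk_alloc_devt() above.
 */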
/**
 * put_disk - decrements the gendisk refcount
 * @disk: the struct gendisk to decrement the refcount for
 *
 * This decrements the refcount for the struct gendisk. When this reaches 0
 * we'll have disk_release() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 * atomic context.
 */
void put_disk(struct gendisk *disk)
{
	if (disk)
		put_device(disk_to_dev(disk));
}
EXPORT_SYMBOL(put_disk);

static void set_disk_ro_uevent(struct gendisk *gd, int ro)
{
	char event[] = "DISK_RO=1";
	char *envp[] = { event, NULL };

	if (!ro)
		event[8] = '0';
	kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
}

void set_disk_ro(struct gendisk *disk, int flag)
{
	struct disk_part_iter piter;
	struct block_device *part;

	if (disk->part0->bd_read_only != flag) {
		set_disk_ro_uevent(disk, flag);
		disk->part0->bd_read_only = flag;
	}

	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
	while ((part = disk_part_iter_next(&piter)))
		part->bd_read_only = flag;
	disk_part_iter_exit(&piter);
}

EXPORT_SYMBOL(set_disk_ro);

int bdev_read_only(struct block_device *bdev)
{
	if (!bdev)
		return 0;
	return bdev->bd_read_only;
}

EXPORT_SYMBOL(bdev_read_only);

/*
 * Disk events - monitor disk events like media change and eject request.
 */
struct disk_events {
	struct list_head	node;		/* all disk_event's */
	struct gendisk		*disk;		/* the associated disk */
	spinlock_t		lock;

	struct mutex		block_mutex;	/* protects blocking */
	int			block;		/* event blocking depth */
	unsigned int		pending;	/* events already sent out */
	unsigned int		clearing;	/* events being cleared */

	long			poll_msecs;	/* interval, -1 for default */
	struct delayed_work	dwork;
};

static const char *disk_events_strs[] = {
	[ilog2(DISK_EVENT_MEDIA_CHANGE)]	= "media_change",
	[ilog2(DISK_EVENT_EJECT_REQUEST)]	= "eject_request",
};

static char *disk_uevents[] = {
	[ilog2(DISK_EVENT_MEDIA_CHANGE)]	= "DISK_MEDIA_CHANGE=1",
	[ilog2(DISK_EVENT_EJECT_REQUEST)]	= "DISK_EJECT_REQUEST=1",
};

/* list of all disk_events */
static DEFINE_MUTEX(disk_events_mutex);
static LIST_HEAD(disk_events);

/* disable in-kernel polling by default */
static unsigned long disk_events_dfl_poll_msecs;

static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
{
	struct disk_events *ev = disk->ev;
	long intv_msecs = 0;

	/*
	 * If device-specific poll interval is set, always use it.  If
	 * the default is being used, poll if the POLL flag is set.
	 */
	if (ev->poll_msecs >= 0)
		intv_msecs = ev->poll_msecs;
	else if (disk->event_flags & DISK_EVENT_FLAG_POLL)
		intv_msecs = disk_events_dfl_poll_msecs;

	return msecs_to_jiffies(intv_msecs);
}
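/*
 * Illustrative note (not part of the original file): worked examples of the
 * interval selection above, assuming CONFIG_HZ=250 (4 ms per jiffy):
 *
 *	poll_msecs = 2000                        -> 500 jiffies
 *	poll_msecs = -1, FLAG_POLL set, dfl=1000 -> 250 jiffies
 *	poll_msecs = -1, FLAG_POLL clear         -> 0 (polling disabled)
 */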
/**
 * disk_block_events - block and flush disk event checking
 * @disk: disk to block events for
 *
 * On return from this function, it is guaranteed that event checking
 * isn't in progress and won't happen until unblocked by
 * disk_unblock_events().  Events blocking is counted and the actual
 * unblocking happens after the matching number of unblocks are done.
 *
 * Note that this intentionally does not block event checking from
 * disk_clear_events().
 *
 * CONTEXT:
 * Might sleep.
 */
void disk_block_events(struct gendisk *disk)
{
	struct disk_events *ev = disk->ev;
	unsigned long flags;
	bool cancel;

	if (!ev)
		return;

	/*
	 * Outer mutex ensures that the first blocker completes canceling
	 * the event work before further blockers are allowed to finish.
	 */
	mutex_lock(&ev->block_mutex);

	spin_lock_irqsave(&ev->lock, flags);
	cancel = !ev->block++;
	spin_unlock_irqrestore(&ev->lock, flags);

	if (cancel)
		cancel_delayed_work_sync(&disk->ev->dwork);

	mutex_unlock(&ev->block_mutex);
}

static void __disk_unblock_events(struct gendisk *disk, bool check_now)
{
	struct disk_events *ev = disk->ev;
	unsigned long intv;
	unsigned long flags;

	spin_lock_irqsave(&ev->lock, flags);

	if (WARN_ON_ONCE(ev->block <= 0))
		goto out_unlock;

	if (--ev->block)
		goto out_unlock;

	intv = disk_events_poll_jiffies(disk);
	if (check_now)
		queue_delayed_work(system_freezable_power_efficient_wq,
				&ev->dwork, 0);
	else if (intv)
		queue_delayed_work(system_freezable_power_efficient_wq,
				&ev->dwork, intv);
out_unlock:
	spin_unlock_irqrestore(&ev->lock, flags);
}

/**
 * disk_unblock_events - unblock disk event checking
 * @disk: disk to unblock events for
 *
 * Undo disk_block_events().  When the block count reaches zero, it
 * starts events polling if configured.
 *
 * CONTEXT:
 * Don't care.  Safe to call from irq context.
 */
void disk_unblock_events(struct gendisk *disk)
{
	if (disk->ev)
		__disk_unblock_events(disk, false);
}
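/*
 * Illustrative usage sketch (not part of the original file): blocking nests,
 * so callers bracket any section that must not race with the event worker:
 *
 *	disk_block_events(disk);	// depth 0 -> 1, worker cancelled
 *	disk_block_events(disk);	// depth 1 -> 2, no-op
 *	...				// safe: no event checking here
 *	disk_unblock_events(disk);	// depth 2 -> 1
 *	disk_unblock_events(disk);	// depth 1 -> 0, polling rearmed
 */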
/**
 * disk_flush_events - schedule immediate event checking and flushing
 * @disk: disk to check and flush events for
 * @mask: events to flush
 *
 * Schedule immediate event checking on @disk if not blocked.  Events in
 * @mask are scheduled to be cleared from the driver.  Note that this
 * doesn't clear the events from @disk->ev.
 *
 * CONTEXT:
 * If @mask is non-zero must be called with bdev->bd_mutex held.
 */
void disk_flush_events(struct gendisk *disk, unsigned int mask)
{
	struct disk_events *ev = disk->ev;

	if (!ev)
		return;

	spin_lock_irq(&ev->lock);
	ev->clearing |= mask;
	if (!ev->block)
		mod_delayed_work(system_freezable_power_efficient_wq,
				&ev->dwork, 0);
	spin_unlock_irq(&ev->lock);
}

/**
 * disk_clear_events - synchronously check, clear and return pending events
 * @disk: disk to fetch and clear events from
 * @mask: mask of events to be fetched and cleared
 *
 * Disk events are synchronously checked and pending events in @mask
 * are cleared and returned.  This ignores the block count.
 *
 * CONTEXT:
 * Might sleep.
 */
static unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
{
	struct disk_events *ev = disk->ev;
	unsigned int pending;
	unsigned int clearing = mask;

	if (!ev)
		return 0;

	disk_block_events(disk);

	/*
	 * store the union of mask and ev->clearing on the stack so that the
	 * race with disk_flush_events does not cause ambiguity (ev->clearing
	 * can still be modified even if events are blocked).
	 */
	spin_lock_irq(&ev->lock);
	clearing |= ev->clearing;
	ev->clearing = 0;
	spin_unlock_irq(&ev->lock);

	disk_check_events(ev, &clearing);
	/*
	 * if ev->clearing is not 0, the disk_flush_events got called in the
	 * middle of this function, so we want to run the workfn without delay.
	 */
	__disk_unblock_events(disk, ev->clearing ? true : false);

	/* then, fetch and clear pending events */
	spin_lock_irq(&ev->lock);
	pending = ev->pending & mask;
	ev->pending &= ~mask;
	spin_unlock_irq(&ev->lock);
	WARN_ON_ONCE(clearing & mask);

	return pending;
}

/**
 * bdev_check_media_change - check if a removable media has been changed
 * @bdev: block device to check
 *
 * Check whether a removable media has been changed, and attempt to free all
 * dentries and inodes and invalidates all block device page cache entries in
 * that case.
 *
 * Returns %true if the block device changed, or %false if not.
 */
bool bdev_check_media_change(struct block_device *bdev)
{
	unsigned int events;

	events = disk_clear_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE |
				   DISK_EVENT_EJECT_REQUEST);
	if (!(events & DISK_EVENT_MEDIA_CHANGE))
		return false;

	if (__invalidate_device(bdev, true))
		pr_warn("VFS: busy inodes on changed media %s\n",
			bdev->bd_disk->disk_name);
	set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
	return true;
}
EXPORT_SYMBOL(bdev_check_media_change);

/*
 * Separate this part out so that a different pointer for clearing_ptr can be
 * passed in for disk_clear_events.
 */
static void disk_events_workfn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct disk_events *ev = container_of(dwork, struct disk_events, dwork);

	disk_check_events(ev, &ev->clearing);
}

static void disk_check_events(struct disk_events *ev,
			      unsigned int *clearing_ptr)
{
	struct gendisk *disk = ev->disk;
	char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
	unsigned int clearing = *clearing_ptr;
	unsigned int events;
	unsigned long intv;
	int nr_events = 0, i;

	/* check events */
	events = disk->fops->check_events(disk, clearing);

	/* accumulate pending events and schedule next poll if necessary */
	spin_lock_irq(&ev->lock);

	events &= ~ev->pending;
	ev->pending |= events;
	*clearing_ptr &= ~clearing;

	intv = disk_events_poll_jiffies(disk);
	if (!ev->block && intv)
		queue_delayed_work(system_freezable_power_efficient_wq,
				&ev->dwork, intv);

	spin_unlock_irq(&ev->lock);

	/*
	 * Tell userland about new events.  Only the events listed in
	 * @disk->events are reported, and only if DISK_EVENT_FLAG_UEVENT
	 * is set. Otherwise, events are processed internally but never
	 * get reported to userland.
	 */
	for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
		if ((events & disk->events & (1 << i)) &&
		    (disk->event_flags & DISK_EVENT_FLAG_UEVENT))
			envp[nr_events++] = disk_uevents[i];

	if (nr_events)
		kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
}
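/*
 * Illustrative usage sketch (not part of the original file): a removable
 * media driver typically calls bdev_check_media_change() from its ->open()
 * method and reloads device state when a change is reported:
 *
 *	static int mydrv_open(struct block_device *bdev, fmode_t mode)
 *	{
 *		if (bdev_check_media_change(bdev))
 *			mydrv_revalidate(bdev->bd_disk);	// hypothetical
 *		return 0;
 *	}
 */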
/*
 * A disk events enabled device has the following sysfs nodes under
 * its /sys/block/X/ directory.
 *
 * events		: list of all supported events
 * events_async	: list of events which can be detected w/o polling
 *			  (always empty, only for backwards compatibility)
 * events_poll_msecs	: polling interval, 0: disable, -1: system default
 */
static ssize_t __disk_events_show(unsigned int events, char *buf)
{
	const char *delim = "";
	ssize_t pos = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(disk_events_strs); i++)
		if (events & (1 << i)) {
			pos += sprintf(buf + pos, "%s%s",
				       delim, disk_events_strs[i]);
			delim = " ";
		}
	if (pos)
		pos += sprintf(buf + pos, "\n");
	return pos;
}

static ssize_t disk_events_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (!(disk->event_flags & DISK_EVENT_FLAG_UEVENT))
		return 0;

	return __disk_events_show(disk->events, buf);
}

static ssize_t disk_events_async_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	return 0;
}

static ssize_t disk_events_poll_msecs_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (!disk->ev)
		return sprintf(buf, "-1\n");

	return sprintf(buf, "%ld\n", disk->ev->poll_msecs);
}

static ssize_t disk_events_poll_msecs_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	long intv;

	if (!count || !sscanf(buf, "%ld", &intv))
		return -EINVAL;

	if (intv < 0 && intv != -1)
		return -EINVAL;

	if (!disk->ev)
		return -ENODEV;

	disk_block_events(disk);
	disk->ev->poll_msecs = intv;
	__disk_unblock_events(disk, true);

	return count;
}

static const DEVICE_ATTR(events, 0444, disk_events_show, NULL);
static const DEVICE_ATTR(events_async, 0444, disk_events_async_show, NULL);
static const DEVICE_ATTR(events_poll_msecs, 0644,
			 disk_events_poll_msecs_show,
			 disk_events_poll_msecs_store);

static const struct attribute *disk_events_attrs[] = {
	&dev_attr_events.attr,
	&dev_attr_events_async.attr,
	&dev_attr_events_poll_msecs.attr,
	NULL,
};

/*
 * The default polling interval can be specified by the kernel
 * parameter block.events_dfl_poll_msecs which defaults to 0
 * (disable).  This can also be modified runtime by writing to
 * /sys/module/block/parameters/events_dfl_poll_msecs.
 */
static int disk_events_set_dfl_poll_msecs(const char *val,
					  const struct kernel_param *kp)
{
	struct disk_events *ev;
	int ret;

	ret = param_set_ulong(val, kp);
	if (ret < 0)
		return ret;

	mutex_lock(&disk_events_mutex);

	list_for_each_entry(ev, &disk_events, node)
		disk_flush_events(ev->disk, 0);

	mutex_unlock(&disk_events_mutex);

	return 0;
}

static const struct kernel_param_ops disk_events_dfl_poll_msecs_param_ops = {
	.set	= disk_events_set_dfl_poll_msecs,
	.get	= param_get_ulong,
};

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX	"block."
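/*
 * Illustrative note (not part of the original file): with the "block." prefix
 * defined above, the parameter registered below can be set at boot with
 *
 *	block.events_dfl_poll_msecs=2000
 *
 * on the kernel command line, or at runtime via
 * /sys/module/block/parameters/events_dfl_poll_msecs.
 */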
module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
		&disk_events_dfl_poll_msecs, 0644);

/*
 * disk_{alloc|add|del|release}_events - initialize and destroy disk_events.
 */
static void disk_alloc_events(struct gendisk *disk)
{
	struct disk_events *ev;

	if (!disk->fops->check_events || !disk->events)
		return;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev) {
		pr_warn("%s: failed to initialize events\n", disk->disk_name);
		return;
	}

	INIT_LIST_HEAD(&ev->node);
	ev->disk = disk;
	spin_lock_init(&ev->lock);
	mutex_init(&ev->block_mutex);
	ev->block = 1;
	ev->poll_msecs = -1;
	INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);

	disk->ev = ev;
}

static void disk_add_events(struct gendisk *disk)
{
	/* FIXME: error handling */
	if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0)
		pr_warn("%s: failed to create sysfs files for events\n",
			disk->disk_name);

	if (!disk->ev)
		return;

	mutex_lock(&disk_events_mutex);
	list_add_tail(&disk->ev->node, &disk_events);
	mutex_unlock(&disk_events_mutex);

	/*
	 * Block count is initialized to 1 and the following initial
	 * unblock kicks it into action.
	 */
	__disk_unblock_events(disk, true);
}

static void disk_del_events(struct gendisk *disk)
{
	if (disk->ev) {
		disk_block_events(disk);

		mutex_lock(&disk_events_mutex);
		list_del_init(&disk->ev->node);
		mutex_unlock(&disk_events_mutex);
	}

	sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs);
}

static void disk_release_events(struct gendisk *disk)
{
	/* the block count should be 1 from disk_del_events() */
	WARN_ON_ONCE(disk->ev && disk->ev->block != 1);
	kfree(disk->ev);
}