13dcf60bcSChristoph Hellwig // SPDX-License-Identifier: GPL-2.0
23a65dfe8SJens Axboe /*
33a65dfe8SJens Axboe * gendisk handling
47b51e703SChristoph Hellwig *
57b51e703SChristoph Hellwig * Portions Copyright (C) 2020 Christoph Hellwig
63a65dfe8SJens Axboe */
73a65dfe8SJens Axboe
83a65dfe8SJens Axboe #include <linux/module.h>
93ad5cee5SChristoph Hellwig #include <linux/ctype.h>
103a65dfe8SJens Axboe #include <linux/fs.h>
11b446b60eSAndrew Morton #include <linux/kdev_t.h>
123a65dfe8SJens Axboe #include <linux/kernel.h>
133a65dfe8SJens Axboe #include <linux/blkdev.h>
1466114cadSTejun Heo #include <linux/backing-dev.h>
153a65dfe8SJens Axboe #include <linux/init.h>
163a65dfe8SJens Axboe #include <linux/spinlock.h>
17f500975aSAlexey Dobriyan #include <linux/proc_fs.h>
183a65dfe8SJens Axboe #include <linux/seq_file.h>
193a65dfe8SJens Axboe #include <linux/slab.h>
203a65dfe8SJens Axboe #include <linux/kmod.h>
21b81e0c23SChristoph Hellwig #include <linux/major.h>
2258383af6SJes Sorensen #include <linux/mutex.h>
23bcce3de1STejun Heo #include <linux/idr.h>
2477ea887eSTejun Heo #include <linux/log2.h>
2525e823c8SMing Lei #include <linux/pm_runtime.h>
2699e6608cSVishal Verma #include <linux/badblocks.h>
2782d981d4SChristoph Hellwig #include <linux/part_stat.h>
28dd7de370SYu Kuai #include <linux/blktrace_api.h>
293a65dfe8SJens Axboe
30dd7de370SYu Kuai #include "blk-throttle.h"
31ff88972cSAdrian Bunk #include "blk.h"
322aa7745bSChristoph Hellwig #include "blk-mq-sched.h"
338e141f9eSChristoph Hellwig #include "blk-rq-qos.h"
341059699fSMing Lei #include "blk-cgroup.h"
35ff88972cSAdrian Bunk
3631eb6186SChristoph Hellwig static struct kobject *block_depr;
373a65dfe8SJens Axboe
38cf179948SMatteo Croce /*
39cf179948SMatteo Croce * Unique, monotonically increasing sequential number associated with block
40cf179948SMatteo Croce * devices instances (i.e. incremented each time a device is attached).
41cf179948SMatteo Croce * Associating uevents with block devices in userspace is difficult and racy:
42cf179948SMatteo Croce * the uevent netlink socket is lossy, and on slow and overloaded systems has
43cf179948SMatteo Croce * a very high latency.
44cf179948SMatteo Croce * Block devices do not have exclusive owners in userspace, any process can set
45cf179948SMatteo Croce * one up (e.g. loop devices). Moreover, device names can be reused (e.g. loop0
46cf179948SMatteo Croce * can be reused again and again).
47cf179948SMatteo Croce * A userspace process setting up a block device and watching for its events
48cf179948SMatteo Croce * cannot thus reliably tell whether an event relates to the device it just set
49cf179948SMatteo Croce * up or another earlier instance with the same name.
50cf179948SMatteo Croce * This sequential number allows userspace processes to solve this problem, and
51cf179948SMatteo Croce * uniquely associate an uevent to the lifetime of a device.
52cf179948SMatteo Croce */
53cf179948SMatteo Croce static atomic64_t diskseq;
54cf179948SMatteo Croce
55bcce3de1STejun Heo /* for extended dynamic devt allocation, currently only one major is used */
56ce23bba8STejun Heo #define NR_EXT_DEVT (1 << MINORBITS)
5722ae8ce8SChristoph Hellwig static DEFINE_IDA(ext_devt_ida);
58bcce3de1STejun Heo
/*
 * set_capacity - set the size of the whole-disk device
 *
 * Updates the sector count of the disk's whole-device block_device
 * (disk->part0).  No log message or uevent is generated here; see
 * set_capacity_and_notify() for the notifying variant.
 */
set_capacity(struct gendisk * disk,sector_t sectors)59a782483cSChristoph Hellwig void set_capacity(struct gendisk *disk, sector_t sectors)
60a782483cSChristoph Hellwig {
6183794367SDamien Le Moal 	bdev_set_nr_sectors(disk->part0, sectors);
62a782483cSChristoph Hellwig }
63a782483cSChristoph Hellwig EXPORT_SYMBOL(set_capacity);
64a782483cSChristoph Hellwig
65e598a72fSBalbir Singh /*
66449f4ec9SChristoph Hellwig * Set disk capacity and notify if the size is not currently zero and will not
67449f4ec9SChristoph Hellwig * be set to zero. Returns true if a uevent was sent, otherwise false.
68e598a72fSBalbir Singh */
set_capacity_and_notify(struct gendisk * disk,sector_t size)69449f4ec9SChristoph Hellwig bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
70e598a72fSBalbir Singh {
71e598a72fSBalbir Singh sector_t capacity = get_capacity(disk);
72e598a72fSBalbir Singh char *envp[] = { "RESIZE=1", NULL };
73e598a72fSBalbir Singh
74a782483cSChristoph Hellwig set_capacity(disk, size);
75a782483cSChristoph Hellwig
76a782483cSChristoph Hellwig /*
77a782483cSChristoph Hellwig * Only print a message and send a uevent if the gendisk is user visible
78a782483cSChristoph Hellwig * and alive. This avoids spamming the log and udev when setting the
79a782483cSChristoph Hellwig * initial capacity during probing.
80a782483cSChristoph Hellwig */
81a782483cSChristoph Hellwig if (size == capacity ||
8250b4aecfSChristoph Hellwig !disk_live(disk) ||
8350b4aecfSChristoph Hellwig (disk->flags & GENHD_FL_HIDDEN))
84a782483cSChristoph Hellwig return false;
85a782483cSChristoph Hellwig
86a782483cSChristoph Hellwig pr_info("%s: detected capacity change from %lld to %lld\n",
87452c0bf8SMing Lei disk->disk_name, capacity, size);
88a782483cSChristoph Hellwig
89a782483cSChristoph Hellwig /*
90a782483cSChristoph Hellwig * Historically we did not send a uevent for changes to/from an empty
91a782483cSChristoph Hellwig * device.
92a782483cSChristoph Hellwig */
93a782483cSChristoph Hellwig if (!capacity || !size)
94a782483cSChristoph Hellwig return false;
/* RESIZE=1 lets udev rules distinguish a resize from other change events. */
95e598a72fSBalbir Singh kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
967e890c37SChristoph Hellwig return true;
97e598a72fSBalbir Singh }
98449f4ec9SChristoph Hellwig EXPORT_SYMBOL_GPL(set_capacity_and_notify);
99e598a72fSBalbir Singh
/*
 * part_stat_read_all - aggregate per-CPU I/O statistics for a block device
 *
 * Zeroes @stat and then sums the per-CPU disk_stats of @part into it:
 * nsecs/sectors/ios/merges per stat group, plus io_ticks.  The in_flight
 * counters are intentionally not accumulated here (see part_in_flight()).
 */
part_stat_read_all(struct block_device * part,struct disk_stats * stat)1000d02129eSChristoph Hellwig static void part_stat_read_all(struct block_device *part,
1010d02129eSChristoph Hellwig struct disk_stats *stat)
102ea18e0f0SKonstantin Khlebnikov {
103ea18e0f0SKonstantin Khlebnikov int cpu;
104ea18e0f0SKonstantin Khlebnikov
105ea18e0f0SKonstantin Khlebnikov memset(stat, 0, sizeof(struct disk_stats));
106ea18e0f0SKonstantin Khlebnikov for_each_possible_cpu(cpu) {
1070d02129eSChristoph Hellwig struct disk_stats *ptr = per_cpu_ptr(part->bd_stats, cpu);
108ea18e0f0SKonstantin Khlebnikov int group;
109ea18e0f0SKonstantin Khlebnikov
110ea18e0f0SKonstantin Khlebnikov for (group = 0; group < NR_STAT_GROUPS; group++) {
111ea18e0f0SKonstantin Khlebnikov stat->nsecs[group] += ptr->nsecs[group];
112ea18e0f0SKonstantin Khlebnikov stat->sectors[group] += ptr->sectors[group];
113ea18e0f0SKonstantin Khlebnikov stat->ios[group] += ptr->ios[group];
114ea18e0f0SKonstantin Khlebnikov stat->merges[group] += ptr->merges[group];
115ea18e0f0SKonstantin Khlebnikov }
116ea18e0f0SKonstantin Khlebnikov
117ea18e0f0SKonstantin Khlebnikov stat->io_ticks += ptr->io_ticks;
118ea18e0f0SKonstantin Khlebnikov }
119ea18e0f0SKonstantin Khlebnikov }
120ea18e0f0SKonstantin Khlebnikov
/*
 * part_in_flight - total number of requests currently in flight for @part
 *
 * Sums both in_flight counters across all possible CPUs.  Because a request
 * may be started on one CPU and completed on another, individual per-CPU
 * counters can be negative and the racy unlocked sum can transiently
 * underflow; a negative total is clamped to zero.
 */
part_in_flight(struct block_device * part)121e5d98cc3SYu Kuai unsigned int part_in_flight(struct block_device *part)
122f299b7c7SJens Axboe {
123b2f609e1SChristoph Hellwig unsigned int inflight = 0;
1241226b8ddSMikulas Patocka int cpu;
1251226b8ddSMikulas Patocka
1261226b8ddSMikulas Patocka for_each_possible_cpu(cpu) {
127e016b782SMikulas Patocka inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) +
1281226b8ddSMikulas Patocka part_stat_local_read_cpu(part, in_flight[1], cpu);
1291226b8ddSMikulas Patocka }
130e016b782SMikulas Patocka if ((int)inflight < 0)
131e016b782SMikulas Patocka inflight = 0;
1321226b8ddSMikulas Patocka
133e016b782SMikulas Patocka return inflight;
134f299b7c7SJens Axboe }
135f299b7c7SJens Axboe
/*
 * part_in_flight_rw - in-flight request counts for @part, split by direction
 *
 * Like part_in_flight() but reports in_flight[0] and in_flight[1] separately
 * (presumably read vs. write — matches the part_stat in_flight indexing;
 * confirm against the part_stat helpers).  Each racy per-CPU sum is clamped
 * to zero if it transiently underflows.
 */
part_in_flight_rw(struct block_device * part,unsigned int inflight[2])1368446fe92SChristoph Hellwig static void part_in_flight_rw(struct block_device *part,
1378446fe92SChristoph Hellwig unsigned int inflight[2])
138bf0ddabaSOmar Sandoval {
1391226b8ddSMikulas Patocka int cpu;
1401226b8ddSMikulas Patocka
1411226b8ddSMikulas Patocka inflight[0] = 0;
1421226b8ddSMikulas Patocka inflight[1] = 0;
1431226b8ddSMikulas Patocka for_each_possible_cpu(cpu) {
1441226b8ddSMikulas Patocka inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu);
1451226b8ddSMikulas Patocka inflight[1] += part_stat_local_read_cpu(part, in_flight[1], cpu);
1461226b8ddSMikulas Patocka }
1471226b8ddSMikulas Patocka if ((int)inflight[0] < 0)
1481226b8ddSMikulas Patocka inflight[0] = 0;
1491226b8ddSMikulas Patocka if ((int)inflight[1] < 0)
1501226b8ddSMikulas Patocka inflight[1] = 0;
151bf0ddabaSOmar Sandoval }
152bf0ddabaSOmar Sandoval
1533a65dfe8SJens Axboe /*
1543a65dfe8SJens Axboe * Can be deleted altogether. Later.
1553a65dfe8SJens Axboe *
1563a65dfe8SJens Axboe */
157133d55cdSLogan Gunthorpe #define BLKDEV_MAJOR_HASH_SIZE 255
/*
 * Hash table of registered block device majors, chained per bucket.
 * major_names_lock serializes registration/unregistration as a whole;
 * major_names_spinlock additionally protects the chain pointers so that
 * readers such as blkdev_show() can walk them without the mutex.
 */
1583a65dfe8SJens Axboe static struct blk_major_name {
1593a65dfe8SJens Axboe struct blk_major_name *next;
1603a65dfe8SJens Axboe int major;
1613a65dfe8SJens Axboe char name[16];
162fbdee71bSChristoph Hellwig #ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
/* legacy autoload hook, invoked to create a disk on first devt access */
163a160c615SChristoph Hellwig void (*probe)(dev_t devt);
164fbdee71bSChristoph Hellwig #endif
16568eef3b4SJoe Korty } *major_names[BLKDEV_MAJOR_HASH_SIZE];
166e49fbbbfSChristoph Hellwig static DEFINE_MUTEX(major_names_lock);
167dfbb3409STetsuo Handa static DEFINE_SPINLOCK(major_names_spinlock);
1683a65dfe8SJens Axboe
1693a65dfe8SJens Axboe /* index in the above - for now: assume no multimajor ranges */
/* Trivial modulo hash mapping a major number to a major_names[] bucket. */
major_to_index(unsigned major)170e61eb2e9SYang Zhang static inline int major_to_index(unsigned major)
1713a65dfe8SJens Axboe {
17268eef3b4SJoe Korty return major % BLKDEV_MAJOR_HASH_SIZE;
1733a65dfe8SJens Axboe }
1743a65dfe8SJens Axboe
17568eef3b4SJoe Korty #ifdef CONFIG_PROC_FS
/*
 * blkdev_show - seq_file callback printing registered block majors
 *
 * Walks the hash chain that @offset maps to and prints "major name" for
 * every entry whose major equals @offset, under major_names_spinlock.
 */
blkdev_show(struct seq_file * seqf,off_t offset)176cf771cb5STejun Heo void blkdev_show(struct seq_file *seqf, off_t offset)
1777170be5fSNeil Horman {
17868eef3b4SJoe Korty struct blk_major_name *dp;
1797170be5fSNeil Horman
180dfbb3409STetsuo Handa spin_lock(&major_names_spinlock);
181133d55cdSLogan Gunthorpe for (dp = major_names[major_to_index(offset)]; dp; dp = dp->next)
182133d55cdSLogan Gunthorpe if (dp->major == offset)
183cf771cb5STejun Heo seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
184dfbb3409STetsuo Handa spin_unlock(&major_names_spinlock);
18568eef3b4SJoe Korty }
18668eef3b4SJoe Korty #endif /* CONFIG_PROC_FS */
1873a65dfe8SJens Axboe
1889e8c0bccSMárton Németh /**
189e2b6b301SChristoph Hellwig * __register_blkdev - register a new block device
1909e8c0bccSMárton Németh *
191f33ff110SSrivatsa S. Bhat * @major: the requested major device number [1..BLKDEV_MAJOR_MAX-1]. If
192f33ff110SSrivatsa S. Bhat * @major = 0, try to allocate any unused major number.
1939e8c0bccSMárton Németh * @name: the name of the new block device as a zero terminated string
19426e06f5bSLuis Chamberlain * @probe: pre-devtmpfs / pre-udev callback used to create disks when their
19526e06f5bSLuis Chamberlain * pre-created device node is accessed. When a probe call uses
19626e06f5bSLuis Chamberlain * add_disk() and it fails the driver must cleanup resources. This
19726e06f5bSLuis Chamberlain * interface may soon be removed.
1989e8c0bccSMárton Németh *
1999e8c0bccSMárton Németh * The @name must be unique within the system.
2009e8c0bccSMárton Németh *
2010e056eb5Smchehab@s-opensource.com * The return value depends on the @major input parameter:
2020e056eb5Smchehab@s-opensource.com *
203f33ff110SSrivatsa S. Bhat * - if a major device number was requested in range [1..BLKDEV_MAJOR_MAX-1]
204f33ff110SSrivatsa S. Bhat * then the function returns zero on success, or a negative error code
2059e8c0bccSMárton Németh * - if any unused major number was requested with @major = 0 parameter
2069e8c0bccSMárton Németh * then the return value is the allocated major number in range
207f33ff110SSrivatsa S. Bhat * [1..BLKDEV_MAJOR_MAX-1] or a negative error code otherwise
208f33ff110SSrivatsa S. Bhat *
209f33ff110SSrivatsa S. Bhat * See Documentation/admin-guide/devices.txt for the list of allocated
210f33ff110SSrivatsa S. Bhat * major numbers.
211e2b6b301SChristoph Hellwig *
212e2b6b301SChristoph Hellwig * Use register_blkdev instead for any new code.
2139e8c0bccSMárton Németh */
__register_blkdev(unsigned int major,const char * name,void (* probe)(dev_t devt))214a160c615SChristoph Hellwig int __register_blkdev(unsigned int major, const char *name,
215a160c615SChristoph Hellwig void (*probe)(dev_t devt))
2163a65dfe8SJens Axboe {
2173a65dfe8SJens Axboe struct blk_major_name **n, *p;
2183a65dfe8SJens Axboe int index, ret = 0;
2193a65dfe8SJens Axboe
220e49fbbbfSChristoph Hellwig mutex_lock(&major_names_lock);
2213a65dfe8SJens Axboe
2223a65dfe8SJens Axboe /* temporary */
2233a65dfe8SJens Axboe if (major == 0) {
/* Dynamic allocation: scan downward for the highest free bucket. */
2243a65dfe8SJens Axboe for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
2253a65dfe8SJens Axboe if (major_names[index] == NULL)
2263a65dfe8SJens Axboe break;
2273a65dfe8SJens Axboe }
2283a65dfe8SJens Axboe
2293a65dfe8SJens Axboe if (index == 0) {
230dfc76d11SKeyur Patel printk("%s: failed to get major for %s\n",
231dfc76d11SKeyur Patel __func__, name);
2323a65dfe8SJens Axboe ret = -EBUSY;
2333a65dfe8SJens Axboe goto out;
2343a65dfe8SJens Axboe }
2353a65dfe8SJens Axboe major = index;
2363a65dfe8SJens Axboe ret = major;
2373a65dfe8SJens Axboe }
2383a65dfe8SJens Axboe
239133d55cdSLogan Gunthorpe if (major >= BLKDEV_MAJOR_MAX) {
240dfc76d11SKeyur Patel pr_err("%s: major requested (%u) is greater than the maximum (%u) for %s\n",
241dfc76d11SKeyur Patel __func__, major, BLKDEV_MAJOR_MAX-1, name);
242133d55cdSLogan Gunthorpe
243133d55cdSLogan Gunthorpe ret = -EINVAL;
244133d55cdSLogan Gunthorpe goto out;
245133d55cdSLogan Gunthorpe }
246133d55cdSLogan Gunthorpe
2473a65dfe8SJens Axboe p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
2483a65dfe8SJens Axboe if (p == NULL) {
2493a65dfe8SJens Axboe ret = -ENOMEM;
2503a65dfe8SJens Axboe goto out;
2513a65dfe8SJens Axboe }
2523a65dfe8SJens Axboe
2533a65dfe8SJens Axboe p->major = major;
254fbdee71bSChristoph Hellwig #ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
255a160c615SChristoph Hellwig p->probe = probe;
256fbdee71bSChristoph Hellwig #endif
25720d09975SAzeem Shaikh strscpy(p->name, name, sizeof(p->name));
2583a65dfe8SJens Axboe p->next = NULL;
2593a65dfe8SJens Axboe index = major_to_index(major);
2603a65dfe8SJens Axboe
/* Insert at chain tail unless the major is already taken. */
261dfbb3409STetsuo Handa spin_lock(&major_names_spinlock);
2623a65dfe8SJens Axboe for (n = &major_names[index]; *n; n = &(*n)->next) {
2633a65dfe8SJens Axboe if ((*n)->major == major)
2643a65dfe8SJens Axboe break;
2653a65dfe8SJens Axboe }
2663a65dfe8SJens Axboe if (!*n)
2673a65dfe8SJens Axboe *n = p;
2683a65dfe8SJens Axboe else
2693a65dfe8SJens Axboe ret = -EBUSY;
270dfbb3409STetsuo Handa spin_unlock(&major_names_spinlock);
2713a65dfe8SJens Axboe
2723a65dfe8SJens Axboe if (ret < 0) {
273f33ff110SSrivatsa S. Bhat printk("register_blkdev: cannot get major %u for %s\n",
2743a65dfe8SJens Axboe major, name);
2753a65dfe8SJens Axboe kfree(p);
2763a65dfe8SJens Axboe }
2773a65dfe8SJens Axboe out:
278e49fbbbfSChristoph Hellwig mutex_unlock(&major_names_lock);
2793a65dfe8SJens Axboe return ret;
2803a65dfe8SJens Axboe }
281a160c615SChristoph Hellwig EXPORT_SYMBOL(__register_blkdev);
2823a65dfe8SJens Axboe
/*
 * unregister_blkdev - drop a major registered via __register_blkdev()
 *
 * Unlinks the entry for @major from its hash chain and frees it.  WARNs
 * (and frees nothing) if @major is not registered or is registered under
 * a different @name.
 */
unregister_blkdev(unsigned int major,const char * name)283f4480240SAkinobu Mita void unregister_blkdev(unsigned int major, const char *name)
2843a65dfe8SJens Axboe {
2853a65dfe8SJens Axboe struct blk_major_name **n;
2863a65dfe8SJens Axboe struct blk_major_name *p = NULL;
2873a65dfe8SJens Axboe int index = major_to_index(major);
2883a65dfe8SJens Axboe
289e49fbbbfSChristoph Hellwig mutex_lock(&major_names_lock);
290dfbb3409STetsuo Handa spin_lock(&major_names_spinlock);
2913a65dfe8SJens Axboe for (n = &major_names[index]; *n; n = &(*n)->next)
2923a65dfe8SJens Axboe if ((*n)->major == major)
2933a65dfe8SJens Axboe break;
294294462a5SAkinobu Mita if (!*n || strcmp((*n)->name, name)) {
295294462a5SAkinobu Mita WARN_ON(1);
296294462a5SAkinobu Mita } else {
2973a65dfe8SJens Axboe p = *n;
2983a65dfe8SJens Axboe *n = p->next;
2993a65dfe8SJens Axboe }
300dfbb3409STetsuo Handa spin_unlock(&major_names_spinlock);
301e49fbbbfSChristoph Hellwig mutex_unlock(&major_names_lock);
/* kfree(NULL) is a no-op, covering the not-found path. */
3023a65dfe8SJens Axboe kfree(p);
3033a65dfe8SJens Axboe }
3043a65dfe8SJens Axboe
3053a65dfe8SJens Axboe EXPORT_SYMBOL(unregister_blkdev);
3063a65dfe8SJens Axboe
/*
 * blk_alloc_ext_minor - allocate a minor in the extended (BLOCK_EXT_MAJOR)
 * dev_t space
 *
 * Returns the allocated minor, or a negative error code; IDA exhaustion
 * (-ENOSPC) is translated to -EBUSY for callers.
 */
blk_alloc_ext_minor(void)3077c3f828bSChristoph Hellwig int blk_alloc_ext_minor(void)
308bcce3de1STejun Heo {
309bab998d6STejun Heo int idx;
310bcce3de1STejun Heo
311d1868328SChristophe JAILLET idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT - 1, GFP_KERNEL);
3127c3f828bSChristoph Hellwig if (idx == -ENOSPC)
3137c3f828bSChristoph Hellwig return -EBUSY;
3147c3f828bSChristoph Hellwig return idx;
3157c3f828bSChristoph Hellwig }
316bcce3de1STejun Heo
/* blk_free_ext_minor - release a minor obtained from blk_alloc_ext_minor() */
blk_free_ext_minor(unsigned int minor)3177c3f828bSChristoph Hellwig void blk_free_ext_minor(unsigned int minor)
318bcce3de1STejun Heo {
319c4b2b7d1SChristoph Hellwig ida_free(&ext_devt_ida, minor);
3206fcc44d1SYufen Yu }
3216fcc44d1SYufen Yu
/*
 * disk_uevent - send @action uevent for the disk and its partitions
 *
 * Iterates the partition xarray under RCU, skipping zero-sized partitions.
 * Each bdev is pinned with a kobject reference so the RCU read lock can be
 * dropped around kobject_uevent(), which may sleep; the lock is reacquired
 * before continuing the walk.
 */
disk_uevent(struct gendisk * disk,enum kobject_action action)322bc359d03SChristoph Hellwig void disk_uevent(struct gendisk *disk, enum kobject_action action)
323bc359d03SChristoph Hellwig {
324bc359d03SChristoph Hellwig struct block_device *part;
3253212135aSChristoph Hellwig unsigned long idx;
326bc359d03SChristoph Hellwig
3273212135aSChristoph Hellwig rcu_read_lock();
3283212135aSChristoph Hellwig xa_for_each(&disk->part_tbl, idx, part) {
3293212135aSChristoph Hellwig if (bdev_is_partition(part) && !bdev_nr_sectors(part))
3303212135aSChristoph Hellwig continue;
331498dcc13SChristoph Hellwig if (!kobject_get_unless_zero(&part->bd_device.kobj))
3323212135aSChristoph Hellwig continue;
3333212135aSChristoph Hellwig
3343212135aSChristoph Hellwig rcu_read_unlock();
335bc359d03SChristoph Hellwig kobject_uevent(bdev_kobj(part), action);
336498dcc13SChristoph Hellwig put_device(&part->bd_device);
3373212135aSChristoph Hellwig rcu_read_lock();
3383212135aSChristoph Hellwig }
3393212135aSChristoph Hellwig rcu_read_unlock();
340bc359d03SChristoph Hellwig }
341bc359d03SChristoph Hellwig EXPORT_SYMBOL_GPL(disk_uevent);
342bc359d03SChristoph Hellwig
/*
 * disk_scan_partitions - (re)read the partition table of @disk
 *
 * Fails with -EINVAL if the disk does not support partition scanning and
 * with -EBUSY if any partition is currently open.  The scan itself is
 * triggered by opening the whole device with GD_NEED_PART_SCAN set.
 * Returns 0 on success or a negative error code.
 */
disk_scan_partitions(struct gendisk * disk,blk_mode_t mode)34305bdb996SChristoph Hellwig int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode)
3449301fe73SChristoph Hellwig {
3459301fe73SChristoph Hellwig struct block_device *bdev;
346e5cfefa9SYu Kuai int ret = 0;
3479301fe73SChristoph Hellwig
348d6b6dfffSChristoph Hellwig if (!disk_has_partscan(disk))
349b9684a71SChristoph Hellwig return -EINVAL;
350e16e506cSChristoph Hellwig if (disk->open_partitions)
351e16e506cSChristoph Hellwig return -EBUSY;
3529301fe73SChristoph Hellwig
353e5cfefa9SYu Kuai /*
354e5cfefa9SYu Kuai * If the device is opened exclusively by current thread already, it's
355e5cfefa9SYu Kuai * safe to scan partitions, otherwise, use bd_prepare_to_claim() to
356e5cfefa9SYu Kuai * synchronize with other exclusive openers and other partition
357e5cfefa9SYu Kuai * scanners.
358e5cfefa9SYu Kuai */
35905bdb996SChristoph Hellwig if (!(mode & BLK_OPEN_EXCL)) {
3600718afd4SChristoph Hellwig ret = bd_prepare_to_claim(disk->part0, disk_scan_partitions,
3610718afd4SChristoph Hellwig NULL);
362e5cfefa9SYu Kuai if (ret)
363e5cfefa9SYu Kuai return ret;
364e5cfefa9SYu Kuai }
365e5cfefa9SYu Kuai
3663723091eSYu Kuai set_bit(GD_NEED_PART_SCAN, &disk->state);
36756e71bdfSChristoph Hellwig bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~BLK_OPEN_EXCL, NULL,
368985958b8SYu Kuai NULL);
369e16e506cSChristoph Hellwig if (IS_ERR(bdev))
370e5cfefa9SYu Kuai ret = PTR_ERR(bdev);
371e5cfefa9SYu Kuai else
3722736e8eeSChristoph Hellwig blkdev_put(bdev, NULL);
373e5cfefa9SYu Kuai
3743723091eSYu Kuai /*
3753723091eSYu Kuai * If blkdev_get_by_dev() failed early, GD_NEED_PART_SCAN would still be
3763723091eSYu Kuai * set; clear it here so that a later re-assembly of a partitioned raid
3773723091eSYu Kuai * device does not create partitions for the underlying disk.
3783723091eSYu Kuai */
3793723091eSYu Kuai clear_bit(GD_NEED_PART_SCAN, &disk->state);
38005bdb996SChristoph Hellwig if (!(mode & BLK_OPEN_EXCL))
381e5cfefa9SYu Kuai bd_abort_claiming(disk->part0, disk_scan_partitions);
382e5cfefa9SYu Kuai return ret;
3839301fe73SChristoph Hellwig }
3849301fe73SChristoph Hellwig
3853a65dfe8SJens Axboe /**
386d1254a87SChristoph Hellwig * device_add_disk - add disk information to kernel list
387e63a46beSDan Williams * @parent: parent device for the disk
3883a65dfe8SJens Axboe * @disk: per-device partitioning information
389fef912bfSHannes Reinecke * @groups: Additional per-device sysfs groups
3903a65dfe8SJens Axboe *
3913a65dfe8SJens Axboe * This function registers the partitioning information in @disk
3923a65dfe8SJens Axboe * with the kernel.
3933a65dfe8SJens Axboe *
3933a65dfe8SJens Axboe * Returns 0 on success or a negative error code; on failure all partially
3933a65dfe8SJens Axboe * registered state is unwound via the out_* labels below.
3933a65dfe8SJens Axboe */
device_add_disk(struct device * parent,struct gendisk * disk,const struct attribute_group ** groups)394278167fdSLuis Chamberlain int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
395d1254a87SChristoph Hellwig const struct attribute_group **groups)
396d1254a87SChristoph Hellwig
3973a65dfe8SJens Axboe {
39852b85909SChristoph Hellwig struct device *ddev = disk_to_dev(disk);
3997c3f828bSChristoph Hellwig int ret;
400cf0ca9feSPeter Zijlstra
40169fe0f29SMing Lei /* Only makes sense for bio-based to set ->poll_bio */
40269fe0f29SMing Lei if (queue_is_mq(disk->queue) && disk->fops->poll_bio)
40369fe0f29SMing Lei return -EINVAL;
40469fe0f29SMing Lei
405737eb78eSDamien Le Moal /*
406737eb78eSDamien Le Moal * The disk queue should now be all set with enough information about
407737eb78eSDamien Le Moal * the device for the elevator code to pick an adequate default
408737eb78eSDamien Le Moal * elevator if one is needed, that is, for devices requesting queue
409737eb78eSDamien Le Moal * registration.
410737eb78eSDamien Le Moal */
411737eb78eSDamien Le Moal elevator_init_mq(disk->queue);
412737eb78eSDamien Le Moal
4139f4107b0SJens Axboe /* Mark bdev as having a submit_bio, if needed */
4149f4107b0SJens Axboe disk->part0->bd_has_submit_bio = disk->fops->submit_bio != NULL;
4159f4107b0SJens Axboe
4167c3f828bSChristoph Hellwig /*
4177c3f828bSChristoph Hellwig * If the driver provides an explicit major number it also must provide
4187c3f828bSChristoph Hellwig * the number of minors numbers supported, and those will be used to
4197c3f828bSChristoph Hellwig * setup the gendisk.
4207c3f828bSChristoph Hellwig * Otherwise just allocate the device numbers for both the whole device
4217c3f828bSChristoph Hellwig * and all partitions from the extended dev_t space.
4223e1a7ff8STejun Heo */
42302341a08SYu Kuai ret = -EINVAL;
4247c3f828bSChristoph Hellwig if (disk->major) {
42583cbce95SLuis Chamberlain if (WARN_ON(!disk->minors))
42602341a08SYu Kuai goto out_exit_elevator;
4272e3c73faSChristoph Hellwig
4282e3c73faSChristoph Hellwig if (disk->minors > DISK_MAX_PARTS) {
4292e3c73faSChristoph Hellwig pr_err("block: can't allocate more than %d partitions\n",
4302e3c73faSChristoph Hellwig DISK_MAX_PARTS);
4312e3c73faSChristoph Hellwig disk->minors = DISK_MAX_PARTS;
4322e3c73faSChristoph Hellwig }
433ed49fd2dSLi Nan if (disk->first_minor > MINORMASK ||
434ed49fd2dSLi Nan disk->minors > MINORMASK + 1 ||
435ed49fd2dSLi Nan disk->first_minor + disk->minors > MINORMASK + 1)
43602341a08SYu Kuai goto out_exit_elevator;
4377c3f828bSChristoph Hellwig } else {
43883cbce95SLuis Chamberlain if (WARN_ON(disk->minors))
43902341a08SYu Kuai goto out_exit_elevator;
4403e1a7ff8STejun Heo
4417c3f828bSChristoph Hellwig ret = blk_alloc_ext_minor();
44283cbce95SLuis Chamberlain if (ret < 0)
44302341a08SYu Kuai goto out_exit_elevator;
4447c3f828bSChristoph Hellwig disk->major = BLOCK_EXT_MAJOR;
445539711d7SChristoph Hellwig disk->first_minor = ret;
4467c3f828bSChristoph Hellwig }
4477c3f828bSChristoph Hellwig
44852b85909SChristoph Hellwig /* delay uevents, until we scanned partition table */
44952b85909SChristoph Hellwig dev_set_uevent_suppress(ddev, 1);
45052b85909SChristoph Hellwig
45152b85909SChristoph Hellwig ddev->parent = parent;
45252b85909SChristoph Hellwig ddev->groups = groups;
45352b85909SChristoph Hellwig dev_set_name(ddev, "%s", disk->disk_name);
4548235b5c1SChristoph Hellwig if (!(disk->flags & GENHD_FL_HIDDEN))
4558235b5c1SChristoph Hellwig ddev->devt = MKDEV(disk->major, disk->first_minor);
45683cbce95SLuis Chamberlain ret = device_add(ddev);
45783cbce95SLuis Chamberlain if (ret)
45899d8690aSChristoph Hellwig goto out_free_ext_minor;
45999d8690aSChristoph Hellwig
46099d8690aSChristoph Hellwig ret = disk_alloc_events(disk);
46199d8690aSChristoph Hellwig if (ret)
46299d8690aSChristoph Hellwig goto out_device_del;
46399d8690aSChristoph Hellwig
46452b85909SChristoph Hellwig ret = sysfs_create_link(block_depr, &ddev->kobj,
46552b85909SChristoph Hellwig kobject_name(&ddev->kobj));
46683cbce95SLuis Chamberlain if (ret)
46783cbce95SLuis Chamberlain goto out_device_del;
46852b85909SChristoph Hellwig
46952b85909SChristoph Hellwig /*
47052b85909SChristoph Hellwig * avoid probable deadlock caused by allocating memory with
47152b85909SChristoph Hellwig * GFP_KERNEL in runtime_resume callback of its all ancestor
47252b85909SChristoph Hellwig * devices
47352b85909SChristoph Hellwig */
47452b85909SChristoph Hellwig pm_runtime_set_memalloc_noio(ddev, true);
47552b85909SChristoph Hellwig
47652b85909SChristoph Hellwig disk->part0->bd_holder_dir =
47752b85909SChristoph Hellwig kobject_create_and_add("holders", &ddev->kobj);
478fe7d064fSLuis Chamberlain if (!disk->part0->bd_holder_dir) {
479fe7d064fSLuis Chamberlain ret = -ENOMEM;
480ff53cd52SThomas Weißschuh goto out_del_block_link;
481fe7d064fSLuis Chamberlain }
48252b85909SChristoph Hellwig disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
483fe7d064fSLuis Chamberlain if (!disk->slave_dir) {
484fe7d064fSLuis Chamberlain ret = -ENOMEM;
48583cbce95SLuis Chamberlain goto out_put_holder_dir;
486fe7d064fSLuis Chamberlain }
48752b85909SChristoph Hellwig
48883cbce95SLuis Chamberlain ret = blk_register_queue(disk);
48983cbce95SLuis Chamberlain if (ret)
49083cbce95SLuis Chamberlain goto out_put_slave_dir;
49175f4dca5SChristoph Hellwig
4929f18db57SChristoph Hellwig if (!(disk->flags & GENHD_FL_HIDDEN)) {
4938235b5c1SChristoph Hellwig ret = bdi_register(disk->bdi, "%u:%u",
4948235b5c1SChristoph Hellwig disk->major, disk->first_minor);
49583cbce95SLuis Chamberlain if (ret)
49683cbce95SLuis Chamberlain goto out_unregister_queue;
4978235b5c1SChristoph Hellwig bdi_set_owner(disk->bdi, ddev);
4989d5ee676SChristoph Hellwig ret = sysfs_create_link(&ddev->kobj,
4999d5ee676SChristoph Hellwig &disk->bdi->dev->kobj, "bdi");
50083cbce95SLuis Chamberlain if (ret)
50183cbce95SLuis Chamberlain goto out_unregister_bdi;
5028235b5c1SChristoph Hellwig
503e5cfefa9SYu Kuai /* Make sure the first partition scan proceeds */
504d6b6dfffSChristoph Hellwig if (get_capacity(disk) && disk_has_partscan(disk))
505e5cfefa9SYu Kuai set_bit(GD_NEED_PART_SCAN, &disk->state);
506e5cfefa9SYu Kuai
5079d5ee676SChristoph Hellwig bdev_add(disk->part0, ddev->devt);
508e16e506cSChristoph Hellwig if (get_capacity(disk))
50905bdb996SChristoph Hellwig disk_scan_partitions(disk, BLK_OPEN_READ);
51052b85909SChristoph Hellwig
51152b85909SChristoph Hellwig /*
51252b85909SChristoph Hellwig * Announce the disk and partitions after all partitions are
5138235b5c1SChristoph Hellwig * created. (for hidden disks uevents remain suppressed forever)
51452b85909SChristoph Hellwig */
51552b85909SChristoph Hellwig dev_set_uevent_suppress(ddev, 0);
51652b85909SChristoph Hellwig disk_uevent(disk, KOBJ_ADD);
517a0a6314aSChristoph Hellwig } else {
518a0a6314aSChristoph Hellwig /*
519a0a6314aSChristoph Hellwig * Even if the block_device for a hidden gendisk is not
520a0a6314aSChristoph Hellwig * registered, it needs to have a valid bd_dev so that the
521a0a6314aSChristoph Hellwig * freeing of the dynamic major works.
522a0a6314aSChristoph Hellwig */
523a0a6314aSChristoph Hellwig disk->part0->bd_dev = MKDEV(disk->major, disk->first_minor);
52452b85909SChristoph Hellwig }
52552b85909SChristoph Hellwig
52675f4dca5SChristoph Hellwig disk_update_readahead(disk);
52777ea887eSTejun Heo disk_add_events(disk);
52876792055SChristoph Hellwig set_bit(GD_ADDED, &disk->state);
52983cbce95SLuis Chamberlain return 0;
53083cbce95SLuis Chamberlain
/* Error unwind: each label undoes one registration step, in reverse order. */
53183cbce95SLuis Chamberlain out_unregister_bdi:
53283cbce95SLuis Chamberlain if (!(disk->flags & GENHD_FL_HIDDEN))
53383cbce95SLuis Chamberlain bdi_unregister(disk->bdi);
53483cbce95SLuis Chamberlain out_unregister_queue:
53583cbce95SLuis Chamberlain blk_unregister_queue(disk);
536fa81cbafSChen Zhongjin rq_qos_exit(disk->queue);
53783cbce95SLuis Chamberlain out_put_slave_dir:
53883cbce95SLuis Chamberlain kobject_put(disk->slave_dir);
539d90db3b1SChristoph Hellwig disk->slave_dir = NULL;
54083cbce95SLuis Chamberlain out_put_holder_dir:
54183cbce95SLuis Chamberlain kobject_put(disk->part0->bd_holder_dir);
54283cbce95SLuis Chamberlain out_del_block_link:
54383cbce95SLuis Chamberlain sysfs_remove_link(block_depr, dev_name(ddev));
54489665b3dSLi Nan pm_runtime_set_memalloc_noio(ddev, false);
54583cbce95SLuis Chamberlain out_device_del:
54683cbce95SLuis Chamberlain device_del(ddev);
54783cbce95SLuis Chamberlain out_free_ext_minor:
54883cbce95SLuis Chamberlain if (disk->major == BLOCK_EXT_MAJOR)
54983cbce95SLuis Chamberlain blk_free_ext_minor(disk->first_minor);
55002341a08SYu Kuai out_exit_elevator:
55102341a08SYu Kuai if (disk->queue->elevator)
55202341a08SYu Kuai elevator_exit(disk->queue);
553278167fdSLuis Chamberlain return ret;
5543a65dfe8SJens Axboe }
555e63a46beSDan Williams EXPORT_SYMBOL(device_add_disk);
5563a65dfe8SJens Axboe
/*
 * blk_report_disk_dead - notify every block device of @disk of its death
 *
 * Walks the partition xarray under RCU and calls bdev_mark_dead() on each
 * bdev; a kobject reference pins the bdev while the RCU lock is dropped
 * around the call.  @surprise is forwarded to bdev_mark_dead().
 */
blk_report_disk_dead(struct gendisk * disk,bool surprise)557d8530de5SChristoph Hellwig static void blk_report_disk_dead(struct gendisk *disk, bool surprise)
558f55e017cSChristoph Hellwig {
559f55e017cSChristoph Hellwig struct block_device *bdev;
560f55e017cSChristoph Hellwig unsigned long idx;
561f55e017cSChristoph Hellwig
562f55e017cSChristoph Hellwig rcu_read_lock();
563f55e017cSChristoph Hellwig xa_for_each(&disk->part_tbl, idx, bdev) {
564f55e017cSChristoph Hellwig if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
565f55e017cSChristoph Hellwig continue;
566f55e017cSChristoph Hellwig rcu_read_unlock();
567f55e017cSChristoph Hellwig
568d8530de5SChristoph Hellwig bdev_mark_dead(bdev, surprise);
569f55e017cSChristoph Hellwig
570f55e017cSChristoph Hellwig put_device(&bdev->bd_device);
571f55e017cSChristoph Hellwig rcu_read_lock();
572f55e017cSChristoph Hellwig }
573f55e017cSChristoph Hellwig rcu_read_unlock();
574f55e017cSChristoph Hellwig }
575f55e017cSChristoph Hellwig
/*
 * __blk_mark_disk_dead - internal helper to stop all I/O on a disk
 *
 * Idempotent: the GD_DEAD test_and_set makes repeated calls no-ops.
 */
__blk_mark_disk_dead(struct gendisk * disk)576d8530de5SChristoph Hellwig static void __blk_mark_disk_dead(struct gendisk *disk)
5777a5428dcSChristoph Hellwig {
57866fddc25SChristoph Hellwig /*
57966fddc25SChristoph Hellwig * Fail any new I/O.
58066fddc25SChristoph Hellwig */
581a4f75764SChristoph Hellwig if (test_and_set_bit(GD_DEAD, &disk->state))
582a4f75764SChristoph Hellwig return;
583a4f75764SChristoph Hellwig
/* Only mark the queue dying if this disk owns it. */
58466fddc25SChristoph Hellwig if (test_bit(GD_OWNS_QUEUE, &disk->state))
58566fddc25SChristoph Hellwig blk_queue_flag_set(QUEUE_FLAG_DYING, disk->queue);
58671b26083SChristoph Hellwig
58771b26083SChristoph Hellwig /*
58871b26083SChristoph Hellwig * Stop buffered writers from dirtying pages that can't be written out.
58971b26083SChristoph Hellwig */
59066fddc25SChristoph Hellwig set_capacity(disk, 0);
59166fddc25SChristoph Hellwig
59266fddc25SChristoph Hellwig /*
59366fddc25SChristoph Hellwig * Prevent new I/O from crossing bio_queue_enter().
59466fddc25SChristoph Hellwig */
59566fddc25SChristoph Hellwig blk_queue_start_drain(disk->queue);
596d8530de5SChristoph Hellwig }
597f55e017cSChristoph Hellwig
598d8530de5SChristoph Hellwig /**
599d8530de5SChristoph Hellwig * blk_mark_disk_dead - mark a disk as dead
600d8530de5SChristoph Hellwig * @disk: disk to mark as dead
601d8530de5SChristoph Hellwig *
602d8530de5SChristoph Hellwig * Mark a disk as dead (e.g. surprise removed) and don't accept any new I/O
603d8530de5SChristoph Hellwig * to this disk. All block devices of the disk are notified of the removal
603d8530de5SChristoph Hellwig * via blk_report_disk_dead() with surprise == true.
604d8530de5SChristoph Hellwig */
blk_mark_disk_dead(struct gendisk * disk)605d8530de5SChristoph Hellwig void blk_mark_disk_dead(struct gendisk *disk)
606d8530de5SChristoph Hellwig {
607d8530de5SChristoph Hellwig __blk_mark_disk_dead(disk);
608d8530de5SChristoph Hellwig blk_report_disk_dead(disk, true);
6097a5428dcSChristoph Hellwig }
6107a5428dcSChristoph Hellwig EXPORT_SYMBOL_GPL(blk_mark_disk_dead);
6117a5428dcSChristoph Hellwig
/**
 * del_gendisk - remove the gendisk
 * @disk: the struct gendisk to remove
 *
 * Removes the gendisk and all its associated resources. This deletes the
 * partitions associated with the gendisk, and unregisters the associated
 * request_queue.
 *
 * This is the counter to the respective __device_add_disk() call.
 *
 * The final removal of the struct gendisk happens when its refcount reaches 0
 * with put_disk(), which should be called after del_gendisk(), if
 * __device_add_disk() was used.
 *
 * Drivers exist which depend on the release of the gendisk to be synchronous,
 * it should not be deferred.
 *
 * Context: can sleep
 */
void del_gendisk(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct block_device *part;
	unsigned long idx;

	might_sleep();

	/* Nothing to do for a disk that was never added (hidden disks pass). */
	if (WARN_ON_ONCE(!disk_live(disk) && !(disk->flags & GENHD_FL_HIDDEN)))
		return;

	disk_del_events(disk);

	/*
	 * Prevent new openers by unlinking the bdev inode.
	 */
	mutex_lock(&disk->open_mutex);
	xa_for_each(&disk->part_tbl, idx, part)
		remove_inode_hash(part->bd_inode);
	mutex_unlock(&disk->open_mutex);

	/*
	 * Tell the file system to write back all dirty data and shut down if
	 * it hasn't been notified earlier.
	 */
	if (!test_bit(GD_DEAD, &disk->state))
		blk_report_disk_dead(disk, false);

	/*
	 * Drop all partitions now that the disk is marked dead.
	 * (xa_for_each_start from index 1 skips part0, the whole-disk bdev.)
	 */
	mutex_lock(&disk->open_mutex);
	__blk_mark_disk_dead(disk);
	xa_for_each_start(&disk->part_tbl, idx, part, 1)
		drop_partition(part);
	mutex_unlock(&disk->open_mutex);

	if (!(disk->flags & GENHD_FL_HIDDEN)) {
		sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");

		/*
		 * Unregister bdi before releasing device numbers (as they can
		 * get reused and we'd get clashes in sysfs).
		 */
		bdi_unregister(disk->bdi);
	}

	blk_unregister_queue(disk);

	kobject_put(disk->part0->bd_holder_dir);
	kobject_put(disk->slave_dir);
	disk->slave_dir = NULL;

	part_stat_set_all(disk->part0, 0);
	disk->part0->bd_stamp = 0;
	sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
	pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
	device_del(disk_to_dev(disk));

	/* Wait for the queue freeze started via blk_queue_start_drain(). */
	blk_mq_freeze_queue_wait(q);

	blk_throtl_cancel_bios(disk);

	blk_sync_queue(q);
	blk_flush_integrity();

	if (queue_is_mq(q))
		blk_mq_cancel_work_sync(q);

	/* Tear down the elevator and rq-qos while no requests are in flight. */
	blk_mq_quiesce_queue(q);
	if (q->elevator) {
		mutex_lock(&q->sysfs_lock);
		elevator_exit(q);
		mutex_unlock(&q->sysfs_lock);
	}
	rq_qos_exit(q);
	blk_mq_unquiesce_queue(q);

	/*
	 * If the disk does not own the queue, allow using passthrough requests
	 * again. Else leave the queue frozen to fail all I/O.
	 */
	if (!test_bit(GD_OWNS_QUEUE, &disk->state))
		__blk_mq_unfreeze_queue(q, true);
	else if (queue_is_mq(q))
		blk_mq_exit_queue(q);
}
EXPORT_SYMBOL(del_gendisk);
7193a65dfe8SJens Axboe
720f059a1d2SXie Yongji /**
721f059a1d2SXie Yongji * invalidate_disk - invalidate the disk
722f059a1d2SXie Yongji * @disk: the struct gendisk to invalidate
723f059a1d2SXie Yongji *
724f059a1d2SXie Yongji * A helper to invalidates the disk. It will clean the disk's associated
725f059a1d2SXie Yongji * buffer/page caches and reset its internal states so that the disk
726f059a1d2SXie Yongji * can be reused by the drivers.
727f059a1d2SXie Yongji *
728f059a1d2SXie Yongji * Context: can sleep
729f059a1d2SXie Yongji */
invalidate_disk(struct gendisk * disk)730f059a1d2SXie Yongji void invalidate_disk(struct gendisk *disk)
731f059a1d2SXie Yongji {
732f059a1d2SXie Yongji struct block_device *bdev = disk->part0;
733f059a1d2SXie Yongji
734f059a1d2SXie Yongji invalidate_bdev(bdev);
735f059a1d2SXie Yongji bdev->bd_inode->i_mapping->wb_err = 0;
736f059a1d2SXie Yongji set_capacity(disk, 0);
737f059a1d2SXie Yongji }
738f059a1d2SXie Yongji EXPORT_SYMBOL(invalidate_disk);
739f059a1d2SXie Yongji
74099e6608cSVishal Verma /* sysfs access to bad-blocks list. */
disk_badblocks_show(struct device * dev,struct device_attribute * attr,char * page)74199e6608cSVishal Verma static ssize_t disk_badblocks_show(struct device *dev,
74299e6608cSVishal Verma struct device_attribute *attr,
74399e6608cSVishal Verma char *page)
74499e6608cSVishal Verma {
74599e6608cSVishal Verma struct gendisk *disk = dev_to_disk(dev);
74699e6608cSVishal Verma
74799e6608cSVishal Verma if (!disk->bb)
74899e6608cSVishal Verma return sprintf(page, "\n");
74999e6608cSVishal Verma
75099e6608cSVishal Verma return badblocks_show(disk->bb, page, 0);
75199e6608cSVishal Verma }
75299e6608cSVishal Verma
disk_badblocks_store(struct device * dev,struct device_attribute * attr,const char * page,size_t len)75399e6608cSVishal Verma static ssize_t disk_badblocks_store(struct device *dev,
75499e6608cSVishal Verma struct device_attribute *attr,
75599e6608cSVishal Verma const char *page, size_t len)
75699e6608cSVishal Verma {
75799e6608cSVishal Verma struct gendisk *disk = dev_to_disk(dev);
75899e6608cSVishal Verma
75999e6608cSVishal Verma if (!disk->bb)
76099e6608cSVishal Verma return -ENXIO;
76199e6608cSVishal Verma
76299e6608cSVishal Verma return badblocks_store(disk->bb, page, len, 0);
76399e6608cSVishal Verma }
76499e6608cSVishal Verma
765fbdee71bSChristoph Hellwig #ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
blk_probe_dev(dev_t devt)766*b1e537faSYang Erkun static bool blk_probe_dev(dev_t devt)
767bd8eff3bSChristoph Hellwig {
768a160c615SChristoph Hellwig unsigned int major = MAJOR(devt);
769a160c615SChristoph Hellwig struct blk_major_name **n;
770a160c615SChristoph Hellwig
771a160c615SChristoph Hellwig mutex_lock(&major_names_lock);
772a160c615SChristoph Hellwig for (n = &major_names[major_to_index(major)]; *n; n = &(*n)->next) {
773a160c615SChristoph Hellwig if ((*n)->major == major && (*n)->probe) {
774a160c615SChristoph Hellwig (*n)->probe(devt);
775a160c615SChristoph Hellwig mutex_unlock(&major_names_lock);
776*b1e537faSYang Erkun return true;
777a160c615SChristoph Hellwig }
778a160c615SChristoph Hellwig }
779a160c615SChristoph Hellwig mutex_unlock(&major_names_lock);
780*b1e537faSYang Erkun return false;
781*b1e537faSYang Erkun }
782a160c615SChristoph Hellwig
/*
 * Resolve a driver for @devt: first try an already-registered probe for the
 * major; if none exists, ask modprobe for "block-major-MAJOR-MINOR" (falling
 * back to the old-style 2.4 "block-major-MAJOR" alias) and retry the probe
 * once the module has loaded.
 */
void blk_request_module(dev_t devt)
{
	int error;

	if (blk_probe_dev(devt))
		return;

	error = request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt));
	/* Make old-style 2.4 aliases work */
	if (error > 0)
		error = request_module("block-major-%d", MAJOR(devt));
	if (!error)
		blk_probe_dev(devt);
}
797fbdee71bSChristoph Hellwig #endif /* CONFIG_BLOCK_LEGACY_AUTOLOAD */
798bd8eff3bSChristoph Hellwig
7993a65dfe8SJens Axboe #ifdef CONFIG_PROC_FS
8003a65dfe8SJens Axboe /* iterator */
/*
 * seq_file ->start: allocate a class iterator over all disks and advance it
 * to the *pos'th one.  The iterator is stashed in seqf->private and freed in
 * disk_seqf_stop(), which runs even when this returns NULL or an error.
 */
static void *disk_seqf_start(struct seq_file *seqf, loff_t *pos)
{
	loff_t skip = *pos;
	struct class_dev_iter *iter;
	struct device *dev;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	seqf->private = iter;
	class_dev_iter_init(iter, &block_class, NULL, &disk_type);
	/* Step skip+1 times so *pos == 0 yields the first disk. */
	do {
		dev = class_dev_iter_next(iter);
		if (!dev)
			return NULL;
	} while (skip--);

	return dev_to_disk(dev);
}
82168c4d4a7SGreg Kroah-Hartman
/* seq_file ->next: advance the class iterator to the next disk. */
static void *disk_seqf_next(struct seq_file *seqf, void *v, loff_t *pos)
{
	struct device *dev;

	(*pos)++;
	dev = class_dev_iter_next(seqf->private);
	if (!dev)
		return NULL;

	return dev_to_disk(dev);
}
8333a65dfe8SJens Axboe
disk_seqf_stop(struct seq_file * seqf,void * v)834def4e38dSTejun Heo static void disk_seqf_stop(struct seq_file *seqf, void *v)
83527f30251SGreg Kroah-Hartman {
836def4e38dSTejun Heo struct class_dev_iter *iter = seqf->private;
837def4e38dSTejun Heo
838def4e38dSTejun Heo /* stop is called even after start failed :-( */
839def4e38dSTejun Heo if (iter) {
840def4e38dSTejun Heo class_dev_iter_exit(iter);
841def4e38dSTejun Heo kfree(iter);
84277da1605SVegard Nossum seqf->private = NULL;
843def4e38dSTejun Heo }
84427f30251SGreg Kroah-Hartman }
84527f30251SGreg Kroah-Hartman
/* ->start for /proc/partitions: emit the header once, before the first row. */
static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
{
	void *p = disk_seqf_start(seqf, pos);

	if (!IS_ERR_OR_NULL(p) && !*pos)
		seq_puts(seqf, "major minor #blocks name\n\n");
	return p;
}
8553a65dfe8SJens Axboe
/*
 * ->show for /proc/partitions: print one line per non-empty partition of a
 * disk, skipping zero-capacity and hidden disks entirely.
 */
static int show_partition(struct seq_file *seqf, void *v)
{
	struct gendisk *sgp = v;
	struct block_device *part;
	unsigned long idx;

	if (!get_capacity(sgp) || (sgp->flags & GENHD_FL_HIDDEN))
		return 0;

	/* part_tbl entries are walked under RCU. */
	rcu_read_lock();
	xa_for_each(&sgp->part_tbl, idx, part) {
		if (!bdev_nr_sectors(part))
			continue;
		/* size is reported in 1 KiB blocks, hence the >> 1. */
		seq_printf(seqf, "%4d %7d %10llu %pg\n",
			   MAJOR(part->bd_dev), MINOR(part->bd_dev),
			   bdev_nr_sectors(part) >> 1, part);
	}
	rcu_read_unlock();
	return 0;
}
8763a65dfe8SJens Axboe
/* seq_file operations backing /proc/partitions. */
static const struct seq_operations partitions_op = {
	.start	= show_partition_start,
	.next	= disk_seqf_next,
	.stop	= disk_seqf_stop,
	.show	= show_partition
};
8833a65dfe8SJens Axboe #endif
8843a65dfe8SJens Axboe
/*
 * Early block-layer initialization: register the "block" device class,
 * initialize the core block layer, claim the extended-partition major and
 * create the deprecated top-level /sys/block kobject.
 */
static int __init genhd_device_init(void)
{
	int error;

	error = class_register(&block_class);
	if (unlikely(error))
		return error;
	blk_dev_init();

	register_blkdev(BLOCK_EXT_MAJOR, "blkext");

	/* create top-level block dir; failure is ignored (block_depr stays NULL) */
	block_depr = kobject_create_and_add("block", NULL);
	return 0;
}
9003a65dfe8SJens Axboe
9013a65dfe8SJens Axboe subsys_initcall(genhd_device_init);
9023a65dfe8SJens Axboe
disk_range_show(struct device * dev,struct device_attribute * attr,char * buf)903edfaa7c3SKay Sievers static ssize_t disk_range_show(struct device *dev,
904edfaa7c3SKay Sievers struct device_attribute *attr, char *buf)
9053a65dfe8SJens Axboe {
906edfaa7c3SKay Sievers struct gendisk *disk = dev_to_disk(dev);
9073a65dfe8SJens Axboe
908edfaa7c3SKay Sievers return sprintf(buf, "%d\n", disk->minors);
9093a65dfe8SJens Axboe }
9103a65dfe8SJens Axboe
disk_ext_range_show(struct device * dev,struct device_attribute * attr,char * buf)9111f014290STejun Heo static ssize_t disk_ext_range_show(struct device *dev,
9121f014290STejun Heo struct device_attribute *attr, char *buf)
9131f014290STejun Heo {
9141f014290STejun Heo struct gendisk *disk = dev_to_disk(dev);
9151f014290STejun Heo
9161ebe2e5fSChristoph Hellwig return sprintf(buf, "%d\n",
9171ebe2e5fSChristoph Hellwig (disk->flags & GENHD_FL_NO_PART) ? 1 : DISK_MAX_PARTS);
9181f014290STejun Heo }
9191f014290STejun Heo
disk_removable_show(struct device * dev,struct device_attribute * attr,char * buf)920edfaa7c3SKay Sievers static ssize_t disk_removable_show(struct device *dev,
921edfaa7c3SKay Sievers struct device_attribute *attr, char *buf)
9223a65dfe8SJens Axboe {
923edfaa7c3SKay Sievers struct gendisk *disk = dev_to_disk(dev);
9243a65dfe8SJens Axboe
925edfaa7c3SKay Sievers return sprintf(buf, "%d\n",
9263a65dfe8SJens Axboe (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
927edfaa7c3SKay Sievers }
9283a65dfe8SJens Axboe
disk_hidden_show(struct device * dev,struct device_attribute * attr,char * buf)9298ddcd653SChristoph Hellwig static ssize_t disk_hidden_show(struct device *dev,
9308ddcd653SChristoph Hellwig struct device_attribute *attr, char *buf)
9318ddcd653SChristoph Hellwig {
9328ddcd653SChristoph Hellwig struct gendisk *disk = dev_to_disk(dev);
9338ddcd653SChristoph Hellwig
9348ddcd653SChristoph Hellwig return sprintf(buf, "%d\n",
9358ddcd653SChristoph Hellwig (disk->flags & GENHD_FL_HIDDEN ? 1 : 0));
9368ddcd653SChristoph Hellwig }
9378ddcd653SChristoph Hellwig
disk_ro_show(struct device * dev,struct device_attribute * attr,char * buf)9381c9ce527SKay Sievers static ssize_t disk_ro_show(struct device *dev,
9391c9ce527SKay Sievers struct device_attribute *attr, char *buf)
9401c9ce527SKay Sievers {
9411c9ce527SKay Sievers struct gendisk *disk = dev_to_disk(dev);
9421c9ce527SKay Sievers
943b7db9956STejun Heo return sprintf(buf, "%d\n", get_disk_ro(disk) ? 1 : 0);
9441c9ce527SKay Sievers }
9451c9ce527SKay Sievers
part_size_show(struct device * dev,struct device_attribute * attr,char * buf)9463ad5cee5SChristoph Hellwig ssize_t part_size_show(struct device *dev,
9473ad5cee5SChristoph Hellwig struct device_attribute *attr, char *buf)
9483ad5cee5SChristoph Hellwig {
9490d02129eSChristoph Hellwig return sprintf(buf, "%llu\n", bdev_nr_sectors(dev_to_bdev(dev)));
9503ad5cee5SChristoph Hellwig }
9513ad5cee5SChristoph Hellwig
/*
 * sysfs "stat": I/O accounting for the device.  Fields are, in order:
 * read ios/merges/sectors/ticks, write ios/merges/sectors/ticks, in-flight,
 * io_ticks, time_in_queue, then discard and flush counters.  All times are
 * converted to milliseconds.
 */
ssize_t part_stat_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	struct block_device *bdev = dev_to_bdev(dev);
	struct request_queue *q = bdev_get_queue(bdev);
	struct disk_stats stat;
	unsigned int inflight;

	if (queue_is_mq(q))
		inflight = blk_mq_in_flight(q, bdev);
	else
		inflight = part_in_flight(bdev);

	/* While I/O is in flight, bring io_ticks up to date before reading. */
	if (inflight) {
		part_stat_lock();
		update_io_ticks(bdev, jiffies, true);
		part_stat_unlock();
	}
	part_stat_read_all(bdev, &stat);
	return sprintf(buf,
		"%8lu %8lu %8llu %8u "
		"%8lu %8lu %8llu %8u "
		"%8u %8u %8u "
		"%8lu %8lu %8llu %8u "
		"%8lu %8u"
		"\n",
		stat.ios[STAT_READ],
		stat.merges[STAT_READ],
		(unsigned long long)stat.sectors[STAT_READ],
		(unsigned int)div_u64(stat.nsecs[STAT_READ], NSEC_PER_MSEC),
		stat.ios[STAT_WRITE],
		stat.merges[STAT_WRITE],
		(unsigned long long)stat.sectors[STAT_WRITE],
		(unsigned int)div_u64(stat.nsecs[STAT_WRITE], NSEC_PER_MSEC),
		inflight,
		jiffies_to_msecs(stat.io_ticks),
		(unsigned int)div_u64(stat.nsecs[STAT_READ] +
				stat.nsecs[STAT_WRITE] +
				stat.nsecs[STAT_DISCARD] +
				stat.nsecs[STAT_FLUSH],
				NSEC_PER_MSEC),
		stat.ios[STAT_DISCARD],
		stat.merges[STAT_DISCARD],
		(unsigned long long)stat.sectors[STAT_DISCARD],
		(unsigned int)div_u64(stat.nsecs[STAT_DISCARD], NSEC_PER_MSEC),
		stat.ios[STAT_FLUSH],
		(unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC));
}
10003ad5cee5SChristoph Hellwig
part_inflight_show(struct device * dev,struct device_attribute * attr,char * buf)10013ad5cee5SChristoph Hellwig ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
10023ad5cee5SChristoph Hellwig char *buf)
10033ad5cee5SChristoph Hellwig {
10040d02129eSChristoph Hellwig struct block_device *bdev = dev_to_bdev(dev);
1005ed6cddefSPavel Begunkov struct request_queue *q = bdev_get_queue(bdev);
10063ad5cee5SChristoph Hellwig unsigned int inflight[2];
10073ad5cee5SChristoph Hellwig
1008b2f609e1SChristoph Hellwig if (queue_is_mq(q))
10090d02129eSChristoph Hellwig blk_mq_in_flight_rw(q, bdev, inflight);
1010b2f609e1SChristoph Hellwig else
10110d02129eSChristoph Hellwig part_in_flight_rw(bdev, inflight);
1012b2f609e1SChristoph Hellwig
10133ad5cee5SChristoph Hellwig return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]);
10143ad5cee5SChristoph Hellwig }
10153ad5cee5SChristoph Hellwig
disk_capability_show(struct device * dev,struct device_attribute * attr,char * buf)1016edfaa7c3SKay Sievers static ssize_t disk_capability_show(struct device *dev,
1017edfaa7c3SKay Sievers struct device_attribute *attr, char *buf)
101886ce18d7SKristen Carlson Accardi {
1019e81cd5a9SChristoph Hellwig dev_warn_once(dev, "the capability attribute has been deprecated.\n");
1020e81cd5a9SChristoph Hellwig return sprintf(buf, "0\n");
102186ce18d7SKristen Carlson Accardi }
1022edfaa7c3SKay Sievers
disk_alignment_offset_show(struct device * dev,struct device_attribute * attr,char * buf)1023c72758f3SMartin K. Petersen static ssize_t disk_alignment_offset_show(struct device *dev,
1024c72758f3SMartin K. Petersen struct device_attribute *attr,
1025c72758f3SMartin K. Petersen char *buf)
1026c72758f3SMartin K. Petersen {
1027c72758f3SMartin K. Petersen struct gendisk *disk = dev_to_disk(dev);
1028c72758f3SMartin K. Petersen
1029640f2a23SChristoph Hellwig return sprintf(buf, "%d\n", bdev_alignment_offset(disk->part0));
1030c72758f3SMartin K. Petersen }
1031c72758f3SMartin K. Petersen
disk_discard_alignment_show(struct device * dev,struct device_attribute * attr,char * buf)103286b37281SMartin K. Petersen static ssize_t disk_discard_alignment_show(struct device *dev,
103386b37281SMartin K. Petersen struct device_attribute *attr,
103486b37281SMartin K. Petersen char *buf)
103586b37281SMartin K. Petersen {
103686b37281SMartin K. Petersen struct gendisk *disk = dev_to_disk(dev);
103786b37281SMartin K. Petersen
10384e1462ffSChristoph Hellwig return sprintf(buf, "%d\n", bdev_alignment_offset(disk->part0));
103986b37281SMartin K. Petersen }
104086b37281SMartin K. Petersen
diskseq_show(struct device * dev,struct device_attribute * attr,char * buf)104113927b31SMatteo Croce static ssize_t diskseq_show(struct device *dev,
104213927b31SMatteo Croce struct device_attribute *attr, char *buf)
104313927b31SMatteo Croce {
104413927b31SMatteo Croce struct gendisk *disk = dev_to_disk(dev);
104513927b31SMatteo Croce
104613927b31SMatteo Croce return sprintf(buf, "%llu\n", disk->diskseq);
104713927b31SMatteo Croce }
104813927b31SMatteo Croce
partscan_show(struct device * dev,struct device_attribute * attr,char * buf)10493ffef551SChristoph Hellwig static ssize_t partscan_show(struct device *dev,
10503ffef551SChristoph Hellwig struct device_attribute *attr, char *buf)
10513ffef551SChristoph Hellwig {
10523ffef551SChristoph Hellwig return sprintf(buf, "%u\n", disk_has_partscan(dev_to_disk(dev)));
10533ffef551SChristoph Hellwig }
10543ffef551SChristoph Hellwig
/* Per-disk sysfs attributes; all read-only except the badblocks list. */
static DEVICE_ATTR(range, 0444, disk_range_show, NULL);
static DEVICE_ATTR(ext_range, 0444, disk_ext_range_show, NULL);
static DEVICE_ATTR(removable, 0444, disk_removable_show, NULL);
static DEVICE_ATTR(hidden, 0444, disk_hidden_show, NULL);
static DEVICE_ATTR(ro, 0444, disk_ro_show, NULL);
static DEVICE_ATTR(size, 0444, part_size_show, NULL);
static DEVICE_ATTR(alignment_offset, 0444, disk_alignment_offset_show, NULL);
static DEVICE_ATTR(discard_alignment, 0444, disk_discard_alignment_show, NULL);
static DEVICE_ATTR(capability, 0444, disk_capability_show, NULL);
static DEVICE_ATTR(stat, 0444, part_stat_show, NULL);
static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL);
static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store);
static DEVICE_ATTR(diskseq, 0444, diskseq_show, NULL);
static DEVICE_ATTR(partscan, 0444, partscan_show, NULL);
10693ad5cee5SChristoph Hellwig
1070c17bb495SAkinobu Mita #ifdef CONFIG_FAIL_MAKE_REQUEST
part_fail_show(struct device * dev,struct device_attribute * attr,char * buf)10713ad5cee5SChristoph Hellwig ssize_t part_fail_show(struct device *dev,
10723ad5cee5SChristoph Hellwig struct device_attribute *attr, char *buf)
10733ad5cee5SChristoph Hellwig {
10740d02129eSChristoph Hellwig return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_make_it_fail);
10753ad5cee5SChristoph Hellwig }
10763ad5cee5SChristoph Hellwig
part_fail_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)10773ad5cee5SChristoph Hellwig ssize_t part_fail_store(struct device *dev,
10783ad5cee5SChristoph Hellwig struct device_attribute *attr,
10793ad5cee5SChristoph Hellwig const char *buf, size_t count)
10803ad5cee5SChristoph Hellwig {
10813ad5cee5SChristoph Hellwig int i;
10823ad5cee5SChristoph Hellwig
10833ad5cee5SChristoph Hellwig if (count > 0 && sscanf(buf, "%d", &i) > 0)
10840d02129eSChristoph Hellwig dev_to_bdev(dev)->bd_make_it_fail = i;
10853ad5cee5SChristoph Hellwig
10863ad5cee5SChristoph Hellwig return count;
10873ad5cee5SChristoph Hellwig }
10883ad5cee5SChristoph Hellwig
/* Writable fault-injection attribute (CONFIG_FAIL_MAKE_REQUEST). */
static struct device_attribute dev_attr_fail =
	__ATTR(make-it-fail, 0644, part_fail_show, part_fail_store);
#endif /* CONFIG_FAIL_MAKE_REQUEST */

#ifdef CONFIG_FAIL_IO_TIMEOUT
/* Writable timeout fault-injection attribute (CONFIG_FAIL_IO_TIMEOUT). */
static struct device_attribute dev_attr_fail_timeout =
	__ATTR(io-timeout-fail, 0644, part_timeout_show, part_timeout_store);
#endif
1097edfaa7c3SKay Sievers
/* Attribute list for the whole-disk device node in sysfs. */
static struct attribute *disk_attrs[] = {
	&dev_attr_range.attr,
	&dev_attr_ext_range.attr,
	&dev_attr_removable.attr,
	&dev_attr_hidden.attr,
	&dev_attr_ro.attr,
	&dev_attr_size.attr,
	&dev_attr_alignment_offset.attr,
	&dev_attr_discard_alignment.attr,
	&dev_attr_capability.attr,
	&dev_attr_stat.attr,
	&dev_attr_inflight.attr,
	&dev_attr_badblocks.attr,
	&dev_attr_events.attr,
	&dev_attr_events_async.attr,
	&dev_attr_events_poll_msecs.attr,
	&dev_attr_diskseq.attr,
	&dev_attr_partscan.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
	&dev_attr_fail.attr,
#endif
#ifdef CONFIG_FAIL_IO_TIMEOUT
	&dev_attr_fail_timeout.attr,
#endif
	NULL
};
11243a65dfe8SJens Axboe
disk_visible(struct kobject * kobj,struct attribute * a,int n)11259438b3e0SDan Williams static umode_t disk_visible(struct kobject *kobj, struct attribute *a, int n)
11269438b3e0SDan Williams {
11279438b3e0SDan Williams struct device *dev = container_of(kobj, typeof(*dev), kobj);
11289438b3e0SDan Williams struct gendisk *disk = dev_to_disk(dev);
11299438b3e0SDan Williams
11309438b3e0SDan Williams if (a == &dev_attr_badblocks.attr && !disk->bb)
11319438b3e0SDan Williams return 0;
11329438b3e0SDan Williams return a->mode;
11339438b3e0SDan Williams }
11349438b3e0SDan Williams
/* Group wrapping disk_attrs, with visibility filtering via disk_visible(). */
static struct attribute_group disk_attr_group = {
	.attrs = disk_attrs,
	.is_visible = disk_visible,
};

/* All attribute groups attached to a disk device, config-dependent ones last. */
static const struct attribute_group *disk_attr_groups[] = {
	&disk_attr_group,
#ifdef CONFIG_BLK_DEV_IO_TRACE
	&blk_trace_attr_group,
#endif
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&blk_integrity_attr_group,
#endif
	NULL
};
1150edfaa7c3SKay Sievers
/**
 * disk_release - releases all allocated resources of the gendisk
 * @dev: the device representing this disk
 *
 * This function releases all allocated resources of the gendisk.
 *
 * Drivers which used __device_add_disk() have a gendisk with a request_queue
 * assigned. Since the request_queue sits on top of the gendisk for these
 * drivers we also call blk_put_queue() for them, and we expect the
 * request_queue refcount to reach 0 at this point, and so the request_queue
 * will also be freed prior to the disk.
 *
 * Context: can sleep
 */
static void disk_release(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	might_sleep();
	/* The disk must already have been removed (or never added). */
	WARN_ON_ONCE(disk_live(disk));

	blk_trace_remove(disk->queue);

	/*
	 * To undo the all initialization from blk_mq_init_allocated_queue in
	 * case of a probe failure where add_disk is never called we have to
	 * call blk_mq_exit_queue here. We can't do this for the more common
	 * teardown case (yet) as the tagset can be gone by the time the disk
	 * is released once it was added.
	 */
	if (queue_is_mq(disk->queue) &&
	    test_bit(GD_OWNS_QUEUE, &disk->state) &&
	    !test_bit(GD_ADDED, &disk->state))
		blk_mq_exit_queue(disk->queue);

	blkcg_exit_disk(disk);

	bioset_exit(&disk->bio_split);

	disk_release_events(disk);
	kfree(disk->random);
	disk_free_zone_bitmaps(disk);
	xa_destroy(&disk->part_tbl);

	/* Drop the queue reference taken when the disk was bound to it. */
	disk->queue->disk = NULL;
	blk_put_queue(disk->queue);

	if (test_bit(GD_ADDED, &disk->state) && disk->fops->free_disk)
		disk->fops->free_disk(disk);

	iput(disk->part0->bd_inode);	/* frees the disk */
}
120387eb7107SMatteo Croce
block_uevent(const struct device * dev,struct kobj_uevent_env * env)120423680f0bSGreg Kroah-Hartman static int block_uevent(const struct device *dev, struct kobj_uevent_env *env)
120587eb7107SMatteo Croce {
120623680f0bSGreg Kroah-Hartman const struct gendisk *disk = dev_to_disk(dev);
120787eb7107SMatteo Croce
120887eb7107SMatteo Croce return add_uevent_var(env, "DISKSEQ=%llu", disk->diskseq);
120987eb7107SMatteo Croce }
121087eb7107SMatteo Croce
/* The device class backing /sys/class/block; shared by all block devices. */
struct class block_class = {
	.name		= "block",
	.dev_uevent	= block_uevent,
};
12153a65dfe8SJens Axboe
/*
 * Let the driver pick a custom /dev node name (and mode) for the disk,
 * falling back to the default naming when no devnode hook is provided.
 */
static char *block_devnode(const struct device *dev, umode_t *mode,
		kuid_t *uid, kgid_t *gid)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (!disk->fops->devnode)
		return NULL;
	return disk->fops->devnode(disk, mode);
}
1225050a4f34SJens Axboe
/* Device type for whole-disk devices; partitions use a separate type. */
const struct device_type disk_type = {
	.name		= "disk",
	.groups		= disk_attr_groups,
	.release	= disk_release,
	.devnode	= block_devnode,
};
12323a65dfe8SJens Axboe
1233a6e2ba88SRandy Dunlap #ifdef CONFIG_PROC_FS
1234cf771cb5STejun Heo /*
1235cf771cb5STejun Heo * aggregate disk stat collector. Uses the same stats that the sysfs
1236cf771cb5STejun Heo * entries do, above, but makes them available through one seq_file.
1237cf771cb5STejun Heo *
1238cf771cb5STejun Heo * The output looks suspiciously like /proc/partitions with a bunch of
1239cf771cb5STejun Heo * extra fields.
1240cf771cb5STejun Heo */
static int diskstats_show(struct seq_file *seqf, void *v)
{
	struct gendisk *gp = v;
	struct block_device *hd;
	unsigned int inflight;
	struct disk_stats stat;
	unsigned long idx;

	/*
	if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next)
		seq_puts(seqf,	"major minor name"
				" rio rmerge rsect ruse wio wmerge "
				"wsect wuse running use aveq"
				"\n\n");
	*/

	/* RCU protects the partition xarray against concurrent add/remove. */
	rcu_read_lock();
	xa_for_each(&gp->part_tbl, idx, hd) {
		/* Skip zero-sized partitions; the whole disk is always shown. */
		if (bdev_is_partition(hd) && !bdev_nr_sectors(hd))
			continue;
		/* blk-mq tracks in-flight requests per hctx; bio-based uses part stats. */
		if (queue_is_mq(gp->queue))
			inflight = blk_mq_in_flight(gp->queue, hd);
		else
			inflight = part_in_flight(hd);

		/* Fold elapsed busy time into io_ticks while requests are pending. */
		if (inflight) {
			part_stat_lock();
			update_io_ticks(hd, jiffies, true);
			part_stat_unlock();
		}
		part_stat_read_all(hd, &stat);
		/* One /proc/diskstats line: read, write, in-flight, discard, flush fields. */
		seq_printf(seqf, "%4d %7d %pg "
			   "%lu %lu %lu %u "
			   "%lu %lu %lu %u "
			   "%u %u %u "
			   "%lu %lu %lu %u "
			   "%lu %u"
			   "\n",
			   MAJOR(hd->bd_dev), MINOR(hd->bd_dev), hd,
			   stat.ios[STAT_READ],
			   stat.merges[STAT_READ],
			   stat.sectors[STAT_READ],
			   (unsigned int)div_u64(stat.nsecs[STAT_READ],
							NSEC_PER_MSEC),
			   stat.ios[STAT_WRITE],
			   stat.merges[STAT_WRITE],
			   stat.sectors[STAT_WRITE],
			   (unsigned int)div_u64(stat.nsecs[STAT_WRITE],
							NSEC_PER_MSEC),
			   inflight,
			   jiffies_to_msecs(stat.io_ticks),
			   (unsigned int)div_u64(stat.nsecs[STAT_READ] +
						 stat.nsecs[STAT_WRITE] +
						 stat.nsecs[STAT_DISCARD] +
						 stat.nsecs[STAT_FLUSH],
							NSEC_PER_MSEC),
			   stat.ios[STAT_DISCARD],
			   stat.merges[STAT_DISCARD],
			   stat.sectors[STAT_DISCARD],
			   (unsigned int)div_u64(stat.nsecs[STAT_DISCARD],
						 NSEC_PER_MSEC),
			   stat.ios[STAT_FLUSH],
			   (unsigned int)div_u64(stat.nsecs[STAT_FLUSH],
						 NSEC_PER_MSEC)
			);
	}
	rcu_read_unlock();

	return 0;
}
13113a65dfe8SJens Axboe
/* seq_file iterator for /proc/diskstats; reuses the shared disk iterators. */
static const struct seq_operations diskstats_op = {
	.start	= disk_seqf_start,
	.next	= disk_seqf_next,
	.stop	= disk_seqf_stop,
	.show	= diskstats_show
};
1318f500975aSAlexey Dobriyan
/* Register /proc/diskstats and /proc/partitions at boot. */
static int __init proc_genhd_init(void)
{
	proc_create_seq("diskstats", 0, NULL, &diskstats_op);
	proc_create_seq("partitions", 0, NULL, &partitions_op);
	return 0;
}
module_init(proc_genhd_init);
1326a6e2ba88SRandy Dunlap #endif /* CONFIG_PROC_FS */
13273a65dfe8SJens Axboe
part_devt(struct gendisk * disk,u8 partno)1328c97d93c3SChristoph Hellwig dev_t part_devt(struct gendisk *disk, u8 partno)
1329c97d93c3SChristoph Hellwig {
13300e0ccdecSChristoph Hellwig struct block_device *part;
1331c97d93c3SChristoph Hellwig dev_t devt = 0;
1332c97d93c3SChristoph Hellwig
13330e0ccdecSChristoph Hellwig rcu_read_lock();
13340e0ccdecSChristoph Hellwig part = xa_load(&disk->part_tbl, partno);
13350e0ccdecSChristoph Hellwig if (part)
1336c97d93c3SChristoph Hellwig devt = part->bd_dev;
13370e0ccdecSChristoph Hellwig rcu_read_unlock();
1338c97d93c3SChristoph Hellwig
1339c97d93c3SChristoph Hellwig return devt;
1340c97d93c3SChristoph Hellwig }
1341c97d93c3SChristoph Hellwig
/*
 * Allocate and minimally initialize a gendisk on top of @q, NUMA-local to
 * @node_id.  Returns NULL on allocation failure; on failure the queue
 * reference is NOT dropped (the caller owns it).  The unwind labels below
 * mirror the setup order exactly — keep them in sync when changing either.
 */
struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass)
{
	struct gendisk *disk;

	disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
	if (!disk)
		return NULL;

	if (bioset_init(&disk->bio_split, BIO_POOL_SIZE, 0, 0))
		goto out_free_disk;

	disk->bdi = bdi_alloc(node_id);
	if (!disk->bdi)
		goto out_free_bioset;

	/* bdev_alloc() might need the queue, set before the first call */
	disk->queue = q;

	/* part0 is the block_device representing the whole disk. */
	disk->part0 = bdev_alloc(disk, 0);
	if (!disk->part0)
		goto out_free_bdi;

	disk->node_id = node_id;
	mutex_init(&disk->open_mutex);
	xa_init(&disk->part_tbl);
	if (xa_insert(&disk->part_tbl, 0, disk->part0, GFP_KERNEL))
		goto out_destroy_part_tbl;

	if (blkcg_init_disk(disk))
		goto out_erase_part0;

	rand_initialize_disk(disk);
	disk_to_dev(disk)->class = &block_class;
	disk_to_dev(disk)->type = &disk_type;
	device_initialize(disk_to_dev(disk));
	inc_diskseq(disk);
	q->disk = disk;
	lockdep_init_map(&disk->lockdep_map, "(bio completion)", lkclass, 0);
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
	INIT_LIST_HEAD(&disk->slave_bdevs);
#endif
	return disk;

out_erase_part0:
	xa_erase(&disk->part_tbl, 0);
out_destroy_part_tbl:
	xa_destroy(&disk->part_tbl);
	/* Sever the back-pointer so the bdev release path won't touch disk. */
	disk->part0->bd_disk = NULL;
	iput(disk->part0->bd_inode);
out_free_bdi:
	bdi_put(disk->bdi);
out_free_bioset:
	bioset_exit(&disk->bio_split);
out_free_disk:
	kfree(disk);
	return NULL;
}
14003a65dfe8SJens Axboe
__blk_alloc_disk(int node,struct lock_class_key * lkclass)14014dcc4874SChristoph Hellwig struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
1402f525464aSChristoph Hellwig {
1403f525464aSChristoph Hellwig struct request_queue *q;
1404f525464aSChristoph Hellwig struct gendisk *disk;
1405f525464aSChristoph Hellwig
140680bd4a7aSChristoph Hellwig q = blk_alloc_queue(node);
1407f525464aSChristoph Hellwig if (!q)
1408f525464aSChristoph Hellwig return NULL;
1409f525464aSChristoph Hellwig
14104a1fa41dSChristoph Hellwig disk = __alloc_disk_node(q, node, lkclass);
1411f525464aSChristoph Hellwig if (!disk) {
14126f8191fdSChristoph Hellwig blk_put_queue(q);
1413f525464aSChristoph Hellwig return NULL;
1414f525464aSChristoph Hellwig }
14156f8191fdSChristoph Hellwig set_bit(GD_OWNS_QUEUE, &disk->state);
1416f525464aSChristoph Hellwig return disk;
1417f525464aSChristoph Hellwig }
1418f525464aSChristoph Hellwig EXPORT_SYMBOL(__blk_alloc_disk);
1419f525464aSChristoph Hellwig
/**
 * put_disk - decrements the gendisk refcount
 * @disk: the struct gendisk to decrement the refcount for
 *
 * This decrements the refcount for the struct gendisk. When this reaches 0
 * we'll have disk_release() called.
 *
 * Note: for blk-mq disk put_disk must be called before freeing the tag_set
 * when handling probe errors (that is before add_disk() is called).
 *
 * Context: Any context, but the last reference must not be dropped from
 *	    atomic context.
 */
void put_disk(struct gendisk *disk)
{
	if (!disk)
		return;
	put_device(disk_to_dev(disk));
}
EXPORT_SYMBOL(put_disk);
14393a65dfe8SJens Axboe
set_disk_ro_uevent(struct gendisk * gd,int ro)1440e3264a4dSHannes Reinecke static void set_disk_ro_uevent(struct gendisk *gd, int ro)
1441e3264a4dSHannes Reinecke {
1442e3264a4dSHannes Reinecke char event[] = "DISK_RO=1";
1443e3264a4dSHannes Reinecke char *envp[] = { event, NULL };
1444e3264a4dSHannes Reinecke
1445e3264a4dSHannes Reinecke if (!ro)
1446e3264a4dSHannes Reinecke event[8] = '0';
1447e3264a4dSHannes Reinecke kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
1448e3264a4dSHannes Reinecke }
1449e3264a4dSHannes Reinecke
145052f019d4SChristoph Hellwig /**
145152f019d4SChristoph Hellwig * set_disk_ro - set a gendisk read-only
145252f019d4SChristoph Hellwig * @disk: gendisk to operate on
14537f31bee3SLukas Bulwahn * @read_only: %true to set the disk read-only, %false set the disk read/write
145452f019d4SChristoph Hellwig *
145552f019d4SChristoph Hellwig * This function is used to indicate whether a given disk device should have its
145652f019d4SChristoph Hellwig * read-only flag set. set_disk_ro() is typically used by device drivers to
145752f019d4SChristoph Hellwig * indicate whether the underlying physical device is write-protected.
145852f019d4SChristoph Hellwig */
set_disk_ro(struct gendisk * disk,bool read_only)145952f019d4SChristoph Hellwig void set_disk_ro(struct gendisk *disk, bool read_only)
14603a65dfe8SJens Axboe {
146152f019d4SChristoph Hellwig if (read_only) {
146252f019d4SChristoph Hellwig if (test_and_set_bit(GD_READ_ONLY, &disk->state))
146352f019d4SChristoph Hellwig return;
146452f019d4SChristoph Hellwig } else {
146552f019d4SChristoph Hellwig if (!test_and_clear_bit(GD_READ_ONLY, &disk->state))
146652f019d4SChristoph Hellwig return;
1467e3264a4dSHannes Reinecke }
146852f019d4SChristoph Hellwig set_disk_ro_uevent(disk, read_only);
14693a65dfe8SJens Axboe }
14703a65dfe8SJens Axboe EXPORT_SYMBOL(set_disk_ro);
14713a65dfe8SJens Axboe
/*
 * Assign the disk the next value of the global monotonically increasing
 * device sequence number (the file-static atomic64 'diskseq' declared above).
 */
void inc_diskseq(struct gendisk *disk)
{
	disk->diskseq = atomic64_inc_return(&diskseq);
}
1476