xref: /openbmc/linux/include/linux/blkdev.h (revision d6b6dfff)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Portions Copyright (C) 1992 Drew Eckhardt
4  */
5 #ifndef _LINUX_BLKDEV_H
6 #define _LINUX_BLKDEV_H
7 
8 #include <linux/types.h>
9 #include <linux/blk_types.h>
10 #include <linux/device.h>
11 #include <linux/list.h>
12 #include <linux/llist.h>
13 #include <linux/minmax.h>
14 #include <linux/timer.h>
15 #include <linux/workqueue.h>
16 #include <linux/wait.h>
17 #include <linux/bio.h>
18 #include <linux/gfp.h>
19 #include <linux/kdev_t.h>
20 #include <linux/rcupdate.h>
21 #include <linux/percpu-refcount.h>
22 #include <linux/blkzoned.h>
23 #include <linux/sched.h>
24 #include <linux/sbitmap.h>
25 #include <linux/uuid.h>
26 #include <linux/xarray.h>
27 
28 struct module;
29 struct request_queue;
30 struct elevator_queue;
31 struct blk_trace;
32 struct request;
33 struct sg_io_hdr;
34 struct blkcg_gq;
35 struct blk_flush_queue;
36 struct kiocb;
37 struct pr_ops;
38 struct rq_qos;
39 struct blk_queue_stats;
40 struct blk_stat_callback;
41 struct blk_crypto_profile;
42 
43 extern const struct device_type disk_type;
44 extern const struct device_type part_type;
45 extern struct class block_class;
46 
47 /*
48  * Maximum number of blkcg policies allowed to be registered concurrently.
49  * Defined here to simplify include dependency.
50  */
51 #define BLKCG_MAX_POLS		6
52 
53 #define DISK_MAX_PARTS			256
54 #define DISK_NAME_LEN			32
55 
56 #define PARTITION_META_INFO_VOLNAMELTH	64
57 /*
58  * Enough for the string representation of any kind of UUID plus a NUL terminator.
59  * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
60  */
61 #define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)
62 
63 struct partition_meta_info {
64 	char uuid[PARTITION_META_INFO_UUIDLTH];
65 	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
66 };
67 
68 /**
69  * DOC: genhd capability flags
70  *
71  * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
72  * removable media.  When set, the device remains present even when media is not
73  * inserted.  Shall not be set for devices which are removed entirely when the
74  * media is removed.
75  *
76  * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
77  * doesn't appear in sysfs, and can't be opened from userspace or using
78  * blkdev_get*. Used for the underlying components of multipath devices.
79  *
80  * ``GENHD_FL_NO_PART``: partition support is disabled.  The kernel will not
81  * scan for partitions from add_disk, and users can't add partitions manually.
82  *
83  */
84 enum {
85 	GENHD_FL_REMOVABLE			= 1 << 0,
86 	GENHD_FL_HIDDEN				= 1 << 1,
87 	GENHD_FL_NO_PART			= 1 << 2,
88 };
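
/*
 * Example (illustrative sketch, not part of the ABI): a driver for a
 * removable device that never carries a partition table would typically
 * set, before add_disk():
 *
 *	disk->flags |= GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
 *
 * while the hidden per-path components of a multipath device would set
 * GENHD_FL_HIDDEN instead.  "disk" here is the driver's gendisk.
 */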
89 
90 enum {
91 	DISK_EVENT_MEDIA_CHANGE			= 1 << 0, /* media changed */
92 	DISK_EVENT_EJECT_REQUEST		= 1 << 1, /* eject requested */
93 };
94 
95 enum {
96 	/* Poll even if events_poll_msecs is unset */
97 	DISK_EVENT_FLAG_POLL			= 1 << 0,
98 	/* Forward events to udev */
99 	DISK_EVENT_FLAG_UEVENT			= 1 << 1,
100 	/* Block event polling when open for exclusive write */
101 	DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
102 };
103 
104 struct disk_events;
105 struct badblocks;
106 
107 struct blk_integrity {
108 	const struct blk_integrity_profile	*profile;
109 	unsigned char				flags;
110 	unsigned char				tuple_size;
111 	unsigned char				interval_exp;
112 	unsigned char				tag_size;
113 };
114 
115 typedef unsigned int __bitwise blk_mode_t;
116 
117 /* open for reading */
118 #define BLK_OPEN_READ		((__force blk_mode_t)(1 << 0))
119 /* open for writing */
120 #define BLK_OPEN_WRITE		((__force blk_mode_t)(1 << 1))
121 /* open exclusively (vs other exclusive openers) */
122 #define BLK_OPEN_EXCL		((__force blk_mode_t)(1 << 2))
123 /* opened with O_NDELAY */
124 #define BLK_OPEN_NDELAY		((__force blk_mode_t)(1 << 3))
125 /* open for "writes" only for ioctls (special hack for floppy.c) */
126 #define BLK_OPEN_WRITE_IOCTL	((__force blk_mode_t)(1 << 4))
127 
128 struct gendisk {
129 	/*
130 	 * major/first_minor/minors should not be set by any new driver, the
131 	 * block core will take care of allocating them automatically.
132 	 */
133 	int major;
134 	int first_minor;
135 	int minors;
136 
137 	char disk_name[DISK_NAME_LEN];	/* name of major driver */
138 
139 	unsigned short events;		/* supported events */
140 	unsigned short event_flags;	/* flags related to event processing */
141 
142 	struct xarray part_tbl;
143 	struct block_device *part0;
144 
145 	const struct block_device_operations *fops;
146 	struct request_queue *queue;
147 	void *private_data;
148 
149 	struct bio_set bio_split;
150 
151 	int flags;
152 	unsigned long state;
153 #define GD_NEED_PART_SCAN		0
154 #define GD_READ_ONLY			1
155 #define GD_DEAD				2
156 #define GD_NATIVE_CAPACITY		3
157 #define GD_ADDED			4
158 #define GD_SUPPRESS_PART_SCAN		5
159 #define GD_OWNS_QUEUE			6
160 
161 	struct mutex open_mutex;	/* open/close mutex */
162 	unsigned open_partitions;	/* number of open partitions */
163 
164 	struct backing_dev_info	*bdi;
165 	struct kobject queue_kobj;	/* the queue/ directory */
166 	struct kobject *slave_dir;
167 #ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
168 	struct list_head slave_bdevs;
169 #endif
170 	struct timer_rand_state *random;
171 	atomic_t sync_io;		/* RAID */
172 	struct disk_events *ev;
173 
174 #ifdef CONFIG_BLK_DEV_ZONED
175 	/*
176 	 * Zoned block device information for request dispatch control.
177 	 * nr_zones is the total number of zones of the device. This is always
178 	 * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
179 	 * bits which indicates if a zone is conventional (bit set) or
180 	 * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
181 	 * bits which indicates if a zone is write locked, that is, if a write
182 	 * request targeting the zone was dispatched.
183 	 *
184 	 * Reads of this information must be protected with blk_queue_enter() /
185 	 * blk_queue_exit(). Modifying this information is only allowed while
186 	 * no requests are being processed. See also blk_mq_freeze_queue() and
187 	 * blk_mq_unfreeze_queue().
188 	 */
189 	unsigned int		nr_zones;
190 	unsigned int		max_open_zones;
191 	unsigned int		max_active_zones;
192 	unsigned long		*conv_zones_bitmap;
193 	unsigned long		*seq_zones_wlock;
194 #endif /* CONFIG_BLK_DEV_ZONED */
195 
196 #if IS_ENABLED(CONFIG_CDROM)
197 	struct cdrom_device_info *cdi;
198 #endif
199 	int node_id;
200 	struct badblocks *bb;
201 	struct lockdep_map lockdep_map;
202 	u64 diskseq;
203 	blk_mode_t open_mode;
204 
205 	/*
206 	 * Independent sector access ranges. This is always NULL for
207 	 * devices that do not have multiple independent access ranges.
208 	 */
209 	struct blk_independent_access_ranges *ia_ranges;
210 };
211 
212 static inline bool disk_live(struct gendisk *disk)
213 {
214 	return !inode_unhashed(disk->part0->bd_inode);
215 }
216 
217 /**
218  * disk_openers - returns how many openers there are for a disk
219  * @disk: disk to check
220  *
221  * This returns the number of openers for a disk.  Note that this value is only
222  * stable if disk->open_mutex is held.
223  *
224  * Note: Due to a quirk in the block layer open code, each open partition is
225  * only counted once even if there are multiple openers.
226  */
227 static inline unsigned int disk_openers(struct gendisk *disk)
228 {
229 	return atomic_read(&disk->part0->bd_openers);
230 }
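
/*
 * Example (sketch): a hypothetical driver helper that refuses to change
 * device geometry while the disk is in use.  The caller must hold
 * disk->open_mutex so the opener count cannot change underneath us.
 */
static inline bool example_disk_may_reconfigure(struct gendisk *disk)
{
	return disk_openers(disk) == 0;
}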
231 
232 /**
233  * disk_has_partscan - return %true if partition scanning is enabled on a disk
234  * @disk: disk to check
235  *
236  * Returns %true if partition scanning is enabled for @disk, or %false if
237  * partition scanning is disabled either permanently or temporarily.
238  */
239 static inline bool disk_has_partscan(struct gendisk *disk)
240 {
241 	return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) &&
242 		!test_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
243 }
244 
245 /*
246  * The gendisk is refcounted by the part0 block_device, and the bd_device
247  * therein is also used for device model presentation in sysfs.
248  */
249 #define dev_to_disk(device) \
250 	(dev_to_bdev(device)->bd_disk)
251 #define disk_to_dev(disk) \
252 	(&((disk)->part0->bd_device))
253 
254 #if IS_REACHABLE(CONFIG_CDROM)
255 #define disk_to_cdi(disk)	((disk)->cdi)
256 #else
257 #define disk_to_cdi(disk)	NULL
258 #endif
259 
260 static inline dev_t disk_devt(struct gendisk *disk)
261 {
262 	return MKDEV(disk->major, disk->first_minor);
263 }
264 
265 static inline int blk_validate_block_size(unsigned long bsize)
266 {
267 	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
268 		return -EINVAL;
269 
270 	return 0;
271 }
272 
273 static inline bool blk_op_is_passthrough(blk_opf_t op)
274 {
275 	op &= REQ_OP_MASK;
276 	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
277 }
278 
279 /*
280  * Zoned block device models (zoned limit).
281  *
282  * Note: This needs to be ordered from the least to the most severe
283  * restrictions for the inheritance in blk_stack_limits() to work.
284  */
285 enum blk_zoned_model {
286 	BLK_ZONED_NONE = 0,	/* Regular block device */
287 	BLK_ZONED_HA,		/* Host-aware zoned block device */
288 	BLK_ZONED_HM,		/* Host-managed zoned block device */
289 };
290 
291 /*
292  * BLK_BOUNCE_NONE:	never bounce (default)
293  * BLK_BOUNCE_HIGH:	bounce all highmem pages
294  */
295 enum blk_bounce {
296 	BLK_BOUNCE_NONE,
297 	BLK_BOUNCE_HIGH,
298 };
299 
300 struct queue_limits {
301 	enum blk_bounce		bounce;
302 	unsigned long		seg_boundary_mask;
303 	unsigned long		virt_boundary_mask;
304 
305 	unsigned int		max_hw_sectors;
306 	unsigned int		max_dev_sectors;
307 	unsigned int		chunk_sectors;
308 	unsigned int		max_sectors;
309 	unsigned int		max_user_sectors;
310 	unsigned int		max_segment_size;
311 	unsigned int		physical_block_size;
312 	unsigned int		logical_block_size;
313 	unsigned int		alignment_offset;
314 	unsigned int		io_min;
315 	unsigned int		io_opt;
316 	unsigned int		max_discard_sectors;
317 	unsigned int		max_hw_discard_sectors;
318 	unsigned int		max_secure_erase_sectors;
319 	unsigned int		max_write_zeroes_sectors;
320 	unsigned int		max_zone_append_sectors;
321 	unsigned int		discard_granularity;
322 	unsigned int		discard_alignment;
323 	unsigned int		zone_write_granularity;
324 
325 	unsigned short		max_segments;
326 	unsigned short		max_integrity_segments;
327 	unsigned short		max_discard_segments;
328 
329 	unsigned char		misaligned;
330 	unsigned char		discard_misaligned;
331 	unsigned char		raid_partial_stripes_expensive;
332 	enum blk_zoned_model	zoned;
333 
334 	/*
335 	 * Drivers that set dma_alignment to less than 511 must be prepared to
336 	 * handle individual bvecs that are not a multiple of SECTOR_SIZE
337 	 * due to possible offsets.
338 	 */
339 	unsigned int		dma_alignment;
340 };
341 
342 typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
343 			       void *data);
344 
345 void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model);
346 
347 #ifdef CONFIG_BLK_DEV_ZONED
348 #define BLK_ALL_ZONES  ((unsigned int)-1)
349 int blkdev_report_zones(struct block_device *bdev, sector_t sector,
350 			unsigned int nr_zones, report_zones_cb cb, void *data);
351 unsigned int bdev_nr_zones(struct block_device *bdev);
352 extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
353 			    sector_t sectors, sector_t nr_sectors,
354 			    gfp_t gfp_mask);
355 int blk_revalidate_disk_zones(struct gendisk *disk,
356 			      void (*update_driver_data)(struct gendisk *disk));
357 #else /* CONFIG_BLK_DEV_ZONED */
358 static inline unsigned int bdev_nr_zones(struct block_device *bdev)
359 {
360 	return 0;
361 }
362 #endif /* CONFIG_BLK_DEV_ZONED */
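
/*
 * Example (sketch, only meaningful with CONFIG_BLK_DEV_ZONED): counting
 * empty zones with blkdev_report_zones().  The callback and helper names
 * are hypothetical; blkdev_report_zones() returns the number of zones
 * reported or a negative errno.
 */
#ifdef CONFIG_BLK_DEV_ZONED
static inline int example_count_empty_zone_cb(struct blk_zone *zone,
					      unsigned int idx, void *data)
{
	unsigned int *nr_empty = data;

	if (zone->cond == BLK_ZONE_COND_EMPTY)
		(*nr_empty)++;
	return 0;
}

static inline int example_count_empty_zones(struct block_device *bdev,
					    unsigned int *nr_empty)
{
	*nr_empty = 0;
	return blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
				   example_count_empty_zone_cb, nr_empty);
}
#endif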
363 
364 /*
365  * Independent access ranges: struct blk_independent_access_range describes
366  * a range of contiguous sectors that can be accessed using device command
367  * execution resources that are independent from the resources used for
368  * other access ranges. This is typically found with single-LUN multi-actuator
369  * HDDs where each access range is served by a different set of heads.
370  * The set of independent ranges supported by the device is defined using
371  * struct blk_independent_access_ranges. The independent ranges must not overlap
372  * and must include all sectors within the disk capacity (no sector holes
373  * allowed).
374  * For a device with multiple ranges, requests targeting sectors in different
375  * ranges can be executed in parallel. A request can straddle an access range
376  * boundary.
377  */
378 struct blk_independent_access_range {
379 	struct kobject		kobj;
380 	sector_t		sector;
381 	sector_t		nr_sectors;
382 };
383 
384 struct blk_independent_access_ranges {
385 	struct kobject				kobj;
386 	bool					sysfs_registered;
387 	unsigned int				nr_ia_ranges;
388 	struct blk_independent_access_range	ia_range[];
389 };
390 
391 struct request_queue {
392 	struct request		*last_merge;
393 	struct elevator_queue	*elevator;
394 
395 	struct percpu_ref	q_usage_counter;
396 
397 	struct blk_queue_stats	*stats;
398 	struct rq_qos		*rq_qos;
399 	struct mutex		rq_qos_mutex;
400 
401 	const struct blk_mq_ops	*mq_ops;
402 
403 	/* sw queues */
404 	struct blk_mq_ctx __percpu	*queue_ctx;
405 
406 	unsigned int		queue_depth;
407 
408 	/* hw dispatch queues */
409 	struct xarray		hctx_table;
410 	unsigned int		nr_hw_queues;
411 
412 	/*
413 	 * The queue owner gets to use this for whatever they like.
414 	 * The block layer core doesn't touch it.
415 	 */
416 	void			*queuedata;
417 
418 	/*
419 	 * various queue flags, see QUEUE_* below
420 	 */
421 	unsigned long		queue_flags;
422 	/*
423 	 * Number of contexts that have called blk_set_pm_only(). If this
424 	 * counter is above zero then only RQF_PM requests are processed.
425 	 */
426 	atomic_t		pm_only;
427 
428 	/*
429 	 * ida allocated id for this queue.  Used to index queues from
430 	 * ioctx.
431 	 */
432 	int			id;
433 
434 	spinlock_t		queue_lock;
435 
436 	struct gendisk		*disk;
437 
438 	refcount_t		refs;
439 
440 	/*
441 	 * mq queue kobject
442 	 */
443 	struct kobject *mq_kobj;
444 
445 #ifdef  CONFIG_BLK_DEV_INTEGRITY
446 	struct blk_integrity integrity;
447 #endif	/* CONFIG_BLK_DEV_INTEGRITY */
448 
449 #ifdef CONFIG_PM
450 	struct device		*dev;
451 	enum rpm_status		rpm_status;
452 #endif
453 
454 	/*
455 	 * queue settings
456 	 */
457 	unsigned long		nr_requests;	/* Max # of requests */
458 
459 	unsigned int		dma_pad_mask;
460 
461 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
462 	struct blk_crypto_profile *crypto_profile;
463 	struct kobject *crypto_kobject;
464 #endif
465 
466 	unsigned int		rq_timeout;
467 
468 	struct timer_list	timeout;
469 	struct work_struct	timeout_work;
470 
471 	atomic_t		nr_active_requests_shared_tags;
472 
473 	struct blk_mq_tags	*sched_shared_tags;
474 
475 	struct list_head	icq_list;
476 #ifdef CONFIG_BLK_CGROUP
477 	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
478 	struct blkcg_gq		*root_blkg;
479 	struct list_head	blkg_list;
480 	struct mutex		blkcg_mutex;
481 #endif
482 
483 	struct queue_limits	limits;
484 
485 	unsigned int		required_elevator_features;
486 
487 	int			node;
488 #ifdef CONFIG_BLK_DEV_IO_TRACE
489 	struct blk_trace __rcu	*blk_trace;
490 #endif
491 	/*
492 	 * for flush operations
493 	 */
494 	struct blk_flush_queue	*fq;
495 	struct list_head	flush_list;
496 
497 	struct list_head	requeue_list;
498 	spinlock_t		requeue_lock;
499 	struct delayed_work	requeue_work;
500 
501 	struct mutex		sysfs_lock;
502 	struct mutex		sysfs_dir_lock;
503 
504 	/*
505 	 * for reusing dead hctx instance in case of updating
506 	 * nr_hw_queues
507 	 */
508 	struct list_head	unused_hctx_list;
509 	spinlock_t		unused_hctx_lock;
510 
511 	int			mq_freeze_depth;
512 
513 #ifdef CONFIG_BLK_DEV_THROTTLING
514 	/* Throttle data */
515 	struct throtl_data *td;
516 #endif
517 	struct rcu_head		rcu_head;
518 	wait_queue_head_t	mq_freeze_wq;
519 	/*
520 	 * Protect concurrent access to q_usage_counter by
521 	 * percpu_ref_kill() and percpu_ref_reinit().
522 	 */
523 	struct mutex		mq_freeze_lock;
524 
525 	int			quiesce_depth;
526 
527 	struct blk_mq_tag_set	*tag_set;
528 	struct list_head	tag_set_list;
529 
530 	struct dentry		*debugfs_dir;
531 	struct dentry		*sched_debugfs_dir;
532 	struct dentry		*rqos_debugfs_dir;
533 	/*
534 	 * Serializes all debugfs metadata operations using the above dentries.
535 	 */
536 	struct mutex		debugfs_mutex;
537 
538 	bool			mq_sysfs_init_done;
539 };
540 
541 /* Keep blk_queue_flag_name[] in sync with the definitions below */
542 #define QUEUE_FLAG_STOPPED	0	/* queue is stopped */
543 #define QUEUE_FLAG_DYING	1	/* queue being torn down */
544 #define QUEUE_FLAG_NOMERGES     3	/* disable merge attempts */
545 #define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
546 #define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
547 #define QUEUE_FLAG_NONROT	6	/* non-rotational device (SSD) */
548 #define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
549 #define QUEUE_FLAG_IO_STAT	7	/* do disk/partitions IO accounting */
550 #define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
551 #define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
552 #define QUEUE_FLAG_SYNCHRONOUS	11	/* always completes in submit context */
553 #define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
554 #define QUEUE_FLAG_HW_WC	13	/* Write back caching supported */
555 #define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
556 #define QUEUE_FLAG_STABLE_WRITES 15	/* don't modify blks until WB is done */
557 #define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
558 #define QUEUE_FLAG_WC		17	/* Write back caching */
559 #define QUEUE_FLAG_FUA		18	/* device supports FUA writes */
560 #define QUEUE_FLAG_DAX		19	/* device supports DAX */
561 #define QUEUE_FLAG_STATS	20	/* track IO start and completion times */
562 #define QUEUE_FLAG_REGISTERED	22	/* queue has been registered to a disk */
563 #define QUEUE_FLAG_QUIESCED	24	/* queue has been quiesced */
564 #define QUEUE_FLAG_PCI_P2PDMA	25	/* device supports PCI p2p requests */
565 #define QUEUE_FLAG_ZONE_RESETALL 26	/* supports Zone Reset All */
566 #define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */
567 #define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
568 #define QUEUE_FLAG_NOWAIT       29	/* device supports NOWAIT */
569 #define QUEUE_FLAG_SQ_SCHED     30	/* single queue style io dispatch */
570 #define QUEUE_FLAG_SKIP_TAGSET_QUIESCE	31 /* quiesce_tagset skip the queue */
571 
572 #define QUEUE_FLAG_MQ_DEFAULT	((1UL << QUEUE_FLAG_IO_STAT) |		\
573 				 (1UL << QUEUE_FLAG_SAME_COMP) |	\
574 				 (1UL << QUEUE_FLAG_NOWAIT))
575 
576 void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
577 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
578 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
579 
580 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
581 #define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
582 #define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
583 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
584 #define blk_queue_noxmerges(q)	\
585 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
586 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
587 #define blk_queue_stable_writes(q) \
588 	test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
589 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
590 #define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
591 #define blk_queue_zone_resetall(q)	\
592 	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
593 #define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
594 #define blk_queue_pci_p2pdma(q)	\
595 	test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
596 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
597 #define blk_queue_rq_alloc_time(q)	\
598 	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
599 #else
600 #define blk_queue_rq_alloc_time(q)	false
601 #endif
602 
603 #define blk_noretry_request(rq) \
604 	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
605 			     REQ_FAILFAST_DRIVER))
606 #define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
607 #define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
608 #define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
609 #define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
610 #define blk_queue_skip_tagset_quiesce(q) \
611 	test_bit(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, &(q)->queue_flags)
612 
613 extern void blk_set_pm_only(struct request_queue *q);
614 extern void blk_clear_pm_only(struct request_queue *q);
615 
616 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
617 
618 #define dma_map_bvec(dev, bv, dir, attrs) \
619 	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
620 	(dir), (attrs))
621 
622 static inline bool queue_is_mq(struct request_queue *q)
623 {
624 	return q->mq_ops;
625 }
626 
627 #ifdef CONFIG_PM
628 static inline enum rpm_status queue_rpm_status(struct request_queue *q)
629 {
630 	return q->rpm_status;
631 }
632 #else
633 static inline enum rpm_status queue_rpm_status(struct request_queue *q)
634 {
635 	return RPM_ACTIVE;
636 }
637 #endif
638 
639 static inline enum blk_zoned_model
640 blk_queue_zoned_model(struct request_queue *q)
641 {
642 	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
643 		return q->limits.zoned;
644 	return BLK_ZONED_NONE;
645 }
646 
647 static inline bool blk_queue_is_zoned(struct request_queue *q)
648 {
649 	switch (blk_queue_zoned_model(q)) {
650 	case BLK_ZONED_HA:
651 	case BLK_ZONED_HM:
652 		return true;
653 	default:
654 		return false;
655 	}
656 }
657 
658 #ifdef CONFIG_BLK_DEV_ZONED
659 static inline unsigned int disk_nr_zones(struct gendisk *disk)
660 {
661 	return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
662 }
663 
664 static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
665 {
666 	if (!blk_queue_is_zoned(disk->queue))
667 		return 0;
668 	return sector >> ilog2(disk->queue->limits.chunk_sectors);
669 }
670 
671 static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
672 {
673 	if (!blk_queue_is_zoned(disk->queue))
674 		return false;
675 	if (!disk->conv_zones_bitmap)
676 		return true;
677 	return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
678 }
679 
680 static inline void disk_set_max_open_zones(struct gendisk *disk,
681 		unsigned int max_open_zones)
682 {
683 	disk->max_open_zones = max_open_zones;
684 }
685 
686 static inline void disk_set_max_active_zones(struct gendisk *disk,
687 		unsigned int max_active_zones)
688 {
689 	disk->max_active_zones = max_active_zones;
690 }
691 
692 static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
693 {
694 	return bdev->bd_disk->max_open_zones;
695 }
696 
697 static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
698 {
699 	return bdev->bd_disk->max_active_zones;
700 }
701 
702 #else /* CONFIG_BLK_DEV_ZONED */
703 static inline unsigned int disk_nr_zones(struct gendisk *disk)
704 {
705 	return 0;
706 }
707 static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
708 {
709 	return false;
710 }
711 static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
712 {
713 	return 0;
714 }
715 static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
716 {
717 	return 0;
718 }
719 
720 static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
721 {
722 	return 0;
723 }
724 #endif /* CONFIG_BLK_DEV_ZONED */
725 
726 static inline unsigned int blk_queue_depth(struct request_queue *q)
727 {
728 	if (q->queue_depth)
729 		return q->queue_depth;
730 
731 	return q->nr_requests;
732 }
733 
734 /*
735  * default timeout for SG_IO if none specified
736  */
737 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
738 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
739 
740 /* This should not be used directly - use rq_for_each_segment */
741 #define for_each_bio(_bio)		\
742 	for (; _bio; _bio = _bio->bi_next)
743 
744 int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
745 				 const struct attribute_group **groups);
746 static inline int __must_check add_disk(struct gendisk *disk)
747 {
748 	return device_add_disk(NULL, disk, NULL);
749 }
750 void del_gendisk(struct gendisk *gp);
751 void invalidate_disk(struct gendisk *disk);
752 void set_disk_ro(struct gendisk *disk, bool read_only);
753 void disk_uevent(struct gendisk *disk, enum kobject_action action);
754 
755 static inline int get_disk_ro(struct gendisk *disk)
756 {
757 	return disk->part0->bd_read_only ||
758 		test_bit(GD_READ_ONLY, &disk->state);
759 }
760 
761 static inline int bdev_read_only(struct block_device *bdev)
762 {
763 	return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
764 }
765 
766 bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
767 void disk_force_media_change(struct gendisk *disk);
768 void bdev_mark_dead(struct block_device *bdev, bool surprise);
769 
770 void add_disk_randomness(struct gendisk *disk) __latent_entropy;
771 void rand_initialize_disk(struct gendisk *disk);
772 
773 static inline sector_t get_start_sect(struct block_device *bdev)
774 {
775 	return bdev->bd_start_sect;
776 }
777 
778 static inline sector_t bdev_nr_sectors(struct block_device *bdev)
779 {
780 	return bdev->bd_nr_sectors;
781 }
782 
783 static inline loff_t bdev_nr_bytes(struct block_device *bdev)
784 {
785 	return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
786 }
787 
788 static inline sector_t get_capacity(struct gendisk *disk)
789 {
790 	return bdev_nr_sectors(disk->part0);
791 }
792 
793 static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
794 {
795 	return bdev_nr_sectors(sb->s_bdev) >>
796 		(sb->s_blocksize_bits - SECTOR_SHIFT);
797 }
798 
799 int bdev_disk_changed(struct gendisk *disk, bool invalidate);
800 
801 void put_disk(struct gendisk *disk);
802 struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);
803 
804 /**
805  * blk_alloc_disk - allocate a gendisk structure
806  * @node_id: numa node to allocate on
807  *
808  * Allocate and pre-initialize a gendisk structure for use with BIO based
809  * drivers.
810  *
811  * Context: can sleep
812  */
813 #define blk_alloc_disk(node_id)						\
814 ({									\
815 	static struct lock_class_key __key;				\
816 									\
817 	__blk_alloc_disk(node_id, &__key);				\
818 })
819 
820 int __register_blkdev(unsigned int major, const char *name,
821 		void (*probe)(dev_t devt));
822 #define register_blkdev(major, name) \
823 	__register_blkdev(major, name, NULL)
824 void unregister_blkdev(unsigned int major, const char *name);
825 
826 bool disk_check_media_change(struct gendisk *disk);
827 void set_capacity(struct gendisk *disk, sector_t size);
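
/*
 * Example (sketch): minimal bio based disk setup using blk_alloc_disk().
 * "fops", "driver_data" and "capacity_sectors" come from a hypothetical
 * driver, which would also fill in disk_name and its queue limits before
 * calling add_disk().
 */
static inline struct gendisk *example_bio_disk_create(int node,
		const struct block_device_operations *fops,
		void *driver_data, sector_t capacity_sectors)
{
	struct gendisk *disk = blk_alloc_disk(node);

	if (!disk)
		return NULL;

	disk->fops = fops;
	disk->private_data = driver_data;
	disk->flags |= GENHD_FL_NO_PART;
	set_capacity(disk, capacity_sectors);

	if (add_disk(disk)) {
		put_disk(disk);
		return NULL;
	}
	return disk;
}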
828 
829 #ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
830 int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
831 void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
832 #else
833 static inline int bd_link_disk_holder(struct block_device *bdev,
834 				      struct gendisk *disk)
835 {
836 	return 0;
837 }
838 static inline void bd_unlink_disk_holder(struct block_device *bdev,
839 					 struct gendisk *disk)
840 {
841 }
842 #endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */
843 
844 dev_t part_devt(struct gendisk *disk, u8 partno);
845 void inc_diskseq(struct gendisk *disk);
846 void blk_request_module(dev_t devt);
847 
848 extern int blk_register_queue(struct gendisk *disk);
849 extern void blk_unregister_queue(struct gendisk *disk);
850 void submit_bio_noacct(struct bio *bio);
851 struct bio *bio_split_to_limits(struct bio *bio);
852 
853 extern int blk_lld_busy(struct request_queue *q);
854 extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
855 extern void blk_queue_exit(struct request_queue *q);
856 extern void blk_sync_queue(struct request_queue *q);
857 
858 /* Helper to convert REQ_OP_XXX to its string format XXX */
859 extern const char *blk_op_str(enum req_op op);
860 
861 int blk_status_to_errno(blk_status_t status);
862 blk_status_t errno_to_blk_status(int errno);
863 const char *blk_status_to_str(blk_status_t status);
864 
865 /* only poll the hardware once, don't continue until a completion was found */
866 #define BLK_POLL_ONESHOT		(1 << 0)
867 int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
868 int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
869 			unsigned int flags);
870 
871 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
872 {
873 	return bdev->bd_queue;	/* this is never NULL */
874 }
875 
876 /* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */
877 const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
878 
879 static inline unsigned int bio_zone_no(struct bio *bio)
880 {
881 	return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
882 }
883 
884 static inline unsigned int bio_zone_is_seq(struct bio *bio)
885 {
886 	return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
887 }
888 
889 /*
890  * Return how much of the chunk is left to be used for I/O at a given offset.
891  */
892 static inline unsigned int blk_chunk_sectors_left(sector_t offset,
893 		unsigned int chunk_sectors)
894 {
895 	if (unlikely(!is_power_of_2(chunk_sectors)))
896 		return chunk_sectors - sector_div(offset, chunk_sectors);
897 	return chunk_sectors - (offset & (chunk_sectors - 1));
898 }
899 
900 /*
901  * Access functions for manipulating queue properties
902  */
903 void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
904 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
905 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
906 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
907 extern void blk_queue_max_discard_segments(struct request_queue *,
908 		unsigned short);
909 void blk_queue_max_secure_erase_sectors(struct request_queue *q,
910 		unsigned int max_sectors);
911 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
912 extern void blk_queue_max_discard_sectors(struct request_queue *q,
913 		unsigned int max_discard_sectors);
914 extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
915 		unsigned int max_write_same_sectors);
916 extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
917 extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
918 		unsigned int max_zone_append_sectors);
919 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
920 void blk_queue_zone_write_granularity(struct request_queue *q,
921 				      unsigned int size);
922 extern void blk_queue_alignment_offset(struct request_queue *q,
923 				       unsigned int alignment);
924 void disk_update_readahead(struct gendisk *disk);
925 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
926 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
927 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
928 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
929 extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
930 extern void blk_set_stacking_limits(struct queue_limits *lim);
931 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
932 			    sector_t offset);
933 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
934 			      sector_t offset);
935 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
936 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
937 extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
938 extern void blk_queue_dma_alignment(struct request_queue *, int);
939 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
940 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
941 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
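
/*
 * Example (sketch): how a driver might describe a device with a 4096 byte
 * physical and 512 byte logical sector size, capped at 256 KiB per
 * request, with a volatile write cache but no FUA support.  The values
 * are purely illustrative.
 */
static inline void example_configure_queue_limits(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);
	blk_queue_io_min(q, 4096);
	blk_queue_max_hw_sectors(q, 256 * 1024 / SECTOR_SIZE);
	blk_queue_write_cache(q, true, false);
}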
942 
943 struct blk_independent_access_ranges *
944 disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
945 void disk_set_independent_access_ranges(struct gendisk *disk,
946 				struct blk_independent_access_ranges *iars);
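
/*
 * Example (sketch): publishing two equal, non-overlapping access ranges
 * for a hypothetical dual-actuator disk spanning "nr_sects" sectors.
 * A real driver would derive the boundaries from the hardware.
 */
static inline int example_set_dual_actuator_ranges(struct gendisk *disk,
						   sector_t nr_sects)
{
	struct blk_independent_access_ranges *iars;

	iars = disk_alloc_independent_access_ranges(disk, 2);
	if (!iars)
		return -ENOMEM;

	iars->ia_range[0].sector = 0;
	iars->ia_range[0].nr_sectors = nr_sects / 2;
	iars->ia_range[1].sector = nr_sects / 2;
	iars->ia_range[1].nr_sectors = nr_sects - nr_sects / 2;

	disk_set_independent_access_ranges(disk, iars);
	return 0;
}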
947 
948 /*
949  * Elevator features for blk_queue_required_elevator_features:
950  */
951 /* Supports zoned block devices sequential write constraint */
952 #define ELEVATOR_F_ZBD_SEQ_WRITE	(1U << 0)
953 
954 extern void blk_queue_required_elevator_features(struct request_queue *q,
955 						 unsigned int features);
956 extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
957 					      struct device *dev);
958 
959 bool __must_check blk_get_queue(struct request_queue *);
960 extern void blk_put_queue(struct request_queue *);
961 
962 void blk_mark_disk_dead(struct gendisk *disk);
963 
964 #ifdef CONFIG_BLOCK
965 /*
966  * blk_plug permits building a queue of related requests by holding the I/O
967  * fragments for a short period. This allows merging of sequential requests
968  * into a single larger request. As the requests are moved from a per-task list
969  * to the device's request_queue in a batch, this results in improved scalability
970  * as the lock contention on the request_queue lock is reduced.
971  *
972  * It is ok not to disable preemption when adding the request to the plug list
973  * or when attempting a merge. For details, please see schedule() where
974  * blk_flush_plug() is called.
975  */
976 struct blk_plug {
977 	struct request *mq_list; /* blk-mq requests */
978 
979 	/* if ios_left is > 1, we can batch tag/rq allocations */
980 	struct request *cached_rq;
981 	unsigned short nr_ios;
982 
983 	unsigned short rq_count;
984 
985 	bool multiple_queues;
986 	bool has_elevator;
987 
988 	struct list_head cb_list; /* md requires an unplug callback */
989 };
990 
991 struct blk_plug_cb;
992 typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
993 struct blk_plug_cb {
994 	struct list_head list;
995 	blk_plug_cb_fn callback;
996 	void *data;
997 };
998 extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
999 					     void *data, int size);
1000 extern void blk_start_plug(struct blk_plug *);
1001 extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
1002 extern void blk_finish_plug(struct blk_plug *);
1003 
1004 void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
1005 static inline void blk_flush_plug(struct blk_plug *plug, bool async)
1006 {
1007 	if (plug)
1008 		__blk_flush_plug(plug, async);
1009 }
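
/*
 * Example (sketch): batching a caller-prepared array of bios under one
 * plug so that the block layer can merge adjacent requests and hand them
 * to the driver in a single batch.
 */
static inline void example_submit_bio_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);
}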
1010 
1011 int blkdev_issue_flush(struct block_device *bdev);
1012 long nr_blockdev_pages(void);
1013 #else /* CONFIG_BLOCK */
1014 struct blk_plug {
1015 };
1016 
1017 static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
1018 					 unsigned short nr_ios)
1019 {
1020 }
1021 
1022 static inline void blk_start_plug(struct blk_plug *plug)
1023 {
1024 }
1025 
1026 static inline void blk_finish_plug(struct blk_plug *plug)
1027 {
1028 }
1029 
1030 static inline void blk_flush_plug(struct blk_plug *plug, bool async)
1031 {
1032 }
1033 
1034 static inline int blkdev_issue_flush(struct block_device *bdev)
1035 {
1036 	return 0;
1037 }
1038 
1039 static inline long nr_blockdev_pages(void)
1040 {
1041 	return 0;
1042 }
1043 #endif /* CONFIG_BLOCK */
1044 
1045 extern void blk_io_schedule(void);
1046 
1047 int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1048 		sector_t nr_sects, gfp_t gfp_mask);
1049 int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1050 		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
1051 int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
1052 		sector_t nr_sects, gfp_t gfp);
1053 
1054 #define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
1055 #define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */
1056 
1057 extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1058 		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
1059 		unsigned flags);
1060 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1061 		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
1062 
1063 static inline int sb_issue_discard(struct super_block *sb, sector_t block,
1064 		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
1065 {
1066 	return blkdev_issue_discard(sb->s_bdev,
1067 				    block << (sb->s_blocksize_bits -
1068 					      SECTOR_SHIFT),
1069 				    nr_blocks << (sb->s_blocksize_bits -
1070 						  SECTOR_SHIFT),
1071 				    gfp_mask);
1072 }
1073 static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
1074 		sector_t nr_blocks, gfp_t gfp_mask)
1075 {
1076 	return blkdev_issue_zeroout(sb->s_bdev,
1077 				    block << (sb->s_blocksize_bits -
1078 					      SECTOR_SHIFT),
1079 				    nr_blocks << (sb->s_blocksize_bits -
1080 						  SECTOR_SHIFT),
1081 				    gfp_mask, 0);
1082 }
1083 
1084 static inline bool bdev_is_partition(struct block_device *bdev)
1085 {
1086 	return bdev->bd_partno;
1087 }
1088 
1089 enum blk_default_limits {
1090 	BLK_MAX_SEGMENTS	= 128,
1091 	BLK_SAFE_MAX_SECTORS	= 255,
1092 	BLK_MAX_SEGMENT_SIZE	= 65536,
1093 	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
1094 };
1095 
1096 #define BLK_DEF_MAX_SECTORS 2560u
1097 
1098 static inline unsigned long queue_segment_boundary(const struct request_queue *q)
1099 {
1100 	return q->limits.seg_boundary_mask;
1101 }
1102 
1103 static inline unsigned long queue_virt_boundary(const struct request_queue *q)
1104 {
1105 	return q->limits.virt_boundary_mask;
1106 }
1107 
1108 static inline unsigned int queue_max_sectors(const struct request_queue *q)
1109 {
1110 	return q->limits.max_sectors;
1111 }
1112 
1113 static inline unsigned int queue_max_bytes(struct request_queue *q)
1114 {
1115 	return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
1116 }
1117 
1118 static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
1119 {
1120 	return q->limits.max_hw_sectors;
1121 }
1122 
1123 static inline unsigned short queue_max_segments(const struct request_queue *q)
1124 {
1125 	return q->limits.max_segments;
1126 }
1127 
1128 static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
1129 {
1130 	return q->limits.max_discard_segments;
1131 }
1132 
1133 static inline unsigned int queue_max_segment_size(const struct request_queue *q)
1134 {
1135 	return q->limits.max_segment_size;
1136 }
1137 
1138 static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
1139 {
1140 
1141 	const struct queue_limits *l = &q->limits;
1142 
1143 	return min(l->max_zone_append_sectors, l->max_sectors);
1144 }
1145 
1146 static inline unsigned int
1147 bdev_max_zone_append_sectors(struct block_device *bdev)
1148 {
1149 	return queue_max_zone_append_sectors(bdev_get_queue(bdev));
1150 }
1151 
1152 static inline unsigned int bdev_max_segments(struct block_device *bdev)
1153 {
1154 	return queue_max_segments(bdev_get_queue(bdev));
1155 }
1156 
1157 static inline unsigned queue_logical_block_size(const struct request_queue *q)
1158 {
1159 	int retval = 512;
1160 
1161 	if (q && q->limits.logical_block_size)
1162 		retval = q->limits.logical_block_size;
1163 
1164 	return retval;
1165 }
1166 
1167 static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
1168 {
1169 	return queue_logical_block_size(bdev_get_queue(bdev));
1170 }
1171 
1172 static inline unsigned int queue_physical_block_size(const struct request_queue *q)
1173 {
1174 	return q->limits.physical_block_size;
1175 }
1176 
1177 static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
1178 {
1179 	return queue_physical_block_size(bdev_get_queue(bdev));
1180 }
1181 
1182 static inline unsigned int queue_io_min(const struct request_queue *q)
1183 {
1184 	return q->limits.io_min;
1185 }
1186 
1187 static inline int bdev_io_min(struct block_device *bdev)
1188 {
1189 	return queue_io_min(bdev_get_queue(bdev));
1190 }
1191 
1192 static inline unsigned int queue_io_opt(const struct request_queue *q)
1193 {
1194 	return q->limits.io_opt;
1195 }
1196 
1197 static inline int bdev_io_opt(struct block_device *bdev)
1198 {
1199 	return queue_io_opt(bdev_get_queue(bdev));
1200 }
1201 
1202 static inline unsigned int
1203 queue_zone_write_granularity(const struct request_queue *q)
1204 {
1205 	return q->limits.zone_write_granularity;
1206 }
1207 
1208 static inline unsigned int
1209 bdev_zone_write_granularity(struct block_device *bdev)
1210 {
1211 	return queue_zone_write_granularity(bdev_get_queue(bdev));
1212 }
1213 
1214 int bdev_alignment_offset(struct block_device *bdev);
1215 unsigned int bdev_discard_alignment(struct block_device *bdev);
1216 
1217 static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
1218 {
1219 	return bdev_get_queue(bdev)->limits.max_discard_sectors;
1220 }
1221 
1222 static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
1223 {
1224 	return bdev_get_queue(bdev)->limits.discard_granularity;
1225 }
1226 
1227 static inline unsigned int
1228 bdev_max_secure_erase_sectors(struct block_device *bdev)
1229 {
1230 	return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
1231 }
1232 
1233 static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
1234 {
1235 	struct request_queue *q = bdev_get_queue(bdev);
1236 
1237 	if (q)
1238 		return q->limits.max_write_zeroes_sectors;
1239 
1240 	return 0;
1241 }
1242 
1243 static inline bool bdev_nonrot(struct block_device *bdev)
1244 {
1245 	return blk_queue_nonrot(bdev_get_queue(bdev));
1246 }
1247 
1248 static inline bool bdev_synchronous(struct block_device *bdev)
1249 {
1250 	return test_bit(QUEUE_FLAG_SYNCHRONOUS,
1251 			&bdev_get_queue(bdev)->queue_flags);
1252 }
1253 
1254 static inline bool bdev_stable_writes(struct block_device *bdev)
1255 {
1256 	return test_bit(QUEUE_FLAG_STABLE_WRITES,
1257 			&bdev_get_queue(bdev)->queue_flags);
1258 }
1259 
1260 static inline bool bdev_write_cache(struct block_device *bdev)
1261 {
1262 	return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
1263 }
1264 
1265 static inline bool bdev_fua(struct block_device *bdev)
1266 {
1267 	return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
1268 }
1269 
1270 static inline bool bdev_nowait(struct block_device *bdev)
1271 {
1272 	return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
1273 }
1274 
1275 static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
1276 {
1277 	return blk_queue_zoned_model(bdev_get_queue(bdev));
1278 }
1279 
1280 static inline bool bdev_is_zoned(struct block_device *bdev)
1281 {
1282 	return blk_queue_is_zoned(bdev_get_queue(bdev));
1283 }
1284 
1285 static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
1286 {
1287 	return disk_zone_no(bdev->bd_disk, sec);
1288 }
1289 
1290 /* Whether write serialization is required for @op on zoned devices. */
1291 static inline bool op_needs_zoned_write_locking(enum req_op op)
1292 {
1293 	return op == REQ_OP_WRITE || op == REQ_OP_WRITE_ZEROES;
1294 }
1295 
1296 static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
1297 					  enum req_op op)
1298 {
1299 	return bdev_is_zoned(bdev) && op_needs_zoned_write_locking(op);
1300 }
1301 
1302 static inline sector_t bdev_zone_sectors(struct block_device *bdev)
1303 {
1304 	struct request_queue *q = bdev_get_queue(bdev);
1305 
1306 	if (!blk_queue_is_zoned(q))
1307 		return 0;
1308 	return q->limits.chunk_sectors;
1309 }
1310 
1311 static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
1312 						   sector_t sector)
1313 {
1314 	return sector & (bdev_zone_sectors(bdev) - 1);
1315 }
1316 
1317 static inline bool bdev_is_zone_start(struct block_device *bdev,
1318 				      sector_t sector)
1319 {
1320 	return bdev_offset_from_zone_start(bdev, sector) == 0;
1321 }
1322 
1323 static inline int queue_dma_alignment(const struct request_queue *q)
1324 {
1325 	return q ? q->limits.dma_alignment : 511;
1326 }
1327 
1328 static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
1329 {
1330 	return queue_dma_alignment(bdev_get_queue(bdev));
1331 }
1332 
1333 static inline bool bdev_iter_is_aligned(struct block_device *bdev,
1334 					struct iov_iter *iter)
1335 {
1336 	return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
1337 				   bdev_logical_block_size(bdev) - 1);
1338 }
1339 
1340 static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
1341 				 unsigned int len)
1342 {
1343 	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
1344 	return !(addr & alignment) && !(len & alignment);
1345 }
1346 
1347 /* assumes size > 256 */
1348 static inline unsigned int blksize_bits(unsigned int size)
1349 {
1350 	return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
1351 }
1352 
1353 static inline unsigned int block_size(struct block_device *bdev)
1354 {
1355 	return 1 << bdev->bd_inode->i_blkbits;
1356 }
1357 
1358 int kblockd_schedule_work(struct work_struct *work);
1359 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
1360 
1361 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
1362 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
1363 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
1364 	MODULE_ALIAS("block-major-" __stringify(major) "-*")
1365 
1366 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
1367 
1368 bool blk_crypto_register(struct blk_crypto_profile *profile,
1369 			 struct request_queue *q);
1370 
1371 #else /* CONFIG_BLK_INLINE_ENCRYPTION */
1372 
1373 static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
1374 				       struct request_queue *q)
1375 {
1376 	return true;
1377 }
1378 
1379 #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
1380 
1381 enum blk_unique_id {
1382 	/* these match the Designator Types specified in SPC */
1383 	BLK_UID_T10	= 1,
1384 	BLK_UID_EUI64	= 2,
1385 	BLK_UID_NAA	= 3,
1386 };
1387 
1388 struct block_device_operations {
1389 	void (*submit_bio)(struct bio *bio);
1390 	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
1391 			unsigned int flags);
1392 	int (*open)(struct gendisk *disk, blk_mode_t mode);
1393 	void (*release)(struct gendisk *disk);
1394 	int (*ioctl)(struct block_device *bdev, blk_mode_t mode,
1395 			unsigned cmd, unsigned long arg);
1396 	int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode,
1397 			unsigned cmd, unsigned long arg);
1398 	unsigned int (*check_events) (struct gendisk *disk,
1399 				      unsigned int clearing);
1400 	void (*unlock_native_capacity) (struct gendisk *);
1401 	int (*getgeo)(struct block_device *, struct hd_geometry *);
1402 	int (*set_read_only)(struct block_device *bdev, bool ro);
1403 	void (*free_disk)(struct gendisk *disk);
1404 	/* this callback is with swap_lock and sometimes page table lock held */
1405 	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
1406 	int (*report_zones)(struct gendisk *, sector_t sector,
1407 			unsigned int nr_zones, report_zones_cb cb, void *data);
1408 	char *(*devnode)(struct gendisk *disk, umode_t *mode);
1409 	/* returns the length of the identifier or a negative errno: */
1410 	int (*get_unique_id)(struct gendisk *disk, u8 id[16],
1411 			enum blk_unique_id id_type);
1412 	struct module *owner;
1413 	const struct pr_ops *pr_ops;
1414 
1415 	/*
1416 	 * Special callback for probing GPT entry at a given sector.
1417 	 * Needed by Android devices, used by GPT scanner and MMC blk
1418 	 * driver.
1419 	 */
1420 	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
1421 };
1422 
1423 #ifdef CONFIG_COMPAT
1424 extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
1425 				      unsigned int, unsigned long);
1426 #else
1427 #define blkdev_compat_ptr_ioctl NULL
1428 #endif
1429 
1430 static inline void blk_wake_io_task(struct task_struct *waiter)
1431 {
1432 	/*
1433 	 * If we're polling, the task itself is doing the completions. For
1434 	 * that case, we don't need to signal a wakeup, it's enough to just
1435 	 * mark us as RUNNING.
1436 	 */
1437 	if (waiter == current)
1438 		__set_current_state(TASK_RUNNING);
1439 	else
1440 		wake_up_process(waiter);
1441 }
1442 
1443 unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
1444 				 unsigned long start_time);
1445 void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
1446 		      unsigned int sectors, unsigned long start_time);
1447 
1448 unsigned long bio_start_io_acct(struct bio *bio);
1449 void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
1450 		struct block_device *orig_bdev);
1451 
1452 /**
1453  * bio_end_io_acct - end I/O accounting for bio based drivers
1454  * @bio:	bio to end account for
1455  * @start_time:	start time returned by bio_start_io_acct()
1456  */
1457 static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
1458 {
1459 	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
1460 }
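
/*
 * Example (sketch): I/O accounting in a bio based driver that completes
 * the bio synchronously.  A real driver would stash start_time in its
 * per-command state and call bio_end_io_acct() from its completion path.
 */
static inline void example_complete_bio_with_acct(struct bio *bio)
{
	unsigned long start_time = bio_start_io_acct(bio);

	/* ... perform the actual transfer described by the bio here ... */

	bio_end_io_acct(bio, start_time);
	bio_endio(bio);
}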
1461 
1462 int bdev_read_only(struct block_device *bdev);
1463 int set_blocksize(struct block_device *bdev, int size);
1464 
1465 int lookup_bdev(const char *pathname, dev_t *dev);
1466 
1467 void blkdev_show(struct seq_file *seqf, off_t offset);
1468 
1469 #define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
1470 #define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
1471 #ifdef CONFIG_BLOCK
1472 #define BLKDEV_MAJOR_MAX	512
1473 #else
1474 #define BLKDEV_MAJOR_MAX	0
1475 #endif
1476 
1477 struct blk_holder_ops {
1478 	void (*mark_dead)(struct block_device *bdev, bool surprise);
1479 
1480 	/*
1481 	 * Sync the file system mounted on the block device.
1482 	 */
1483 	void (*sync)(struct block_device *bdev);
1484 };
1485 
1486 extern const struct blk_holder_ops fs_holder_ops;
1487 
1488 /*
1489  * Return the correct open flags for blkdev_get_by_* for super block flags
1490  * as stored in sb->s_flags.
1491  */
1492 #define sb_open_mode(flags) \
1493 	(BLK_OPEN_READ | (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
1494 
1495 struct bdev_handle {
1496 	struct block_device *bdev;
1497 	void *holder;
1498 };
1499 
1500 struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder,
1501 		const struct blk_holder_ops *hops);
1502 struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode,
1503 		void *holder, const struct blk_holder_ops *hops);
1504 struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
1505 		const struct blk_holder_ops *hops);
1506 struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
1507 		void *holder, const struct blk_holder_ops *hops);
1508 int bd_prepare_to_claim(struct block_device *bdev, void *holder,
1509 		const struct blk_holder_ops *hops);
1510 void bd_abort_claiming(struct block_device *bdev, void *holder);
1511 void blkdev_put(struct block_device *bdev, void *holder);
1512 void bdev_release(struct bdev_handle *handle);
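
/*
 * Example (sketch): opening a backing block device for exclusive
 * read/write access.  "my_holder" is a hypothetical holder cookie; the
 * caller must check the returned handle with IS_ERR() and eventually
 * drop it again with bdev_release().
 */
static inline struct bdev_handle *example_open_backing_dev(const char *path,
							   void *my_holder)
{
	return bdev_open_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
				 my_holder, NULL);
}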
1513 
1514 /* just for blk-cgroup, don't use elsewhere */
1515 struct block_device *blkdev_get_no_open(dev_t dev);
1516 void blkdev_put_no_open(struct block_device *bdev);
1517 
1518 struct block_device *I_BDEV(struct inode *inode);
1519 
1520 #ifdef CONFIG_BLOCK
1521 void invalidate_bdev(struct block_device *bdev);
1522 int sync_blockdev(struct block_device *bdev);
1523 int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
1524 int sync_blockdev_nowait(struct block_device *bdev);
1525 void sync_bdevs(bool wait);
1526 void bdev_statx_dioalign(struct inode *inode, struct kstat *stat);
1527 void printk_all_partitions(void);
1528 int __init early_lookup_bdev(const char *pathname, dev_t *dev);
1529 #else
1530 static inline void invalidate_bdev(struct block_device *bdev)
1531 {
1532 }
1533 static inline int sync_blockdev(struct block_device *bdev)
1534 {
1535 	return 0;
1536 }
1537 static inline int sync_blockdev_nowait(struct block_device *bdev)
1538 {
1539 	return 0;
1540 }
1541 static inline void sync_bdevs(bool wait)
1542 {
1543 }
1544 static inline void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
1545 {
1546 }
1547 static inline void printk_all_partitions(void)
1548 {
1549 }
1550 static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
1551 {
1552 	return -EINVAL;
1553 }
1554 #endif /* CONFIG_BLOCK */
1555 
1556 int freeze_bdev(struct block_device *bdev);
1557 int thaw_bdev(struct block_device *bdev);
1558 
1559 struct io_comp_batch {
1560 	struct request *req_list;
1561 	bool need_ts;
1562 	void (*complete)(struct io_comp_batch *);
1563 };
1564 
1565 #define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }
1566 
1567 #endif /* _LINUX_BLKDEV_H */
1568