/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/uuid.h>
#include <linux/xarray.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern struct device_type part_type;
extern struct class block_class;

/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS		16

/* Doing classic polling */
#define BLK_MQ_POLL_CLASSIC		-1

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		6

#define DISK_MAX_PARTS			256
#define DISK_NAME_LEN			32

#define PARTITION_META_INFO_VOLNAMELTH	64
/*
 * Enough for the string representation of any kind of UUID plus NULL.
 * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)

struct partition_meta_info {
	char uuid[PARTITION_META_INFO_UUIDLTH];
	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};

/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media. When set, the device remains present even when media is not
 * inserted. Shall not be set for devices which are removed entirely when the
 * media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*. Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 *
 */
enum {
	GENHD_FL_REMOVABLE		= 1 << 0,
	GENHD_FL_HIDDEN			= 1 << 1,
	GENHD_FL_NO_PART		= 1 << 2,
};
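
/*
 * Example: a driver for removable media that must not be partitioned would
 * typically combine these flags before registering the disk (sketch only;
 * the surrounding setup is assumed):
 *
 *	disk->flags |= GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
 *	err = add_disk(disk);
 */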

enum {
	DISK_EVENT_MEDIA_CHANGE			= 1 << 0, /* media changed */
	DISK_EVENT_EJECT_REQUEST		= 1 << 1, /* eject requested */
};

enum {
	/* Poll even if events_poll_msecs is unset */
	DISK_EVENT_FLAG_POLL			= 1 << 0,
	/* Forward events to udev */
	DISK_EVENT_FLAG_UEVENT			= 1 << 1,
	/* Block event polling when open for exclusive write */
	DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
};

struct disk_events;
struct badblocks;

struct blk_integrity {
	const struct blk_integrity_profile	*profile;
	unsigned char				flags;
	unsigned char				tuple_size;
	unsigned char				interval_exp;
	unsigned char				tag_size;
};

struct gendisk {
	/*
	 * major/first_minor/minors should not be set by any new driver, the
	 * block core will take care of allocating them automatically.
	 */
	int major;
	int first_minor;
	int minors;

	char disk_name[DISK_NAME_LEN];	/* name of major driver */

	unsigned short events;		/* supported events */
	unsigned short event_flags;	/* flags related to event processing */

	struct xarray part_tbl;
	struct block_device *part0;

	const struct block_device_operations *fops;
	struct request_queue *queue;
	void *private_data;

	struct bio_set bio_split;

	int flags;
	unsigned long state;
#define GD_NEED_PART_SCAN		0
#define GD_READ_ONLY			1
#define GD_DEAD				2
#define GD_NATIVE_CAPACITY		3
#define GD_ADDED			4
#define GD_SUPPRESS_PART_SCAN		5
#define GD_OWNS_QUEUE			6

	struct mutex open_mutex;	/* open/close mutex */
	unsigned open_partitions;	/* number of open partitions */

	struct backing_dev_info	*bdi;
	struct kobject queue_kobj;	/* the queue/ directory */
	struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
	struct list_head slave_bdevs;
#endif
	struct timer_rand_state *random;
	atomic_t sync_io;		/* RAID */
	struct disk_events *ev;
#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct kobject integrity_kobj;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information for request dispatch control.
	 * nr_zones is the total number of zones of the device. This is always
	 * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
	 * bits which indicates if a zone is conventional (bit set) or
	 * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
	 * bits which indicates if a zone is write locked, that is, if a write
	 * request targeting the zone was dispatched.
	 *
	 * Reads of this information must be protected with blk_queue_enter() /
	 * blk_queue_exit(). Modifying this information is only allowed while
	 * no requests are being processed. See also blk_mq_freeze_queue() and
	 * blk_mq_unfreeze_queue().
	 */
	unsigned int		nr_zones;
	unsigned int		max_open_zones;
	unsigned int		max_active_zones;
	unsigned long		*conv_zones_bitmap;
	unsigned long		*seq_zones_wlock;
#endif /* CONFIG_BLK_DEV_ZONED */

#if IS_ENABLED(CONFIG_CDROM)
	struct cdrom_device_info *cdi;
#endif
	int node_id;
	struct badblocks *bb;
	struct lockdep_map lockdep_map;
	u64 diskseq;

	/*
	 * Independent sector access ranges. This is always NULL for
	 * devices that do not have multiple independent access ranges.
	 */
	struct blk_independent_access_ranges *ia_ranges;
};

static inline bool disk_live(struct gendisk *disk)
{
	return !inode_unhashed(disk->part0->bd_inode);
}

/**
 * disk_openers - returns how many openers there are for a disk
 * @disk: disk to check
 *
 * This returns the number of openers for a disk. Note that this value is only
 * stable if disk->open_mutex is held.
 *
 * Note: Due to a quirk in the block layer open code, each open partition is
 * only counted once even if there are multiple openers.
 */
static inline unsigned int disk_openers(struct gendisk *disk)
{
	return atomic_read(&disk->part0->bd_openers);
}
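
/*
 * Example: a caller that needs a stable view of the open count has to hold
 * open_mutex around the check; the surrounding driver code is assumed:
 *
 *	mutex_lock(&disk->open_mutex);
 *	busy = disk_openers(disk) > 0;
 *	mutex_unlock(&disk->open_mutex);
 */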

/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
	(dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
	(&((disk)->part0->bd_device))

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)	((disk)->cdi)
#else
#define disk_to_cdi(disk)	NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
	return MKDEV(disk->major, disk->first_minor);
}

static inline int blk_validate_block_size(unsigned long bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}

static inline bool blk_op_is_passthrough(blk_opf_t op)
{
	op &= REQ_OP_MASK;
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

/*
 * Zoned block device models (zoned limit).
 *
 * Note: This needs to be ordered from the least to the most severe
 * restrictions for the inheritance in blk_stack_limits() to work.
 */
enum blk_zoned_model {
	BLK_ZONED_NONE = 0,	/* Regular block device */
	BLK_ZONED_HA,		/* Host-aware zoned block device */
	BLK_ZONED_HM,		/* Host-managed zoned block device */
};

/*
 * BLK_BOUNCE_NONE:	never bounce (default)
 * BLK_BOUNCE_HIGH:	bounce all highmem pages
 */
enum blk_bounce {
	BLK_BOUNCE_NONE,
	BLK_BOUNCE_HIGH,
};

struct queue_limits {
	enum blk_bounce		bounce;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_user_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		logical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_secure_erase_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		max_zone_append_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;
	unsigned int		zone_write_granularity;

	unsigned short		max_segments;
	unsigned short		max_integrity_segments;
	unsigned short		max_discard_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		raid_partial_stripes_expensive;
	enum blk_zoned_model	zoned;

	/*
	 * Drivers that set dma_alignment to less than 511 must be prepared to
	 * handle individual bvec's that are not a multiple of a SECTOR_SIZE
	 * due to possible offsets.
	 */
	unsigned int		dma_alignment;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model);

#ifdef CONFIG_BLK_DEV_ZONED

#define BLK_ALL_ZONES  ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
unsigned int bdev_nr_zones(struct block_device *bdev);
extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
			    sector_t sectors, sector_t nr_sectors,
			    gfp_t gfp_mask);
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk));

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
				     unsigned int cmd, unsigned long arg);
extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
				  unsigned int cmd, unsigned long arg);

#else /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
	return 0;
}

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
					  fmode_t mode, unsigned int cmd,
					  unsigned long arg)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */
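
/*
 * Example: iterating all zones of a zoned block device with
 * blkdev_report_zones(); the callback below is illustrative only and is
 * invoked once per reported zone:
 *
 *	static int example_report_zone(struct blk_zone *zone, unsigned int idx,
 *				       void *data)
 *	{
 *		pr_info("zone %u: start %llu len %llu\n", idx,
 *			(unsigned long long)zone->start,
 *			(unsigned long long)zone->len);
 *		return 0;
 *	}
 *
 *	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *				  example_report_zone, NULL);
 */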

/*
 * Independent access ranges: struct blk_independent_access_range describes
 * a range of contiguous sectors that can be accessed using device command
 * execution resources that are independent from the resources used for
 * other access ranges. This is typically found with single-LUN multi-actuator
 * HDDs where each access range is served by a different set of heads.
 * The set of independent ranges supported by the device is defined using
 * struct blk_independent_access_ranges. The independent ranges must not overlap
 * and must include all sectors within the disk capacity (no sector holes
 * allowed).
 * For a device with multiple ranges, requests targeting sectors in different
 * ranges can be executed in parallel. A request can straddle an access range
 * boundary.
 */
struct blk_independent_access_range {
	struct kobject		kobj;
	sector_t		sector;
	sector_t		nr_sectors;
};

struct blk_independent_access_ranges {
	struct kobject				kobj;
	bool					sysfs_registered;
	unsigned int				nr_ia_ranges;
	struct blk_independent_access_range	ia_range[];
};

struct request_queue {
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	struct percpu_ref	q_usage_counter;

	struct blk_queue_stats	*stats;
	struct rq_qos		*rq_qos;

	const struct blk_mq_ops	*mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;

	unsigned int		queue_depth;

	/* hw dispatch queues */
	struct xarray		hctx_table;
	unsigned int		nr_hw_queues;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;
	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM requests are processed.
	 */
	atomic_t		pm_only;

	/*
	 * ida allocated id for this queue. Used to index queues from
	 * ioctx.
	 */
	int			id;

	spinlock_t		queue_lock;

	struct gendisk		*disk;

	refcount_t		refs;

	/*
	 * mq queue kobject
	 */
	struct kobject		*mq_kobj;

#ifdef  CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	enum rpm_status		rpm_status;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */

	unsigned int		dma_pad_mask;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
	struct kobject *crypto_kobject;
#endif

	unsigned int		rq_timeout;
	int			poll_nsec;

	struct blk_stat_callback	*poll_cb;
	struct blk_rq_stat	*poll_stat;

	struct timer_list	timeout;
	struct work_struct	timeout_work;

	atomic_t		nr_active_requests_shared_tags;

	struct blk_mq_tags	*sched_shared_tags;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
	struct mutex		blkcg_mutex;
#endif

	struct queue_limits	limits;

	unsigned int		required_elevator_features;

	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;
	struct mutex		sysfs_dir_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head	unused_hctx_list;
	spinlock_t		unused_hctx_lock;

	int			mq_freeze_depth;

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex		mq_freeze_lock;

	int			quiesce_depth;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;

	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
	struct dentry		*rqos_debugfs_dir;
	/*
	 * Serializes all debugfs metadata operations using the above dentries.
	 */
	struct mutex		debugfs_mutex;

	bool			mq_sysfs_init_done;
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED	0	/* queue is stopped */
#define QUEUE_FLAG_DYING	1	/* queue being torn down */
#define QUEUE_FLAG_NOMERGES	3	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
#define QUEUE_FLAG_NONROT	6	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	7	/* do disk/partitions IO accounting */
#define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
#define QUEUE_FLAG_SYNCHRONOUS	11	/* always completes in submit context */
#define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
#define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15	/* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		17	/* Write back caching */
#define QUEUE_FLAG_FUA		18	/* device supports FUA writes */
#define QUEUE_FLAG_DAX		19	/* device supports DAX */
#define QUEUE_FLAG_STATS	20	/* track IO start and completion times */
#define QUEUE_FLAG_REGISTERED	22	/* queue has been registered to a disk */
#define QUEUE_FLAG_QUIESCED	24	/* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA	25	/* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26	/* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
#define QUEUE_FLAG_NOWAIT	29	/* device supports NOWAIT */
#define QUEUE_FLAG_SQ_SCHED	30	/* single queue style io dispatch */
#define QUEUE_FLAG_SKIP_TAGSET_QUIESCE	31 /* quiesce_tagset skip the queue */

#define QUEUE_FLAG_MQ_DEFAULT	((1UL << QUEUE_FLAG_IO_STAT) |		\
				 (1UL << QUEUE_FLAG_SAME_COMP) |	\
				 (1UL << QUEUE_FLAG_NOWAIT))

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);

#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_stable_writes(q) \
	test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_zone_resetall(q)	\
	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q)	\
	test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif
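
/*
 * Example: queue flags are normally set by the driver that owns the queue,
 * e.g. to mark a solid-state device that should not feed the entropy pool
 * (illustrative; q is the driver's request_queue):
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 */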

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
	test_bit(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, &(q)->queue_flags)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
		return q->limits.zoned;
	return BLK_ZONED_NONE;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
}

static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return sector >> ilog2(disk->queue->limits.chunk_sectors);
}

static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return false;
	if (!disk->conv_zones_bitmap)
		return true;
	return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
}

static inline void disk_set_max_open_zones(struct gendisk *disk,
		unsigned int max_open_zones)
{
	disk->max_open_zones = max_open_zones;
}

static inline void disk_set_max_active_zones(struct gendisk *disk,
		unsigned int max_active_zones)
{
	disk->max_active_zones = max_active_zones;
}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return bdev->bd_disk->max_open_zones;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return bdev->bd_disk->max_active_zones;
}

#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return 0;
}
static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
	return false;
}
static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	return 0;
}
static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return 0;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)

int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{
	return device_add_disk(NULL, disk, NULL);
}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);

static inline int get_disk_ro(struct gendisk *disk)
{
	return disk->part0->bd_read_only ||
		test_bit(GD_READ_ONLY, &disk->state);
}

static inline int bdev_read_only(struct block_device *bdev)
{
	return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
bool disk_force_media_change(struct gendisk *disk, unsigned int events);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
{
	return bdev->bd_start_sect;
}

static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
	return bdev->bd_nr_sectors;
}

static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
	return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}

static inline sector_t get_capacity(struct gendisk *disk)
{
	return bdev_nr_sectors(disk->part0);
}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
	return bdev_nr_sectors(sb->s_bdev) >>
		(sb->s_blocksize_bits - SECTOR_SHIFT);
}

int bdev_disk_changed(struct gendisk *disk, bool invalidate);

void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);

/**
 * blk_alloc_disk - allocate a gendisk structure
 * @node_id: numa node to allocate on
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 *
 * Context: can sleep
 */
#define blk_alloc_disk(node_id)						\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_alloc_disk(node_id, &__key);				\
})
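
/*
 * Example: typical use in a bio-based driver; error handling is abbreviated
 * and all example_* names as well as ctx and nr_sectors are illustrative:
 *
 *	disk = blk_alloc_disk(NUMA_NO_NODE);
 *	if (!disk)
 *		return -ENOMEM;
 *	snprintf(disk->disk_name, DISK_NAME_LEN, "example0");
 *	disk->fops = &example_fops;
 *	disk->private_data = ctx;
 *	set_capacity(disk, nr_sectors);
 *	err = add_disk(disk);
 *	if (err)
 *		put_disk(disk);
 */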

int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
	__register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);

bool bdev_check_media_change(struct block_device *bdev);
int __invalidate_device(struct block_device *bdev, bool kill_dirty);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
dev_t blk_lookup_devt(const char *name, int partno);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);

extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(enum req_op op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);

/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT		(1 << 0)
/* do not sleep to wait for the expected completion time */
#define BLK_POLL_NOSLEEP		(1 << 1)
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
			unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_queue;	/* this is never NULL */
}

/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int bio_zone_no(struct bio *bio)
{
	return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

static inline unsigned int bio_zone_is_seq(struct bio *bio)
{
	return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

/*
 * Return how much of the chunk is left to be used for I/O at a given offset.
 */
static inline unsigned int blk_chunk_sectors_left(sector_t offset,
		unsigned int chunk_sectors)
{
	if (unlikely(!is_power_of_2(chunk_sectors)))
		return chunk_sectors - sector_div(offset, chunk_sectors);
	return chunk_sectors - (offset & (chunk_sectors - 1));
}

/*
 * Access functions for manipulating queue properties
 */
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_discard_segments(struct request_queue *,
		unsigned short);
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
		unsigned int max_sectors);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);

struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars);
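
/*
 * Example: a driver usually applies its hardware limits right after creating
 * the queue; the values below are purely illustrative:
 *
 *	blk_queue_logical_block_size(q, 4096);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_max_hw_sectors(q, 2048);
 *	blk_queue_max_segments(q, 128);
 *	blk_queue_io_opt(q, 1024 * 1024);
 */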

/*
 * Elevator features for blk_queue_required_elevator_features:
 */
/* Supports zoned block devices sequential write constraint */
#define ELEVATOR_F_ZBD_SEQ_WRITE	(1U << 0)

extern void blk_queue_required_elevator_features(struct request_queue *q,
						 unsigned int features);
extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
					      struct device *dev);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);

void blk_mark_disk_dead(struct gendisk *disk);

#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge. For details, please see schedule() where
 * blk_flush_plug() is called.
 */
struct blk_plug {
	struct request *mq_list; /* blk-mq requests */

	/* if ios_left is > 1, we can batch tag/rq allocations */
	struct request *cached_rq;
	unsigned short nr_ios;

	unsigned short rq_count;

	bool multiple_queues;
	bool has_elevator;
	bool nowait;

	struct list_head cb_list; /* md requires an unplug callback */
};

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
	if (plug)
		__blk_flush_plug(plug, async);
}

int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
};

static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
					 unsigned short nr_ios)
{
}

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
	return 0;
}

static inline long nr_blockdev_pages(void)
{
	return 0;
}
#endif /* CONFIG_BLOCK */
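
/*
 * Example: batching several submissions under one plug so that they can be
 * merged and dispatched together (sketch; bios[] and nr_bios are assumed to
 * be set up by the caller):
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */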

extern void blk_io_schedule(void);

int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp);

#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, 0);
}
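
/*
 * Example: zeroing one megabyte starting at byte offset pos on a block
 * device, with the sector conversion written out (illustrative sketch):
 *
 *	sector_t sector = pos >> SECTOR_SHIFT;
 *	sector_t nr_sects = (1024 * 1024) >> SECTOR_SHIFT;
 *
 *	ret = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				   BLKDEV_ZERO_NOUNMAP);
 */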

static inline bool bdev_is_partition(struct block_device *bdev)
{
	return bdev->bd_partno;
}

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define BLK_DEF_MAX_SECTORS 2560u

static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_bytes(struct request_queue *q)
{
	return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(const struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
	return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
{
	const struct queue_limits *l = &q->limits;

	return min(l->max_zone_append_sectors, l->max_sectors);
}

static inline unsigned int
bdev_max_zone_append_sectors(struct block_device *bdev)
{
	return queue_max_zone_append_sectors(bdev_get_queue(bdev));
}

static inline unsigned int bdev_max_segments(struct block_device *bdev)
{
	return queue_max_segments(bdev_get_queue(bdev));
}

static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(const struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(const struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline unsigned int
queue_zone_write_granularity(const struct request_queue *q)
{
	return q->limits.zone_write_granularity;
}

static inline unsigned int
bdev_zone_write_granularity(struct block_device *bdev)
{
	return queue_zone_write_granularity(bdev_get_queue(bdev));
}

int bdev_alignment_offset(struct block_device *bdev);
unsigned int bdev_discard_alignment(struct block_device *bdev);

static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.max_discard_sectors;
}

static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.discard_granularity;
}

static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_zeroes_sectors;

	return 0;
}

static inline bool bdev_nonrot(struct block_device *bdev)
{
	return blk_queue_nonrot(bdev_get_queue(bdev));
}

static inline bool bdev_synchronous(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_SYNCHRONOUS,
			&bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_stable_writes(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_STABLE_WRITES,
			&bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_write_cache(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_fua(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_nowait(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
}

static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
{
	return blk_queue_zoned_model(bdev_get_queue(bdev));
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
	return blk_queue_is_zoned(bdev_get_queue(bdev));
}

static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
{
	return disk_zone_no(bdev->bd_disk, sec);
}

static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
					  blk_opf_t op)
{
	if (!bdev_is_zoned(bdev))
		return false;

	return op == REQ_OP_WRITE || op == REQ_OP_WRITE_ZEROES;
}

static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;
	return q->limits.chunk_sectors;
}

static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
						   sector_t sector)
{
	return sector & (bdev_zone_sectors(bdev) - 1);
}

static inline bool bdev_is_zone_start(struct block_device *bdev,
				      sector_t sector)
{
	return bdev_offset_from_zone_start(bdev, sector) == 0;
}

static inline int queue_dma_alignment(const struct request_queue *q)
{
	return q ? q->limits.dma_alignment : 511;
}

static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
	return queue_dma_alignment(bdev_get_queue(bdev));
}

static inline bool bdev_iter_is_aligned(struct block_device *bdev,
					struct iov_iter *iter)
{
	return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
				   bdev_logical_block_size(bdev) - 1);
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return 1 << bdev->bd_inode->i_blkbits;
}

int kblockd_schedule_work(struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
				       struct request_queue *q)
{
	return true;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

enum blk_unique_id {
	/* these match the Designator Types specified in SPC */
	BLK_UID_T10	= 1,
	BLK_UID_EUI64	= 2,
	BLK_UID_NAA	= 3,
};

#define NFL4_UFLG_MASK			0x0000003F

struct block_device_operations {
	void (*submit_bio)(struct bio *bio);
	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
			unsigned int flags);
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	int (*set_read_only)(struct block_device *bdev, bool ro);
	void (*free_disk)(struct gendisk *disk);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	int (*report_zones)(struct gendisk *, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
	char *(*devnode)(struct gendisk *disk, umode_t *mode);
	/* returns the length of the identifier or a negative errno: */
	int (*get_unique_id)(struct gendisk *disk, u8 id[16],
			enum blk_unique_id id_type);
	struct module *owner;
	const struct pr_ops *pr_ops;

	/*
	 * Special callback for probing GPT entry at a given sector.
	 * Needed by Android devices, used by GPT scanner and MMC blk
	 * driver.
	 */
	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};
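
/*
 * Example: a minimal bio-based driver usually fills in only a few of these
 * callbacks; all example_* names are illustrative:
 *
 *	static const struct block_device_operations example_fops = {
 *		.owner		= THIS_MODULE,
 *		.submit_bio	= example_submit_bio,
 *		.open		= example_open,
 *		.release	= example_release,
 *	};
 */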

#ifdef CONFIG_COMPAT
extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
				      unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl NULL
#endif

static inline void blk_wake_io_task(struct task_struct *waiter)
{
	/*
	 * If we're polling, the task itself is doing the completions. For
	 * that case, we don't need to signal a wakeup, it's enough to just
	 * mark us as RUNNING.
	 */
	if (waiter == current)
		__set_current_state(TASK_RUNNING);
	else
		wake_up_process(waiter);
}

unsigned long bdev_start_io_acct(struct block_device *bdev,
				 unsigned int sectors, enum req_op op,
				 unsigned long start_time);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		unsigned long start_time);

unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev);

/**
 * bio_end_io_acct - end I/O accounting for bio based drivers
 * @bio:	bio to end account for
 * @start_time:	start time returned by bio_start_io_acct()
 */
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}

int bdev_read_only(struct block_device *bdev);
int set_blocksize(struct block_device *bdev, int size);

int lookup_bdev(const char *pathname, dev_t *dev);

void blkdev_show(struct seq_file *seqf, off_t offset);

#define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
#define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_MAX	512
#else
#define BLKDEV_MAJOR_MAX	0
#endif

struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
		void *holder);
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
int bd_prepare_to_claim(struct block_device *bdev, void *holder);
void bd_abort_claiming(struct block_device *bdev, void *holder);
void blkdev_put(struct block_device *bdev, fmode_t mode);
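
/*
 * Example: opening a block device exclusively by path and dropping the
 * reference again; the path is illustrative and holder just needs to be a
 * unique cookie that identifies the claimer:
 *
 *	bdev = blkdev_get_by_path("/dev/example", FMODE_READ | FMODE_WRITE |
 *				  FMODE_EXCL, holder);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 */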

/* just for blk-cgroup, don't use elsewhere */
struct block_device *blkdev_get_no_open(dev_t dev);
void blkdev_put_no_open(struct block_device *bdev);

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
struct block_device *I_BDEV(struct inode *inode);
int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
		loff_t lend);

#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
void bdev_statx_dioalign(struct inode *inode, struct kstat *stat);
void printk_all_partitions(void);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
}
static inline int sync_blockdev(struct block_device *bdev)
{
	return 0;
}
static inline int sync_blockdev_nowait(struct block_device *bdev)
{
	return 0;
}
static inline void sync_bdevs(bool wait)
{
}
static inline void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
{
}
static inline void printk_all_partitions(void)
{
}
#endif /* CONFIG_BLOCK */

int fsync_bdev(struct block_device *bdev);

int freeze_bdev(struct block_device *bdev);
int thaw_bdev(struct block_device *bdev);

struct io_comp_batch {
	struct request *req_list;
	bool need_ts;
	void (*complete)(struct io_comp_batch *);
};

#define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }

#endif /* _LINUX_BLKDEV_H */