xref: /openbmc/linux/block/blk-settings.c (revision 46eeaa11bdd1bc9e077bdf741d32ca7235d263c6)
13dcf60bcSChristoph Hellwig // SPDX-License-Identifier: GPL-2.0
286db1e29SJens Axboe /*
386db1e29SJens Axboe  * Functions related to setting various queue properties from drivers
486db1e29SJens Axboe  */
586db1e29SJens Axboe #include <linux/kernel.h>
686db1e29SJens Axboe #include <linux/module.h>
786db1e29SJens Axboe #include <linux/init.h>
886db1e29SJens Axboe #include <linux/bio.h>
986db1e29SJens Axboe #include <linux/blkdev.h>
104ee60ec1SMatthew Wilcox (Oracle) #include <linux/pagemap.h>
11edb0872fSChristoph Hellwig #include <linux/backing-dev-defs.h>
1270dd5bf3SMartin K. Petersen #include <linux/gcd.h>
132cda2728SMartin K. Petersen #include <linux/lcm.h>
14ad5ebd2fSRandy Dunlap #include <linux/jiffies.h>
155a0e3ad6STejun Heo #include <linux/gfp.h>
1645147fb5SYoshihiro Shimoda #include <linux/dma-mapping.h>
1786db1e29SJens Axboe 
1886db1e29SJens Axboe #include "blk.h"
190bc65bd4SChristoph Hellwig #include "blk-rq-qos.h"
2087760e5eSJens Axboe #include "blk-wbt.h"
2186db1e29SJens Axboe 
blk_queue_rq_timeout(struct request_queue * q,unsigned int timeout)22242f9dcbSJens Axboe void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
23242f9dcbSJens Axboe {
24242f9dcbSJens Axboe 	q->rq_timeout = timeout;
25242f9dcbSJens Axboe }
26242f9dcbSJens Axboe EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
27242f9dcbSJens Axboe 
2886db1e29SJens Axboe /**
29e475bba2SMartin K. Petersen  * blk_set_default_limits - reset limits to default values
30f740f5caSRandy Dunlap  * @lim:  the queue_limits structure to reset
31e475bba2SMartin K. Petersen  *
32e475bba2SMartin K. Petersen  * Description:
33b1bd055dSMartin K. Petersen  *   Returns a queue_limit struct to its default state.
34e475bba2SMartin K. Petersen  */
blk_set_default_limits(struct queue_limits * lim)35e475bba2SMartin K. Petersen void blk_set_default_limits(struct queue_limits *lim)
36e475bba2SMartin K. Petersen {
378a78362cSMartin K. Petersen 	lim->max_segments = BLK_MAX_SEGMENTS;
381e739730SChristoph Hellwig 	lim->max_discard_segments = 1;
3913f05c8dSMartin K. Petersen 	lim->max_integrity_segments = 0;
40e475bba2SMartin K. Petersen 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
4103100aadSKeith Busch 	lim->virt_boundary_mask = 0;
42eb28d31bSMartin K. Petersen 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
435f009d3fSKeith Busch 	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
44c9c77418SKeith Busch 	lim->max_user_sectors = lim->max_dev_sectors = 0;
45762380adSJens Axboe 	lim->chunk_sectors = 0;
46a6f0788eSChaitanya Kulkarni 	lim->max_write_zeroes_sectors = 0;
470512a75bSKeith Busch 	lim->max_zone_append_sectors = 0;
4886b37281SMartin K. Petersen 	lim->max_discard_sectors = 0;
490034af03SJens Axboe 	lim->max_hw_discard_sectors = 0;
5044abff2cSChristoph Hellwig 	lim->max_secure_erase_sectors = 0;
5186b37281SMartin K. Petersen 	lim->discard_granularity = 0;
5286b37281SMartin K. Petersen 	lim->discard_alignment = 0;
5386b37281SMartin K. Petersen 	lim->discard_misaligned = 0;
54e475bba2SMartin K. Petersen 	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
559bb33f24SChristoph Hellwig 	lim->bounce = BLK_BOUNCE_NONE;
56e475bba2SMartin K. Petersen 	lim->alignment_offset = 0;
57e475bba2SMartin K. Petersen 	lim->io_opt = 0;
58e475bba2SMartin K. Petersen 	lim->misaligned = 0;
59797476b8SDamien Le Moal 	lim->zoned = BLK_ZONED_NONE;
60a805a4faSDamien Le Moal 	lim->zone_write_granularity = 0;
61c964d62fSKeith Busch 	lim->dma_alignment = 511;
62e475bba2SMartin K. Petersen }
63e475bba2SMartin K. Petersen 
64e475bba2SMartin K. Petersen /**
65b1bd055dSMartin K. Petersen  * blk_set_stacking_limits - set default limits for stacking devices
66b1bd055dSMartin K. Petersen  * @lim:  the queue_limits structure to reset
67b1bd055dSMartin K. Petersen  *
68b1bd055dSMartin K. Petersen  * Description:
69b1bd055dSMartin K. Petersen  *   Returns a queue_limit struct to its default state. Should be used
70b1bd055dSMartin K. Petersen  *   by stacking drivers like DM that have no internal limits.
71b1bd055dSMartin K. Petersen  */
blk_set_stacking_limits(struct queue_limits * lim)72b1bd055dSMartin K. Petersen void blk_set_stacking_limits(struct queue_limits *lim)
73b1bd055dSMartin K. Petersen {
74b1bd055dSMartin K. Petersen 	blk_set_default_limits(lim);
75b1bd055dSMartin K. Petersen 
76b1bd055dSMartin K. Petersen 	/* Inherit limits from component devices */
77b1bd055dSMartin K. Petersen 	lim->max_segments = USHRT_MAX;
7842c9cdfeSMike Snitzer 	lim->max_discard_segments = USHRT_MAX;
79b1bd055dSMartin K. Petersen 	lim->max_hw_sectors = UINT_MAX;
80d82ae52eSMike Snitzer 	lim->max_segment_size = UINT_MAX;
81fe86cdceSMike Snitzer 	lim->max_sectors = UINT_MAX;
82ca369d51SMartin K. Petersen 	lim->max_dev_sectors = UINT_MAX;
83a6f0788eSChaitanya Kulkarni 	lim->max_write_zeroes_sectors = UINT_MAX;
840512a75bSKeith Busch 	lim->max_zone_append_sectors = UINT_MAX;
85b1bd055dSMartin K. Petersen }
86b1bd055dSMartin K. Petersen EXPORT_SYMBOL(blk_set_stacking_limits);
87b1bd055dSMartin K. Petersen 
88b1bd055dSMartin K. Petersen /**
8986db1e29SJens Axboe  * blk_queue_bounce_limit - set bounce buffer limit for queue
9086db1e29SJens Axboe  * @q: the request queue for the device
919bb33f24SChristoph Hellwig  * @bounce: bounce limit to enforce
9286db1e29SJens Axboe  *
9386db1e29SJens Axboe  * Description:
949bb33f24SChristoph Hellwig  *    Force bouncing for ISA DMA ranges or highmem.
959bb33f24SChristoph Hellwig  *
969bb33f24SChristoph Hellwig  *    DEPRECATED, don't use in new code.
9786db1e29SJens Axboe  **/
blk_queue_bounce_limit(struct request_queue * q,enum blk_bounce bounce)989bb33f24SChristoph Hellwig void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
9986db1e29SJens Axboe {
1009bb33f24SChristoph Hellwig 	q->limits.bounce = bounce;
10186db1e29SJens Axboe }
10286db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_bounce_limit);
10386db1e29SJens Axboe 
10486db1e29SJens Axboe /**
105ca369d51SMartin K. Petersen  * blk_queue_max_hw_sectors - set max sectors for a request for this queue
106ca369d51SMartin K. Petersen  * @q:  the request queue for the device
1072800aac1SMartin K. Petersen  * @max_hw_sectors:  max hardware sectors in the usual 512b unit
10886db1e29SJens Axboe  *
10986db1e29SJens Axboe  * Description:
1102800aac1SMartin K. Petersen  *    Enables a low level driver to set a hard upper limit,
1112800aac1SMartin K. Petersen  *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
1124f258a46SMartin K. Petersen  *    the device driver based upon the capabilities of the I/O
1134f258a46SMartin K. Petersen  *    controller.
1142800aac1SMartin K. Petersen  *
115ca369d51SMartin K. Petersen  *    max_dev_sectors is a hard limit imposed by the storage device for
116ca369d51SMartin K. Petersen  *    READ/WRITE requests. It is set by the disk driver.
117ca369d51SMartin K. Petersen  *
1182800aac1SMartin K. Petersen  *    max_sectors is a soft limit imposed by the block layer for
1192800aac1SMartin K. Petersen  *    filesystem type requests.  This value can be overridden on a
1202800aac1SMartin K. Petersen  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
1212800aac1SMartin K. Petersen  *    The soft limit can not exceed max_hw_sectors.
12286db1e29SJens Axboe  **/
blk_queue_max_hw_sectors(struct request_queue * q,unsigned int max_hw_sectors)123ca369d51SMartin K. Petersen void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
12486db1e29SJens Axboe {
125ca369d51SMartin K. Petersen 	struct queue_limits *limits = &q->limits;
126ca369d51SMartin K. Petersen 	unsigned int max_sectors;
127ca369d51SMartin K. Petersen 
12809cbfeafSKirill A. Shutemov 	if ((max_hw_sectors << 9) < PAGE_SIZE) {
12909cbfeafSKirill A. Shutemov 		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
13024c03d47SHarvey Harrison 		printk(KERN_INFO "%s: set to minimum %d\n",
1312800aac1SMartin K. Petersen 		       __func__, max_hw_sectors);
13286db1e29SJens Axboe 	}
13386db1e29SJens Axboe 
134817046ecSDamien Le Moal 	max_hw_sectors = round_down(max_hw_sectors,
135817046ecSDamien Le Moal 				    limits->logical_block_size >> SECTOR_SHIFT);
13630e2bc08SJeff Moyer 	limits->max_hw_sectors = max_hw_sectors;
137817046ecSDamien Le Moal 
138ca369d51SMartin K. Petersen 	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
139c9c77418SKeith Busch 
140c9c77418SKeith Busch 	if (limits->max_user_sectors)
141c9c77418SKeith Busch 		max_sectors = min(max_sectors, limits->max_user_sectors);
142c9c77418SKeith Busch 	else
1430a26f327SKeith Busch 		max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS);
144c9c77418SKeith Busch 
145817046ecSDamien Le Moal 	max_sectors = round_down(max_sectors,
146817046ecSDamien Le Moal 				 limits->logical_block_size >> SECTOR_SHIFT);
147ca369d51SMartin K. Petersen 	limits->max_sectors = max_sectors;
148817046ecSDamien Le Moal 
149d152c682SChristoph Hellwig 	if (!q->disk)
150edb0872fSChristoph Hellwig 		return;
151d152c682SChristoph Hellwig 	q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
15272d4cd9fSMike Snitzer }
153086fa5ffSMartin K. Petersen EXPORT_SYMBOL(blk_queue_max_hw_sectors);
15486db1e29SJens Axboe 
15586db1e29SJens Axboe /**
156762380adSJens Axboe  * blk_queue_chunk_sectors - set size of the chunk for this queue
157762380adSJens Axboe  * @q:  the request queue for the device
158762380adSJens Axboe  * @chunk_sectors:  chunk sectors in the usual 512b unit
159762380adSJens Axboe  *
160762380adSJens Axboe  * Description:
161762380adSJens Axboe  *    If a driver doesn't want IOs to cross a given chunk size, it can set
16207d098e6SMike Snitzer  *    this limit and prevent merging across chunks. Note that the block layer
16307d098e6SMike Snitzer  *    must accept a page worth of data at any offset. So if the crossing of
16407d098e6SMike Snitzer  *    chunks is a hard limitation in the driver, it must still be prepared
16507d098e6SMike Snitzer  *    to split single page bios.
166762380adSJens Axboe  **/
blk_queue_chunk_sectors(struct request_queue * q,unsigned int chunk_sectors)167762380adSJens Axboe void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
168762380adSJens Axboe {
169762380adSJens Axboe 	q->limits.chunk_sectors = chunk_sectors;
170762380adSJens Axboe }
171762380adSJens Axboe EXPORT_SYMBOL(blk_queue_chunk_sectors);
172762380adSJens Axboe 
173762380adSJens Axboe /**
17467efc925SChristoph Hellwig  * blk_queue_max_discard_sectors - set max sectors for a single discard
17567efc925SChristoph Hellwig  * @q:  the request queue for the device
176c7ebf065SRandy Dunlap  * @max_discard_sectors: maximum number of sectors to discard
17767efc925SChristoph Hellwig  **/
blk_queue_max_discard_sectors(struct request_queue * q,unsigned int max_discard_sectors)17867efc925SChristoph Hellwig void blk_queue_max_discard_sectors(struct request_queue *q,
17967efc925SChristoph Hellwig 		unsigned int max_discard_sectors)
18067efc925SChristoph Hellwig {
1810034af03SJens Axboe 	q->limits.max_hw_discard_sectors = max_discard_sectors;
18267efc925SChristoph Hellwig 	q->limits.max_discard_sectors = max_discard_sectors;
18367efc925SChristoph Hellwig }
18467efc925SChristoph Hellwig EXPORT_SYMBOL(blk_queue_max_discard_sectors);
18567efc925SChristoph Hellwig 
18667efc925SChristoph Hellwig /**
18744abff2cSChristoph Hellwig  * blk_queue_max_secure_erase_sectors - set max sectors for a secure erase
18844abff2cSChristoph Hellwig  * @q:  the request queue for the device
18944abff2cSChristoph Hellwig  * @max_sectors: maximum number of sectors to secure_erase
19044abff2cSChristoph Hellwig  **/
blk_queue_max_secure_erase_sectors(struct request_queue * q,unsigned int max_sectors)19144abff2cSChristoph Hellwig void blk_queue_max_secure_erase_sectors(struct request_queue *q,
19244abff2cSChristoph Hellwig 		unsigned int max_sectors)
19344abff2cSChristoph Hellwig {
19444abff2cSChristoph Hellwig 	q->limits.max_secure_erase_sectors = max_sectors;
19544abff2cSChristoph Hellwig }
19644abff2cSChristoph Hellwig EXPORT_SYMBOL(blk_queue_max_secure_erase_sectors);
19744abff2cSChristoph Hellwig 
19844abff2cSChristoph Hellwig /**
199a6f0788eSChaitanya Kulkarni  * blk_queue_max_write_zeroes_sectors - set max sectors for a single
200a6f0788eSChaitanya Kulkarni  *                                      write zeroes
201a6f0788eSChaitanya Kulkarni  * @q:  the request queue for the device
202a6f0788eSChaitanya Kulkarni  * @max_write_zeroes_sectors: maximum number of sectors to write per command
203a6f0788eSChaitanya Kulkarni  **/
blk_queue_max_write_zeroes_sectors(struct request_queue * q,unsigned int max_write_zeroes_sectors)204a6f0788eSChaitanya Kulkarni void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
205a6f0788eSChaitanya Kulkarni 		unsigned int max_write_zeroes_sectors)
206a6f0788eSChaitanya Kulkarni {
207a6f0788eSChaitanya Kulkarni 	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
208a6f0788eSChaitanya Kulkarni }
209a6f0788eSChaitanya Kulkarni EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
210a6f0788eSChaitanya Kulkarni 
211a6f0788eSChaitanya Kulkarni /**
2120512a75bSKeith Busch  * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
2130512a75bSKeith Busch  * @q:  the request queue for the device
2140512a75bSKeith Busch  * @max_zone_append_sectors: maximum number of sectors to write per command
2150512a75bSKeith Busch  **/
blk_queue_max_zone_append_sectors(struct request_queue * q,unsigned int max_zone_append_sectors)2160512a75bSKeith Busch void blk_queue_max_zone_append_sectors(struct request_queue *q,
2170512a75bSKeith Busch 		unsigned int max_zone_append_sectors)
2180512a75bSKeith Busch {
2190512a75bSKeith Busch 	unsigned int max_sectors;
2200512a75bSKeith Busch 
2210512a75bSKeith Busch 	if (WARN_ON(!blk_queue_is_zoned(q)))
2220512a75bSKeith Busch 		return;
2230512a75bSKeith Busch 
2240512a75bSKeith Busch 	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
2250512a75bSKeith Busch 	max_sectors = min(q->limits.chunk_sectors, max_sectors);
2260512a75bSKeith Busch 
2270512a75bSKeith Busch 	/*
2280512a75bSKeith Busch 	 * Signal eventual driver bugs resulting in the max_zone_append sectors limit
2290512a75bSKeith Busch 	 * being 0 due to a 0 argument, the chunk_sectors limit (zone size) not set,
2300512a75bSKeith Busch 	 * or the max_hw_sectors limit not set.
2310512a75bSKeith Busch 	 */
2320512a75bSKeith Busch 	WARN_ON(!max_sectors);
2330512a75bSKeith Busch 
2340512a75bSKeith Busch 	q->limits.max_zone_append_sectors = max_sectors;
2350512a75bSKeith Busch }
2360512a75bSKeith Busch EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
2370512a75bSKeith Busch 
2380512a75bSKeith Busch /**
2398a78362cSMartin K. Petersen  * blk_queue_max_segments - set max hw segments for a request for this queue
24086db1e29SJens Axboe  * @q:  the request queue for the device
24186db1e29SJens Axboe  * @max_segments:  max number of segments
24286db1e29SJens Axboe  *
24386db1e29SJens Axboe  * Description:
24486db1e29SJens Axboe  *    Enables a low level driver to set an upper limit on the number of
2458a78362cSMartin K. Petersen  *    hw data segments in a request.
24686db1e29SJens Axboe  **/
blk_queue_max_segments(struct request_queue * q,unsigned short max_segments)2478a78362cSMartin K. Petersen void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
24886db1e29SJens Axboe {
24986db1e29SJens Axboe 	if (!max_segments) {
25086db1e29SJens Axboe 		max_segments = 1;
25124c03d47SHarvey Harrison 		printk(KERN_INFO "%s: set to minimum %d\n",
25224c03d47SHarvey Harrison 		       __func__, max_segments);
25386db1e29SJens Axboe 	}
25486db1e29SJens Axboe 
2558a78362cSMartin K. Petersen 	q->limits.max_segments = max_segments;
25686db1e29SJens Axboe }
2578a78362cSMartin K. Petersen EXPORT_SYMBOL(blk_queue_max_segments);
25886db1e29SJens Axboe 
25986db1e29SJens Axboe /**
2601e739730SChristoph Hellwig  * blk_queue_max_discard_segments - set max segments for discard requests
2611e739730SChristoph Hellwig  * @q:  the request queue for the device
2621e739730SChristoph Hellwig  * @max_segments:  max number of segments
2631e739730SChristoph Hellwig  *
2641e739730SChristoph Hellwig  * Description:
2651e739730SChristoph Hellwig  *    Enables a low level driver to set an upper limit on the number of
2661e739730SChristoph Hellwig  *    segments in a discard request.
2671e739730SChristoph Hellwig  **/
blk_queue_max_discard_segments(struct request_queue * q,unsigned short max_segments)2681e739730SChristoph Hellwig void blk_queue_max_discard_segments(struct request_queue *q,
2691e739730SChristoph Hellwig 		unsigned short max_segments)
2701e739730SChristoph Hellwig {
2711e739730SChristoph Hellwig 	q->limits.max_discard_segments = max_segments;
2721e739730SChristoph Hellwig }
2731e739730SChristoph Hellwig EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
2741e739730SChristoph Hellwig 
2751e739730SChristoph Hellwig /**
27686db1e29SJens Axboe  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
27786db1e29SJens Axboe  * @q:  the request queue for the device
27886db1e29SJens Axboe  * @max_size:  max size of segment in bytes
27986db1e29SJens Axboe  *
28086db1e29SJens Axboe  * Description:
28186db1e29SJens Axboe  *    Enables a low level driver to set an upper limit on the size of a
28286db1e29SJens Axboe  *    coalesced segment
28386db1e29SJens Axboe  **/
blk_queue_max_segment_size(struct request_queue * q,unsigned int max_size)28486db1e29SJens Axboe void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
28586db1e29SJens Axboe {
28609cbfeafSKirill A. Shutemov 	if (max_size < PAGE_SIZE) {
28709cbfeafSKirill A. Shutemov 		max_size = PAGE_SIZE;
28824c03d47SHarvey Harrison 		printk(KERN_INFO "%s: set to minimum %d\n",
28924c03d47SHarvey Harrison 		       __func__, max_size);
29086db1e29SJens Axboe 	}
29186db1e29SJens Axboe 
29209324d32SChristoph Hellwig 	/* see blk_queue_virt_boundary() for the explanation */
29309324d32SChristoph Hellwig 	WARN_ON_ONCE(q->limits.virt_boundary_mask);
29409324d32SChristoph Hellwig 
295025146e1SMartin K. Petersen 	q->limits.max_segment_size = max_size;
29686db1e29SJens Axboe }
29786db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_max_segment_size);
29886db1e29SJens Axboe 
29986db1e29SJens Axboe /**
300e1defc4fSMartin K. Petersen  * blk_queue_logical_block_size - set logical block size for the queue
30186db1e29SJens Axboe  * @q:  the request queue for the device
302e1defc4fSMartin K. Petersen  * @size:  the logical block size, in bytes
30386db1e29SJens Axboe  *
30486db1e29SJens Axboe  * Description:
305e1defc4fSMartin K. Petersen  *   This should be set to the lowest possible block size that the
306e1defc4fSMartin K. Petersen  *   storage device can address.  The default of 512 covers most
307e1defc4fSMartin K. Petersen  *   hardware.
30886db1e29SJens Axboe  **/
blk_queue_logical_block_size(struct request_queue * q,unsigned int size)309ad6bf88aSMikulas Patocka void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
31086db1e29SJens Axboe {
311817046ecSDamien Le Moal 	struct queue_limits *limits = &q->limits;
312c72758f3SMartin K. Petersen 
313817046ecSDamien Le Moal 	limits->logical_block_size = size;
314c72758f3SMartin K. Petersen 
315817046ecSDamien Le Moal 	if (limits->physical_block_size < size)
316817046ecSDamien Le Moal 		limits->physical_block_size = size;
317817046ecSDamien Le Moal 
318817046ecSDamien Le Moal 	if (limits->io_min < limits->physical_block_size)
319817046ecSDamien Le Moal 		limits->io_min = limits->physical_block_size;
320817046ecSDamien Le Moal 
321817046ecSDamien Le Moal 	limits->max_hw_sectors =
322817046ecSDamien Le Moal 		round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
323817046ecSDamien Le Moal 	limits->max_sectors =
324817046ecSDamien Le Moal 		round_down(limits->max_sectors, size >> SECTOR_SHIFT);
32586db1e29SJens Axboe }
326e1defc4fSMartin K. Petersen EXPORT_SYMBOL(blk_queue_logical_block_size);
32786db1e29SJens Axboe 
328c72758f3SMartin K. Petersen /**
329c72758f3SMartin K. Petersen  * blk_queue_physical_block_size - set physical block size for the queue
330c72758f3SMartin K. Petersen  * @q:  the request queue for the device
331c72758f3SMartin K. Petersen  * @size:  the physical block size, in bytes
332c72758f3SMartin K. Petersen  *
333c72758f3SMartin K. Petersen  * Description:
334c72758f3SMartin K. Petersen  *   This should be set to the lowest possible sector size that the
335c72758f3SMartin K. Petersen  *   hardware can operate on without reverting to read-modify-write
336c72758f3SMartin K. Petersen  *   operations.
337c72758f3SMartin K. Petersen  */
blk_queue_physical_block_size(struct request_queue * q,unsigned int size)338892b6f90SMartin K. Petersen void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
339c72758f3SMartin K. Petersen {
340c72758f3SMartin K. Petersen 	q->limits.physical_block_size = size;
341c72758f3SMartin K. Petersen 
342c72758f3SMartin K. Petersen 	if (q->limits.physical_block_size < q->limits.logical_block_size)
343c72758f3SMartin K. Petersen 		q->limits.physical_block_size = q->limits.logical_block_size;
344c72758f3SMartin K. Petersen 
345c72758f3SMartin K. Petersen 	if (q->limits.io_min < q->limits.physical_block_size)
346c72758f3SMartin K. Petersen 		q->limits.io_min = q->limits.physical_block_size;
347c72758f3SMartin K. Petersen }
348c72758f3SMartin K. Petersen EXPORT_SYMBOL(blk_queue_physical_block_size);
349c72758f3SMartin K. Petersen 
350c72758f3SMartin K. Petersen /**
351a805a4faSDamien Le Moal  * blk_queue_zone_write_granularity - set zone write granularity for the queue
352a805a4faSDamien Le Moal  * @q:  the request queue for the zoned device
353a805a4faSDamien Le Moal  * @size:  the zone write granularity size, in bytes
354a805a4faSDamien Le Moal  *
355a805a4faSDamien Le Moal  * Description:
356a805a4faSDamien Le Moal  *   This should be set to the lowest possible size allowing to write in
357a805a4faSDamien Le Moal  *   sequential zones of a zoned block device.
358a805a4faSDamien Le Moal  */
blk_queue_zone_write_granularity(struct request_queue * q,unsigned int size)359a805a4faSDamien Le Moal void blk_queue_zone_write_granularity(struct request_queue *q,
360a805a4faSDamien Le Moal 				      unsigned int size)
361a805a4faSDamien Le Moal {
362a805a4faSDamien Le Moal 	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
363a805a4faSDamien Le Moal 		return;
364a805a4faSDamien Le Moal 
365a805a4faSDamien Le Moal 	q->limits.zone_write_granularity = size;
366a805a4faSDamien Le Moal 
367a805a4faSDamien Le Moal 	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
368a805a4faSDamien Le Moal 		q->limits.zone_write_granularity = q->limits.logical_block_size;
369a805a4faSDamien Le Moal }
370a805a4faSDamien Le Moal EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);
371a805a4faSDamien Le Moal 
372a805a4faSDamien Le Moal /**
373c72758f3SMartin K. Petersen  * blk_queue_alignment_offset - set physical block alignment offset
374c72758f3SMartin K. Petersen  * @q:	the request queue for the device
3758ebf9756SRandy Dunlap  * @offset: alignment offset in bytes
376c72758f3SMartin K. Petersen  *
377c72758f3SMartin K. Petersen  * Description:
378c72758f3SMartin K. Petersen  *   Some devices are naturally misaligned to compensate for things like
379c72758f3SMartin K. Petersen  *   the legacy DOS partition table 63-sector offset.  Low-level drivers
380c72758f3SMartin K. Petersen  *   should call this function for devices whose first sector is not
381c72758f3SMartin K. Petersen  *   naturally aligned.
382c72758f3SMartin K. Petersen  */
blk_queue_alignment_offset(struct request_queue * q,unsigned int offset)383c72758f3SMartin K. Petersen void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
384c72758f3SMartin K. Petersen {
385c72758f3SMartin K. Petersen 	q->limits.alignment_offset =
386c72758f3SMartin K. Petersen 		offset & (q->limits.physical_block_size - 1);
387c72758f3SMartin K. Petersen 	q->limits.misaligned = 0;
388c72758f3SMartin K. Petersen }
389c72758f3SMartin K. Petersen EXPORT_SYMBOL(blk_queue_alignment_offset);
390c72758f3SMartin K. Petersen 
disk_update_readahead(struct gendisk * disk)391471aa704SChristoph Hellwig void disk_update_readahead(struct gendisk *disk)
392c2e4cd57SChristoph Hellwig {
393471aa704SChristoph Hellwig 	struct request_queue *q = disk->queue;
394471aa704SChristoph Hellwig 
395c2e4cd57SChristoph Hellwig 	/*
396c2e4cd57SChristoph Hellwig 	 * For read-ahead of large files to be effective, we need to read ahead
397c2e4cd57SChristoph Hellwig 	 * at least twice the optimal I/O size.
398c2e4cd57SChristoph Hellwig 	 */
399edb0872fSChristoph Hellwig 	disk->bdi->ra_pages =
400c2e4cd57SChristoph Hellwig 		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
401edb0872fSChristoph Hellwig 	disk->bdi->io_pages = queue_max_sectors(q) >> (PAGE_SHIFT - 9);
402c2e4cd57SChristoph Hellwig }
403471aa704SChristoph Hellwig EXPORT_SYMBOL_GPL(disk_update_readahead);
404c2e4cd57SChristoph Hellwig 
405c72758f3SMartin K. Petersen /**
4067c958e32SMartin K. Petersen  * blk_limits_io_min - set minimum request size for a device
4077c958e32SMartin K. Petersen  * @limits: the queue limits
4087c958e32SMartin K. Petersen  * @min:  smallest I/O size in bytes
4097c958e32SMartin K. Petersen  *
4107c958e32SMartin K. Petersen  * Description:
4117c958e32SMartin K. Petersen  *   Some devices have an internal block size bigger than the reported
4127c958e32SMartin K. Petersen  *   hardware sector size.  This function can be used to signal the
4137c958e32SMartin K. Petersen  *   smallest I/O the device can perform without incurring a performance
4147c958e32SMartin K. Petersen  *   penalty.
4157c958e32SMartin K. Petersen  */
blk_limits_io_min(struct queue_limits * limits,unsigned int min)4167c958e32SMartin K. Petersen void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
4177c958e32SMartin K. Petersen {
4187c958e32SMartin K. Petersen 	limits->io_min = min;
4197c958e32SMartin K. Petersen 
4207c958e32SMartin K. Petersen 	if (limits->io_min < limits->logical_block_size)
4217c958e32SMartin K. Petersen 		limits->io_min = limits->logical_block_size;
4227c958e32SMartin K. Petersen 
4237c958e32SMartin K. Petersen 	if (limits->io_min < limits->physical_block_size)
4247c958e32SMartin K. Petersen 		limits->io_min = limits->physical_block_size;
4257c958e32SMartin K. Petersen }
4267c958e32SMartin K. Petersen EXPORT_SYMBOL(blk_limits_io_min);
4277c958e32SMartin K. Petersen 
4287c958e32SMartin K. Petersen /**
429c72758f3SMartin K. Petersen  * blk_queue_io_min - set minimum request size for the queue
430c72758f3SMartin K. Petersen  * @q:	the request queue for the device
4318ebf9756SRandy Dunlap  * @min:  smallest I/O size in bytes
432c72758f3SMartin K. Petersen  *
433c72758f3SMartin K. Petersen  * Description:
4347e5f5fb0SMartin K. Petersen  *   Storage devices may report a granularity or preferred minimum I/O
4357e5f5fb0SMartin K. Petersen  *   size which is the smallest request the device can perform without
4367e5f5fb0SMartin K. Petersen  *   incurring a performance penalty.  For disk drives this is often the
4377e5f5fb0SMartin K. Petersen  *   physical block size.  For RAID arrays it is often the stripe chunk
4387e5f5fb0SMartin K. Petersen  *   size.  A properly aligned multiple of minimum_io_size is the
4397e5f5fb0SMartin K. Petersen  *   preferred request size for workloads where a high number of I/O
4407e5f5fb0SMartin K. Petersen  *   operations is desired.
441c72758f3SMartin K. Petersen  */
blk_queue_io_min(struct request_queue * q,unsigned int min)442c72758f3SMartin K. Petersen void blk_queue_io_min(struct request_queue *q, unsigned int min)
443c72758f3SMartin K. Petersen {
4447c958e32SMartin K. Petersen 	blk_limits_io_min(&q->limits, min);
445c72758f3SMartin K. Petersen }
446c72758f3SMartin K. Petersen EXPORT_SYMBOL(blk_queue_io_min);
447c72758f3SMartin K. Petersen 
448c72758f3SMartin K. Petersen /**
4493c5820c7SMartin K. Petersen  * blk_limits_io_opt - set optimal request size for a device
4503c5820c7SMartin K. Petersen  * @limits: the queue limits
4513c5820c7SMartin K. Petersen  * @opt:  smallest I/O size in bytes
4523c5820c7SMartin K. Petersen  *
4533c5820c7SMartin K. Petersen  * Description:
4543c5820c7SMartin K. Petersen  *   Storage devices may report an optimal I/O size, which is the
4553c5820c7SMartin K. Petersen  *   device's preferred unit for sustained I/O.  This is rarely reported
4563c5820c7SMartin K. Petersen  *   for disk drives.  For RAID arrays it is usually the stripe width or
4573c5820c7SMartin K. Petersen  *   the internal track size.  A properly aligned multiple of
4583c5820c7SMartin K. Petersen  *   optimal_io_size is the preferred request size for workloads where
4593c5820c7SMartin K. Petersen  *   sustained throughput is desired.
4603c5820c7SMartin K. Petersen  */
blk_limits_io_opt(struct queue_limits * limits,unsigned int opt)4613c5820c7SMartin K. Petersen void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
4623c5820c7SMartin K. Petersen {
4633c5820c7SMartin K. Petersen 	limits->io_opt = opt;
4643c5820c7SMartin K. Petersen }
4653c5820c7SMartin K. Petersen EXPORT_SYMBOL(blk_limits_io_opt);
4663c5820c7SMartin K. Petersen 
4673c5820c7SMartin K. Petersen /**
468c72758f3SMartin K. Petersen  * blk_queue_io_opt - set optimal request size for the queue
469c72758f3SMartin K. Petersen  * @q:	the request queue for the device
4708ebf9756SRandy Dunlap  * @opt:  optimal request size in bytes
471c72758f3SMartin K. Petersen  *
472c72758f3SMartin K. Petersen  * Description:
4737e5f5fb0SMartin K. Petersen  *   Storage devices may report an optimal I/O size, which is the
4747e5f5fb0SMartin K. Petersen  *   device's preferred unit for sustained I/O.  This is rarely reported
4757e5f5fb0SMartin K. Petersen  *   for disk drives.  For RAID arrays it is usually the stripe width or
4767e5f5fb0SMartin K. Petersen  *   the internal track size.  A properly aligned multiple of
4777e5f5fb0SMartin K. Petersen  *   optimal_io_size is the preferred request size for workloads where
4787e5f5fb0SMartin K. Petersen  *   sustained throughput is desired.
479c72758f3SMartin K. Petersen  */
blk_queue_io_opt(struct request_queue * q,unsigned int opt)480c72758f3SMartin K. Petersen void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
481c72758f3SMartin K. Petersen {
4823c5820c7SMartin K. Petersen 	blk_limits_io_opt(&q->limits, opt);
483d152c682SChristoph Hellwig 	if (!q->disk)
484edb0872fSChristoph Hellwig 		return;
485d152c682SChristoph Hellwig 	q->disk->bdi->ra_pages =
486c2e4cd57SChristoph Hellwig 		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
487c72758f3SMartin K. Petersen }
488c72758f3SMartin K. Petersen EXPORT_SYMBOL(blk_queue_io_opt);
489c72758f3SMartin K. Petersen 
/* Alignment offset (in bytes) of @sector relative to the queue limits. */
static int queue_limit_alignment_offset(const struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int remainder;

	/* Position of the sector within one granularity unit, in bytes. */
	remainder = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - remainder) % granularity;
}
49989098b07SChristoph Hellwig 
/* Discard alignment (in bytes) of @sector relative to the queue limits. */
static unsigned int queue_limit_discard_alignment(
		const struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	/* No discard support means no alignment requirement. */
	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}
5235c4b4a5cSChristoph Hellwig 
blk_round_down_sectors(unsigned int sectors,unsigned int lbs)52497f433c3SMikulas Patocka static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
52597f433c3SMikulas Patocka {
52697f433c3SMikulas Patocka 	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
52797f433c3SMikulas Patocka 	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
52897f433c3SMikulas Patocka 		sectors = PAGE_SIZE >> SECTOR_SHIFT;
52997f433c3SMikulas Patocka 	return sectors;
53097f433c3SMikulas Patocka }
53197f433c3SMikulas Patocka 
/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	/*
	 * Transfer-size limits: the stacked device can never accept more
	 * than its most restrictive component, so take the minimum of each
	 * pair (treating 0 as "unset" where min_not_zero() is used).
	 */
	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
					b->max_zone_append_sectors);
	t->bounce = max(t->bounce, b->bounce);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					    b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	/* A single misaligned component makes the whole stack misaligned. */
	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	/* Block-size limits grow to the largest component requirement. */
	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Sector limits must stay multiples of the (possibly grown) LBS. */
	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}
	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
						   b->max_secure_erase_sectors);
	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	t->zoned = max(t->zoned, b->zoned);
	/* A non-zoned stack carries no zone-specific limits. */
	if (!t->zoned) {
		t->zone_write_granularity = 0;
		t->max_zone_append_sectors = 0;
	}
	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
696c72758f3SMartin K. Petersen 
697c72758f3SMartin K. Petersen /**
698c72758f3SMartin K. Petersen  * disk_stack_limits - adjust queue limits for stacked drivers
69977634f33SMartin K. Petersen  * @disk:  MD/DM gendisk (top)
700c72758f3SMartin K. Petersen  * @bdev:  the underlying block device (bottom)
701c72758f3SMartin K. Petersen  * @offset:  offset to beginning of data within component device
702c72758f3SMartin K. Petersen  *
703c72758f3SMartin K. Petersen  * Description:
704e03a72e1SMartin K. Petersen  *    Merges the limits for a top level gendisk and a bottom level
705e03a72e1SMartin K. Petersen  *    block_device.
706c72758f3SMartin K. Petersen  */
disk_stack_limits(struct gendisk * disk,struct block_device * bdev,sector_t offset)707c72758f3SMartin K. Petersen void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
708c72758f3SMartin K. Petersen 		       sector_t offset)
709c72758f3SMartin K. Petersen {
710c72758f3SMartin K. Petersen 	struct request_queue *t = disk->queue;
711c72758f3SMartin K. Petersen 
7129efa82efSChristoph Hellwig 	if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
713453b8ab6SChristoph Hellwig 			get_start_sect(bdev) + (offset >> 9)) < 0)
714453b8ab6SChristoph Hellwig 		pr_notice("%s: Warning: Device %pg is misaligned\n",
715453b8ab6SChristoph Hellwig 			disk->disk_name, bdev);
716e74d93e9SKonstantin Khlebnikov 
717471aa704SChristoph Hellwig 	disk_update_readahead(disk);
718c72758f3SMartin K. Petersen }
719c72758f3SMartin K. Petersen EXPORT_SYMBOL(disk_stack_limits);
720c72758f3SMartin K. Petersen 
721c72758f3SMartin K. Petersen /**
72227f8221aSFUJITA Tomonori  * blk_queue_update_dma_pad - update pad mask
72327f8221aSFUJITA Tomonori  * @q:     the request queue for the device
72427f8221aSFUJITA Tomonori  * @mask:  pad mask
72527f8221aSFUJITA Tomonori  *
72627f8221aSFUJITA Tomonori  * Update dma pad mask.
72727f8221aSFUJITA Tomonori  *
72827f8221aSFUJITA Tomonori  * Appending pad buffer to a request modifies the last entry of a
72927f8221aSFUJITA Tomonori  * scatter list such that it includes the pad buffer.
73027f8221aSFUJITA Tomonori  **/
blk_queue_update_dma_pad(struct request_queue * q,unsigned int mask)73127f8221aSFUJITA Tomonori void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
73227f8221aSFUJITA Tomonori {
73327f8221aSFUJITA Tomonori 	if (mask > q->dma_pad_mask)
73427f8221aSFUJITA Tomonori 		q->dma_pad_mask = mask;
73527f8221aSFUJITA Tomonori }
73627f8221aSFUJITA Tomonori EXPORT_SYMBOL(blk_queue_update_dma_pad);
73727f8221aSFUJITA Tomonori 
73827f8221aSFUJITA Tomonori /**
73986db1e29SJens Axboe  * blk_queue_segment_boundary - set boundary rules for segment merging
74086db1e29SJens Axboe  * @q:  the request queue for the device
74186db1e29SJens Axboe  * @mask:  the memory boundary mask
74286db1e29SJens Axboe  **/
blk_queue_segment_boundary(struct request_queue * q,unsigned long mask)74386db1e29SJens Axboe void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
74486db1e29SJens Axboe {
74509cbfeafSKirill A. Shutemov 	if (mask < PAGE_SIZE - 1) {
74609cbfeafSKirill A. Shutemov 		mask = PAGE_SIZE - 1;
74724c03d47SHarvey Harrison 		printk(KERN_INFO "%s: set to minimum %lx\n",
74824c03d47SHarvey Harrison 		       __func__, mask);
74986db1e29SJens Axboe 	}
75086db1e29SJens Axboe 
751025146e1SMartin K. Petersen 	q->limits.seg_boundary_mask = mask;
75286db1e29SJens Axboe }
75386db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_segment_boundary);
75486db1e29SJens Axboe 
75586db1e29SJens Axboe /**
75603100aadSKeith Busch  * blk_queue_virt_boundary - set boundary rules for bio merging
75703100aadSKeith Busch  * @q:  the request queue for the device
75803100aadSKeith Busch  * @mask:  the memory boundary mask
75903100aadSKeith Busch  **/
blk_queue_virt_boundary(struct request_queue * q,unsigned long mask)76003100aadSKeith Busch void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
76103100aadSKeith Busch {
76203100aadSKeith Busch 	q->limits.virt_boundary_mask = mask;
76309324d32SChristoph Hellwig 
76409324d32SChristoph Hellwig 	/*
76509324d32SChristoph Hellwig 	 * Devices that require a virtual boundary do not support scatter/gather
76609324d32SChristoph Hellwig 	 * I/O natively, but instead require a descriptor list entry for each
76709324d32SChristoph Hellwig 	 * page (which might not be idential to the Linux PAGE_SIZE).  Because
76809324d32SChristoph Hellwig 	 * of that they are not limited by our notion of "segment size".
76909324d32SChristoph Hellwig 	 */
770c6c84f78SChristoph Hellwig 	if (mask)
77109324d32SChristoph Hellwig 		q->limits.max_segment_size = UINT_MAX;
77203100aadSKeith Busch }
77303100aadSKeith Busch EXPORT_SYMBOL(blk_queue_virt_boundary);
77403100aadSKeith Busch 
/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	/* Unconditional set; use blk_queue_update_dma_alignment() to only grow. */
	q->limits.dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);
79086db1e29SJens Axboe 
79186db1e29SJens Axboe /**
79286db1e29SJens Axboe  * blk_queue_update_dma_alignment - update dma length and memory alignment
79386db1e29SJens Axboe  * @q:     the request queue for the device
79486db1e29SJens Axboe  * @mask:  alignment mask
79586db1e29SJens Axboe  *
79686db1e29SJens Axboe  * description:
797710027a4SRandy Dunlap  *    update required memory and length alignment for direct dma transactions.
79886db1e29SJens Axboe  *    If the requested alignment is larger than the current alignment, then
79986db1e29SJens Axboe  *    the current queue alignment is updated to the new value, otherwise it
80086db1e29SJens Axboe  *    is left alone.  The design of this is to allow multiple objects
80186db1e29SJens Axboe  *    (driver, device, transport etc) to set their respective
80286db1e29SJens Axboe  *    alignments without having them interfere.
80386db1e29SJens Axboe  *
80486db1e29SJens Axboe  **/
blk_queue_update_dma_alignment(struct request_queue * q,int mask)80586db1e29SJens Axboe void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
80686db1e29SJens Axboe {
80786db1e29SJens Axboe 	BUG_ON(mask > PAGE_SIZE);
80886db1e29SJens Axboe 
809c964d62fSKeith Busch 	if (mask > q->limits.dma_alignment)
810c964d62fSKeith Busch 		q->limits.dma_alignment = mask;
81186db1e29SJens Axboe }
81286db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_update_dma_alignment);
81386db1e29SJens Axboe 
/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:		queue depth
 *
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	/* notify rq-qos policies that the effective queue depth changed */
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);
826d278d4a8SJens Axboe 
827d278d4a8SJens Axboe /**
82893e9d8e8SJens Axboe  * blk_queue_write_cache - configure queue's write cache
82993e9d8e8SJens Axboe  * @q:		the request queue for the device
83093e9d8e8SJens Axboe  * @wc:		write back cache on or off
83193e9d8e8SJens Axboe  * @fua:	device supports FUA writes, if true
83293e9d8e8SJens Axboe  *
83393e9d8e8SJens Axboe  * Tell the block layer about the write cache of @q.
83493e9d8e8SJens Axboe  */
blk_queue_write_cache(struct request_queue * q,bool wc,bool fua)83593e9d8e8SJens Axboe void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
83693e9d8e8SJens Axboe {
83743c9835bSChristoph Hellwig 	if (wc) {
83843c9835bSChristoph Hellwig 		blk_queue_flag_set(QUEUE_FLAG_HW_WC, q);
83957d74df9SChristoph Hellwig 		blk_queue_flag_set(QUEUE_FLAG_WC, q);
84043c9835bSChristoph Hellwig 	} else {
84143c9835bSChristoph Hellwig 		blk_queue_flag_clear(QUEUE_FLAG_HW_WC, q);
84257d74df9SChristoph Hellwig 		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
84343c9835bSChristoph Hellwig 	}
844c888a8f9SJens Axboe 	if (fua)
84557d74df9SChristoph Hellwig 		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
846c888a8f9SJens Axboe 	else
84757d74df9SChristoph Hellwig 		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
84887760e5eSJens Axboe 
849a7905043SJosef Bacik 	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
85093e9d8e8SJens Axboe }
85193e9d8e8SJens Axboe EXPORT_SYMBOL_GPL(blk_queue_write_cache);
85293e9d8e8SJens Axboe 
/**
 * blk_queue_required_elevator_features - Set a queue required elevator features
 * @q:		the request queue for the target device
 * @features:	Required elevator features OR'ed together
 *
 * Tell the block layer that for the device controlled through @q, the
 * only elevators that can be used are those that implement at least the set of
 * features specified by @features.
 */
void blk_queue_required_elevator_features(struct request_queue *q,
					  unsigned int features)
{
	q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);
86868c43f13SDamien Le Moal 
869671df189SLinus Torvalds /**
87045147fb5SYoshihiro Shimoda  * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
87145147fb5SYoshihiro Shimoda  * @q:		the request queue for the device
87245147fb5SYoshihiro Shimoda  * @dev:	the device pointer for dma
87345147fb5SYoshihiro Shimoda  *
87445147fb5SYoshihiro Shimoda  * Tell the block layer about merging the segments by dma map of @q.
87545147fb5SYoshihiro Shimoda  */
blk_queue_can_use_dma_map_merging(struct request_queue * q,struct device * dev)87645147fb5SYoshihiro Shimoda bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
87745147fb5SYoshihiro Shimoda 				       struct device *dev)
87845147fb5SYoshihiro Shimoda {
87945147fb5SYoshihiro Shimoda 	unsigned long boundary = dma_get_merge_boundary(dev);
88045147fb5SYoshihiro Shimoda 
88145147fb5SYoshihiro Shimoda 	if (!boundary)
88245147fb5SYoshihiro Shimoda 		return false;
88345147fb5SYoshihiro Shimoda 
88445147fb5SYoshihiro Shimoda 	/* No need to update max_segment_size. see blk_queue_virt_boundary() */
88545147fb5SYoshihiro Shimoda 	blk_queue_virt_boundary(q, boundary);
88645147fb5SYoshihiro Shimoda 
88745147fb5SYoshihiro Shimoda 	return true;
88845147fb5SYoshihiro Shimoda }
88945147fb5SYoshihiro Shimoda EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
89045147fb5SYoshihiro Shimoda 
disk_has_partitions(struct gendisk * disk)891e0c60d01SShin'ichiro Kawasaki static bool disk_has_partitions(struct gendisk *disk)
892e0c60d01SShin'ichiro Kawasaki {
893e0c60d01SShin'ichiro Kawasaki 	unsigned long idx;
894e0c60d01SShin'ichiro Kawasaki 	struct block_device *part;
895e0c60d01SShin'ichiro Kawasaki 	bool ret = false;
896e0c60d01SShin'ichiro Kawasaki 
897e0c60d01SShin'ichiro Kawasaki 	rcu_read_lock();
898e0c60d01SShin'ichiro Kawasaki 	xa_for_each(&disk->part_tbl, idx, part) {
899e0c60d01SShin'ichiro Kawasaki 		if (bdev_is_partition(part)) {
900e0c60d01SShin'ichiro Kawasaki 			ret = true;
901e0c60d01SShin'ichiro Kawasaki 			break;
902e0c60d01SShin'ichiro Kawasaki 		}
903e0c60d01SShin'ichiro Kawasaki 	}
904e0c60d01SShin'ichiro Kawasaki 	rcu_read_unlock();
905e0c60d01SShin'ichiro Kawasaki 
906e0c60d01SShin'ichiro Kawasaki 	return ret;
907e0c60d01SShin'ichiro Kawasaki }
908e0c60d01SShin'ichiro Kawasaki 
/**
 * disk_set_zoned - configure the zoned model for a disk
 * @disk:	the gendisk of the queue to configure
 * @model:	the zoned model to set
 *
 * Set the zoned model of @disk to @model.
 *
 * When @model is BLK_ZONED_HM (host managed), this should be called only
 * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
 * If @model specifies BLK_ZONED_HA (host aware), the effective model used
 * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
 * on the disk.
 */
void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
{
	struct request_queue *q = disk->queue;
	/* remember the previous model to detect a zoned -> none transition */
	unsigned int old_model = q->limits.zoned;

	switch (model) {
	case BLK_ZONED_HM:
		/*
		 * Host managed devices are supported only if
		 * CONFIG_BLK_DEV_ZONED is enabled.
		 */
		WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
		break;
	case BLK_ZONED_HA:
		/*
		 * Host aware devices can be treated either as regular block
		 * devices (similar to drive managed devices) or as zoned block
		 * devices to take advantage of the zone command set, similarly
		 * to host managed devices. We try the latter if there are no
		 * partitions and zoned block device support is enabled, else
		 * we do nothing special as far as the block layer is concerned.
		 */
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
		    disk_has_partitions(disk))
			model = BLK_ZONED_NONE;
		break;
	case BLK_ZONED_NONE:
	default:
		/* unknown models are silently demoted to "not zoned" */
		if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
			model = BLK_ZONED_NONE;
		break;
	}

	q->limits.zoned = model;
	if (model != BLK_ZONED_NONE) {
		/*
		 * Set the zone write granularity to the device logical block
		 * size by default. The driver can change this value if needed.
		 */
		blk_queue_zone_write_granularity(q,
						queue_logical_block_size(q));
	} else if (old_model != BLK_ZONED_NONE) {
		/* leaving zoned mode: drop all zone-related settings */
		disk_clear_zone_settings(disk);
	}
}
EXPORT_SYMBOL_GPL(disk_set_zoned);
96889098b07SChristoph Hellwig 
bdev_alignment_offset(struct block_device * bdev)96989098b07SChristoph Hellwig int bdev_alignment_offset(struct block_device *bdev)
97089098b07SChristoph Hellwig {
97189098b07SChristoph Hellwig 	struct request_queue *q = bdev_get_queue(bdev);
97289098b07SChristoph Hellwig 
97389098b07SChristoph Hellwig 	if (q->limits.misaligned)
97489098b07SChristoph Hellwig 		return -1;
97589098b07SChristoph Hellwig 	if (bdev_is_partition(bdev))
97689098b07SChristoph Hellwig 		return queue_limit_alignment_offset(&q->limits,
97789098b07SChristoph Hellwig 				bdev->bd_start_sect);
97889098b07SChristoph Hellwig 	return q->limits.alignment_offset;
97989098b07SChristoph Hellwig }
98089098b07SChristoph Hellwig EXPORT_SYMBOL_GPL(bdev_alignment_offset);
9815c4b4a5cSChristoph Hellwig 
bdev_discard_alignment(struct block_device * bdev)9825c4b4a5cSChristoph Hellwig unsigned int bdev_discard_alignment(struct block_device *bdev)
9835c4b4a5cSChristoph Hellwig {
9845c4b4a5cSChristoph Hellwig 	struct request_queue *q = bdev_get_queue(bdev);
9855c4b4a5cSChristoph Hellwig 
9865c4b4a5cSChristoph Hellwig 	if (bdev_is_partition(bdev))
9875c4b4a5cSChristoph Hellwig 		return queue_limit_discard_alignment(&q->limits,
9885c4b4a5cSChristoph Hellwig 				bdev->bd_start_sect);
9895c4b4a5cSChristoph Hellwig 	return q->limits.discard_alignment;
9905c4b4a5cSChristoph Hellwig }
9915c4b4a5cSChristoph Hellwig EXPORT_SYMBOL_GPL(bdev_discard_alignment);
992