xref: /openbmc/linux/block/blk-settings.c (revision 151f4e2b)
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"
#include "blk-wbt.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limits structure to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_discard_segments = 1;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->virt_boundary_mask = 0;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	lim->max_dev_sectors = 0;
	lim->chunk_sectors = 0;
	lim->max_write_same_sectors = 0;
	lim->max_write_zeroes_sectors = 0;
	lim->max_discard_sectors = 0;
	lim->max_hw_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->zoned = BLK_ZONED_NONE;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limits structure to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	blk_set_default_limits(lim);

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_same_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
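
/*
 * Usage sketch (illustrative): a stacking driver such as DM or MD
 * typically starts from the permissive stacking defaults and then lets
 * each component device tighten them via blk_stack_limits() or
 * disk_stack_limits().  The function name below is hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_init_stacked_limits(struct request_queue *q)
{
	/* start from "no restrictions"; components can only tighten these */
	blk_set_stacking_limits(&q->limits);
}
#endif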

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);

	blk_set_default_limits(&q->limits);
}
EXPORT_SYMBOL(blk_queue_make_request);
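
/*
 * Usage sketch (illustrative): a bio-based virtual driver handles each
 * bio directly instead of letting it queue up as a request.  The names
 * example_make_request() and example_setup_queue() are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static blk_qc_t example_make_request(struct request_queue *q, struct bio *bio)
{
	/* remap or process the bio here, then complete it */
	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static void example_setup_queue(struct request_queue *q)
{
	blk_queue_make_request(q, example_make_request);
}
#endif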

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @max_addr: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @max_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
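
/*
 * Usage sketch (illustrative): a driver whose controller can only DMA
 * below 4GB would register that limit so pages above it are bounced
 * through lower memory.  DMA_BIT_MASK() comes from <linux/dma-mapping.h>,
 * which this file does not include; the function name is hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_limit_dma_to_32bit(struct request_queue *q)
{
	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
}
#endif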

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests. It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	struct queue_limits *limits = &q->limits;
	unsigned int max_sectors;

	if ((max_hw_sectors << 9) < PAGE_SIZE) {
		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	limits->max_hw_sectors = max_hw_sectors;
	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
	limits->max_sectors = max_sectors;
	q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
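
/*
 * Usage sketch (illustrative): a controller limited to 1 MiB per
 * request advertises 2048 sectors of 512 bytes; max_sectors is then
 * derived from this hard limit (and capped at BLK_DEF_MAX_SECTORS).
 */
#if 0	/* illustrative sketch, not compiled */
static void example_cap_transfer_size(struct request_queue *q)
{
	blk_queue_max_hw_sectors(q, 2048);	/* 2048 * 512 B = 1 MiB */
}
#endif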

/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the chunk size
 *    must currently be a power-of-2 in sectors. Also note that the block
 *    layer must accept a page worth of data at any offset. So if the
 *    crossing of chunks is a hard limitation in the driver, it must still be
 *    prepared to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	BUG_ON(!is_power_of_2(chunk_sectors));
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_hw_discard_sectors = max_discard_sectors;
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
				      unsigned int max_write_same_sectors)
{
	q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q:  the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors)
{
	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
		unsigned short max_segments)
{
	q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_SIZE) {
		max_size = PAGE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
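
/*
 * Usage sketch (illustrative): a 512-byte emulation (512e) disk is
 * addressed in 512-byte logical blocks but writes 4096-byte physical
 * blocks; reporting both lets upper layers size and align I/O so the
 * device avoids read-modify-write cycles.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_setup_512e_disk(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);
}
#endif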

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
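
/*
 * Usage sketch (illustrative): a RAID5-style driver with a 64 KiB chunk
 * and four data disks per stripe could advertise the chunk as io_min
 * and the full stripe as io_opt.  The numbers are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_export_raid_geometry(struct request_queue *q)
{
	blk_queue_io_min(q, 64 * 1024);		/* one chunk */
	blk_queue_io_opt(q, 4 * 64 * 1024);	/* one full stripe */
}
#endif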

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_same_sectors = min(t->max_write_same_sectors,
					b->max_write_same_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					b->max_write_zeroes_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					    b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}

	if (b->chunk_sectors)
		t->chunk_sectors = min_not_zero(t->chunk_sectors,
						b->chunk_sectors);

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
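
/*
 * Worked example (illustrative numbers): assume the top limits so far
 * have physical_block_size = 4096, io_min = 4096, alignment_offset = 0,
 * and a bottom device contributes physical_block_size = 4096,
 * io_min = 4096 with an effective alignment of 512 at @start.  Then
 * top = 4096 + 0 = 4096 and bottom = 4096 + 512 = 4608;
 * max(4096, 4608) % min(4096, 4608) = 512 != 0, so the intervals do not
 * line up, the misaligned flag is set and -1 is returned.
 */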

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:	the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
		      sector_t start)
{
	struct request_queue *bq = bdev_get_queue(bdev);

	start += get_start_sect(bdev);

	return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;

	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}
}
EXPORT_SYMBOL(disk_stack_limits);
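
/*
 * Usage sketch (illustrative): a stacking driver resets to the stacking
 * defaults and then folds in every component device, accumulating the
 * most restrictive compatible limits.  The names and the byte offset of
 * the data area are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_stack_components(struct gendisk *disk,
				     struct block_device **components,
				     int nr, sector_t data_offset_bytes)
{
	int i;

	blk_set_stacking_limits(&disk->queue->limits);
	for (i = 0; i < nr; i++)
		disk_stack_limits(disk, components[i], data_offset_bytes);
}
#endif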

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size)
{
	if (queue_max_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_segments(q, queue_max_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
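
/*
 * Usage sketch (illustrative): an ATAPI-style driver allocates one
 * physically contiguous drain buffer and registers it together with a
 * predicate that says which requests need draining.  The predicate,
 * buffer size and function names are hypothetical; kmalloc() needs
 * <linux/slab.h>, which this file does not include.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_drain_needed(struct request *rq)
{
	return blk_rq_is_passthrough(rq);	/* e.g. packet commands */
}

static int example_setup_drain(struct request_queue *q)
{
	unsigned int size = 64 * 1024;
	void *buf = kmalloc(size, GFP_KERNEL);	/* physically contiguous */

	if (!buf)
		return -ENOMEM;
	return blk_queue_dma_drain(q, example_drain_needed, buf, size);
}
#endif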

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_SIZE - 1) {
		mask = PAGE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
	q->limits.virt_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
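
/*
 * Usage sketch (illustrative): several layers (transport, device,
 * driver) can each raise the DMA alignment; the queue keeps the largest
 * mask requested so far, so the calls below leave a 512-byte alignment
 * requirement in effect.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_tighten_dma_alignment(struct request_queue *q)
{
	blk_queue_update_dma_alignment(q, 3);	/* transport: 4-byte alignment */
	blk_queue_update_dma_alignment(q, 511);	/* device: 512-byte alignment, wins */
}
#endif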

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:		queue depth
 *
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	wbt_set_queue_depth(q, depth);
}
EXPORT_SYMBOL(blk_set_queue_depth);

/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:		the request queue for the device
 * @wc:		write back cache on or off
 * @fua:	device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	if (wc)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);

	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
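
/*
 * Usage sketch (illustrative): a driver that detects a volatile write
 * cache and FUA support on its device reports both, so the block layer
 * issues flush and FUA requests as needed.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_report_volatile_cache(struct request_queue *q)
{
	blk_queue_write_cache(q, true, true);
}
#endif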

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);