xref: /openbmc/linux/block/blk-settings.c (revision 29c37341)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Functions related to setting various queue properties from drivers
4  */
5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/init.h>
8 #include <linux/bio.h>
9 #include <linux/blkdev.h>
10 #include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
11 #include <linux/gcd.h>
12 #include <linux/lcm.h>
13 #include <linux/jiffies.h>
14 #include <linux/gfp.h>
15 #include <linux/dma-mapping.h>
16 
17 #include "blk.h"
18 #include "blk-wbt.h"
19 
20 unsigned long blk_max_low_pfn;
21 EXPORT_SYMBOL(blk_max_low_pfn);
22 
23 unsigned long blk_max_pfn;
24 
25 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
26 {
27 	q->rq_timeout = timeout;
28 }
29 EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
30 
31 /**
32  * blk_set_default_limits - reset limits to default values
33  * @lim:  the queue_limits structure to reset
34  *
35  * Description:
36  *   Returns a queue_limit struct to its default state.
37  */
38 void blk_set_default_limits(struct queue_limits *lim)
39 {
40 	lim->max_segments = BLK_MAX_SEGMENTS;
41 	lim->max_discard_segments = 1;
42 	lim->max_integrity_segments = 0;
43 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
44 	lim->virt_boundary_mask = 0;
45 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
46 	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
47 	lim->max_dev_sectors = 0;
48 	lim->chunk_sectors = 0;
49 	lim->max_write_same_sectors = 0;
50 	lim->max_write_zeroes_sectors = 0;
51 	lim->max_zone_append_sectors = 0;
52 	lim->max_discard_sectors = 0;
53 	lim->max_hw_discard_sectors = 0;
54 	lim->discard_granularity = 0;
55 	lim->discard_alignment = 0;
56 	lim->discard_misaligned = 0;
57 	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
58 	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
59 	lim->alignment_offset = 0;
60 	lim->io_opt = 0;
61 	lim->misaligned = 0;
62 	lim->zoned = BLK_ZONED_NONE;
63 }
64 EXPORT_SYMBOL(blk_set_default_limits);
65 
66 /**
67  * blk_set_stacking_limits - set default limits for stacking devices
68  * @lim:  the queue_limits structure to reset
69  *
70  * Description:
71  *   Returns a queue_limit struct to its default state. Should be used
72  *   by stacking drivers like DM that have no internal limits.
73  */
74 void blk_set_stacking_limits(struct queue_limits *lim)
75 {
76 	blk_set_default_limits(lim);
77 
78 	/* Inherit limits from component devices */
79 	lim->max_segments = USHRT_MAX;
80 	lim->max_discard_segments = USHRT_MAX;
81 	lim->max_hw_sectors = UINT_MAX;
82 	lim->max_segment_size = UINT_MAX;
83 	lim->max_sectors = UINT_MAX;
84 	lim->max_dev_sectors = UINT_MAX;
85 	lim->max_write_same_sectors = UINT_MAX;
86 	lim->max_write_zeroes_sectors = UINT_MAX;
87 	lim->max_zone_append_sectors = UINT_MAX;
88 }
89 EXPORT_SYMBOL(blk_set_stacking_limits);
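For illustration only (not part of this file): a stacking driver would typically start from these permissive defaults and then narrow them by folding in each component device with blk_stack_limits(); the member array and count below are hypothetical.

	struct queue_limits lim;
	int i;

	blk_set_stacking_limits(&lim);
	for (i = 0; i < nr_members; i++)	/* hypothetical member bdevs */
		blk_stack_limits(&lim, &bdev_get_queue(member_bdev[i])->limits,
				 get_start_sect(member_bdev[i]));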
90 
91 /**
92  * blk_queue_bounce_limit - set bounce buffer limit for queue
93  * @q: the request queue for the device
94  * @max_addr: the maximum address the device can handle
95  *
96  * Description:
97  *    Different hardware can have different requirements as to what pages
98  *    it can do I/O directly to. A low level driver can call
99  *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
100  *    buffers for doing I/O to pages residing above @max_addr.
101  **/
102 void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
103 {
104 	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
105 	int dma = 0;
106 
107 	q->bounce_gfp = GFP_NOIO;
108 #if BITS_PER_LONG == 64
109 	/*
110 	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
111 	 * some IOMMUs can handle everything, but I don't know of a
112 	 * way to test this here.
113 	 */
114 	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
115 		dma = 1;
116 	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
117 #else
118 	if (b_pfn < blk_max_low_pfn)
119 		dma = 1;
120 	q->limits.bounce_pfn = b_pfn;
121 #endif
122 	if (dma) {
123 		init_emergency_isa_pool();
124 		q->bounce_gfp = GFP_NOIO | GFP_DMA;
125 		q->limits.bounce_pfn = b_pfn;
126 	}
127 }
128 EXPORT_SYMBOL(blk_queue_bounce_limit);
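As a usage sketch (hypothetical driver, not from this file): a controller that can only address the low 4GB of memory directly could request bouncing of higher pages like this.

	/* pages above 4GB get bounced into lower memory for I/O */
	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));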
129 
130 /**
131  * blk_queue_max_hw_sectors - set max sectors for a request for this queue
132  * @q:  the request queue for the device
133  * @max_hw_sectors:  max hardware sectors in the usual 512b unit
134  *
135  * Description:
136  *    Enables a low level driver to set a hard upper limit,
137  *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
138  *    the device driver based upon the capabilities of the I/O
139  *    controller.
140  *
141  *    max_dev_sectors is a hard limit imposed by the storage device for
142  *    READ/WRITE requests. It is set by the disk driver.
143  *
144  *    max_sectors is a soft limit imposed by the block layer for
145  *    filesystem type requests.  This value can be overridden on a
146  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
147  *    The soft limit can not exceed max_hw_sectors.
148  **/
149 void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
150 {
151 	struct queue_limits *limits = &q->limits;
152 	unsigned int max_sectors;
153 
154 	if ((max_hw_sectors << 9) < PAGE_SIZE) {
155 		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
156 		printk(KERN_INFO "%s: set to minimum %d\n",
157 		       __func__, max_hw_sectors);
158 	}
159 
160 	limits->max_hw_sectors = max_hw_sectors;
161 	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
162 	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
163 	limits->max_sectors = max_sectors;
164 	q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
165 }
166 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
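A worked example of the 512-byte unit (hypothetical controller, illustrative values): hardware limited to 1 MiB per request and 128 scatter/gather entries would advertise its capabilities as below.

	/* 1 MiB / 512 bytes per sector = 2048 sectors */
	blk_queue_max_hw_sectors(q, 2048);
	blk_queue_max_segments(q, 128);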
167 
168 /**
169  * blk_queue_chunk_sectors - set size of the chunk for this queue
170  * @q:  the request queue for the device
171  * @chunk_sectors:  chunk sectors in the usual 512b unit
172  *
173  * Description:
174  *    If a driver doesn't want IOs to cross a given chunk size, it can set
175  *    this limit and prevent merging across chunks. Note that the chunk size
176  *    must currently be a power-of-2 in sectors. Also note that the block
177  *    layer must accept a page worth of data at any offset. So if the
178  *    crossing of chunks is a hard limitation in the driver, it must still be
179  *    prepared to split single page bios.
180  **/
181 void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
182 {
183 	BUG_ON(!is_power_of_2(chunk_sectors));
184 	q->limits.chunk_sectors = chunk_sectors;
185 }
186 EXPORT_SYMBOL(blk_queue_chunk_sectors);
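For instance (hypothetical zoned drive): a device with 256 MiB zones that must not be crossed by a single request would set the chunk size to the zone size in 512-byte sectors, which is a power of two as required.

	/* 256 MiB zone = (256 << 20) / 512 = 524288 sectors (2^19) */
	blk_queue_chunk_sectors(q, 524288);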
187 
188 /**
189  * blk_queue_max_discard_sectors - set max sectors for a single discard
190  * @q:  the request queue for the device
191  * @max_discard_sectors: maximum number of sectors to discard
192  **/
193 void blk_queue_max_discard_sectors(struct request_queue *q,
194 		unsigned int max_discard_sectors)
195 {
196 	q->limits.max_hw_discard_sectors = max_discard_sectors;
197 	q->limits.max_discard_sectors = max_discard_sectors;
198 }
199 EXPORT_SYMBOL(blk_queue_max_discard_sectors);
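A short sketch (hypothetical SSD, illustrative values): a device accepting discards of up to 2 GiB in 4 KiB granules could be configured as below; discard_granularity is set directly in the limits since this file provides no helper for it.

	/* 2 GiB expressed in 512-byte sectors */
	blk_queue_max_discard_sectors(q, (2U << 30) >> 9);
	q->limits.discard_granularity = 4096;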
200 
201 /**
202  * blk_queue_max_write_same_sectors - set max sectors for a single write same
203  * @q:  the request queue for the device
204  * @max_write_same_sectors: maximum number of sectors to write per command
205  **/
206 void blk_queue_max_write_same_sectors(struct request_queue *q,
207 				      unsigned int max_write_same_sectors)
208 {
209 	q->limits.max_write_same_sectors = max_write_same_sectors;
210 }
211 EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
212 
213 /**
214  * blk_queue_max_write_zeroes_sectors - set max sectors for a single
215  *                                      write zeroes
216  * @q:  the request queue for the device
217  * @max_write_zeroes_sectors: maximum number of sectors to write per command
218  **/
219 void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
220 		unsigned int max_write_zeroes_sectors)
221 {
222 	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
223 }
224 EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
225 
226 /**
227  * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
228  * @q:  the request queue for the device
229  * @max_zone_append_sectors: maximum number of sectors to write per command
230  **/
231 void blk_queue_max_zone_append_sectors(struct request_queue *q,
232 		unsigned int max_zone_append_sectors)
233 {
234 	unsigned int max_sectors;
235 
236 	if (WARN_ON(!blk_queue_is_zoned(q)))
237 		return;
238 
239 	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
240 	max_sectors = min(q->limits.chunk_sectors, max_sectors);
241 
242 	/*
243 	 * Warn about possible driver bugs resulting in a zero max_zone_append_sectors
244 	 * limit: a zero argument, an unset chunk_sectors limit (zone size), or an
245 	 * unset max_hw_sectors limit.
246 	 */
247 	WARN_ON(!max_sectors);
248 
249 	q->limits.max_zone_append_sectors = max_sectors;
250 }
251 EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
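As an illustrative sketch (hypothetical zoned driver): a device whose zone append writes may span up to a full zone can simply pass the zone size; the helper clamps the value against chunk_sectors and max_hw_sectors anyway.

	blk_queue_max_zone_append_sectors(q, q->limits.chunk_sectors);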
252 
253 /**
254  * blk_queue_max_segments - set max hw segments for a request for this queue
255  * @q:  the request queue for the device
256  * @max_segments:  max number of segments
257  *
258  * Description:
259  *    Enables a low level driver to set an upper limit on the number of
260  *    hw data segments in a request.
261  **/
262 void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
263 {
264 	if (!max_segments) {
265 		max_segments = 1;
266 		printk(KERN_INFO "%s: set to minimum %d\n",
267 		       __func__, max_segments);
268 	}
269 
270 	q->limits.max_segments = max_segments;
271 }
272 EXPORT_SYMBOL(blk_queue_max_segments);
273 
274 /**
275  * blk_queue_max_discard_segments - set max segments for discard requests
276  * @q:  the request queue for the device
277  * @max_segments:  max number of segments
278  *
279  * Description:
280  *    Enables a low level driver to set an upper limit on the number of
281  *    segments in a discard request.
282  **/
283 void blk_queue_max_discard_segments(struct request_queue *q,
284 		unsigned short max_segments)
285 {
286 	q->limits.max_discard_segments = max_segments;
287 }
288 EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
289 
290 /**
291  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
292  * @q:  the request queue for the device
293  * @max_size:  max size of segment in bytes
294  *
295  * Description:
296  *    Enables a low level driver to set an upper limit on the size of a
297  *    coalesced segment
298  **/
299 void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
300 {
301 	if (max_size < PAGE_SIZE) {
302 		max_size = PAGE_SIZE;
303 		printk(KERN_INFO "%s: set to minimum %d\n",
304 		       __func__, max_size);
305 	}
306 
307 	/* see blk_queue_virt_boundary() for the explanation */
308 	WARN_ON_ONCE(q->limits.virt_boundary_mask);
309 
310 	q->limits.max_segment_size = max_size;
311 }
312 EXPORT_SYMBOL(blk_queue_max_segment_size);
313 
314 /**
315  * blk_queue_logical_block_size - set logical block size for the queue
316  * @q:  the request queue for the device
317  * @size:  the logical block size, in bytes
318  *
319  * Description:
320  *   This should be set to the lowest possible block size that the
321  *   storage device can address.  The default of 512 covers most
322  *   hardware.
323  **/
324 void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
325 {
326 	q->limits.logical_block_size = size;
327 
328 	if (q->limits.physical_block_size < size)
329 		q->limits.physical_block_size = size;
330 
331 	if (q->limits.io_min < q->limits.physical_block_size)
332 		q->limits.io_min = q->limits.physical_block_size;
333 }
334 EXPORT_SYMBOL(blk_queue_logical_block_size);
335 
336 /**
337  * blk_queue_physical_block_size - set physical block size for the queue
338  * @q:  the request queue for the device
339  * @size:  the physical block size, in bytes
340  *
341  * Description:
342  *   This should be set to the lowest possible sector size that the
343  *   hardware can operate on without reverting to read-modify-write
344  *   operations.
345  */
346 void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
347 {
348 	q->limits.physical_block_size = size;
349 
350 	if (q->limits.physical_block_size < q->limits.logical_block_size)
351 		q->limits.physical_block_size = q->limits.logical_block_size;
352 
353 	if (q->limits.io_min < q->limits.physical_block_size)
354 		q->limits.io_min = q->limits.physical_block_size;
355 }
356 EXPORT_SYMBOL(blk_queue_physical_block_size);
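A worked example (hypothetical "512e" drive): a disk that addresses 512-byte logical sectors but stores data in 4096-byte physical sectors reports both sizes, and the helpers above raise physical_block_size and io_min accordingly.

	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);	/* io_min becomes 4096 too */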
357 
358 /**
359  * blk_queue_alignment_offset - set physical block alignment offset
360  * @q:	the request queue for the device
361  * @offset: alignment offset in bytes
362  *
363  * Description:
364  *   Some devices are naturally misaligned to compensate for things like
365  *   the legacy DOS partition table 63-sector offset.  Low-level drivers
366  *   should call this function for devices whose first sector is not
367  *   naturally aligned.
368  */
369 void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
370 {
371 	q->limits.alignment_offset =
372 		offset & (q->limits.physical_block_size - 1);
373 	q->limits.misaligned = 0;
374 }
375 EXPORT_SYMBOL(blk_queue_alignment_offset);
376 
377 /**
378  * blk_limits_io_min - set minimum request size for a device
379  * @limits: the queue limits
380  * @min:  smallest I/O size in bytes
381  *
382  * Description:
383  *   Some devices have an internal block size bigger than the reported
384  *   hardware sector size.  This function can be used to signal the
385  *   smallest I/O the device can perform without incurring a performance
386  *   penalty.
387  */
388 void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
389 {
390 	limits->io_min = min;
391 
392 	if (limits->io_min < limits->logical_block_size)
393 		limits->io_min = limits->logical_block_size;
394 
395 	if (limits->io_min < limits->physical_block_size)
396 		limits->io_min = limits->physical_block_size;
397 }
398 EXPORT_SYMBOL(blk_limits_io_min);
399 
400 /**
401  * blk_queue_io_min - set minimum request size for the queue
402  * @q:	the request queue for the device
403  * @min:  smallest I/O size in bytes
404  *
405  * Description:
406  *   Storage devices may report a granularity or preferred minimum I/O
407  *   size which is the smallest request the device can perform without
408  *   incurring a performance penalty.  For disk drives this is often the
409  *   physical block size.  For RAID arrays it is often the stripe chunk
410  *   size.  A properly aligned multiple of minimum_io_size is the
411  *   preferred request size for workloads where a high number of I/O
412  *   operations is desired.
413  */
414 void blk_queue_io_min(struct request_queue *q, unsigned int min)
415 {
416 	blk_limits_io_min(&q->limits, min);
417 }
418 EXPORT_SYMBOL(blk_queue_io_min);
419 
420 /**
421  * blk_limits_io_opt - set optimal request size for a device
422  * @limits: the queue limits
423  * @opt:  optimal request size in bytes
424  *
425  * Description:
426  *   Storage devices may report an optimal I/O size, which is the
427  *   device's preferred unit for sustained I/O.  This is rarely reported
428  *   for disk drives.  For RAID arrays it is usually the stripe width or
429  *   the internal track size.  A properly aligned multiple of
430  *   optimal_io_size is the preferred request size for workloads where
431  *   sustained throughput is desired.
432  */
433 void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
434 {
435 	limits->io_opt = opt;
436 }
437 EXPORT_SYMBOL(blk_limits_io_opt);
438 
439 /**
440  * blk_queue_io_opt - set optimal request size for the queue
441  * @q:	the request queue for the device
442  * @opt:  optimal request size in bytes
443  *
444  * Description:
445  *   Storage devices may report an optimal I/O size, which is the
446  *   device's preferred unit for sustained I/O.  This is rarely reported
447  *   for disk drives.  For RAID arrays it is usually the stripe width or
448  *   the internal track size.  A properly aligned multiple of
449  *   optimal_io_size is the preferred request size for workloads where
450  *   sustained throughput is desired.
451  */
452 void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
453 {
454 	blk_limits_io_opt(&q->limits, opt);
455 }
456 EXPORT_SYMBOL(blk_queue_io_opt);
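For example (hypothetical RAID5 set, illustrative numbers): an array of four drives with a 64 KiB chunk has three data chunks per stripe, so io_min would be one chunk and io_opt the full stripe width.

	blk_queue_io_min(q, 64 * 1024);		/* one chunk */
	blk_queue_io_opt(q, 3 * 64 * 1024);	/* full stripe: three data chunks */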
457 
458 /**
459  * blk_stack_limits - adjust queue_limits for stacked devices
460  * @t:	the stacking driver limits (top device)
461  * @b:  the underlying queue limits (bottom, component device)
462  * @start:  first data sector within component device
463  *
464  * Description:
465  *    This function is used by stacking drivers like MD and DM to ensure
466  *    that all component devices have compatible block sizes and
467  *    alignments.  The stacking driver must provide a queue_limits
468  *    struct (top) and then iteratively call the stacking function for
469  *    all component (bottom) devices.  The stacking function will
470  *    attempt to combine the values and ensure proper alignment.
471  *
472  *    Returns 0 if the top and bottom queue_limits are compatible.  The
473  *    top device's block sizes and alignment offsets may be adjusted to
474  *    ensure alignment with the bottom device. If no compatible sizes
475  *    and alignments exist, -1 is returned and the resulting top
476  *    queue_limits will have the misaligned flag set to indicate that
477  *    the alignment_offset is undefined.
478  */
479 int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
480 		     sector_t start)
481 {
482 	unsigned int top, bottom, alignment, ret = 0;
483 
484 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
485 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
486 	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
487 	t->max_write_same_sectors = min(t->max_write_same_sectors,
488 					b->max_write_same_sectors);
489 	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
490 					b->max_write_zeroes_sectors);
491 	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
492 					b->max_zone_append_sectors);
493 	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
494 
495 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
496 					    b->seg_boundary_mask);
497 	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
498 					    b->virt_boundary_mask);
499 
500 	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
501 	t->max_discard_segments = min_not_zero(t->max_discard_segments,
502 					       b->max_discard_segments);
503 	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
504 						 b->max_integrity_segments);
505 
506 	t->max_segment_size = min_not_zero(t->max_segment_size,
507 					   b->max_segment_size);
508 
509 	t->misaligned |= b->misaligned;
510 
511 	alignment = queue_limit_alignment_offset(b, start);
512 
513 	/* Bottom device has different alignment.  Check that it is
514 	 * compatible with the current top alignment.
515 	 */
516 	if (t->alignment_offset != alignment) {
517 
518 		top = max(t->physical_block_size, t->io_min)
519 			+ t->alignment_offset;
520 		bottom = max(b->physical_block_size, b->io_min) + alignment;
521 
522 		/* Verify that top and bottom intervals line up */
523 		if (max(top, bottom) % min(top, bottom)) {
524 			t->misaligned = 1;
525 			ret = -1;
526 		}
527 	}
528 
529 	t->logical_block_size = max(t->logical_block_size,
530 				    b->logical_block_size);
531 
532 	t->physical_block_size = max(t->physical_block_size,
533 				     b->physical_block_size);
534 
535 	t->io_min = max(t->io_min, b->io_min);
536 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
537 
538 	/* Physical block size a multiple of the logical block size? */
539 	if (t->physical_block_size & (t->logical_block_size - 1)) {
540 		t->physical_block_size = t->logical_block_size;
541 		t->misaligned = 1;
542 		ret = -1;
543 	}
544 
545 	/* Minimum I/O a multiple of the physical block size? */
546 	if (t->io_min & (t->physical_block_size - 1)) {
547 		t->io_min = t->physical_block_size;
548 		t->misaligned = 1;
549 		ret = -1;
550 	}
551 
552 	/* Optimal I/O a multiple of the physical block size? */
553 	if (t->io_opt & (t->physical_block_size - 1)) {
554 		t->io_opt = 0;
555 		t->misaligned = 1;
556 		ret = -1;
557 	}
558 
559 	t->raid_partial_stripes_expensive =
560 		max(t->raid_partial_stripes_expensive,
561 		    b->raid_partial_stripes_expensive);
562 
563 	/* Find lowest common alignment_offset */
564 	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
565 		% max(t->physical_block_size, t->io_min);
566 
567 	/* Verify that new alignment_offset is on a logical block boundary */
568 	if (t->alignment_offset & (t->logical_block_size - 1)) {
569 		t->misaligned = 1;
570 		ret = -1;
571 	}
572 
573 	/* Discard alignment and granularity */
574 	if (b->discard_granularity) {
575 		alignment = queue_limit_discard_alignment(b, start);
576 
577 		if (t->discard_granularity != 0 &&
578 		    t->discard_alignment != alignment) {
579 			top = t->discard_granularity + t->discard_alignment;
580 			bottom = b->discard_granularity + alignment;
581 
582 			/* Verify that top and bottom intervals line up */
583 			if ((max(top, bottom) % min(top, bottom)) != 0)
584 				t->discard_misaligned = 1;
585 		}
586 
587 		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
588 						      b->max_discard_sectors);
589 		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
590 							 b->max_hw_discard_sectors);
591 		t->discard_granularity = max(t->discard_granularity,
592 					     b->discard_granularity);
593 		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
594 			t->discard_granularity;
595 	}
596 
597 	if (b->chunk_sectors)
598 		t->chunk_sectors = min_not_zero(t->chunk_sectors,
599 						b->chunk_sectors);
600 
601 	t->zoned = max(t->zoned, b->zoned);
602 	return ret;
603 }
604 EXPORT_SYMBOL(blk_stack_limits);
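A worked example of the alignment check above (illustrative numbers): suppose the top device so far has physical_block_size = io_min = 4096 and alignment_offset = 0, while the bottom device also uses 4096-byte physical blocks but queue_limit_alignment_offset() reports 3584 bytes for it at @start. Then top = 4096 + 0 = 4096 and bottom = 4096 + 3584 = 7680; since 7680 % 4096 != 0 the two intervals never line up, so the stack is flagged misaligned and -1 is returned.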
605 
606 /**
607  * disk_stack_limits - adjust queue limits for stacked drivers
608  * @disk:  MD/DM gendisk (top)
609  * @bdev:  the underlying block device (bottom)
610  * @offset:  offset to beginning of data within component device
611  *
612  * Description:
613  *    Merges the limits for a top level gendisk and a bottom level
614  *    block_device.
615  */
616 void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
617 		       sector_t offset)
618 {
619 	struct request_queue *t = disk->queue;
620 
621 	if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
622 			get_start_sect(bdev) + (offset >> 9)) < 0) {
623 		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
624 
625 		disk_name(disk, 0, top);
626 		bdevname(bdev, bottom);
627 
628 		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
629 		       top, bottom);
630 	}
631 
632 	t->backing_dev_info->io_pages =
633 		t->limits.max_sectors >> (PAGE_SHIFT - 9);
634 }
635 EXPORT_SYMBOL(disk_stack_limits);
636 
637 /**
638  * blk_queue_update_dma_pad - update pad mask
639  * @q:     the request queue for the device
640  * @mask:  pad mask
641  *
642  * Update dma pad mask.
643  *
644  * Appending a pad buffer to a request modifies the last entry of the
645  * scatter list so that it includes the pad buffer.
646  **/
647 void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
648 {
649 	if (mask > q->dma_pad_mask)
650 		q->dma_pad_mask = mask;
651 }
652 EXPORT_SYMBOL(blk_queue_update_dma_pad);
653 
654 /**
655  * blk_queue_segment_boundary - set boundary rules for segment merging
656  * @q:  the request queue for the device
657  * @mask:  the memory boundary mask
658  **/
659 void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
660 {
661 	if (mask < PAGE_SIZE - 1) {
662 		mask = PAGE_SIZE - 1;
663 		printk(KERN_INFO "%s: set to minimum %lx\n",
664 		       __func__, mask);
665 	}
666 
667 	q->limits.seg_boundary_mask = mask;
668 }
669 EXPORT_SYMBOL(blk_queue_segment_boundary);
670 
671 /**
672  * blk_queue_virt_boundary - set boundary rules for bio merging
673  * @q:  the request queue for the device
674  * @mask:  the memory boundary mask
675  **/
676 void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
677 {
678 	q->limits.virt_boundary_mask = mask;
679 
680 	/*
681 	 * Devices that require a virtual boundary do not support scatter/gather
682 	 * I/O natively, but instead require a descriptor list entry for each
683  * page (which might not be identical to the Linux PAGE_SIZE).  Because
684 	 * of that they are not limited by our notion of "segment size".
685 	 */
686 	if (mask)
687 		q->limits.max_segment_size = UINT_MAX;
688 }
689 EXPORT_SYMBOL(blk_queue_virt_boundary);
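A usage sketch (hypothetical NVMe-like controller): hardware whose per-descriptor entries must not straddle 4 KiB address boundaries would pass a mask of 4095.

	/* no bio vector may cross a 4 KiB address boundary */
	blk_queue_virt_boundary(q, 4096 - 1);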
690 
691 /**
692  * blk_queue_dma_alignment - set dma length and memory alignment
693  * @q:     the request queue for the device
694  * @mask:  alignment mask
695  *
696  * Description:
697  *    Set required memory and length alignment for direct dma transactions.
698  *    This is used when building direct io requests for the queue.
699  *
700  **/
701 void blk_queue_dma_alignment(struct request_queue *q, int mask)
702 {
703 	q->dma_alignment = mask;
704 }
705 EXPORT_SYMBOL(blk_queue_dma_alignment);
706 
707 /**
708  * blk_queue_update_dma_alignment - update dma length and memory alignment
709  * @q:     the request queue for the device
710  * @mask:  alignment mask
711  *
712  * Description:
713  *    Update required memory and length alignment for direct dma transactions.
714  *    If the requested alignment is larger than the current alignment, then
715  *    the current queue alignment is updated to the new value, otherwise it
716  *    is left alone.  The design of this is to allow multiple objects
717  *    (driver, device, transport etc) to set their respective
718  *    alignments without having them interfere.
719  *
720  **/
721 void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
722 {
723 	BUG_ON(mask > PAGE_SIZE);
724 
725 	if (mask > q->dma_alignment)
726 		q->dma_alignment = mask;
727 }
728 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
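For instance (hypothetical transport): a layer that needs buffer addresses and lengths to be multiples of 4 bytes, without lowering an alignment another layer may already have raised, would do:

	blk_queue_update_dma_alignment(q, 4 - 1);	/* mask of 0x3 */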
729 
730 /**
731  * blk_set_queue_depth - tell the block layer about the device queue depth
732  * @q:		the request queue for the device
733  * @depth:		queue depth
734  *
735  */
736 void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
737 {
738 	q->queue_depth = depth;
739 	rq_qos_queue_depth_changed(q);
740 }
741 EXPORT_SYMBOL(blk_set_queue_depth);
742 
743 /**
744  * blk_queue_write_cache - configure queue's write cache
745  * @q:		the request queue for the device
746  * @wc:		write back cache on or off
747  * @fua:	device supports FUA writes, if true
748  *
749  * Tell the block layer about the write cache of @q.
750  */
751 void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
752 {
753 	if (wc)
754 		blk_queue_flag_set(QUEUE_FLAG_WC, q);
755 	else
756 		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
757 	if (fua)
758 		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
759 	else
760 		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
761 
762 	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
763 }
764 EXPORT_SYMBOL_GPL(blk_queue_write_cache);
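As a hedged example (hypothetical device): a driver for hardware with a volatile write back cache that also supports FUA writes would report both, so the block layer honours REQ_PREFLUSH and REQ_FUA for the queue.

	blk_queue_write_cache(q, true, true);	/* volatile cache + FUA */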
765 
766 /**
767  * blk_queue_required_elevator_features - Set a queue's required elevator features
768  * @q:		the request queue for the target device
769  * @features:	Required elevator features OR'ed together
770  *
771  * Tell the block layer that for the device controlled through @q, the only
772  * elevators that can be used are those that implement at least the set of
773  * features specified by @features.
774  */
775 void blk_queue_required_elevator_features(struct request_queue *q,
776 					  unsigned int features)
777 {
778 	q->required_elevator_features = features;
779 }
780 EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);
781 
782 /**
783  * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
784  * @q:		the request queue for the device
785  * @dev:	the device pointer for dma
786  *
787  * Tell the block layer whether the segments of @q can be merged using the DMA merge boundary of @dev.
788  */
789 bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
790 				       struct device *dev)
791 {
792 	unsigned long boundary = dma_get_merge_boundary(dev);
793 
794 	if (!boundary)
795 		return false;
796 
797 	/* No need to update max_segment_size. see blk_queue_virt_boundary() */
798 	blk_queue_virt_boundary(q, boundary);
799 
800 	return true;
801 }
802 EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
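A probe-time sketch (hypothetical PCI driver, names are assumptions): when the DMA layer reports an IOMMU merge boundary, the queue can be switched to boundary-based merging; otherwise nothing changes.

	if (blk_queue_can_use_dma_map_merging(q, &pdev->dev))
		dev_info(&pdev->dev, "IOMMU segment merging enabled\n");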
803 
804 static int __init blk_settings_init(void)
805 {
806 	blk_max_low_pfn = max_low_pfn - 1;
807 	blk_max_pfn = max_pfn - 1;
808 	return 0;
809 }
810 subsys_initcall(blk_settings_init);
811