xref: /openbmc/linux/block/blk-settings.c (revision 1c2dd16a)
1 /*
2  * Functions related to setting various queue properties from drivers
3  */
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/init.h>
7 #include <linux/bio.h>
8 #include <linux/blkdev.h>
9 #include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
10 #include <linux/gcd.h>
11 #include <linux/lcm.h>
12 #include <linux/jiffies.h>
13 #include <linux/gfp.h>
14 
15 #include "blk.h"
16 #include "blk-wbt.h"
17 
18 unsigned long blk_max_low_pfn;
19 EXPORT_SYMBOL(blk_max_low_pfn);
20 
21 unsigned long blk_max_pfn;
22 
23 /**
24  * blk_queue_prep_rq - set a prepare_request function for queue
25  * @q:		queue
26  * @pfn:	prepare_request function
27  *
28  * It's possible for a queue to register a prepare_request callback which
29  * is invoked before the request is handed to the request_fn. The goal of
30  * the function is to prepare a request for I/O; it can be used, for
31  * instance, to build a cdb from the request data.
32  *
33  */
34 void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
35 {
36 	q->prep_rq_fn = pfn;
37 }
38 EXPORT_SYMBOL(blk_queue_prep_rq);
39 
40 /**
41  * blk_queue_unprep_rq - set an unprepare_request function for queue
42  * @q:		queue
43  * @ufn:	unprepare_request function
44  *
45  * It's possible for a queue to register an unprepare_request callback
46  * which is invoked before the request is finally completed. The goal
47  * of the function is to deallocate any data that was allocated in the
48  * prepare_request callback.
49  *
50  */
51 void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
52 {
53 	q->unprep_rq_fn = ufn;
54 }
55 EXPORT_SYMBOL(blk_queue_unprep_rq);
56 
57 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
58 {
59 	q->softirq_done_fn = fn;
60 }
61 EXPORT_SYMBOL(blk_queue_softirq_done);
62 
63 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
64 {
65 	q->rq_timeout = timeout;
66 }
67 EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
68 
69 void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
70 {
71 	q->rq_timed_out_fn = fn;
72 }
73 EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
74 
75 void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
76 {
77 	q->lld_busy_fn = fn;
78 }
79 EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
80 
81 /**
82  * blk_set_default_limits - reset limits to default values
83  * @lim:  the queue_limits structure to reset
84  *
85  * Description:
86  *   Returns a queue_limits struct to its default state.
87  */
88 void blk_set_default_limits(struct queue_limits *lim)
89 {
90 	lim->max_segments = BLK_MAX_SEGMENTS;
91 	lim->max_discard_segments = 1;
92 	lim->max_integrity_segments = 0;
93 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
94 	lim->virt_boundary_mask = 0;
95 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
96 	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
97 	lim->max_dev_sectors = 0;
98 	lim->chunk_sectors = 0;
99 	lim->max_write_same_sectors = 0;
100 	lim->max_write_zeroes_sectors = 0;
101 	lim->max_discard_sectors = 0;
102 	lim->max_hw_discard_sectors = 0;
103 	lim->discard_granularity = 0;
104 	lim->discard_alignment = 0;
105 	lim->discard_misaligned = 0;
106 	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
107 	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
108 	lim->alignment_offset = 0;
109 	lim->io_opt = 0;
110 	lim->misaligned = 0;
111 	lim->cluster = 1;
112 	lim->zoned = BLK_ZONED_NONE;
113 }
114 EXPORT_SYMBOL(blk_set_default_limits);
115 
116 /**
117  * blk_set_stacking_limits - set default limits for stacking devices
118  * @lim:  the queue_limits structure to reset
119  *
120  * Description:
121  *   Returns a queue_limits struct to its default state. Should be used
122  *   by stacking drivers like DM that have no internal limits.
123  */
124 void blk_set_stacking_limits(struct queue_limits *lim)
125 {
126 	blk_set_default_limits(lim);
127 
128 	/* Inherit limits from component devices */
129 	lim->max_segments = USHRT_MAX;
130 	lim->max_discard_segments = 1;
131 	lim->max_hw_sectors = UINT_MAX;
132 	lim->max_segment_size = UINT_MAX;
133 	lim->max_sectors = UINT_MAX;
134 	lim->max_dev_sectors = UINT_MAX;
135 	lim->max_write_same_sectors = UINT_MAX;
136 	lim->max_write_zeroes_sectors = UINT_MAX;
137 }
138 EXPORT_SYMBOL(blk_set_stacking_limits);
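
/*
 * Illustrative sketch, not part of the upstream file: a stacking driver
 * (DM/MD style) starts from the permissive stacking defaults rather than
 * the conservative blk_set_default_limits() values, so the component
 * devices stacked in later (see disk_stack_limits()/bdev_stack_limits()
 * below) can only tighten the limits.  The function name is hypothetical.
 */
static void example_init_stacking_limits(struct request_queue *q)
{
	/* Everything starts at "no restriction" instead of safe defaults */
	blk_set_stacking_limits(&q->limits);
}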
139 
140 /**
141  * blk_queue_make_request - define an alternate make_request function for a device
142  * @q:  the request queue for the device to be affected
143  * @mfn: the alternate make_request function
144  *
145  * Description:
146  *    The normal way for &struct bios to be passed to a device
147  *    driver is for them to be collected into requests on a request
148  *    queue, and then to allow the device driver to select requests
149  *    off that queue when it is ready.  This works well for many block
150  *    devices. However some block devices (typically virtual devices
151  *    such as md or lvm) do not benefit from the processing on the
152  *    request queue, and are served best by having the requests passed
153  *    directly to them.  This can be achieved by providing a function
154  *    to blk_queue_make_request().
155  *
156  * Caveat:
157  *    The driver that does this *must* be able to deal appropriately
158  *    with buffers in "highmemory". This can be accomplished by either calling
159  *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
160  *    blk_queue_bounce() to create a buffer in normal memory.
161  **/
162 void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
163 {
164 	/*
165 	 * set defaults
166 	 */
167 	q->nr_requests = BLKDEV_MAX_RQ;
168 
169 	q->make_request_fn = mfn;
170 	blk_queue_dma_alignment(q, 511);
171 	blk_queue_congestion_threshold(q);
172 	q->nr_batching = BLK_BATCH_REQ;
173 
174 	blk_set_default_limits(&q->limits);
175 
176 	/*
177 	 * by default assume old behaviour and bounce for any highmem page
178 	 */
179 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
180 }
181 EXPORT_SYMBOL(blk_queue_make_request);
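
/*
 * Illustrative sketch, not part of the upstream file: the minimal queue
 * setup of a bio-based (make_request) driver.  Both function names are
 * hypothetical; a real virtual driver would remap or service the bio
 * instead of completing it immediately.
 */
static blk_qc_t example_make_request(struct request_queue *q, struct bio *bio)
{
	/* A real md/dm-style driver would process @bio here */
	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static struct request_queue *example_alloc_bio_queue(void)
{
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

	if (!q)
		return NULL;

	/* Bypass the request queue and take bios directly */
	blk_queue_make_request(q, example_make_request);
	return q;
}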
182 
183 /**
184  * blk_queue_bounce_limit - set bounce buffer limit for queue
185  * @q: the request queue for the device
186  * @max_addr: the maximum address the device can handle
187  *
188  * Description:
189  *    Different hardware can have different requirements as to what pages
190  *    it can do I/O directly to. A low level driver can call
191  *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
192  *    buffers for doing I/O to pages residing above @max_addr.
193  **/
194 void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
195 {
196 	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
197 	int dma = 0;
198 
199 	q->bounce_gfp = GFP_NOIO;
200 #if BITS_PER_LONG == 64
201 	/*
202 	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
203 	 * some IOMMUs can handle everything, but I don't know of a
204 	 * way to test this here.
205 	 */
206 	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
207 		dma = 1;
208 	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
209 #else
210 	if (b_pfn < blk_max_low_pfn)
211 		dma = 1;
212 	q->limits.bounce_pfn = b_pfn;
213 #endif
214 	if (dma) {
215 		init_emergency_isa_pool();
216 		q->bounce_gfp = GFP_NOIO | GFP_DMA;
217 		q->limits.bounce_pfn = b_pfn;
218 	}
219 }
220 EXPORT_SYMBOL(blk_queue_bounce_limit);
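
/*
 * Illustrative sketch, not part of the upstream file: a driver whose
 * hardware can only DMA below 4GB restricts the bounce limit, while a
 * fully capable device opts out of bouncing entirely.  DMA_BIT_MASK()
 * comes from <linux/dma-mapping.h>, which this file does not include.
 */
static void example_set_bounce_limit(struct request_queue *q, bool dma_is_32bit)
{
	if (dma_is_32bit)
		/* Pages above 4GB get bounced into low memory first */
		blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
	else
		/* Device can reach any address, never bounce */
		blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
}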
221 
222 /**
223  * blk_queue_max_hw_sectors - set max sectors for a request for this queue
224  * @q:  the request queue for the device
225  * @max_hw_sectors:  max hardware sectors in the usual 512b unit
226  *
227  * Description:
228  *    Enables a low level driver to set a hard upper limit,
229  *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
230  *    the device driver based upon the capabilities of the I/O
231  *    controller.
232  *
233  *    max_dev_sectors is a hard limit imposed by the storage device for
234  *    READ/WRITE requests. It is set by the disk driver.
235  *
236  *    max_sectors is a soft limit imposed by the block layer for
237  *    filesystem type requests.  This value can be overridden on a
238  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
239  *    The soft limit cannot exceed max_hw_sectors.
240  **/
241 void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
242 {
243 	struct queue_limits *limits = &q->limits;
244 	unsigned int max_sectors;
245 
246 	if ((max_hw_sectors << 9) < PAGE_SIZE) {
247 		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
248 		printk(KERN_INFO "%s: set to minimum %d\n",
249 		       __func__, max_hw_sectors);
250 	}
251 
252 	limits->max_hw_sectors = max_hw_sectors;
253 	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
254 	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
255 	limits->max_sectors = max_sectors;
256 	q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
257 }
258 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
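
/*
 * Illustrative sketch, not part of the upstream file: translating a
 * controller's maximum transfer size and scatter/gather table depth
 * (both hypothetical values read from the hardware) into the limits the
 * block layer expects, in 512-byte sectors and segments respectively.
 */
static void example_set_transfer_limits(struct request_queue *q,
					unsigned int max_xfer_bytes,
					unsigned short max_sg_entries)
{
	/* Hard limit from the controller, converted to 512b sectors */
	blk_queue_max_hw_sectors(q, max_xfer_bytes >> 9);

	/* One request may not use more S/G entries than the hardware has */
	blk_queue_max_segments(q, max_sg_entries);
}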
259 
260 /**
261  * blk_queue_chunk_sectors - set size of the chunk for this queue
262  * @q:  the request queue for the device
263  * @chunk_sectors:  chunk sectors in the usual 512b unit
264  *
265  * Description:
266  *    If a driver doesn't want IOs to cross a given chunk size, it can set
267  *    this limit and prevent merging across chunks. Note that the chunk size
268  *    must currently be a power-of-2 in sectors. Also note that the block
269  *    layer must accept a page worth of data at any offset. So if the
270  *    crossing of chunks is a hard limitation in the driver, it must still be
271  *    prepared to split single page bios.
272  **/
273 void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
274 {
275 	BUG_ON(!is_power_of_2(chunk_sectors));
276 	q->limits.chunk_sectors = chunk_sectors;
277 }
278 EXPORT_SYMBOL(blk_queue_chunk_sectors);
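
/*
 * Illustrative sketch, not part of the upstream file: a device that
 * cannot service I/O crossing a 128MB internal boundary advertises that
 * boundary as the chunk size.  The value must be a power of two in
 * 512-byte sectors, as enforced by the BUG_ON() above.
 */
static void example_set_chunk_boundary(struct request_queue *q)
{
	/* 128MB boundary = 262144 sectors of 512 bytes, a power of two */
	blk_queue_chunk_sectors(q, 128 * 2048);
}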
279 
280 /**
281  * blk_queue_max_discard_sectors - set max sectors for a single discard
282  * @q:  the request queue for the device
283  * @max_discard_sectors: maximum number of sectors to discard
284  **/
285 void blk_queue_max_discard_sectors(struct request_queue *q,
286 		unsigned int max_discard_sectors)
287 {
288 	q->limits.max_hw_discard_sectors = max_discard_sectors;
289 	q->limits.max_discard_sectors = max_discard_sectors;
290 }
291 EXPORT_SYMBOL(blk_queue_max_discard_sectors);
292 
293 /**
294  * blk_queue_max_write_same_sectors - set max sectors for a single write same
295  * @q:  the request queue for the device
296  * @max_write_same_sectors: maximum number of sectors to write per command
297  **/
298 void blk_queue_max_write_same_sectors(struct request_queue *q,
299 				      unsigned int max_write_same_sectors)
300 {
301 	q->limits.max_write_same_sectors = max_write_same_sectors;
302 }
303 EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
304 
305 /**
306  * blk_queue_max_write_zeroes_sectors - set max sectors for a single
307  *                                      write zeroes
308  * @q:  the request queue for the device
309  * @max_write_zeroes_sectors: maximum number of sectors to write per command
310  **/
311 void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
312 		unsigned int max_write_zeroes_sectors)
313 {
314 	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
315 }
316 EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
317 
318 /**
319  * blk_queue_max_segments - set max hw segments for a request for this queue
320  * @q:  the request queue for the device
321  * @max_segments:  max number of segments
322  *
323  * Description:
324  *    Enables a low level driver to set an upper limit on the number of
325  *    hw data segments in a request.
326  **/
327 void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
328 {
329 	if (!max_segments) {
330 		max_segments = 1;
331 		printk(KERN_INFO "%s: set to minimum %d\n",
332 		       __func__, max_segments);
333 	}
334 
335 	q->limits.max_segments = max_segments;
336 }
337 EXPORT_SYMBOL(blk_queue_max_segments);
338 
339 /**
340  * blk_queue_max_discard_segments - set max segments for discard requests
341  * @q:  the request queue for the device
342  * @max_segments:  max number of segments
343  *
344  * Description:
345  *    Enables a low level driver to set an upper limit on the number of
346  *    segments in a discard request.
347  **/
348 void blk_queue_max_discard_segments(struct request_queue *q,
349 		unsigned short max_segments)
350 {
351 	q->limits.max_discard_segments = max_segments;
352 }
353 EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
354 
355 /**
356  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
357  * @q:  the request queue for the device
358  * @max_size:  max size of segment in bytes
359  *
360  * Description:
361  *    Enables a low level driver to set an upper limit on the size of a
362  *    coalesced segment
363  **/
364 void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
365 {
366 	if (max_size < PAGE_SIZE) {
367 		max_size = PAGE_SIZE;
368 		printk(KERN_INFO "%s: set to minimum %d\n",
369 		       __func__, max_size);
370 	}
371 
372 	q->limits.max_segment_size = max_size;
373 }
374 EXPORT_SYMBOL(blk_queue_max_segment_size);
375 
376 /**
377  * blk_queue_logical_block_size - set logical block size for the queue
378  * @q:  the request queue for the device
379  * @size:  the logical block size, in bytes
380  *
381  * Description:
382  *   This should be set to the lowest possible block size that the
383  *   storage device can address.  The default of 512 covers most
384  *   hardware.
385  **/
386 void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
387 {
388 	q->limits.logical_block_size = size;
389 
390 	if (q->limits.physical_block_size < size)
391 		q->limits.physical_block_size = size;
392 
393 	if (q->limits.io_min < q->limits.physical_block_size)
394 		q->limits.io_min = q->limits.physical_block_size;
395 }
396 EXPORT_SYMBOL(blk_queue_logical_block_size);
397 
398 /**
399  * blk_queue_physical_block_size - set physical block size for the queue
400  * @q:  the request queue for the device
401  * @size:  the physical block size, in bytes
402  *
403  * Description:
404  *   This should be set to the lowest possible sector size that the
405  *   hardware can operate on without reverting to read-modify-write
406  *   operations.
407  */
408 void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
409 {
410 	q->limits.physical_block_size = size;
411 
412 	if (q->limits.physical_block_size < q->limits.logical_block_size)
413 		q->limits.physical_block_size = q->limits.logical_block_size;
414 
415 	if (q->limits.io_min < q->limits.physical_block_size)
416 		q->limits.io_min = q->limits.physical_block_size;
417 }
418 EXPORT_SYMBOL(blk_queue_physical_block_size);
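
/*
 * Illustrative sketch, not part of the upstream file: a 512e drive
 * (4096-byte physical sectors addressed in 512-byte logical blocks)
 * reports both sizes so the I/O topology is exposed correctly; the
 * helpers above raise io_min to the physical block size automatically.
 */
static void example_set_512e_topology(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);
}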
419 
420 /**
421  * blk_queue_alignment_offset - set physical block alignment offset
422  * @q:	the request queue for the device
423  * @offset: alignment offset in bytes
424  *
425  * Description:
426  *   Some devices are naturally misaligned to compensate for things like
427  *   the legacy DOS partition table 63-sector offset.  Low-level drivers
428  *   should call this function for devices whose first sector is not
429  *   naturally aligned.
430  */
431 void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
432 {
433 	q->limits.alignment_offset =
434 		offset & (q->limits.physical_block_size - 1);
435 	q->limits.misaligned = 0;
436 }
437 EXPORT_SYMBOL(blk_queue_alignment_offset);
438 
439 /**
440  * blk_limits_io_min - set minimum request size for a device
441  * @limits: the queue limits
442  * @min:  smallest I/O size in bytes
443  *
444  * Description:
445  *   Some devices have an internal block size bigger than the reported
446  *   hardware sector size.  This function can be used to signal the
447  *   smallest I/O the device can perform without incurring a performance
448  *   penalty.
449  */
450 void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
451 {
452 	limits->io_min = min;
453 
454 	if (limits->io_min < limits->logical_block_size)
455 		limits->io_min = limits->logical_block_size;
456 
457 	if (limits->io_min < limits->physical_block_size)
458 		limits->io_min = limits->physical_block_size;
459 }
460 EXPORT_SYMBOL(blk_limits_io_min);
461 
462 /**
463  * blk_queue_io_min - set minimum request size for the queue
464  * @q:	the request queue for the device
465  * @min:  smallest I/O size in bytes
466  *
467  * Description:
468  *   Storage devices may report a granularity or preferred minimum I/O
469  *   size which is the smallest request the device can perform without
470  *   incurring a performance penalty.  For disk drives this is often the
471  *   physical block size.  For RAID arrays it is often the stripe chunk
472  *   size.  A properly aligned multiple of minimum_io_size is the
473  *   preferred request size for workloads where a high number of I/O
474  *   operations is desired.
475  */
476 void blk_queue_io_min(struct request_queue *q, unsigned int min)
477 {
478 	blk_limits_io_min(&q->limits, min);
479 }
480 EXPORT_SYMBOL(blk_queue_io_min);
481 
482 /**
483  * blk_limits_io_opt - set optimal request size for a device
484  * @limits: the queue limits
485  * @opt:  optimal request size in bytes
486  *
487  * Description:
488  *   Storage devices may report an optimal I/O size, which is the
489  *   device's preferred unit for sustained I/O.  This is rarely reported
490  *   for disk drives.  For RAID arrays it is usually the stripe width or
491  *   the internal track size.  A properly aligned multiple of
492  *   optimal_io_size is the preferred request size for workloads where
493  *   sustained throughput is desired.
494  */
495 void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
496 {
497 	limits->io_opt = opt;
498 }
499 EXPORT_SYMBOL(blk_limits_io_opt);
500 
501 /**
502  * blk_queue_io_opt - set optimal request size for the queue
503  * @q:	the request queue for the device
504  * @opt:  optimal request size in bytes
505  *
506  * Description:
507  *   Storage devices may report an optimal I/O size, which is the
508  *   device's preferred unit for sustained I/O.  This is rarely reported
509  *   for disk drives.  For RAID arrays it is usually the stripe width or
510  *   the internal track size.  A properly aligned multiple of
511  *   optimal_io_size is the preferred request size for workloads where
512  *   sustained throughput is desired.
513  */
514 void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
515 {
516 	blk_limits_io_opt(&q->limits, opt);
517 }
518 EXPORT_SYMBOL(blk_queue_io_opt);
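
/*
 * Illustrative sketch, not part of the upstream file: a RAID5-style
 * array with a 64KB chunk and four data disks reports the chunk as the
 * minimum I/O size and the full stripe width as the optimal I/O size.
 * The chunk and disk-count values are hypothetical.
 */
static void example_set_raid_io_hints(struct request_queue *q)
{
	unsigned int chunk_bytes = 64 * 1024;
	unsigned int data_disks = 4;

	blk_queue_io_min(q, chunk_bytes);
	blk_queue_io_opt(q, chunk_bytes * data_disks);
}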
519 
520 /**
521  * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
522  * @t:	the stacking driver (top)
523  * @b:  the underlying device (bottom)
524  **/
525 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
526 {
527 	blk_stack_limits(&t->limits, &b->limits, 0);
528 }
529 EXPORT_SYMBOL(blk_queue_stack_limits);
530 
531 /**
532  * blk_stack_limits - adjust queue_limits for stacked devices
533  * @t:	the stacking driver limits (top device)
534  * @b:  the underlying queue limits (bottom, component device)
535  * @start:  first data sector within component device
536  *
537  * Description:
538  *    This function is used by stacking drivers like MD and DM to ensure
539  *    that all component devices have compatible block sizes and
540  *    alignments.  The stacking driver must provide a queue_limits
541  *    struct (top) and then iteratively call the stacking function for
542  *    all component (bottom) devices.  The stacking function will
543  *    attempt to combine the values and ensure proper alignment.
544  *
545  *    Returns 0 if the top and bottom queue_limits are compatible.  The
546  *    top device's block sizes and alignment offsets may be adjusted to
547  *    ensure alignment with the bottom device. If no compatible sizes
548  *    and alignments exist, -1 is returned and the resulting top
549  *    queue_limits will have the misaligned flag set to indicate that
550  *    the alignment_offset is undefined.
551  */
552 int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
553 		     sector_t start)
554 {
555 	unsigned int top, bottom, alignment, ret = 0;
556 
557 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
558 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
559 	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
560 	t->max_write_same_sectors = min(t->max_write_same_sectors,
561 					b->max_write_same_sectors);
562 	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
563 					b->max_write_zeroes_sectors);
564 	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
565 
566 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
567 					    b->seg_boundary_mask);
568 	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
569 					    b->virt_boundary_mask);
570 
571 	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
572 	t->max_discard_segments = min_not_zero(t->max_discard_segments,
573 					       b->max_discard_segments);
574 	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
575 						 b->max_integrity_segments);
576 
577 	t->max_segment_size = min_not_zero(t->max_segment_size,
578 					   b->max_segment_size);
579 
580 	t->misaligned |= b->misaligned;
581 
582 	alignment = queue_limit_alignment_offset(b, start);
583 
584 	/* Bottom device has different alignment.  Check that it is
585 	 * compatible with the current top alignment.
586 	 */
587 	if (t->alignment_offset != alignment) {
588 
589 		top = max(t->physical_block_size, t->io_min)
590 			+ t->alignment_offset;
591 		bottom = max(b->physical_block_size, b->io_min) + alignment;
592 
593 		/* Verify that top and bottom intervals line up */
594 		if (max(top, bottom) % min(top, bottom)) {
595 			t->misaligned = 1;
596 			ret = -1;
597 		}
598 	}
599 
600 	t->logical_block_size = max(t->logical_block_size,
601 				    b->logical_block_size);
602 
603 	t->physical_block_size = max(t->physical_block_size,
604 				     b->physical_block_size);
605 
606 	t->io_min = max(t->io_min, b->io_min);
607 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
608 
609 	t->cluster &= b->cluster;
610 
611 	/* Physical block size a multiple of the logical block size? */
612 	if (t->physical_block_size & (t->logical_block_size - 1)) {
613 		t->physical_block_size = t->logical_block_size;
614 		t->misaligned = 1;
615 		ret = -1;
616 	}
617 
618 	/* Minimum I/O a multiple of the physical block size? */
619 	if (t->io_min & (t->physical_block_size - 1)) {
620 		t->io_min = t->physical_block_size;
621 		t->misaligned = 1;
622 		ret = -1;
623 	}
624 
625 	/* Optimal I/O a multiple of the physical block size? */
626 	if (t->io_opt & (t->physical_block_size - 1)) {
627 		t->io_opt = 0;
628 		t->misaligned = 1;
629 		ret = -1;
630 	}
631 
632 	t->raid_partial_stripes_expensive =
633 		max(t->raid_partial_stripes_expensive,
634 		    b->raid_partial_stripes_expensive);
635 
636 	/* Find lowest common alignment_offset */
637 	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
638 		% max(t->physical_block_size, t->io_min);
639 
640 	/* Verify that new alignment_offset is on a logical block boundary */
641 	if (t->alignment_offset & (t->logical_block_size - 1)) {
642 		t->misaligned = 1;
643 		ret = -1;
644 	}
645 
646 	/* Discard alignment and granularity */
647 	if (b->discard_granularity) {
648 		alignment = queue_limit_discard_alignment(b, start);
649 
650 		if (t->discard_granularity != 0 &&
651 		    t->discard_alignment != alignment) {
652 			top = t->discard_granularity + t->discard_alignment;
653 			bottom = b->discard_granularity + alignment;
654 
655 			/* Verify that top and bottom intervals line up */
656 			if ((max(top, bottom) % min(top, bottom)) != 0)
657 				t->discard_misaligned = 1;
658 		}
659 
660 		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
661 						      b->max_discard_sectors);
662 		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
663 							 b->max_hw_discard_sectors);
664 		t->discard_granularity = max(t->discard_granularity,
665 					     b->discard_granularity);
666 		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
667 			t->discard_granularity;
668 	}
669 
670 	if (b->chunk_sectors)
671 		t->chunk_sectors = min_not_zero(t->chunk_sectors,
672 						b->chunk_sectors);
673 
674 	return ret;
675 }
676 EXPORT_SYMBOL(blk_stack_limits);
677 
678 /**
679  * bdev_stack_limits - adjust queue limits for stacked drivers
680  * @t:	the stacking driver limits (top device)
681  * @bdev:  the component block_device (bottom)
682  * @start:  first data sector within component device
683  *
684  * Description:
685  *    Merges queue limits for a top device and a block_device.  Returns
686  *    0 if alignment didn't change.  Returns -1 if adding the bottom
687  *    device caused misalignment.
688  */
689 int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
690 		      sector_t start)
691 {
692 	struct request_queue *bq = bdev_get_queue(bdev);
693 
694 	start += get_start_sect(bdev);
695 
696 	return blk_stack_limits(t, &bq->limits, start);
697 }
698 EXPORT_SYMBOL(bdev_stack_limits);
699 
700 /**
701  * disk_stack_limits - adjust queue limits for stacked drivers
702  * @disk:  MD/DM gendisk (top)
703  * @bdev:  the underlying block device (bottom)
704  * @offset:  offset to beginning of data within component device
705  *
706  * Description:
707  *    Merges the limits for a top level gendisk and a bottom level
708  *    block_device.
709  */
710 void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
711 		       sector_t offset)
712 {
713 	struct request_queue *t = disk->queue;
714 
715 	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
716 		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
717 
718 		disk_name(disk, 0, top);
719 		bdevname(bdev, bottom);
720 
721 		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
722 		       top, bottom);
723 	}
724 }
725 EXPORT_SYMBOL(disk_stack_limits);
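
/*
 * Illustrative sketch, not part of the upstream file: how a stacking
 * driver might fold every component device into its gendisk's queue
 * limits.  The component array, count and data offset are hypothetical;
 * misaligned devices are reported by disk_stack_limits() itself.
 */
static void example_stack_all_components(struct gendisk *disk,
					 struct block_device **parts,
					 unsigned int count,
					 sector_t offset_bytes)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		/* @offset is in bytes; disk_stack_limits() shifts to sectors */
		disk_stack_limits(disk, parts[i], offset_bytes);
}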
726 
727 /**
728  * blk_queue_dma_pad - set pad mask
729  * @q:     the request queue for the device
730  * @mask:  pad mask
731  *
732  * Set dma pad mask.
733  *
734  * Appending pad buffer to a request modifies the last entry of a
735  * scatter list such that it includes the pad buffer.
736  **/
737 void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
738 {
739 	q->dma_pad_mask = mask;
740 }
741 EXPORT_SYMBOL(blk_queue_dma_pad);
742 
743 /**
744  * blk_queue_update_dma_pad - update pad mask
745  * @q:     the request queue for the device
746  * @mask:  pad mask
747  *
748  * Update dma pad mask.
749  *
750  * Appending pad buffer to a request modifies the last entry of a
751  * scatter list such that it includes the pad buffer.
752  **/
753 void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
754 {
755 	if (mask > q->dma_pad_mask)
756 		q->dma_pad_mask = mask;
757 }
758 EXPORT_SYMBOL(blk_queue_update_dma_pad);
759 
760 /**
761  * blk_queue_dma_drain - Set up a drain buffer for excess dma.
762  * @q:  the request queue for the device
763  * @dma_drain_needed: fn which returns non-zero if drain is necessary
764  * @buf:	physically contiguous buffer
765  * @size:	size of the buffer in bytes
766  *
767  * Some devices have excess DMA problems and can't simply discard (or
768  * zero fill) the unwanted piece of the transfer.  They have to have a
769  * real area of memory to transfer it into.  The use case for this is
770  * ATAPI devices in DMA mode.  If the packet command causes a transfer
771  * bigger than the transfer size some HBAs will lock up if there
772  * aren't DMA elements to contain the excess transfer.  What this API
773  * does is adjust the queue so that the buf is always appended
774  * silently to the scatterlist.
775  *
776  * Note: This routine adjusts max_segments to make room for appending
777  * the drain buffer.  If you call blk_queue_max_segments() after calling
778  * this routine, you must set the limit to one fewer than your device
779  * can support otherwise there won't be room for the drain buffer.
780  */
781 int blk_queue_dma_drain(struct request_queue *q,
782 			       dma_drain_needed_fn *dma_drain_needed,
783 			       void *buf, unsigned int size)
784 {
785 	if (queue_max_segments(q) < 2)
786 		return -EINVAL;
787 	/* make room for appending the drain */
788 	blk_queue_max_segments(q, queue_max_segments(q) - 1);
789 	q->dma_drain_needed = dma_drain_needed;
790 	q->dma_drain_buffer = buf;
791 	q->dma_drain_size = size;
792 
793 	return 0;
794 }
795 EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
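
/*
 * Illustrative sketch, not part of the upstream file: an ATAPI-style
 * driver reserving a drain page for packet commands.  Draining only
 * passthrough requests is a hypothetical policy; a real driver applies
 * its own criteria.  kmalloc() requires <linux/slab.h>, which this file
 * does not include.
 */
static int example_drain_needed(struct request *rq)
{
	/* Only packet (passthrough) commands may transfer excess data */
	return blk_rq_is_passthrough(rq);
}

static int example_setup_drain(struct request_queue *q)
{
	void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* Consumes one scatterlist segment; see the note above */
	return blk_queue_dma_drain(q, example_drain_needed, buf, PAGE_SIZE);
}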
796 
797 /**
798  * blk_queue_segment_boundary - set boundary rules for segment merging
799  * @q:  the request queue for the device
800  * @mask:  the memory boundary mask
801  **/
802 void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
803 {
804 	if (mask < PAGE_SIZE - 1) {
805 		mask = PAGE_SIZE - 1;
806 		printk(KERN_INFO "%s: set to minimum %lx\n",
807 		       __func__, mask);
808 	}
809 
810 	q->limits.seg_boundary_mask = mask;
811 }
812 EXPORT_SYMBOL(blk_queue_segment_boundary);
813 
814 /**
815  * blk_queue_virt_boundary - set boundary rules for bio merging
816  * @q:  the request queue for the device
817  * @mask:  the memory boundary mask
818  **/
819 void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
820 {
821 	q->limits.virt_boundary_mask = mask;
822 }
823 EXPORT_SYMBOL(blk_queue_virt_boundary);
824 
825 /**
826  * blk_queue_dma_alignment - set dma length and memory alignment
827  * @q:     the request queue for the device
828  * @mask:  alignment mask
829  *
830  * Description:
831  *    Set required memory and length alignment for direct DMA transactions.
832  *    This is used when building direct I/O requests for the queue.
833  *
834  **/
835 void blk_queue_dma_alignment(struct request_queue *q, int mask)
836 {
837 	q->dma_alignment = mask;
838 }
839 EXPORT_SYMBOL(blk_queue_dma_alignment);
840 
841 /**
842  * blk_queue_update_dma_alignment - update dma length and memory alignment
843  * @q:     the request queue for the device
844  * @mask:  alignment mask
845  *
846  * Description:
847  *    Update required memory and length alignment for direct DMA transactions.
848  *    If the requested alignment is larger than the current alignment, then
849  *    the current queue alignment is updated to the new value, otherwise it
850  *    is left alone.  The design of this is to allow multiple objects
851  *    (driver, device, transport etc) to set their respective
852  *    alignments without having them interfere.
853  *
854  **/
855 void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
856 {
857 	BUG_ON(mask > PAGE_SIZE);
858 
859 	if (mask > q->dma_alignment)
860 		q->dma_alignment = mask;
861 }
862 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
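
/*
 * Illustrative sketch, not part of the upstream file: driver and
 * transport layers each publish their own alignment requirement, and the
 * update helper keeps whichever mask is strictest.  The masks shown are
 * hypothetical.
 */
static void example_update_alignments(struct request_queue *q)
{
	/* Driver requires 4-byte aligned buffers and lengths */
	blk_queue_update_dma_alignment(q, 3);

	/* Transport later requires 512-byte alignment; 511 wins over 3 */
	blk_queue_update_dma_alignment(q, 511);
}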
863 
864 void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
865 {
866 	spin_lock_irq(q->queue_lock);
867 	if (queueable)
868 		clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
869 	else
870 		set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
871 	spin_unlock_irq(q->queue_lock);
872 }
873 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
874 
875 /**
876  * blk_set_queue_depth - tell the block layer about the device queue depth
877  * @q:		the request queue for the device
878  * @depth:		queue depth
879  *
880  */
881 void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
882 {
883 	q->queue_depth = depth;
884 	wbt_set_queue_depth(q->rq_wb, depth);
885 }
886 EXPORT_SYMBOL(blk_set_queue_depth);
887 
888 /**
889  * blk_queue_write_cache - configure queue's write cache
890  * @q:		the request queue for the device
891  * @wc:		write back cache on or off
892  * @fua:	device supports FUA writes, if true
893  *
894  * Tell the block layer about the write cache of @q.
895  */
896 void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
897 {
898 	spin_lock_irq(q->queue_lock);
899 	if (wc)
900 		queue_flag_set(QUEUE_FLAG_WC, q);
901 	else
902 		queue_flag_clear(QUEUE_FLAG_WC, q);
903 	if (fua)
904 		queue_flag_set(QUEUE_FLAG_FUA, q);
905 	else
906 		queue_flag_clear(QUEUE_FLAG_FUA, q);
907 	spin_unlock_irq(q->queue_lock);
908 
909 	wbt_set_write_cache(q->rq_wb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
910 }
911 EXPORT_SYMBOL_GPL(blk_queue_write_cache);
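
/*
 * Illustrative sketch, not part of the upstream file: propagating a
 * device's (hypothetical) volatile-cache and FUA capability bits to the
 * block layer so flushes and REQ_FUA are only issued when needed.
 */
static void example_advertise_cache(struct request_queue *q,
				    bool has_volatile_cache, bool has_fua)
{
	/* FUA is only meaningful when a volatile write cache exists */
	blk_queue_write_cache(q, has_volatile_cache,
			      has_volatile_cache && has_fua);
}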
912 
913 static int __init blk_settings_init(void)
914 {
915 	blk_max_low_pfn = max_low_pfn - 1;
916 	blk_max_pfn = max_pfn - 1;
917 	return 0;
918 }
919 subsys_initcall(blk_settings_init);
920