xref: /openbmc/linux/block/blk-settings.c (revision bcb84fb4)
1 /*
2  * Functions related to setting various queue properties from drivers
3  */
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/init.h>
7 #include <linux/bio.h>
8 #include <linux/blkdev.h>
9 #include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
10 #include <linux/gcd.h>
11 #include <linux/lcm.h>
12 #include <linux/jiffies.h>
13 #include <linux/gfp.h>
14 
15 #include "blk.h"
16 #include "blk-wbt.h"
17 
18 unsigned long blk_max_low_pfn;
19 EXPORT_SYMBOL(blk_max_low_pfn);
20 
21 unsigned long blk_max_pfn;
22 
23 /**
24  * blk_queue_prep_rq - set a prepare_request function for queue
25  * @q:		queue
26  * @pfn:	prepare_request function
27  *
28  * It's possible for a queue to register a prepare_request callback which
29  * is invoked before the request is handed to the request_fn. The goal of
30  * the function is to prepare a request for I/O; it can be used, for
31  * instance, to build a cdb from the request data.
32  *
33  */
34 void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
35 {
36 	q->prep_rq_fn = pfn;
37 }
38 EXPORT_SYMBOL(blk_queue_prep_rq);
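
/*
 * Illustrative sketch (not taken from a real driver): a prepare_request
 * callback might look roughly like this.  example_hw_ready() is a
 * hypothetical helper; BLKPREP_OK/BLKPREP_DEFER are the real return codes.
 */
static int example_prep_rq_fn(struct request_queue *q, struct request *rq)
{
	/*
	 * Build the controller command from the request here, e.g. derive
	 * a cdb from blk_rq_pos() and blk_rq_sectors().
	 */
	if (!example_hw_ready(q->queuedata))	/* hypothetical readiness test */
		return BLKPREP_DEFER;		/* ask the block layer to retry later */

	return BLKPREP_OK;
}

/* Registered once at queue setup time: blk_queue_prep_rq(q, example_prep_rq_fn); */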
39 
40 /**
41  * blk_queue_unprep_rq - set an unprepare_request function for queue
42  * @q:		queue
43  * @ufn:	unprepare_request function
44  *
45  * It's possible for a queue to register an unprepare_request callback
46  * which is invoked before the request is finally completed. The goal
47  * of the function is to deallocate any data that was allocated in the
48  * prepare_request callback.
49  *
50  */
51 void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
52 {
53 	q->unprep_rq_fn = ufn;
54 }
55 EXPORT_SYMBOL(blk_queue_unprep_rq);
56 
57 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
58 {
59 	q->softirq_done_fn = fn;
60 }
61 EXPORT_SYMBOL(blk_queue_softirq_done);
62 
63 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
64 {
65 	q->rq_timeout = timeout;
66 }
67 EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
68 
69 void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
70 {
71 	q->rq_timed_out_fn = fn;
72 }
73 EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
74 
75 void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
76 {
77 	q->lld_busy_fn = fn;
78 }
79 EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
80 
81 /**
82  * blk_set_default_limits - reset limits to default values
83  * @lim:  the queue_limits structure to reset
84  *
85  * Description:
86  *   Returns a queue_limits struct to its default state.
87  */
88 void blk_set_default_limits(struct queue_limits *lim)
89 {
90 	lim->max_segments = BLK_MAX_SEGMENTS;
91 	lim->max_discard_segments = 1;
92 	lim->max_integrity_segments = 0;
93 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
94 	lim->virt_boundary_mask = 0;
95 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
96 	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
97 	lim->max_dev_sectors = 0;
98 	lim->chunk_sectors = 0;
99 	lim->max_write_same_sectors = 0;
100 	lim->max_write_zeroes_sectors = 0;
101 	lim->max_discard_sectors = 0;
102 	lim->max_hw_discard_sectors = 0;
103 	lim->discard_granularity = 0;
104 	lim->discard_alignment = 0;
105 	lim->discard_misaligned = 0;
106 	lim->discard_zeroes_data = 0;
107 	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
108 	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
109 	lim->alignment_offset = 0;
110 	lim->io_opt = 0;
111 	lim->misaligned = 0;
112 	lim->cluster = 1;
113 	lim->zoned = BLK_ZONED_NONE;
114 }
115 EXPORT_SYMBOL(blk_set_default_limits);
116 
117 /**
118  * blk_set_stacking_limits - set default limits for stacking devices
119  * @lim:  the queue_limits structure to reset
120  *
121  * Description:
122  *   Returns a queue_limits struct to its default state. Should be used
123  *   by stacking drivers like DM that have no internal limits.
124  */
125 void blk_set_stacking_limits(struct queue_limits *lim)
126 {
127 	blk_set_default_limits(lim);
128 
129 	/* Inherit limits from component devices */
130 	lim->discard_zeroes_data = 1;
131 	lim->max_segments = USHRT_MAX;
132 	lim->max_discard_segments = 1;
133 	lim->max_hw_sectors = UINT_MAX;
134 	lim->max_segment_size = UINT_MAX;
135 	lim->max_sectors = UINT_MAX;
136 	lim->max_dev_sectors = UINT_MAX;
137 	lim->max_write_same_sectors = UINT_MAX;
138 	lim->max_write_zeroes_sectors = UINT_MAX;
139 }
140 EXPORT_SYMBOL(blk_set_stacking_limits);
141 
142 /**
143  * blk_queue_make_request - define an alternate make_request function for a device
144  * @q:  the request queue for the device to be affected
145  * @mfn: the alternate make_request function
146  *
147  * Description:
148  *    The normal way for &struct bios to be passed to a device
149  *    driver is for them to be collected into requests on a request
150  *    queue, and then to allow the device driver to select requests
151  *    off that queue when it is ready.  This works well for many block
152  *    devices. However some block devices (typically virtual devices
153  *    such as md or lvm) do not benefit from the processing on the
154  *    request queue, and are served best by having the requests passed
155  *    directly to them.  This can be achieved by providing a function
156  *    to blk_queue_make_request().
157  *
158  * Caveat:
159  *    The driver that does this *must* be able to deal appropriately
160  *    with buffers in "highmemory". This can be accomplished by either calling
161  *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
162  *    blk_queue_bounce() to create a buffer in normal memory.
163  **/
164 void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
165 {
166 	/*
167 	 * set defaults
168 	 */
169 	q->nr_requests = BLKDEV_MAX_RQ;
170 
171 	q->make_request_fn = mfn;
172 	blk_queue_dma_alignment(q, 511);
173 	blk_queue_congestion_threshold(q);
174 	q->nr_batching = BLK_BATCH_REQ;
175 
176 	blk_set_default_limits(&q->limits);
177 
178 	/*
179 	 * by default assume old behaviour and bounce for any highmem page
180 	 */
181 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
182 }
183 EXPORT_SYMBOL(blk_queue_make_request);
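
/*
 * Illustrative sketch: a bio-based virtual driver (md/lvm-like) hooking its
 * own make_request function.  example_handle_bio() is hypothetical; the
 * registration call and the return value are the real interface.
 */
static blk_qc_t example_make_request(struct request_queue *q, struct bio *bio)
{
	void *dev = q->queuedata;		/* the driver's private data */

	if (example_handle_bio(dev, bio))	/* hypothetical: remap or complete the bio */
		bio_io_error(bio);

	return BLK_QC_T_NONE;
}

/* During device creation: blk_queue_make_request(q, example_make_request); */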
184 
185 /**
186  * blk_queue_bounce_limit - set bounce buffer limit for queue
187  * @q: the request queue for the device
188  * @max_addr: the maximum address the device can handle
189  *
190  * Description:
191  *    Different hardware can have different requirements as to what pages
192  *    it can do I/O directly to. A low level driver can call
193  *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
194  *    buffers for doing I/O to pages residing above @max_addr.
195  **/
196 void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
197 {
198 	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
199 	int dma = 0;
200 
201 	q->bounce_gfp = GFP_NOIO;
202 #if BITS_PER_LONG == 64
203 	/*
204 	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
205 	 * some IOMMUs can handle everything, but I don't know of a
206 	 * way to test this here.
207 	 */
208 	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
209 		dma = 1;
210 	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
211 #else
212 	if (b_pfn < blk_max_low_pfn)
213 		dma = 1;
214 	q->limits.bounce_pfn = b_pfn;
215 #endif
216 	if (dma) {
217 		init_emergency_isa_pool();
218 		q->bounce_gfp = GFP_NOIO | GFP_DMA;
219 		q->limits.bounce_pfn = b_pfn;
220 	}
221 }
222 EXPORT_SYMBOL(blk_queue_bounce_limit);
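
/*
 * Illustrative sketch: how a driver might pick a bounce limit based on the
 * controller's DMA reach.  Both BLK_BOUNCE_* constants come from blkdev.h.
 */
static void example_set_bounce(struct request_queue *q, bool dma64)
{
	/* a 64-bit capable controller needs no bouncing; otherwise bounce highmem pages */
	blk_queue_bounce_limit(q, dma64 ? BLK_BOUNCE_ANY : BLK_BOUNCE_HIGH);
}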
223 
224 /**
225  * blk_queue_max_hw_sectors - set max sectors for a request for this queue
226  * @q:  the request queue for the device
227  * @max_hw_sectors:  max hardware sectors in the usual 512b unit
228  *
229  * Description:
230  *    Enables a low level driver to set a hard upper limit,
231  *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
232  *    the device driver based upon the capabilities of the I/O
233  *    controller.
234  *
235  *    max_dev_sectors is a hard limit imposed by the storage device for
236  *    READ/WRITE requests. It is set by the disk driver.
237  *
238  *    max_sectors is a soft limit imposed by the block layer for
239  *    filesystem type requests.  This value can be overridden on a
240  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
241  *    The soft limit cannot exceed max_hw_sectors.
242  **/
243 void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
244 {
245 	struct queue_limits *limits = &q->limits;
246 	unsigned int max_sectors;
247 
248 	if ((max_hw_sectors << 9) < PAGE_SIZE) {
249 		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
250 		printk(KERN_INFO "%s: set to minimum %d\n",
251 		       __func__, max_hw_sectors);
252 	}
253 
254 	limits->max_hw_sectors = max_hw_sectors;
255 	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
256 	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
257 	limits->max_sectors = max_sectors;
258 	q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
259 }
260 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
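
/*
 * Illustrative sketch: deriving max_hw_sectors from a byte limit reported by
 * the hardware.  The parameter name is hypothetical; the >> 9 conversion
 * matches the "usual 512b unit" convention noted above.
 */
static void example_set_transfer_limit(struct request_queue *q,
				       unsigned int max_xfer_bytes)
{
	blk_queue_max_hw_sectors(q, max_xfer_bytes >> 9);
}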
261 
262 /**
263  * blk_queue_chunk_sectors - set size of the chunk for this queue
264  * @q:  the request queue for the device
265  * @chunk_sectors:  chunk sectors in the usual 512b unit
266  *
267  * Description:
268  *    If a driver doesn't want IOs to cross a given chunk size, it can set
269  *    this limit and prevent merging across chunks. Note that the chunk size
270  *    must currently be a power-of-2 in sectors. Also note that the block
271  *    layer must accept a page worth of data at any offset. So if the
272  *    crossing of chunks is a hard limitation in the driver, it must still be
273  *    prepared to split single page bios.
274  **/
275 void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
276 {
277 	BUG_ON(!is_power_of_2(chunk_sectors));
278 	q->limits.chunk_sectors = chunk_sectors;
279 }
280 EXPORT_SYMBOL(blk_queue_chunk_sectors);
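
/*
 * Illustrative sketch: a driver whose device reports a preferred I/O boundary
 * (e.g. 128 sectors = 64KB) keeping requests within it.  The power-of-2 check
 * mirrors the BUG_ON above.
 */
static void example_set_io_boundary(struct request_queue *q,
				    unsigned int boundary_sectors)
{
	if (is_power_of_2(boundary_sectors))
		blk_queue_chunk_sectors(q, boundary_sectors);
}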
281 
282 /**
283  * blk_queue_max_discard_sectors - set max sectors for a single discard
284  * @q:  the request queue for the device
285  * @max_discard_sectors: maximum number of sectors to discard
286  **/
287 void blk_queue_max_discard_sectors(struct request_queue *q,
288 		unsigned int max_discard_sectors)
289 {
290 	q->limits.max_hw_discard_sectors = max_discard_sectors;
291 	q->limits.max_discard_sectors = max_discard_sectors;
292 }
293 EXPORT_SYMBOL(blk_queue_max_discard_sectors);
294 
295 /**
296  * blk_queue_max_write_same_sectors - set max sectors for a single write same
297  * @q:  the request queue for the device
298  * @max_write_same_sectors: maximum number of sectors to write per command
299  **/
300 void blk_queue_max_write_same_sectors(struct request_queue *q,
301 				      unsigned int max_write_same_sectors)
302 {
303 	q->limits.max_write_same_sectors = max_write_same_sectors;
304 }
305 EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
306 
307 /**
308  * blk_queue_max_write_zeroes_sectors - set max sectors for a single
309  *                                      write zeroes
310  * @q:  the request queue for the device
311  * @max_write_zeroes_sectors: maximum number of sectors to write per command
312  **/
313 void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
314 		unsigned int max_write_zeroes_sectors)
315 {
316 	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
317 }
318 EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
319 
320 /**
321  * blk_queue_max_segments - set max hw segments for a request for this queue
322  * @q:  the request queue for the device
323  * @max_segments:  max number of segments
324  *
325  * Description:
326  *    Enables a low level driver to set an upper limit on the number of
327  *    hw data segments in a request.
328  **/
329 void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
330 {
331 	if (!max_segments) {
332 		max_segments = 1;
333 		printk(KERN_INFO "%s: set to minimum %d\n",
334 		       __func__, max_segments);
335 	}
336 
337 	q->limits.max_segments = max_segments;
338 }
339 EXPORT_SYMBOL(blk_queue_max_segments);
340 
341 /**
342  * blk_queue_max_discard_segments - set max segments for discard requests
343  * @q:  the request queue for the device
344  * @max_segments:  max number of segments
345  *
346  * Description:
347  *    Enables a low level driver to set an upper limit on the number of
348  *    segments in a discard request.
349  **/
350 void blk_queue_max_discard_segments(struct request_queue *q,
351 		unsigned short max_segments)
352 {
353 	q->limits.max_discard_segments = max_segments;
354 }
355 EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
356 
357 /**
358  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
359  * @q:  the request queue for the device
360  * @max_size:  max size of segment in bytes
361  *
362  * Description:
363  *    Enables a low level driver to set an upper limit on the size of a
364  *    coalesced segment.
365  **/
366 void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
367 {
368 	if (max_size < PAGE_SIZE) {
369 		max_size = PAGE_SIZE;
370 		printk(KERN_INFO "%s: set to minimum %d\n",
371 		       __func__, max_size);
372 	}
373 
374 	q->limits.max_segment_size = max_size;
375 }
376 EXPORT_SYMBOL(blk_queue_max_segment_size);
377 
378 /**
379  * blk_queue_logical_block_size - set logical block size for the queue
380  * @q:  the request queue for the device
381  * @size:  the logical block size, in bytes
382  *
383  * Description:
384  *   This should be set to the lowest possible block size that the
385  *   storage device can address.  The default of 512 covers most
386  *   hardware.
387  **/
388 void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
389 {
390 	q->limits.logical_block_size = size;
391 
392 	if (q->limits.physical_block_size < size)
393 		q->limits.physical_block_size = size;
394 
395 	if (q->limits.io_min < q->limits.physical_block_size)
396 		q->limits.io_min = q->limits.physical_block_size;
397 }
398 EXPORT_SYMBOL(blk_queue_logical_block_size);
399 
400 /**
401  * blk_queue_physical_block_size - set physical block size for the queue
402  * @q:  the request queue for the device
403  * @size:  the physical block size, in bytes
404  *
405  * Description:
406  *   This should be set to the lowest possible sector size that the
407  *   hardware can operate on without reverting to read-modify-write
408  *   operations.
409  */
410 void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
411 {
412 	q->limits.physical_block_size = size;
413 
414 	if (q->limits.physical_block_size < q->limits.logical_block_size)
415 		q->limits.physical_block_size = q->limits.logical_block_size;
416 
417 	if (q->limits.io_min < q->limits.physical_block_size)
418 		q->limits.io_min = q->limits.physical_block_size;
419 }
420 EXPORT_SYMBOL(blk_queue_physical_block_size);
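
/*
 * Illustrative sketch: a 512e drive (4K physical sectors addressed through
 * 512b logical sectors) would describe its geometry like this; io_min is
 * raised to 4096 implicitly by blk_queue_physical_block_size().
 */
static void example_set_block_sizes(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);
}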
421 
422 /**
423  * blk_queue_alignment_offset - set physical block alignment offset
424  * @q:	the request queue for the device
425  * @offset: alignment offset in bytes
426  *
427  * Description:
428  *   Some devices are naturally misaligned to compensate for things like
429  *   the legacy DOS partition table 63-sector offset.  Low-level drivers
430  *   should call this function for devices whose first sector is not
431  *   naturally aligned.
432  */
433 void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
434 {
435 	q->limits.alignment_offset =
436 		offset & (q->limits.physical_block_size - 1);
437 	q->limits.misaligned = 0;
438 }
439 EXPORT_SYMBOL(blk_queue_alignment_offset);
440 
441 /**
442  * blk_limits_io_min - set minimum request size for a device
443  * @limits: the queue limits
444  * @min:  smallest I/O size in bytes
445  *
446  * Description:
447  *   Some devices have an internal block size bigger than the reported
448  *   hardware sector size.  This function can be used to signal the
449  *   smallest I/O the device can perform without incurring a performance
450  *   penalty.
451  */
452 void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
453 {
454 	limits->io_min = min;
455 
456 	if (limits->io_min < limits->logical_block_size)
457 		limits->io_min = limits->logical_block_size;
458 
459 	if (limits->io_min < limits->physical_block_size)
460 		limits->io_min = limits->physical_block_size;
461 }
462 EXPORT_SYMBOL(blk_limits_io_min);
463 
464 /**
465  * blk_queue_io_min - set minimum request size for the queue
466  * @q:	the request queue for the device
467  * @min:  smallest I/O size in bytes
468  *
469  * Description:
470  *   Storage devices may report a granularity or preferred minimum I/O
471  *   size which is the smallest request the device can perform without
472  *   incurring a performance penalty.  For disk drives this is often the
473  *   physical block size.  For RAID arrays it is often the stripe chunk
474  *   size.  A properly aligned multiple of minimum_io_size is the
475  *   preferred request size for workloads where a high number of I/O
476  *   operations is desired.
477  */
478 void blk_queue_io_min(struct request_queue *q, unsigned int min)
479 {
480 	blk_limits_io_min(&q->limits, min);
481 }
482 EXPORT_SYMBOL(blk_queue_io_min);
483 
484 /**
485  * blk_limits_io_opt - set optimal request size for a device
486  * @limits: the queue limits
487  * @opt:  optimal request size in bytes
488  *
489  * Description:
490  *   Storage devices may report an optimal I/O size, which is the
491  *   device's preferred unit for sustained I/O.  This is rarely reported
492  *   for disk drives.  For RAID arrays it is usually the stripe width or
493  *   the internal track size.  A properly aligned multiple of
494  *   optimal_io_size is the preferred request size for workloads where
495  *   sustained throughput is desired.
496  */
497 void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
498 {
499 	limits->io_opt = opt;
500 }
501 EXPORT_SYMBOL(blk_limits_io_opt);
502 
503 /**
504  * blk_queue_io_opt - set optimal request size for the queue
505  * @q:	the request queue for the device
506  * @opt:  optimal request size in bytes
507  *
508  * Description:
509  *   Storage devices may report an optimal I/O size, which is the
510  *   device's preferred unit for sustained I/O.  This is rarely reported
511  *   for disk drives.  For RAID arrays it is usually the stripe width or
512  *   the internal track size.  A properly aligned multiple of
513  *   optimal_io_size is the preferred request size for workloads where
514  *   sustained throughput is desired.
515  */
516 void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
517 {
518 	blk_limits_io_opt(&q->limits, opt);
519 }
520 EXPORT_SYMBOL(blk_queue_io_opt);
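
/*
 * Illustrative sketch: a RAID5-like stacking driver with a 64KB chunk across
 * four data disks might advertise the chunk as io_min and the full stripe as
 * io_opt.  The parameter names are hypothetical.
 */
static void example_set_raid_hints(struct request_queue *q,
				   unsigned int chunk_bytes,
				   unsigned int data_disks)
{
	blk_queue_io_min(q, chunk_bytes);		/* e.g. 65536 */
	blk_queue_io_opt(q, chunk_bytes * data_disks);	/* e.g. 262144 */
}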
521 
522 /**
523  * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
524  * @t:	the stacking driver (top)
525  * @b:  the underlying device (bottom)
526  **/
527 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
528 {
529 	blk_stack_limits(&t->limits, &b->limits, 0);
530 }
531 EXPORT_SYMBOL(blk_queue_stack_limits);
532 
533 /**
534  * blk_stack_limits - adjust queue_limits for stacked devices
535  * @t:	the stacking driver limits (top device)
536  * @b:  the underlying queue limits (bottom, component device)
537  * @start:  first data sector within component device
538  *
539  * Description:
540  *    This function is used by stacking drivers like MD and DM to ensure
541  *    that all component devices have compatible block sizes and
542  *    alignments.  The stacking driver must provide a queue_limits
543  *    struct (top) and then iteratively call the stacking function for
544  *    all component (bottom) devices.  The stacking function will
545  *    attempt to combine the values and ensure proper alignment.
546  *
547  *    Returns 0 if the top and bottom queue_limits are compatible.  The
548  *    top device's block sizes and alignment offsets may be adjusted to
549  *    ensure alignment with the bottom device. If no compatible sizes
550  *    and alignments exist, -1 is returned and the resulting top
551  *    queue_limits will have the misaligned flag set to indicate that
552  *    the alignment_offset is undefined.
553  */
554 int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
555 		     sector_t start)
556 {
557 	unsigned int top, bottom, alignment, ret = 0;
558 
559 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
560 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
561 	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
562 	t->max_write_same_sectors = min(t->max_write_same_sectors,
563 					b->max_write_same_sectors);
564 	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
565 					b->max_write_zeroes_sectors);
566 	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
567 
568 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
569 					    b->seg_boundary_mask);
570 	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
571 					    b->virt_boundary_mask);
572 
573 	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
574 	t->max_discard_segments = min_not_zero(t->max_discard_segments,
575 					       b->max_discard_segments);
576 	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
577 						 b->max_integrity_segments);
578 
579 	t->max_segment_size = min_not_zero(t->max_segment_size,
580 					   b->max_segment_size);
581 
582 	t->misaligned |= b->misaligned;
583 
584 	alignment = queue_limit_alignment_offset(b, start);
585 
586 	/* Bottom device has different alignment.  Check that it is
587 	 * compatible with the current top alignment.
588 	 */
589 	if (t->alignment_offset != alignment) {
590 
591 		top = max(t->physical_block_size, t->io_min)
592 			+ t->alignment_offset;
593 		bottom = max(b->physical_block_size, b->io_min) + alignment;
594 
595 		/* Verify that top and bottom intervals line up */
596 		if (max(top, bottom) % min(top, bottom)) {
597 			t->misaligned = 1;
598 			ret = -1;
599 		}
600 	}
601 
602 	t->logical_block_size = max(t->logical_block_size,
603 				    b->logical_block_size);
604 
605 	t->physical_block_size = max(t->physical_block_size,
606 				     b->physical_block_size);
607 
608 	t->io_min = max(t->io_min, b->io_min);
609 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
610 
611 	t->cluster &= b->cluster;
612 	t->discard_zeroes_data &= b->discard_zeroes_data;
613 
614 	/* Physical block size a multiple of the logical block size? */
615 	if (t->physical_block_size & (t->logical_block_size - 1)) {
616 		t->physical_block_size = t->logical_block_size;
617 		t->misaligned = 1;
618 		ret = -1;
619 	}
620 
621 	/* Minimum I/O a multiple of the physical block size? */
622 	if (t->io_min & (t->physical_block_size - 1)) {
623 		t->io_min = t->physical_block_size;
624 		t->misaligned = 1;
625 		ret = -1;
626 	}
627 
628 	/* Optimal I/O a multiple of the physical block size? */
629 	if (t->io_opt & (t->physical_block_size - 1)) {
630 		t->io_opt = 0;
631 		t->misaligned = 1;
632 		ret = -1;
633 	}
634 
635 	t->raid_partial_stripes_expensive =
636 		max(t->raid_partial_stripes_expensive,
637 		    b->raid_partial_stripes_expensive);
638 
639 	/* Find lowest common alignment_offset */
640 	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
641 		% max(t->physical_block_size, t->io_min);
642 
643 	/* Verify that new alignment_offset is on a logical block boundary */
644 	if (t->alignment_offset & (t->logical_block_size - 1)) {
645 		t->misaligned = 1;
646 		ret = -1;
647 	}
648 
649 	/* Discard alignment and granularity */
650 	if (b->discard_granularity) {
651 		alignment = queue_limit_discard_alignment(b, start);
652 
653 		if (t->discard_granularity != 0 &&
654 		    t->discard_alignment != alignment) {
655 			top = t->discard_granularity + t->discard_alignment;
656 			bottom = b->discard_granularity + alignment;
657 
658 			/* Verify that top and bottom intervals line up */
659 			if ((max(top, bottom) % min(top, bottom)) != 0)
660 				t->discard_misaligned = 1;
661 		}
662 
663 		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
664 						      b->max_discard_sectors);
665 		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
666 							 b->max_hw_discard_sectors);
667 		t->discard_granularity = max(t->discard_granularity,
668 					     b->discard_granularity);
669 		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
670 			t->discard_granularity;
671 	}
672 
673 	if (b->chunk_sectors)
674 		t->chunk_sectors = min_not_zero(t->chunk_sectors,
675 						b->chunk_sectors);
676 
677 	return ret;
678 }
679 EXPORT_SYMBOL(blk_stack_limits);
680 
681 /**
682  * bdev_stack_limits - adjust queue limits for stacked drivers
683  * @t:	the stacking driver limits (top device)
684  * @bdev:  the component block_device (bottom)
685  * @start:  first data sector within component device
686  *
687  * Description:
688  *    Merges queue limits for a top device and a block_device.  Returns
689  *    0 if alignment didn't change.  Returns -1 if adding the bottom
690  *    device caused misalignment.
691  */
692 int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
693 		      sector_t start)
694 {
695 	struct request_queue *bq = bdev_get_queue(bdev);
696 
697 	start += get_start_sect(bdev);
698 
699 	return blk_stack_limits(t, &bq->limits, start);
700 }
701 EXPORT_SYMBOL(bdev_stack_limits);
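
/*
 * Illustrative sketch of the iterative flow described above, as a DM/MD-style
 * driver might use it: start from the permissive stacking defaults and fold
 * in every component device.  The argument layout is hypothetical; the limit
 * calls are the ones defined in this file.
 */
static int example_stack_all(struct request_queue *top_q,
			     struct block_device **members, int nr,
			     sector_t *starts)
{
	struct queue_limits lim;
	int i, ret = 0;

	blk_set_stacking_limits(&lim);

	for (i = 0; i < nr; i++) {
		/* fold in each component, honouring its data offset */
		if (bdev_stack_limits(&lim, members[i], starts[i]) < 0)
			ret = -1;	/* misaligned, but keep folding */
	}

	top_q->limits = lim;
	return ret;
}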
702 
703 /**
704  * disk_stack_limits - adjust queue limits for stacked drivers
705  * @disk:  MD/DM gendisk (top)
706  * @bdev:  the underlying block device (bottom)
707  * @offset:  offset to beginning of data within component device
708  *
709  * Description:
710  *    Merges the limits for a top level gendisk and a bottom level
711  *    block_device.
712  */
713 void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
714 		       sector_t offset)
715 {
716 	struct request_queue *t = disk->queue;
717 
718 	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
719 		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
720 
721 		disk_name(disk, 0, top);
722 		bdevname(bdev, bottom);
723 
724 		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
725 		       top, bottom);
726 	}
727 }
728 EXPORT_SYMBOL(disk_stack_limits);
729 
730 /**
731  * blk_queue_dma_pad - set pad mask
732  * @q:     the request queue for the device
733  * @mask:  pad mask
734  *
735  * Set dma pad mask.
736  *
737  * Appending a pad buffer to a request modifies the last entry of a
738  * scatter list such that it includes the pad buffer.
739  **/
740 void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
741 {
742 	q->dma_pad_mask = mask;
743 }
744 EXPORT_SYMBOL(blk_queue_dma_pad);
745 
746 /**
747  * blk_queue_update_dma_pad - update pad mask
748  * @q:     the request queue for the device
749  * @mask:  pad mask
750  *
751  * Update dma pad mask.
752  *
753  * Appending a pad buffer to a request modifies the last entry of a
754  * scatter list such that it includes the pad buffer.
755  **/
756 void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
757 {
758 	if (mask > q->dma_pad_mask)
759 		q->dma_pad_mask = mask;
760 }
761 EXPORT_SYMBOL(blk_queue_update_dma_pad);
762 
763 /**
764  * blk_queue_dma_drain - Set up a drain buffer for excess dma.
765  * @q:  the request queue for the device
766  * @dma_drain_needed: fn which returns non-zero if drain is necessary
767  * @buf:	physically contiguous buffer
768  * @size:	size of the buffer in bytes
769  *
770  * Some devices have excess DMA problems and can't simply discard (or
771  * zero fill) the unwanted piece of the transfer.  They have to have a
772  * real area of memory to transfer it into.  The use case for this is
773  * ATAPI devices in DMA mode.  If the packet command causes a transfer
774  * bigger than the transfer size, some HBAs will lock up if there
775  * aren't DMA elements to contain the excess transfer.  What this API
776  * does is adjust the queue so that the buf is always appended
777  * silently to the scatterlist.
778  *
779  * Note: This routine adjusts max_segments to make room for appending
780  * the drain buffer.  If you call blk_queue_max_segments() after calling
781  * this routine, you must set the limit to one fewer than your device
782  * can support; otherwise there won't be room for the drain buffer.
783  */
784 int blk_queue_dma_drain(struct request_queue *q,
785 			       dma_drain_needed_fn *dma_drain_needed,
786 			       void *buf, unsigned int size)
787 {
788 	if (queue_max_segments(q) < 2)
789 		return -EINVAL;
790 	/* make room for appending the drain */
791 	blk_queue_max_segments(q, queue_max_segments(q) - 1);
792 	q->dma_drain_needed = dma_drain_needed;
793 	q->dma_drain_buffer = buf;
794 	q->dma_drain_size = size;
795 
796 	return 0;
797 }
798 EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
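
/*
 * Illustrative sketch: roughly how an ATAPI-style driver could wire up a
 * drain buffer.  example_is_packet_command() and the 4096-byte size are
 * hypothetical simplifications of what e.g. libata does; kmalloc() assumes
 * <linux/slab.h> is available.
 */
static int example_drain_needed(struct request *rq)
{
	return example_is_packet_command(rq);	/* hypothetical predicate */
}

static int example_setup_drain(struct request_queue *q)
{
	void *buf = kmalloc(4096, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* this reserves one segment, as the note above explains */
	return blk_queue_dma_drain(q, example_drain_needed, buf, 4096);
}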
799 
800 /**
801  * blk_queue_segment_boundary - set boundary rules for segment merging
802  * @q:  the request queue for the device
803  * @mask:  the memory boundary mask
804  **/
805 void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
806 {
807 	if (mask < PAGE_SIZE - 1) {
808 		mask = PAGE_SIZE - 1;
809 		printk(KERN_INFO "%s: set to minimum %lx\n",
810 		       __func__, mask);
811 	}
812 
813 	q->limits.seg_boundary_mask = mask;
814 }
815 EXPORT_SYMBOL(blk_queue_segment_boundary);
816 
817 /**
818  * blk_queue_virt_boundary - set boundary rules for bio merging
819  * @q:  the request queue for the device
820  * @mask:  the memory boundary mask
821  **/
822 void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
823 {
824 	q->limits.virt_boundary_mask = mask;
825 }
826 EXPORT_SYMBOL(blk_queue_virt_boundary);
827 
828 /**
829  * blk_queue_dma_alignment - set dma length and memory alignment
830  * @q:     the request queue for the device
831  * @mask:  alignment mask
832  *
833  * Description:
834  *    Set required memory and length alignment for direct dma transactions.
835  *    This is used when building direct io requests for the queue.
836  *
837  **/
838 void blk_queue_dma_alignment(struct request_queue *q, int mask)
839 {
840 	q->dma_alignment = mask;
841 }
842 EXPORT_SYMBOL(blk_queue_dma_alignment);
843 
844 /**
845  * blk_queue_update_dma_alignment - update dma length and memory alignment
846  * @q:     the request queue for the device
847  * @mask:  alignment mask
848  *
849  * Description:
850  *    Update required memory and length alignment for direct dma transactions.
851  *    If the requested alignment is larger than the current alignment, then
852  *    the current queue alignment is updated to the new value; otherwise it
853  *    is left alone.  The design of this is to allow multiple objects
854  *    (driver, device, transport, etc.) to set their respective
855  *    alignments without having them interfere.
856  *
857  **/
858 void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
859 {
860 	BUG_ON(mask > PAGE_SIZE);
861 
862 	if (mask > q->dma_alignment)
863 		q->dma_alignment = mask;
864 }
865 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
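
/*
 * Illustrative sketch: two layers raising the alignment independently, as the
 * description above intends.  The masks (3 and 511) are only examples.
 */
static void example_set_alignments(struct request_queue *q)
{
	blk_queue_update_dma_alignment(q, 3);	/* transport: 4-byte alignment */
	blk_queue_update_dma_alignment(q, 511);	/* device: 512-byte alignment */
	/* the stricter 511 mask is kept; a later, smaller mask would be ignored */
}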
866 
867 void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
868 {
869 	spin_lock_irq(q->queue_lock);
870 	if (queueable)
871 		clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
872 	else
873 		set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
874 	spin_unlock_irq(q->queue_lock);
875 }
876 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
877 
878 /**
879  * blk_set_queue_depth - tell the block layer about the device queue depth
880  * @q:		the request queue for the device
881  * @depth:		queue depth
882  *
883  */
884 void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
885 {
886 	q->queue_depth = depth;
887 	wbt_set_queue_depth(q->rq_wb, depth);
888 }
889 EXPORT_SYMBOL(blk_set_queue_depth);
890 
891 /**
892  * blk_queue_write_cache - configure queue's write cache
893  * @q:		the request queue for the device
894  * @wc:		write back cache on or off
895  * @fua:	device supports FUA writes, if true
896  *
897  * Tell the block layer about the write cache of @q.
898  */
899 void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
900 {
901 	spin_lock_irq(q->queue_lock);
902 	if (wc)
903 		queue_flag_set(QUEUE_FLAG_WC, q);
904 	else
905 		queue_flag_clear(QUEUE_FLAG_WC, q);
906 	if (fua)
907 		queue_flag_set(QUEUE_FLAG_FUA, q);
908 	else
909 		queue_flag_clear(QUEUE_FLAG_FUA, q);
910 	spin_unlock_irq(q->queue_lock);
911 
912 	wbt_set_write_cache(q->rq_wb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
913 }
914 EXPORT_SYMBOL_GPL(blk_queue_write_cache);
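
/*
 * Illustrative sketch: a driver translating what the hardware reports into
 * this call.  The parameter names are hypothetical; the effect is to enable
 * REQ_PREFLUSH/REQ_FUA handling as appropriate.
 */
static void example_advertise_cache(struct request_queue *q,
				    bool volatile_write_cache, bool has_fua)
{
	blk_queue_write_cache(q, volatile_write_cache, has_fua);
}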
915 
916 static int __init blk_settings_init(void)
917 {
918 	blk_max_low_pfn = max_low_pfn - 1;
919 	blk_max_pfn = max_pfn - 1;
920 	return 0;
921 }
922 subsys_initcall(blk_settings_init);
923