blk-settings.c: diff 086fa5ff0854c676ec333760f4c0154b3b242616 (old, "-") to 8a78362c4eefc1deddbefe2c7f38aabbc2429d6b (new, "+")
 /*
  * Functions related to setting various queue properties from drivers
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>

--- 77 unchanged lines hidden ---

  *
  * Description:
  *    Returns a queue_limit struct to its default state.  Can be used by
  *    stacking drivers like DM that stage table swaps and reuse an
  *    existing device queue.
  */
 void blk_set_default_limits(struct queue_limits *lim)
 {
-	lim->max_phys_segments = MAX_PHYS_SEGMENTS;
-	lim->max_hw_segments = MAX_HW_SEGMENTS;
+	lim->max_segments = BLK_MAX_SEGMENTS;
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 	lim->max_sectors = BLK_DEF_MAX_SECTORS;
 	lim->max_hw_sectors = INT_MAX;
 	lim->max_discard_sectors = 0;
 	lim->discard_granularity = 0;
 	lim->discard_alignment = 0;
 	lim->discard_misaligned = 0;
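Editor's illustration, not part of the diff: a minimal sketch of how a stacking driver like DM might use blk_set_default_limits() after this change, resetting a scratch limits struct before folding in each bottom device via blk_stack_limits() (defined later in this file). The helper name and loop are hypothetical.

#include <linux/blkdev.h>

/* Hypothetical DM-style sketch: reset to defaults, then fold in the
 * limits of every component device with blk_stack_limits(). */
static void example_stack_all(struct queue_limits *result,
			      struct block_device **parts, int nparts)
{
	int i;

	blk_set_default_limits(result);		/* back to default state */

	for (i = 0; i < nparts; i++)
		blk_stack_limits(result,
				 &bdev_get_queue(parts[i])->limits,
				 0 /* hypothetical offset of 0 */);
}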

--- 143 unchanged lines hidden ---

 void blk_queue_max_discard_sectors(struct request_queue *q,
 				   unsigned int max_discard_sectors)
 {
 	q->limits.max_discard_sectors = max_discard_sectors;
 }
 EXPORT_SYMBOL(blk_queue_max_discard_sectors);

 /**
- * blk_queue_max_phys_segments - set max phys segments for a request for this queue
+ * blk_queue_max_segments - set max hw segments for a request for this queue
  * @q:  the request queue for the device
  * @max_segments:  max number of segments
  *
  * Description:
  *    Enables a low level driver to set an upper limit on the number of
- *    physical data segments in a request.  This would be the largest sized
- *    scatter list the driver could handle.
+ *    hw data segments in a request.
  **/
-void blk_queue_max_phys_segments(struct request_queue *q,
-				 unsigned short max_segments)
+void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
 {
 	if (!max_segments) {
 		max_segments = 1;
 		printk(KERN_INFO "%s: set to minimum %d\n",
 		       __func__, max_segments);
 	}

-	q->limits.max_phys_segments = max_segments;
+	q->limits.max_segments = max_segments;
 }
-EXPORT_SYMBOL(blk_queue_max_phys_segments);
+EXPORT_SYMBOL(blk_queue_max_segments);

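Editor's illustration, not part of the diff: after this consolidation a low level driver sets one segment limit instead of the old phys/hw pair. A hedged sketch; the function name and the values 128 and 65536 are hypothetical.

#include <linux/blkdev.h>

/* Hypothetical driver init: one combined segment limit replaces the
 * old blk_queue_max_phys_segments()/blk_queue_max_hw_segments() pair. */
static void example_set_limits(struct request_queue *q)
{
	blk_queue_max_segments(q, 128);		/* largest sg list we accept */
	blk_queue_max_segment_size(q, 65536);	/* largest single segment */
}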
-/**
- * blk_queue_max_hw_segments - set max hw segments for a request for this queue
- * @q:  the request queue for the device
- * @max_segments:  max number of segments
- *
- * Description:
- *    Enables a low level driver to set an upper limit on the number of
- *    hw data segments in a request.  This would be the largest number of
- *    address/length pairs the host adapter can actually give at once
- *    to the device.
- **/
-void blk_queue_max_hw_segments(struct request_queue *q,
-			       unsigned short max_segments)
-{
-	if (!max_segments) {
-		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n",
-		       __func__, max_segments);
-	}
-
-	q->limits.max_hw_segments = max_segments;
-}
-EXPORT_SYMBOL(blk_queue_max_hw_segments);
-
 /**
  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
  * @q:  the request queue for the device
  * @max_size:  max size of segment in bytes
  *
  * Description:
  *    Enables a low level driver to set an upper limit on the size of a
  *    coalesced segment
  **/

--- 216 unchanged lines hidden ---


 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
 	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
 					    b->seg_boundary_mask);

-	t->max_phys_segments = min_not_zero(t->max_phys_segments,
-					    b->max_phys_segments);
-
-	t->max_hw_segments = min_not_zero(t->max_hw_segments,
-					  b->max_hw_segments);
+	t->max_segments = min_not_zero(t->max_segments, b->max_segments);

 	t->max_segment_size = min_not_zero(t->max_segment_size,
 					   b->max_segment_size);

 	t->misaligned |= b->misaligned;

 	alignment = queue_limit_alignment_offset(b, start);

 	/* Bottom device has different alignment.  Check that it is

--- 186 unchanged lines hidden ---
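Editor's note on the stacking hunk above: min_not_zero() (from include/linux/kernel.h) returns the smaller of two values while treating zero as "unset", so an unset limit on either queue never clobbers a real one. A sketch of the semantics, using a hypothetical macro name to avoid clashing with the real one:

/* Same behavior as the kernel's min_not_zero(), sketched for clarity. */
#define example_min_not_zero(x, y) \
	((x) == 0 ? (y) : ((y) == 0 ? (x) : ((x) < (y) ? (x) : (y))))

/* e.g. top max_segments == 0 (unset) stacked over bottom 128 yields 128. */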

  * zero fill) the unwanted piece of the transfer.  They have to have a
  * real area of memory to transfer it into.  The use case for this is
  * ATAPI devices in DMA mode.  If the packet command causes a transfer
  * bigger than the transfer size some HBAs will lock up if there
  * aren't DMA elements to contain the excess transfer.  What this API
  * does is adjust the queue so that the buf is always appended
  * silently to the scatterlist.
  *
- * Note: This routine adjusts max_hw_segments to make room for
- * appending the drain buffer.  If you call
- * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
- * calling this routine, you must set the limit to one fewer than your
- * device can support otherwise there won't be room for the drain
- * buffer.
+ * Note: This routine adjusts max_hw_segments to make room for appending
+ * the drain buffer.  If you call blk_queue_max_segments() after calling
+ * this routine, you must set the limit to one fewer than your device
+ * can support otherwise there won't be room for the drain buffer.
  */
 int blk_queue_dma_drain(struct request_queue *q,
 			dma_drain_needed_fn *dma_drain_needed,
 			void *buf, unsigned int size)
 {
-	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
+	if (queue_max_segments(q) < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
-	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
-	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
+	blk_queue_max_segments(q, queue_max_segments(q) - 1);
 	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;

 	return 0;
 }
 EXPORT_SYMBOL_GPL(blk_queue_dma_drain);

--- 63 unchanged lines hidden ---
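Editor's illustration, not part of the diff: a hedged usage sketch of blk_queue_dma_drain() for an ATAPI-style device, as the docblock above describes. The drain size, the drain-needed policy, and all example_* names are hypothetical.

#include <linux/blkdev.h>
#include <linux/slab.h>

#define EXAMPLE_DRAIN_SIZE 256			/* hypothetical size */

/* Drain only packet (ATAPI) commands; the policy is hypothetical. */
static int example_drain_needed(struct request *rq)
{
	return rq->cmd_type == REQ_TYPE_BLOCK_PC;
}

static int example_setup_drain(struct request_queue *q)
{
	void *drain_buf = kmalloc(EXAMPLE_DRAIN_SIZE, GFP_KERNEL);

	if (!drain_buf)
		return -ENOMEM;

	/* Consumes one segment: blk_queue_dma_drain() lowers max_segments
	 * by one internally, exactly as the code above shows. */
	return blk_queue_dma_drain(q, example_drain_needed,
				   drain_buf, EXAMPLE_DRAIN_SIZE);
}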