Lines matching references to q in block/blk-settings.c

22 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)  in blk_queue_rq_timeout()  argument
24 q->rq_timeout = timeout; in blk_queue_rq_timeout()
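
A minimal sketch of a caller, assuming a hypothetical driver setup helper; the 30-second value is an assumption, and the timeout is passed in jiffies:

        static void example_set_timeout(struct request_queue *q)
        {
                /* abort requests that run longer than 30 s (value assumed) */
                blk_queue_rq_timeout(q, 30 * HZ);       /* stored in q->rq_timeout (line 24) */
        }
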
98 void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce) in blk_queue_bounce_limit() argument
100 q->limits.bounce = bounce; in blk_queue_bounce_limit()
123 void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) in blk_queue_max_hw_sectors() argument
125 struct queue_limits *limits = &q->limits; in blk_queue_max_hw_sectors()
149 if (!q->disk) in blk_queue_max_hw_sectors()
151 q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9); in blk_queue_max_hw_sectors()
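
Lines 149-151 also resize the attached disk's readahead window (bdi->io_pages) once a gendisk exists. A sketch of a hypothetical HBA driver setting these transfer limits (both values are illustrative assumptions):

        static void example_set_transfer_limits(struct request_queue *q)
        {
                /* hypothetical controller: cannot DMA to highmem pages and
                 * moves at most 1 MiB (2048 sectors of 512 bytes) per request */
                blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
                blk_queue_max_hw_sectors(q, 2048);
        }
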
167 void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors) in blk_queue_chunk_sectors() argument
169 q->limits.chunk_sectors = chunk_sectors; in blk_queue_chunk_sectors()
178 void blk_queue_max_discard_sectors(struct request_queue *q, in blk_queue_max_discard_sectors() argument
181 q->limits.max_hw_discard_sectors = max_discard_sectors; in blk_queue_max_discard_sectors()
182 q->limits.max_discard_sectors = max_discard_sectors; in blk_queue_max_discard_sectors()
191 void blk_queue_max_secure_erase_sectors(struct request_queue *q, in blk_queue_max_secure_erase_sectors() argument
194 q->limits.max_secure_erase_sectors = max_sectors; in blk_queue_max_secure_erase_sectors()
204 void blk_queue_max_write_zeroes_sectors(struct request_queue *q, in blk_queue_max_write_zeroes_sectors() argument
207 q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors; in blk_queue_max_write_zeroes_sectors()
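
Lines 181-182 show that one call sets both the hardware and the user-visible discard limit. A sketch of a hypothetical device that supports discard and write zeroes but no secure erase (all sector counts assumed):

        static void example_enable_discard(struct request_queue *q)
        {
                /* up to 8 MiB (16384 sectors) per discard or write-zeroes command */
                blk_queue_max_discard_sectors(q, 16384);
                blk_queue_max_write_zeroes_sectors(q, 16384);
                blk_queue_max_secure_erase_sectors(q, 0);
        }
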
216 void blk_queue_max_zone_append_sectors(struct request_queue *q, in blk_queue_max_zone_append_sectors() argument
221 if (WARN_ON(!blk_queue_is_zoned(q))) in blk_queue_max_zone_append_sectors()
224 max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors); in blk_queue_max_zone_append_sectors()
225 max_sectors = min(q->limits.chunk_sectors, max_sectors); in blk_queue_max_zone_append_sectors()
234 q->limits.max_zone_append_sectors = max_sectors; in blk_queue_max_zone_append_sectors()
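
Line 221 warns if the queue is not zoned, and lines 224-225 clamp the requested value to max_hw_sectors and to chunk_sectors (the zone size). A sketch, assuming the queue has already been marked zoned; the 8192-sector request is illustrative:

        static void example_set_zone_append(struct request_queue *q)
        {
                /* ask for 4 MiB (8192 sectors); the stored limit may end up
                 * smaller because of the clamping on lines 224-225 */
                blk_queue_max_zone_append_sectors(q, 8192);
        }
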
247 void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments) in blk_queue_max_segments() argument
255 q->limits.max_segments = max_segments; in blk_queue_max_segments()
268 void blk_queue_max_discard_segments(struct request_queue *q, in blk_queue_max_discard_segments() argument
271 q->limits.max_discard_segments = max_segments; in blk_queue_max_discard_segments()
284 void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) in blk_queue_max_segment_size() argument
293 WARN_ON_ONCE(q->limits.virt_boundary_mask); in blk_queue_max_segment_size()
295 q->limits.max_segment_size = max_size; in blk_queue_max_segment_size()
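
Line 293 warns if a virt boundary is already set, since a virt boundary implies an effectively unlimited segment size (see line 771 below). A sketch of a hypothetical scatter-gather configuration (all limits assumed):

        static void example_set_sg_limits(struct request_queue *q)
        {
                blk_queue_max_segments(q, 128);            /* 128 data segments per request */
                blk_queue_max_discard_segments(q, 1);      /* one range per discard */
                blk_queue_max_segment_size(q, 64 * 1024);  /* 64 KiB per segment */
        }
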
309 void blk_queue_logical_block_size(struct request_queue *q, unsigned int size) in blk_queue_logical_block_size() argument
311 struct queue_limits *limits = &q->limits; in blk_queue_logical_block_size()
338 void blk_queue_physical_block_size(struct request_queue *q, unsigned int size) in blk_queue_physical_block_size() argument
340 q->limits.physical_block_size = size; in blk_queue_physical_block_size()
342 if (q->limits.physical_block_size < q->limits.logical_block_size) in blk_queue_physical_block_size()
343 q->limits.physical_block_size = q->limits.logical_block_size; in blk_queue_physical_block_size()
345 if (q->limits.io_min < q->limits.physical_block_size) in blk_queue_physical_block_size()
346 q->limits.io_min = q->limits.physical_block_size; in blk_queue_physical_block_size()
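
Lines 342-346 keep the physical block size at least as large as the logical block size and raise io_min to the physical block size. A sketch for a hypothetical 512e drive (512-byte LBAs on 4 KiB physical sectors):

        static void example_set_block_sizes(struct request_queue *q)
        {
                blk_queue_logical_block_size(q, 512);
                blk_queue_physical_block_size(q, 4096);    /* io_min is bumped to 4096 too */
        }
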
359 void blk_queue_zone_write_granularity(struct request_queue *q, in blk_queue_zone_write_granularity() argument
362 if (WARN_ON_ONCE(!blk_queue_is_zoned(q))) in blk_queue_zone_write_granularity()
365 q->limits.zone_write_granularity = size; in blk_queue_zone_write_granularity()
367 if (q->limits.zone_write_granularity < q->limits.logical_block_size) in blk_queue_zone_write_granularity()
368 q->limits.zone_write_granularity = q->limits.logical_block_size; in blk_queue_zone_write_granularity()
383 void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset) in blk_queue_alignment_offset() argument
385 q->limits.alignment_offset = in blk_queue_alignment_offset()
386 offset & (q->limits.physical_block_size - 1); in blk_queue_alignment_offset()
387 q->limits.misaligned = 0; in blk_queue_alignment_offset()
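
Only the offset within one physical block is kept (line 386), and the misaligned flag is cleared. A sketch, assuming a hypothetical legacy-partitioned 4 KiB-sector drive whose LBA 0 sits 3584 bytes into a physical sector:

        static void example_set_alignment(struct request_queue *q)
        {
                blk_queue_alignment_offset(q, 3584);       /* stored as 3584 & (4096 - 1) */
        }
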
393 struct request_queue *q = disk->queue; in disk_update_readahead() local
400 max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES); in disk_update_readahead()
401 disk->bdi->io_pages = queue_max_sectors(q) >> (PAGE_SHIFT - 9); in disk_update_readahead()
442 void blk_queue_io_min(struct request_queue *q, unsigned int min) in blk_queue_io_min() argument
444 blk_limits_io_min(&q->limits, min); in blk_queue_io_min()
480 void blk_queue_io_opt(struct request_queue *q, unsigned int opt) in blk_queue_io_opt() argument
482 blk_limits_io_opt(&q->limits, opt); in blk_queue_io_opt()
483 if (!q->disk) in blk_queue_io_opt()
485 q->disk->bdi->ra_pages = in blk_queue_io_opt()
486 max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES); in blk_queue_io_opt()
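
blk_queue_io_opt() mirrors disk_update_readahead() (lines 393-401): once a gendisk is attached, readahead is scaled to twice the optimal I/O size. A sketch with assumed hint values; both arguments are in bytes:

        static void example_set_io_hints(struct request_queue *q)
        {
                blk_queue_io_min(q, 4096);                 /* preferred minimum I/O size */
                blk_queue_io_opt(q, 1024 * 1024);          /* optimal I/O size, e.g. a stripe */
        }
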
731 void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask) in blk_queue_update_dma_pad() argument
733 if (mask > q->dma_pad_mask) in blk_queue_update_dma_pad()
734 q->dma_pad_mask = mask; in blk_queue_update_dma_pad()
743 void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) in blk_queue_segment_boundary() argument
751 q->limits.seg_boundary_mask = mask; in blk_queue_segment_boundary()
760 void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask) in blk_queue_virt_boundary() argument
762 q->limits.virt_boundary_mask = mask; in blk_queue_virt_boundary()
771 q->limits.max_segment_size = UINT_MAX; in blk_queue_virt_boundary()
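
Setting a virt boundary lifts max_segment_size to UINT_MAX (line 771), which is why line 293 forbids mixing it with an explicit segment size. A sketch of a hypothetical controller with NVMe-style page boundaries (mask values assumed):

        static void example_set_boundaries(struct request_queue *q)
        {
                blk_queue_segment_boundary(q, SZ_64K - 1); /* no segment may cross 64 KiB */
                blk_queue_virt_boundary(q, PAGE_SIZE - 1); /* SG elements may not leave gaps within a page */
        }
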
785 void blk_queue_dma_alignment(struct request_queue *q, int mask) in blk_queue_dma_alignment() argument
787 q->limits.dma_alignment = mask; in blk_queue_dma_alignment()
805 void blk_queue_update_dma_alignment(struct request_queue *q, int mask) in blk_queue_update_dma_alignment() argument
809 if (mask > q->limits.dma_alignment) in blk_queue_update_dma_alignment()
810 q->limits.dma_alignment = mask; in blk_queue_update_dma_alignment()
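
The *_update_* variants (lines 733-734 and 809-810) only ever grow an existing mask, so a later caller cannot weaken a stricter requirement. A sketch with assumed masks, covering the dma_pad setter from lines 731-734 as well:

        static void example_set_dma_constraints(struct request_queue *q)
        {
                blk_queue_dma_alignment(q, 3);             /* user buffers 4-byte aligned */
                blk_queue_update_dma_pad(q, 3);            /* pad transfer lengths to 4 bytes */
        }
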
820 void blk_set_queue_depth(struct request_queue *q, unsigned int depth) in blk_set_queue_depth() argument
822 q->queue_depth = depth; in blk_set_queue_depth()
823 rq_qos_queue_depth_changed(q); in blk_set_queue_depth()
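
Line 823 notifies the rq-qos policies (for example writeback throttling) that the depth changed. A sketch with an assumed depth of 64 outstanding commands:

        static void example_set_depth(struct request_queue *q)
        {
                blk_set_queue_depth(q, 64);
        }
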
835 void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua) in blk_queue_write_cache() argument
838 blk_queue_flag_set(QUEUE_FLAG_HW_WC, q); in blk_queue_write_cache()
839 blk_queue_flag_set(QUEUE_FLAG_WC, q); in blk_queue_write_cache()
841 blk_queue_flag_clear(QUEUE_FLAG_HW_WC, q); in blk_queue_write_cache()
842 blk_queue_flag_clear(QUEUE_FLAG_WC, q); in blk_queue_write_cache()
845 blk_queue_flag_set(QUEUE_FLAG_FUA, q); in blk_queue_write_cache()
847 blk_queue_flag_clear(QUEUE_FLAG_FUA, q); in blk_queue_write_cache()
849 wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags)); in blk_queue_write_cache()
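
Lines 838-849 translate the two booleans into QUEUE_FLAG_HW_WC/QUEUE_FLAG_WC and QUEUE_FLAG_FUA and inform writeback throttling. A sketch for a hypothetical device that reports whether it has a volatile write cache:

        static void example_set_cache_mode(struct request_queue *q, bool vwc)
        {
                /* advertise a write-back cache and FUA only when the device
                 * reports a volatile cache; both false means write-through */
                blk_queue_write_cache(q, vwc, vwc);
        }
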
862 void blk_queue_required_elevator_features(struct request_queue *q, in blk_queue_required_elevator_features() argument
865 q->required_elevator_features = features; in blk_queue_required_elevator_features()
876 bool blk_queue_can_use_dma_map_merging(struct request_queue *q, in blk_queue_can_use_dma_map_merging() argument
885 blk_queue_virt_boundary(q, boundary); in blk_queue_can_use_dma_map_merging()
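
On success, line 885 installs the DMA layer's merge boundary as the queue's virt boundary so the IOMMU can merge segments. A sketch, assuming a hypothetical driver that already DMA-maps against struct device *dev:

        static void example_try_iommu_merging(struct request_queue *q, struct device *dev)
        {
                if (blk_queue_can_use_dma_map_merging(q, dev))
                        pr_info("IOMMU segment merging enabled\n");
        }
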
924 struct request_queue *q = disk->queue; in disk_set_zoned() local
925 unsigned int old_model = q->limits.zoned; in disk_set_zoned()
955 q->limits.zoned = model; in disk_set_zoned()
961 blk_queue_zone_write_granularity(q, in disk_set_zoned()
962 queue_logical_block_size(q)); in disk_set_zoned()
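
In the kernel version this listing appears to come from, disk_set_zoned() takes an explicit zoned model, and lines 961-962 fall back to the logical block size for the zone write granularity. A sketch of a hypothetical host-managed setup (zone size, append limit and elevator feature are assumptions):

        static void example_setup_zoned(struct gendisk *disk)
        {
                struct request_queue *q = disk->queue;

                disk_set_zoned(disk, BLK_ZONED_HM);
                blk_queue_chunk_sectors(q, 524288);        /* 256 MiB zones */
                blk_queue_max_zone_append_sectors(q, 8192); /* clamped, lines 224-225 */
                blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
        }
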
971 struct request_queue *q = bdev_get_queue(bdev); in bdev_alignment_offset() local
973 if (q->limits.misaligned) in bdev_alignment_offset()
976 return queue_limit_alignment_offset(&q->limits, in bdev_alignment_offset()
978 return q->limits.alignment_offset; in bdev_alignment_offset()
984 struct request_queue *q = bdev_get_queue(bdev); in bdev_discard_alignment() local
987 return queue_limit_discard_alignment(&q->limits, in bdev_discard_alignment()
989 return q->limits.discard_alignment; in bdev_discard_alignment()