Lines matching refs:limits — cross-reference hits for the identifier "limits" in the Linux block layer's queue-limits setup code (block/blk-settings.c). The leading number on each line is the line number in that source file.

100 q->limits.bounce = bounce; in blk_queue_bounce_limit()
125 struct queue_limits *limits = &q->limits; in blk_queue_max_hw_sectors() local
135 limits->logical_block_size >> SECTOR_SHIFT); in blk_queue_max_hw_sectors()
136 limits->max_hw_sectors = max_hw_sectors; in blk_queue_max_hw_sectors()
138 max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors); in blk_queue_max_hw_sectors()
140 if (limits->max_user_sectors) in blk_queue_max_hw_sectors()
141 max_sectors = min(max_sectors, limits->max_user_sectors); in blk_queue_max_hw_sectors()
146 limits->logical_block_size >> SECTOR_SHIFT); in blk_queue_max_hw_sectors()
147 limits->max_sectors = max_sectors; in blk_queue_max_hw_sectors()
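The hits above span most of blk_queue_max_hw_sectors(). A sketch of how the full body plausibly fits together, for orientation: the PAGE_SIZE floor and the BLK_DEF_MAX_SECTORS fallback clamp are filled in from memory of mainline and may differ by kernel version (newer kernels use BLK_DEF_MAX_SECTORS_CAP, and some versions also update q->disk->bdi->io_pages at the end, omitted here).

void blk_queue_max_hw_sectors(struct request_queue *q,
			      unsigned int max_hw_sectors)
{
	struct queue_limits *limits = &q->limits;
	unsigned int max_sectors;

	/* Enforce a sane minimum of one page worth of sectors (assumed). */
	if ((max_hw_sectors << 9) < PAGE_SIZE) {
		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
		pr_info("%s: set to minimum %u\n", __func__, max_hw_sectors);
	}

	/* Keep the hardware limit aligned to the logical block size. */
	max_hw_sectors = round_down(max_hw_sectors,
				    limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_hw_sectors = max_hw_sectors;

	/* The soft limit honors the device cap, then any user override. */
	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
	if (limits->max_user_sectors)
		max_sectors = min(max_sectors, limits->max_user_sectors);
	else
		max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS);

	max_sectors = round_down(max_sectors,
				 limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_sectors = max_sectors;
}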
169 q->limits.chunk_sectors = chunk_sectors; in blk_queue_chunk_sectors()
181 q->limits.max_hw_discard_sectors = max_discard_sectors; in blk_queue_max_discard_sectors()
182 q->limits.max_discard_sectors = max_discard_sectors; in blk_queue_max_discard_sectors()
194 q->limits.max_secure_erase_sectors = max_sectors; in blk_queue_max_secure_erase_sectors()
207 q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors; in blk_queue_max_write_zeroes_sectors()
224 max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors); in blk_queue_max_zone_append_sectors()
225 max_sectors = min(q->limits.chunk_sectors, max_sectors); in blk_queue_max_zone_append_sectors()
234 q->limits.max_zone_append_sectors = max_sectors; in blk_queue_max_zone_append_sectors()
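blk_queue_max_zone_append_sectors() clamps the requested value against both the per-command hardware limit and the zone size (chunk_sectors), since a zone append can never cross a zone boundary. A sketch; the zoned-queue guard and the warning on a zero result are assumed from mainline:

void blk_queue_max_zone_append_sectors(struct request_queue *q,
				       unsigned int max_zone_append_sectors)
{
	unsigned int max_sectors;

	/* Only meaningful on zoned queues (assumed guard). */
	if (WARN_ON(!blk_queue_is_zoned(q)))
		return;

	/* An append must fit in one command and inside one zone. */
	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
	max_sectors = min(q->limits.chunk_sectors, max_sectors);

	/* A zero limit here points at a driver bug (assumed warning). */
	WARN_ON(!max_sectors);

	q->limits.max_zone_append_sectors = max_sectors;
}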
255 q->limits.max_segments = max_segments; in blk_queue_max_segments()
271 q->limits.max_discard_segments = max_segments; in blk_queue_max_discard_segments()
293 WARN_ON_ONCE(q->limits.virt_boundary_mask); in blk_queue_max_segment_size()
295 q->limits.max_segment_size = max_size; in blk_queue_max_segment_size()
311 struct queue_limits *limits = &q->limits; in blk_queue_logical_block_size() local
313 limits->logical_block_size = size; in blk_queue_logical_block_size()
315 if (limits->physical_block_size < size) in blk_queue_logical_block_size()
316 limits->physical_block_size = size; in blk_queue_logical_block_size()
318 if (limits->io_min < limits->physical_block_size) in blk_queue_logical_block_size()
319 limits->io_min = limits->physical_block_size; in blk_queue_logical_block_size()
321 limits->max_hw_sectors = in blk_queue_logical_block_size()
322 round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT); in blk_queue_logical_block_size()
323 limits->max_sectors = in blk_queue_logical_block_size()
324 round_down(limits->max_sectors, size >> SECTOR_SHIFT); in blk_queue_logical_block_size()
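blk_queue_logical_block_size() cascades the new size into every dependent limit: the physical block size and io_min can never be smaller than one logical block, and the sector limits are rounded down to a whole number of logical blocks. The matched lines cover essentially the whole body; assembled:

void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
	struct queue_limits *limits = &q->limits;

	limits->logical_block_size = size;

	/* A physical block is at least one logical block. */
	if (limits->physical_block_size < size)
		limits->physical_block_size = size;

	/* The minimum I/O size is at least one physical block. */
	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;

	/* Transfer limits must be a whole number of logical blocks. */
	limits->max_hw_sectors =
		round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
	limits->max_sectors =
		round_down(limits->max_sectors, size >> SECTOR_SHIFT);
}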
340 q->limits.physical_block_size = size; in blk_queue_physical_block_size()
342 if (q->limits.physical_block_size < q->limits.logical_block_size) in blk_queue_physical_block_size()
343 q->limits.physical_block_size = q->limits.logical_block_size; in blk_queue_physical_block_size()
345 if (q->limits.io_min < q->limits.physical_block_size) in blk_queue_physical_block_size()
346 q->limits.io_min = q->limits.physical_block_size; in blk_queue_physical_block_size()
365 q->limits.zone_write_granularity = size; in blk_queue_zone_write_granularity()
367 if (q->limits.zone_write_granularity < q->limits.logical_block_size) in blk_queue_zone_write_granularity()
368 q->limits.zone_write_granularity = q->limits.logical_block_size; in blk_queue_zone_write_granularity()
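The same floor-to-logical-block pattern guards the zone write granularity. A sketch; the zoned-queue check at the top is assumed from mainline:

void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size)
{
	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return;

	q->limits.zone_write_granularity = size;

	/* Writes can never be finer-grained than a logical block. */
	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
		q->limits.zone_write_granularity =
			q->limits.logical_block_size;
}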
385 q->limits.alignment_offset = in blk_queue_alignment_offset()
386 offset & (q->limits.physical_block_size - 1); in blk_queue_alignment_offset()
387 q->limits.misaligned = 0; in blk_queue_alignment_offset()
416 void blk_limits_io_min(struct queue_limits *limits, unsigned int min) in blk_limits_io_min() argument
418 limits->io_min = min; in blk_limits_io_min()
420 if (limits->io_min < limits->logical_block_size) in blk_limits_io_min()
421 limits->io_min = limits->logical_block_size; in blk_limits_io_min()
423 if (limits->io_min < limits->physical_block_size) in blk_limits_io_min()
424 limits->io_min = limits->physical_block_size; in blk_limits_io_min()
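The matched lines are the complete body of blk_limits_io_min(): the caller's hint is accepted, then floored to the logical and physical block sizes so the reported minimum I/O is never smaller than an addressable unit. blk_queue_io_min() (line 444 below) is just a wrapper that passes &q->limits:

void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	/* io_min can never drop below one logical block... */
	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	/* ...nor below one physical block. */
	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}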
444 blk_limits_io_min(&q->limits, min); in blk_queue_io_min()
461 void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt) in blk_limits_io_opt() argument
463 limits->io_opt = opt; in blk_limits_io_opt()
482 blk_limits_io_opt(&q->limits, opt); in blk_queue_io_opt()
712 if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits, in disk_stack_limits()
751 q->limits.seg_boundary_mask = mask; in blk_queue_segment_boundary()
762 q->limits.virt_boundary_mask = mask; in blk_queue_virt_boundary()
771 q->limits.max_segment_size = UINT_MAX; in blk_queue_virt_boundary()
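Setting a virtual boundary forces max_segment_size to UINT_MAX, which is why blk_queue_max_segment_size() (line 293 above) warns if a boundary mask is already set: the two limits are mutually exclusive. A sketch of blk_queue_virt_boundary() as it appears in mainline:

void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
	q->limits.virt_boundary_mask = mask;

	/*
	 * Devices that require a virtual boundary do not support
	 * scatter/gather I/O natively, but depend on descriptors set
	 * up by the driver; splitting on the boundary replaces the
	 * segment size limit, so make the latter unlimited.
	 */
	if (mask)
		q->limits.max_segment_size = UINT_MAX;
}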
787 q->limits.dma_alignment = mask; in blk_queue_dma_alignment()
809 if (mask > q->limits.dma_alignment) in blk_queue_update_dma_alignment()
810 q->limits.dma_alignment = mask; in blk_queue_update_dma_alignment()
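Unlike blk_queue_dma_alignment(), which sets the mask outright, the update variant only ever tightens it, so a stacked driver cannot relax an alignment requirement imposed by a lower layer. A sketch; the PAGE_SIZE sanity check is assumed from mainline:

void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	/* Only grow the mask; never loosen an existing requirement. */
	if (mask > q->limits.dma_alignment)
		q->limits.dma_alignment = mask;
}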
925 unsigned int old_model = q->limits.zoned; in disk_set_zoned()
955 q->limits.zoned = model; in disk_set_zoned()
973 if (q->limits.misaligned) in bdev_alignment_offset()
976 return queue_limit_alignment_offset(&q->limits, in bdev_alignment_offset()
978 return q->limits.alignment_offset; in bdev_alignment_offset()
987 return queue_limit_discard_alignment(&q->limits, in bdev_discard_alignment()
989 return q->limits.discard_alignment; in bdev_discard_alignment()
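Both bdev helpers follow the same shape: a misaligned queue reports -1 (alignment offset only), a partition recomputes the offset relative to its own start sector, and a whole disk returns the queue-wide value. A sketch of bdev_alignment_offset(); the bdev_is_partition() check and bd_start_sect argument are assumed from mainline, and bdev_discard_alignment() mirrors this without the misaligned check:

int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
						    bdev->bd_start_sect);
	return q->limits.alignment_offset;
}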