References to t in block/blk-settings.c. In blk_stack_limits(), t is the top (stacked) device's queue_limits, into which each bottom device's limits b are folded; in disk_stack_limits(), t is the stacking disk's request_queue.

int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
        t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
                                          b->max_write_zeroes_sectors);
        t->max_zone_append_sectors = min(t->max_zone_append_sectors,
                                         b->max_zone_append_sectors);
        t->bounce = max(t->bounce, b->bounce);
        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);
        t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
                                             b->virt_boundary_mask);
        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_discard_segments = min_not_zero(t->max_discard_segments,
                                               b->max_discard_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                                                 b->max_integrity_segments);
        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        t->misaligned |= b->misaligned;

        alignment = queue_limit_alignment_offset(b, start);

        /* Bottom device has different alignment.  Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {
                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) % min(top, bottom)) {
                        t->misaligned = 1;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);
        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);
        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
        t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

        /* Set non-power-of-2 compatible chunk_sectors boundary */
        if (b->chunk_sectors)
                t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
                ret = -1;
        }
        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->misaligned = 1;
                ret = -1;
        }
        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->misaligned = 1;
                ret = -1;
        }
        /* chunk_sectors a multiple of the physical block size? */
        if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
                t->chunk_sectors = 0;
                t->misaligned = 1;
                ret = -1;
        }

        t->raid_partial_stripes_expensive =
                max(t->raid_partial_stripes_expensive,
                    b->raid_partial_stripes_expensive);

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
                % max(t->physical_block_size, t->io_min);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
                ret = -1;
        }

        t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
        t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
        t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                if (t->discard_granularity != 0 &&
                    t->discard_alignment != alignment) {
                        top = t->discard_granularity + t->discard_alignment;
                        bottom = b->discard_granularity + alignment;

                        /* Verify that top and bottom intervals line up */
                        if ((max(top, bottom) % min(top, bottom)) != 0)
                                t->discard_misaligned = 1;
                }
                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
                                                         b->max_hw_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
                t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }
        t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
                                                   b->max_secure_erase_sectors);
        t->zone_write_granularity = max(t->zone_write_granularity,
                                        b->zone_write_granularity);
        t->zoned = max(t->zoned, b->zoned);
        if (!t->zoned) {
                t->zone_write_granularity = 0;
                t->max_zone_append_sectors = 0;
        }
        return ret;
}
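The combination rules above reduce to three pieces of arithmetic: min_not_zero() takes the stricter of two limits while treating 0 as "unset", lcm_not_zero() grows a value (io_opt, alignment and discard offsets) until it suits every component, and gcd() shrinks chunk_sectors to a boundary every component shares. Below is a minimal userspace sketch of that arithmetic; the helper implementations and the sample values are illustrative stand-ins, not the kernel's own definitions.

#include <stdio.h>

/* 0 means "no limit set", so prefer the non-zero argument */
static unsigned int min_not_zero(unsigned int a, unsigned int b)
{
        if (!a)
                return b;
        if (!b)
                return a;
        return a < b ? a : b;
}

static unsigned int gcd(unsigned int a, unsigned int b)
{
        while (b) {
                unsigned int r = a % b;
                a = b;
                b = r;
        }
        return a;
}

/* 0 means "no preference", so defer to the other argument */
static unsigned int lcm_not_zero(unsigned int a, unsigned int b)
{
        if (!a || !b)
                return a ? a : b;
        return a / gcd(a, b) * b;
}

int main(void)
{
        /* top = limits stacked so far, bottom = device being added */
        unsigned int top_io_opt = 65536, bottom_io_opt = 49152;
        unsigned int top_max_sectors = 0, bottom_max_sectors = 2048;
        unsigned int top_chunk = 256, bottom_chunk = 192;

        /* io_opt must suit both devices: take the least common multiple */
        printf("stacked io_opt:        %u\n",
               lcm_not_zero(top_io_opt, bottom_io_opt));
        /* max_sectors: the smallest real limit wins, 0 defers */
        printf("stacked max_sectors:   %u\n",
               min_not_zero(top_max_sectors, bottom_max_sectors));
        /* chunk_sectors: gcd, so a stacked chunk never straddles a
         * component device's boundary */
        printf("stacked chunk_sectors: %u\n",
               gcd(top_chunk, bottom_chunk));
        return 0;
}

This prints 196608, 2048 and 64. disk_stack_limits(), below, is the wrapper that feeds an entire block device's queue limits through blk_stack_limits().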
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                       sector_t offset)
{
        struct request_queue *t = disk->queue;

        if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
                        get_start_sect(bdev) + (offset >> 9)) < 0)
                pr_notice("%s: Warning: Device %pg is misaligned\n",
                          disk->disk_name, bdev);

        disk_update_readahead(disk);
}
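Both routines exist for stacking drivers: MD and device-mapper start from the permissive defaults set up by blk_set_stacking_limits() and then call blk_stack_limits() or disk_stack_limits() once per component (bottom) device, folding each device's limits into the top-level queue. A return of -1 means no compatible alignment exists; the top limits then carry the misaligned flag, alignment_offset is left undefined, and disk_stack_limits() reports it with the "Device ... is misaligned" notice shown above.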