Lines matching refs: chunk_sectors
922 if (!sector_div(tmp_sec, conf->chunk_sectors)) in stripe_add_to_batch_list()
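The stripe_add_to_batch_list() test above relies on the kernel's sector_div() helper, which divides a 64-bit sector count in place and returns the remainder, so a zero remainder means the stripe sits exactly on a chunk boundary. A minimal user-space sketch of that semantic; sector_div_emul() is a hypothetical stand-in for the kernel macro, not its real implementation:

	#include <stdint.h>
	#include <stdio.h>

	/* Emulates the kernel's sector_div(): the quotient is written back,
	 * the remainder is returned. */
	static unsigned int sector_div_emul(uint64_t *sector, unsigned int divisor)
	{
		unsigned int rem = (unsigned int)(*sector % divisor);

		*sector /= divisor;
		return rem;
	}

	int main(void)
	{
		uint64_t tmp_sec = 2048;           /* hypothetical stripe sector */
		unsigned int chunk_sectors = 1024; /* 512 KiB chunk */

		if (!sector_div_emul(&tmp_sec, chunk_sectors))
			printf("chunk-aligned, chunk index %llu\n",
			       (unsigned long long)tmp_sec);
		return 0;
	}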
3008 : conf->chunk_sectors; in raid5_compute_sector()
3204 : conf->chunk_sectors; in raid5_compute_blocknr()
3535 if (first + conf->chunk_sectors * (count - 1) != last) in stripe_bio_overlaps()
3622 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; in stripe_set_idx()
5381 unsigned int chunk_sectors; in in_chunk_boundary() local
5384 chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); in in_chunk_boundary()
5385 return chunk_sectors >= in in_chunk_boundary()
5386 ((sector & (chunk_sectors - 1)) + bio_sectors); in in_chunk_boundary()
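The in_chunk_boundary() lines above decide whether a request of bio_sectors starting at sector stays inside a single chunk; the masking only works because md chunk sizes are powers of two, and during a reshape the smaller of the old and new chunk size is used to stay safe. A standalone sketch of the same arithmetic, with fits_in_one_chunk() as an illustrative name:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* The offset of the request inside its chunk, plus its length,
	 * must not exceed the chunk size. chunk_sectors must be a power of two. */
	static bool fits_in_one_chunk(uint64_t sector, unsigned int bio_sectors,
				      unsigned int chunk_sectors)
	{
		return chunk_sectors >= ((sector & (chunk_sectors - 1)) + bio_sectors);
	}

	int main(void)
	{
		/* 128 KiB chunks = 256 sectors of 512 bytes */
		printf("%d\n", fits_in_one_chunk(250, 8, 256)); /* 0: crosses a boundary */
		printf("%d\n", fits_in_one_chunk(248, 8, 256)); /* 1: ends exactly on one */
		return 0;
	}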
5546 unsigned chunk_sects = mddev->chunk_sectors; in chunk_aligned_read()
5759 stripe_sectors = conf->chunk_sectors * in make_discard_request()
5765 logical_sector *= conf->chunk_sectors; in make_discard_request()
5766 last_sector *= conf->chunk_sectors; in make_discard_request()
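In make_discard_request(), chunk_sectors feeds into stripe_sectors (one chunk from each data disk), and the discard range is then trimmed so that only whole stripes are touched. A simplified user-space sketch of that trimming with assumed geometry; the variable names are illustrative and this is not the kernel's exact sequence:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int chunk_sectors = 1024;  /* 512 KiB chunks */
		unsigned int data_disks = 3;        /* e.g. a 4-disk RAID5 */
		uint64_t stripe_sectors = (uint64_t)chunk_sectors * data_disks;

		uint64_t start = 5000, end = 200000;  /* hypothetical discard range */
		uint64_t first_stripe = (start + stripe_sectors - 1) / stripe_sectors; /* round up */
		uint64_t last_stripe = end / stripe_sectors;                           /* round down */

		printf("whole stripes %llu..%llu are discarded\n",
		       (unsigned long long)first_stripe,
		       (unsigned long long)(last_stripe - 1));
		return 0;
	}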
5971 sectors_per_chunk = conf->chunk_sectors * in raid5_bitmap_sector()
6123 int sectors_per_chunk = conf->chunk_sectors; in raid5_bio_lowest_chunk_sector()
6331 reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors); in reshape_request()
7367 sectors &= ~((sector_t)conf->chunk_sectors - 1); in raid5_size()
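raid5_size() above, like the raid5_run() and raid5_resize() lines further down, rounds the usable sector count down to a whole number of chunks with a mask, which is valid because chunk_sectors is a power of two. A standalone illustration with assumed sizes:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t sectors = 1000003;        /* hypothetical per-device size */
		uint64_t chunk_sectors = 1024;     /* 512 KiB chunk */

		sectors &= ~(chunk_sectors - 1);   /* drop the partial trailing chunk */
		printf("usable sectors: %llu\n", (unsigned long long)sectors); /* 999424 */
		return 0;
	}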
7391 max(conf->chunk_sectors, in alloc_scratch_buffer()
7464 conf->scribble_sectors = max(conf->chunk_sectors, in raid5_alloc_percpu()
7652 conf->chunk_sectors = mddev->new_chunk_sectors; in setup_conf()
7699 conf->prev_chunk_sectors = mddev->chunk_sectors; in setup_conf()
7702 conf->prev_chunk_sectors = conf->chunk_sectors; in setup_conf()
7709 ((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4, in setup_conf()
7789 blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) * in raid5_set_io_opt()
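raid5_set_io_opt() advertises a full data stripe as the optimal I/O size: the chunk size in bytes multiplied by the number of data disks. A worked example with assumed geometry (a 4-disk RAID5 with 512 KiB chunks):

	#include <stdio.h>

	int main(void)
	{
		unsigned int chunk_sectors = 1024;              /* 512 KiB chunk */
		unsigned int raid_disks = 4, max_degraded = 1;  /* RAID5 over 4 disks */
		unsigned int data_disks = raid_disks - max_degraded;

		unsigned int io_opt = (chunk_sectors << 9) * data_disks;
		printf("io_opt = %u bytes\n", io_opt);          /* 1572864 = 1.5 MiB */
		return 0;
	}

Writes of that size can be issued as full stripes, avoiding the RAID5 read-modify-write penalty.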
7855 int chunk_sectors; in raid5_run() local
7878 chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors); in raid5_run()
7880 if (sector_div(here_new, chunk_sectors * new_data_disks)) { in raid5_run()
7885 reshape_offset = here_new * chunk_sectors; in raid5_run()
7888 sector_div(here_old, chunk_sectors * (old_disks-max_degraded)); in raid5_run()
7899 if (abs(min_offset_diff) >= mddev->chunk_sectors && in raid5_run()
7908 ? (here_new * chunk_sectors + min_offset_diff <= in raid5_run()
7909 here_old * chunk_sectors) in raid5_run()
7910 : (here_new * chunk_sectors >= in raid5_run()
7911 here_old * chunk_sectors + (-min_offset_diff))) { in raid5_run()
7922 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); in raid5_run()
8019 mddev->dev_sectors &= ~((sector_t)mddev->chunk_sectors - 1); in raid5_run()
8074 ((mddev->chunk_sectors << 9) / PAGE_SIZE); in raid5_run()
8076 chunk_size = mddev->chunk_sectors << 9; in raid5_run()
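The two raid5_run() lines above convert chunk_sectors from 512-byte sectors into pages and into bytes (the byte value is what the driver hands to the block queue as its minimum I/O size). Worked arithmetic with assumed values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int chunk_sectors = 1024;  /* 512 KiB chunk */
		unsigned int page_size = 4096;      /* typical PAGE_SIZE */

		unsigned int chunk_bytes = chunk_sectors << 9;           /* 524288 */
		unsigned int pages_per_chunk = chunk_bytes / page_size;  /* 128 */

		printf("chunk = %u bytes, %u pages\n", chunk_bytes, pages_per_chunk);
		return 0;
	}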
8155 conf->chunk_sectors / 2, mddev->layout); in raid5_status()
8425 sectors &= ~((sector_t)conf->chunk_sectors - 1); in raid5_resize()
8457 if (((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4 in check_stripe_cache()
8463 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) in check_stripe_cache()
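check_stripe_cache() refuses to start a reshape unless the stripe cache can hold roughly four chunks' worth of stripe heads, evaluated against both the current and the new chunk size. A worked example of that threshold with assumed values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int chunk_sectors = 1024;  /* 512 KiB chunk */
		unsigned int stripe_size = 4096;    /* RAID5_STRIPE_SIZE with 4 KiB pages */

		unsigned int needed = ((chunk_sectors << 9) / stripe_size) * 4;
		printf("min_nr_stripes must be at least %u\n", needed); /* 512 */
		return 0;
	}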
8478 mddev->new_chunk_sectors == mddev->chunk_sectors) in check_reshape()
8498 if (mddev->new_chunk_sectors > mddev->chunk_sectors || in check_reshape()
8504 mddev->chunk_sectors) in check_reshape()
8566 conf->prev_chunk_sectors = conf->chunk_sectors; in raid5_start_reshape()
8567 conf->chunk_sectors = mddev->new_chunk_sectors; in raid5_start_reshape()
8642 conf->chunk_sectors = conf->prev_chunk_sectors; in raid5_start_reshape()
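The raid5_start_reshape() lines above save the old chunk size into prev_chunk_sectors before adopting the new one, and restore it on the error path if the reshape thread cannot be started, so stripes laid out before the reshape point stay addressable. A minimal standalone sketch of that save/adopt/rollback pattern; the struct and function here are illustrative, not the kernel's:

	#include <stdbool.h>
	#include <stdio.h>

	struct conf {
		unsigned int chunk_sectors;
		unsigned int prev_chunk_sectors;
	};

	static bool start_reshape(struct conf *conf, unsigned int new_chunk_sectors,
				  bool thread_started)
	{
		conf->prev_chunk_sectors = conf->chunk_sectors;  /* remember old layout */
		conf->chunk_sectors = new_chunk_sectors;         /* adopt new layout */

		if (!thread_started) {
			/* roll back, mirroring the error path in raid5_start_reshape() */
			conf->chunk_sectors = conf->prev_chunk_sectors;
			return false;
		}
		return true;
	}

	int main(void)
	{
		struct conf conf = { .chunk_sectors = 128, .prev_chunk_sectors = 128 };

		if (!start_reshape(&conf, 1024, false))
			printf("reshape aborted, chunk stays %u sectors\n",
			       conf.chunk_sectors);
		return 0;
	}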
8717 mddev->chunk_sectors = conf->chunk_sectors; in raid5_finish_reshape()
8776 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid45_takeover_raid0()
8881 conf->chunk_sectors = new_chunk ; in raid5_check_reshape()
8882 mddev->chunk_sectors = new_chunk; in raid5_check_reshape()