Lines Matching refs:dmz
(Identifier cross-references for "dmz" in drivers/md/dm-zoned-target.c, the
dm-zoned device-mapper target. Each entry gives the source line number, the
matching line, and the enclosing function; "argument" and "local" flag the
entries where dmz is declared rather than merely referenced.)

116 static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,  in dmz_submit_bio()  argument
128 clone = bio_alloc_clone(dev->bdev, bio, GFP_NOIO, &dmz->bio_set); in dmz_submit_bio()
134 dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); in dmz_submit_bio()
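The lines above are the core of dmz_submit_bio(): a clone of the incoming
bio is allocated from the target's private bio_set, so that forwarding I/O
cannot exhaust the global bio pool, and the clone's start sector is rebased
from the chunk-relative block to the zone's absolute position on the backing
device. A minimal sketch of that pattern, reusing the dm-zoned helpers named
in the listing and omitting the completion accounting (bi_end_io, per-bio
refcount) that the real function performs:

static int sketch_submit_bio(struct dmz_target *dmz, struct dmz_dev *dev,
                             struct dm_zone *zone, struct bio *bio,
                             sector_t chunk_block, unsigned int nr_blocks)
{
        struct bio *clone;

        /* Clone out of the target's bio_set; GFP_NOIO avoids recursing
         * into the I/O path under memory pressure. */
        clone = bio_alloc_clone(dev->bdev, bio, GFP_NOIO, &dmz->bio_set);
        if (!clone)
                return -ENOMEM;

        /* Rebase: zone start sector + block offset within the zone. */
        clone->bi_iter.bi_sector =
                dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
        clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;

        submit_bio_noacct(clone);
        return 0;
}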
153 static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio, in dmz_handle_read_zero() argument
169 static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone, in dmz_handle_read() argument
172 struct dmz_metadata *zmd = dmz->metadata; in dmz_handle_read()
229 ret = dmz_submit_bio(dmz, rzone, bio, in dmz_handle_read()
236 dmz_handle_read_zero(dmz, bio, chunk_block, 1); in dmz_handle_read()
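dmz_handle_read() (lines 169-236) walks the chunk block by block; blocks
that have no mapping are satisfied by dmz_handle_read_zero() without
touching the media. A plausible sketch of that zero-fill helper, using the
standard bio iterator primitives (the exact body is an assumption, not a
quote from the file):

static void sketch_read_zero(struct bio *bio, unsigned int nr_blocks)
{
        unsigned int size = nr_blocks << DMZ_BLOCK_SHIFT;

        swap(bio->bi_iter.bi_size, size);  /* clamp the bio to the hole */
        zero_fill_bio(bio);                /* zero the clamped range    */
        swap(bio->bi_iter.bi_size, size);  /* restore the full size     */

        bio_advance(bio, nr_blocks << DMZ_BLOCK_SHIFT);
}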
249 static int dmz_handle_direct_write(struct dmz_target *dmz, in dmz_handle_direct_write() argument
254 struct dmz_metadata *zmd = dmz->metadata; in dmz_handle_direct_write()
262 ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks); in dmz_handle_direct_write()
282 static int dmz_handle_buffered_write(struct dmz_target *dmz, in dmz_handle_buffered_write() argument
287 struct dmz_metadata *zmd = dmz->metadata; in dmz_handle_buffered_write()
300 ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks); in dmz_handle_buffered_write()
318 static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone, in dmz_handle_write() argument
321 struct dmz_metadata *zmd = dmz->metadata; in dmz_handle_write()
343 return dmz_handle_direct_write(dmz, zone, bio, in dmz_handle_write()
351 return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks); in dmz_handle_write()
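Lines 318-351 show dmz_handle_write() choosing between the two write paths.
On a zoned device the deciding factor is the write pointer: a sequential
zone only accepts writes exactly at the pointer, so anything else must be
staged in the chunk's buffer zone and merged back later by reclaim. A
hedged reconstruction of that dispatch (the cache-zone case of newer
kernels is left out):

        if (dmz_is_rnd(zone) || chunk_block == zone->wp_block)
                /* Random zone, or aligned with the write pointer:
                 * write the data zone directly. */
                return dmz_handle_direct_write(dmz, zone, bio,
                                               chunk_block, nr_blocks);

        /* Out-of-order write into a sequential zone: buffer it. */
        return dmz_handle_buffered_write(dmz, zone, bio,
                                         chunk_block, nr_blocks);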
357 static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone, in dmz_handle_discard() argument
360 struct dmz_metadata *zmd = dmz->metadata; in dmz_handle_discard()
374 dmz_metadata_label(dmz->metadata), in dmz_handle_discard()
395 static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw, in dmz_handle_bio() argument
400 struct dmz_metadata *zmd = dmz->metadata; in dmz_handle_bio()
427 ret = dmz_handle_read(dmz, zone, bio); in dmz_handle_bio()
430 ret = dmz_handle_write(dmz, zone, bio); in dmz_handle_bio()
434 ret = dmz_handle_discard(dmz, zone, bio); in dmz_handle_bio()
438 dmz_metadata_label(dmz->metadata), bio_op(bio)); in dmz_handle_bio()
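dmz_handle_bio() (lines 395-438) is a plain dispatch on the bio operation,
with line 438 logging the error path for unsupported ops. The switch very
likely reads as follows (REQ_OP_WRITE_ZEROES sharing the discard handler is
an assumption):

        switch (bio_op(bio)) {
        case REQ_OP_READ:
                ret = dmz_handle_read(dmz, zone, bio);
                break;
        case REQ_OP_WRITE:
                ret = dmz_handle_write(dmz, zone, bio);
                break;
        case REQ_OP_DISCARD:
        case REQ_OP_WRITE_ZEROES:
                ret = dmz_handle_discard(dmz, zone, bio);
                break;
        default:
                DMERR("(%s): Unsupported BIO operation 0x%x",
                      dmz_metadata_label(dmz->metadata), bio_op(bio));
                ret = -EIO;
        }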
481 struct dmz_target *dmz = cw->target; in dmz_chunk_work() local
484 mutex_lock(&dmz->chunk_lock); in dmz_chunk_work()
488 mutex_unlock(&dmz->chunk_lock); in dmz_chunk_work()
489 dmz_handle_bio(dmz, cw, bio); in dmz_chunk_work()
490 mutex_lock(&dmz->chunk_lock); in dmz_chunk_work()
497 mutex_unlock(&dmz->chunk_lock); in dmz_chunk_work()
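The alternating lock/unlock calls at lines 484-497 are deliberate:
dmz->chunk_lock protects the per-chunk bio lists and the radix tree, but it
is dropped for the duration of each dmz_handle_bio() call so that slow I/O
on one chunk never blocks other chunks from queueing work. The drain loop
presumably has this shape:

        mutex_lock(&dmz->chunk_lock);
        while ((bio = bio_list_pop(&cw->bio_list))) {
                mutex_unlock(&dmz->chunk_lock);
                dmz_handle_bio(dmz, cw, bio);   /* may sleep / do I/O */
                mutex_lock(&dmz->chunk_lock);
        }
        mutex_unlock(&dmz->chunk_lock);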
505 struct dmz_target *dmz = container_of(work, struct dmz_target, flush_work.work); in dmz_flush_work() local
510 ret = dmz_flush_metadata(dmz->metadata); in dmz_flush_work()
513 dmz_metadata_label(dmz->metadata), ret); in dmz_flush_work()
517 spin_lock(&dmz->flush_lock); in dmz_flush_work()
518 bio = bio_list_pop(&dmz->flush_list); in dmz_flush_work()
519 spin_unlock(&dmz->flush_lock); in dmz_flush_work()
527 queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD); in dmz_flush_work()
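dmz_flush_work() (lines 505-527) flushes the dirty metadata once, completes
every FLUSH/FUA bio that was parked on flush_list, then re-arms itself so
metadata is also written back periodically (DMZ_FLUSH_PERIOD) even when no
flush bios arrive. A sketch of that loop, assuming the file's
dmz_bio_endio() completion helper:

        ret = dmz_flush_metadata(dmz->metadata);

        while (1) {
                spin_lock(&dmz->flush_lock);
                bio = bio_list_pop(&dmz->flush_list);
                spin_unlock(&dmz->flush_lock);
                if (!bio)
                        break;
                dmz_bio_endio(bio, errno_to_blk_status(ret));
        }

        queue_delayed_work(dmz->flush_wq, &dmz->flush_work,
                           DMZ_FLUSH_PERIOD);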
534 static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) in dmz_queue_chunk_work() argument
536 unsigned int chunk = dmz_bio_chunk(dmz->metadata, bio); in dmz_queue_chunk_work()
540 mutex_lock(&dmz->chunk_lock); in dmz_queue_chunk_work()
543 cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk); in dmz_queue_chunk_work()
556 cw->target = dmz; in dmz_queue_chunk_work()
560 ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw); in dmz_queue_chunk_work()
569 if (queue_work(dmz->chunk_wq, &cw->work)) in dmz_queue_chunk_work()
572 mutex_unlock(&dmz->chunk_lock); in dmz_queue_chunk_work()
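dmz_queue_chunk_work() (lines 536-572) keeps one dm_chunk_work per active
chunk, indexed by chunk number in a radix tree, so that all bios targeting
the same chunk are serialized on a single work item. A sketch of the
lookup-or-create pattern (the refcounting around dmz_get_chunk_work() is
simplified and partly an assumption):

static int sketch_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
        unsigned int chunk = dmz_bio_chunk(dmz->metadata, bio);
        struct dm_chunk_work *cw;
        int ret = 0;

        mutex_lock(&dmz->chunk_lock);

        cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
        if (!cw) {
                /* First bio for this chunk: create its work item. */
                cw = kmalloc(sizeof(*cw), GFP_NOIO);
                if (!cw) {
                        ret = -ENOMEM;
                        goto out;
                }
                INIT_WORK(&cw->work, dmz_chunk_work);
                cw->target = dmz;
                cw->chunk = chunk;
                bio_list_init(&cw->bio_list);

                ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
                if (ret) {
                        kfree(cw);
                        goto out;
                }
        }

        bio_list_add(&cw->bio_list, bio);
        if (queue_work(dmz->chunk_wq, &cw->work))
                dmz_get_chunk_work(cw);  /* reference held while queued */
out:
        mutex_unlock(&dmz->chunk_lock);
        return ret;
}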
627 struct dmz_target *dmz = ti->private; in dmz_map() local
628 struct dmz_metadata *zmd = dmz->metadata; in dmz_map()
660 spin_lock(&dmz->flush_lock); in dmz_map()
661 bio_list_add(&dmz->flush_list, bio); in dmz_map()
662 spin_unlock(&dmz->flush_lock); in dmz_map()
663 mod_delayed_work(dmz->flush_wq, &dmz->flush_work, 0); in dmz_map()
673 ret = dmz_queue_chunk_work(dmz, bio); in dmz_map()
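In dmz_map() (lines 660-663), an empty write bio (REQ_PREFLUSH or REQ_FUA
with no payload, so bio_sectors() is zero) carries nothing to remap: it is
parked on flush_list and the flush work is kicked with a zero delay rather
than waiting for the periodic timer. Roughly:

        if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
                spin_lock(&dmz->flush_lock);
                bio_list_add(&dmz->flush_list, bio);
                spin_unlock(&dmz->flush_lock);
                /* Run dmz_flush_work() as soon as possible. */
                mod_delayed_work(dmz->flush_wq, &dmz->flush_work, 0);
                return DM_MAPIO_SUBMITTED;
        }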
691 struct dmz_target *dmz = ti->private; in dmz_get_zoned_device() local
714 if (dmz->ddev[0]) { in dmz_get_zoned_device()
718 dev = &dmz->dev[idx]; in dmz_get_zoned_device()
721 if (dmz->ddev[idx]) { in dmz_get_zoned_device()
729 dev = &dmz->dev[idx]; in dmz_get_zoned_device()
740 dmz->ddev[idx] = ddev; in dmz_get_zoned_device()
753 struct dmz_target *dmz = ti->private; in dmz_put_zoned_devices() local
756 for (i = 0; i < dmz->nr_ddevs; i++) in dmz_put_zoned_devices()
757 if (dmz->ddev[i]) in dmz_put_zoned_devices()
758 dm_put_device(ti, dmz->ddev[i]); in dmz_put_zoned_devices()
760 kfree(dmz->ddev); in dmz_put_zoned_devices()
765 struct dmz_target *dmz = ti->private; in dmz_fixup_devices() local
774 if (dmz->nr_ddevs > 1) { in dmz_fixup_devices()
775 reg_dev = &dmz->dev[0]; in dmz_fixup_devices()
780 for (i = 1; i < dmz->nr_ddevs; i++) { in dmz_fixup_devices()
781 struct dmz_dev *zoned_dev = &dmz->dev[i]; in dmz_fixup_devices()
798 struct dmz_dev *zoned_dev = &dmz->dev[0]; in dmz_fixup_devices()
818 for (i = 1; i < dmz->nr_ddevs; i++) { in dmz_fixup_devices()
819 dmz->dev[i].zone_offset = zone_offset; in dmz_fixup_devices()
820 zone_offset += dmz->dev[i].nr_zones; in dmz_fixup_devices()
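Lines 818-820 stitch multiple backing devices into one flat zone-number
space: device i's zones begin at the running total of all earlier devices'
zone counts, so a global zone id maps back to exactly one device. Spelled
out:

        /* dev[0] starts at offset 0 by construction. */
        zone_offset = dmz->dev[0].nr_zones;
        for (i = 1; i < dmz->nr_ddevs; i++) {
                dmz->dev[i].zone_offset = zone_offset;
                zone_offset += dmz->dev[i].nr_zones;
        }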
831 struct dmz_target *dmz; in dmz_ctr() local
841 dmz = kzalloc(sizeof(struct dmz_target), GFP_KERNEL); in dmz_ctr()
842 if (!dmz) { in dmz_ctr()
846 dmz->dev = kcalloc(argc, sizeof(struct dmz_dev), GFP_KERNEL); in dmz_ctr()
847 if (!dmz->dev) { in dmz_ctr()
849 kfree(dmz); in dmz_ctr()
852 dmz->ddev = kcalloc(argc, sizeof(struct dm_dev *), GFP_KERNEL); in dmz_ctr()
853 if (!dmz->ddev) { in dmz_ctr()
858 dmz->nr_ddevs = argc; in dmz_ctr()
860 ti->private = dmz; in dmz_ctr()
873 ret = dmz_ctr_metadata(dmz->dev, argc, &dmz->metadata, in dmz_ctr()
881 ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata); in dmz_ctr()
890 ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << in dmz_ctr()
891 dmz_zone_nr_sectors_shift(dmz->metadata); in dmz_ctr()
894 ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0); in dmz_ctr()
901 mutex_init(&dmz->chunk_lock); in dmz_ctr()
902 INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO); in dmz_ctr()
903 dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", in dmz_ctr()
905 dmz_metadata_label(dmz->metadata)); in dmz_ctr()
906 if (!dmz->chunk_wq) { in dmz_ctr()
913 spin_lock_init(&dmz->flush_lock); in dmz_ctr()
914 bio_list_init(&dmz->flush_list); in dmz_ctr()
915 INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work); in dmz_ctr()
916 dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM, in dmz_ctr()
917 dmz_metadata_label(dmz->metadata)); in dmz_ctr()
918 if (!dmz->flush_wq) { in dmz_ctr()
923 mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD); in dmz_ctr()
926 for (i = 0; i < dmz->nr_ddevs; i++) { in dmz_ctr()
927 ret = dmz_ctr_reclaim(dmz->metadata, &dmz->dev[i].reclaim, i); in dmz_ctr()
935 dmz_metadata_label(dmz->metadata), in dmz_ctr()
941 destroy_workqueue(dmz->flush_wq); in dmz_ctr()
943 destroy_workqueue(dmz->chunk_wq); in dmz_ctr()
945 mutex_destroy(&dmz->chunk_lock); in dmz_ctr()
946 bioset_exit(&dmz->bio_set); in dmz_ctr()
948 dmz_dtr_metadata(dmz->metadata); in dmz_ctr()
952 kfree(dmz->dev); in dmz_ctr()
953 kfree(dmz); in dmz_ctr()
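The tail of dmz_ctr() (lines 941-953) is a classic goto-unwind ladder: each
error label undoes exactly the steps that had succeeded, in the reverse
order of construction, so a partially built target never leaks a workqueue,
the bioset, the metadata, or the dmz allocation itself; dmz_dtr() (lines
963-985) performs the same teardown for a complete target. The label names
below are assumptions; the ordering follows the listing:

err_fwq:
        destroy_workqueue(dmz->flush_wq);
err_cwq:
        destroy_workqueue(dmz->chunk_wq);
        mutex_destroy(&dmz->chunk_lock);
        bioset_exit(&dmz->bio_set);
err_meta:
        dmz_dtr_metadata(dmz->metadata);
err_dev:
        dmz_put_zoned_devices(ti);
err:
        kfree(dmz->dev);
        kfree(dmz);
        return ret;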
963 struct dmz_target *dmz = ti->private; in dmz_dtr() local
966 destroy_workqueue(dmz->chunk_wq); in dmz_dtr()
968 for (i = 0; i < dmz->nr_ddevs; i++) in dmz_dtr()
969 dmz_dtr_reclaim(dmz->dev[i].reclaim); in dmz_dtr()
971 cancel_delayed_work_sync(&dmz->flush_work); in dmz_dtr()
972 destroy_workqueue(dmz->flush_wq); in dmz_dtr()
974 (void) dmz_flush_metadata(dmz->metadata); in dmz_dtr()
976 dmz_dtr_metadata(dmz->metadata); in dmz_dtr()
978 bioset_exit(&dmz->bio_set); in dmz_dtr()
982 mutex_destroy(&dmz->chunk_lock); in dmz_dtr()
984 kfree(dmz->dev); in dmz_dtr()
985 kfree(dmz); in dmz_dtr()
993 struct dmz_target *dmz = ti->private; in dmz_io_hints() local
994 unsigned int chunk_sectors = dmz_zone_nr_sectors(dmz->metadata); in dmz_io_hints()
1021 struct dmz_target *dmz = ti->private; in dmz_prepare_ioctl() local
1022 struct dmz_dev *dev = &dmz->dev[0]; in dmz_prepare_ioctl()
1037 struct dmz_target *dmz = ti->private; in dmz_suspend() local
1040 flush_workqueue(dmz->chunk_wq); in dmz_suspend()
1041 for (i = 0; i < dmz->nr_ddevs; i++) in dmz_suspend()
1042 dmz_suspend_reclaim(dmz->dev[i].reclaim); in dmz_suspend()
1043 cancel_delayed_work_sync(&dmz->flush_work); in dmz_suspend()
1051 struct dmz_target *dmz = ti->private; in dmz_resume() local
1054 queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD); in dmz_resume()
1055 for (i = 0; i < dmz->nr_ddevs; i++) in dmz_resume()
1056 dmz_resume_reclaim(dmz->dev[i].reclaim); in dmz_resume()
1062 struct dmz_target *dmz = ti->private; in dmz_iterate_devices() local
1063 unsigned int zone_nr_sectors = dmz_zone_nr_sectors(dmz->metadata); in dmz_iterate_devices()
1067 for (i = 0; i < dmz->nr_ddevs; i++) { in dmz_iterate_devices()
1068 capacity = dmz->dev[i].capacity & ~(zone_nr_sectors - 1); in dmz_iterate_devices()
1069 r = fn(ti, dmz->ddev[i], 0, capacity, data); in dmz_iterate_devices()
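The mask at line 1068 rounds each device's capacity down to a whole number
of zones: zone sizes are powers of two, so clearing the low bits of the
sector count drops any trailing partial zone. A worked example with a
hypothetical 256 MiB zone (524288 sectors):

        sector_t zone_nr_sectors = 524288;       /* assumed zone size  */
        sector_t capacity = 10000000;            /* raw device sectors */
        sector_t usable = capacity & ~(zone_nr_sectors - 1);
        /* usable == 9961472: 19 full zones; 38528 sectors ignored. */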
1080 struct dmz_target *dmz = ti->private; in dmz_status() local
1089 dmz_nr_zones(dmz->metadata), in dmz_status()
1090 dmz_nr_unmap_cache_zones(dmz->metadata), in dmz_status()
1091 dmz_nr_cache_zones(dmz->metadata)); in dmz_status()
1092 for (i = 0; i < dmz->nr_ddevs; i++) { in dmz_status()
1098 (dmz_nr_cache_zones(dmz->metadata) > 0)) in dmz_status()
1101 dmz_nr_unmap_rnd_zones(dmz->metadata, i), in dmz_status()
1102 dmz_nr_rnd_zones(dmz->metadata, i), in dmz_status()
1103 dmz_nr_unmap_seq_zones(dmz->metadata, i), in dmz_status()
1104 dmz_nr_seq_zones(dmz->metadata, i)); in dmz_status()
1108 dev = &dmz->dev[0]; in dmz_status()
1111 for (i = 1; i < dmz->nr_ddevs; i++) { in dmz_status()
1112 dev = &dmz->dev[i]; in dmz_status()
1126 struct dmz_target *dmz = ti->private; in dmz_message() local
1132 for (i = 0; i < dmz->nr_ddevs; i++) in dmz_message()
1133 dmz_schedule_reclaim(dmz->dev[i].reclaim); in dmz_message()