Lines matching refs: arena
27 static struct device *to_dev(struct arena_info *arena) in to_dev() argument
29 return &arena->nd_btt->dev; in to_dev()
37 static int arena_read_bytes(struct arena_info *arena, resource_size_t offset, in arena_read_bytes() argument
40 struct nd_btt *nd_btt = arena->nd_btt; in arena_read_bytes()
48 static int arena_write_bytes(struct arena_info *arena, resource_size_t offset, in arena_write_bytes() argument
51 struct nd_btt *nd_btt = arena->nd_btt; in arena_write_bytes()
59 static int btt_info_write(struct arena_info *arena, struct btt_sb *super) in btt_info_write() argument
68 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512), in btt_info_write()
69 "arena->infooff: %#llx is unaligned\n", arena->infooff); in btt_info_write()
70 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512), in btt_info_write()
71 "arena->info2off: %#llx is unaligned\n", arena->info2off); in btt_info_write()
73 ret = arena_write_bytes(arena, arena->info2off, super, in btt_info_write()
78 return arena_write_bytes(arena, arena->infooff, super, in btt_info_write()
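
Note the ordering visible in btt_info_write() above: the backup info block at
info2off is written before the primary at infooff, so an update interrupted
part-way presumably always leaves one intact copy for readers to fall back on.
A minimal userspace sketch of that pattern -- struct media, write_bytes(), and
the buffer handling are stand-ins, not the driver's API:

	#include <stdint.h>
	#include <string.h>

	struct media { uint64_t infooff, info2off; uint8_t bytes[16384]; };

	/* models arena_write_bytes(): copy into the backing store */
	static int write_bytes(struct media *m, uint64_t off, const void *buf,
			       size_t len)
	{
		memcpy(m->bytes + off, buf, len);
		return 0;
	}

	static int info_write(struct media *m, const void *super, size_t len)
	{
		int ret = write_bytes(m, m->info2off, super, len); /* backup first */

		if (ret)
			return ret;
		return write_bytes(m, m->infooff, super, len);	   /* then primary */
	}
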
82 static int btt_info_read(struct arena_info *arena, struct btt_sb *super) in btt_info_read() argument
84 return arena_read_bytes(arena, arena->infooff, super, in btt_info_read()
94 static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping, in __btt_map_write() argument
97 u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE); in __btt_map_write()
99 if (unlikely(lba >= arena->external_nlba)) in __btt_map_write()
100 dev_err_ratelimited(to_dev(arena), in __btt_map_write()
102 __func__, lba, arena->external_nlba); in __btt_map_write()
103 return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags); in __btt_map_write()
106 static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping, in btt_map_write() argument
141 dev_err_ratelimited(to_dev(arena), in btt_map_write()
147 return __btt_map_write(arena, lba, mapping_le, rwb_flags); in btt_map_write()
150 static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping, in btt_map_read() argument
156 u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE); in btt_map_read()
158 if (unlikely(lba >= arena->external_nlba)) in btt_map_read()
159 dev_err_ratelimited(to_dev(arena), in btt_map_read()
161 __func__, lba, arena->external_nlba); in btt_map_read()
163 ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags); in btt_map_read()
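
The addressing behind __btt_map_write() and btt_map_read() above: each premap
LBA owns one 4-byte slot starting at arena->mapoff, and the top two bits of a
slot carry the zero and error flags around the postmap LBA. A sketch of just
that arithmetic -- the mask positions follow the BTT on-media format as I
understand it and should be treated as illustrative, not authoritative:

	#include <stdint.h>

	#define MAP_ENT_SIZE	4
	#define MAP_Z_MASK	(1u << 31)	/* 'zero' flag (assumed bit 31) */
	#define MAP_E_MASK	(1u << 30)	/* 'error' flag (assumed bit 30) */
	#define MAP_LBA_MASK	(~(3u << 30))	/* low 30 bits: postmap LBA */

	static uint64_t map_ent_off(uint64_t mapoff, uint32_t premap)
	{
		return mapoff + (uint64_t)premap * MAP_ENT_SIZE;
	}

	static uint32_t ent_postmap(uint32_t raw)
	{
		return raw & MAP_LBA_MASK;	/* strip the flag bits */
	}
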
206 static int btt_log_group_read(struct arena_info *arena, u32 lane, in btt_log_group_read() argument
209 return arena_read_bytes(arena, in btt_log_group_read()
210 arena->logoff + (lane * LOG_GRP_SIZE), log, in btt_log_group_read()
258 struct arena_info *arena; in btt_debugfs_init() local
265 list_for_each_entry(arena, &btt->arena_list, list) { in btt_debugfs_init()
266 arena_debugfs_init(arena, btt->debugfs_dir, i); in btt_debugfs_init()
327 static int btt_log_read(struct arena_info *arena, u32 lane, in btt_log_read() argument
334 ret = btt_log_group_read(arena, lane, &log); in btt_log_read()
338 old_ent = btt_log_get_old(arena, &log); in btt_log_read()
340 dev_err(to_dev(arena), in btt_log_read()
342 old_ent, lane, log.ent[arena->log_index[0]].seq, in btt_log_read()
343 log.ent[arena->log_index[1]].seq); in btt_log_read()
351 memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE); in btt_log_read()
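
What btt_log_get_old() decides for btt_log_read() above, reduced to its core:
log sequence numbers cycle 1 -> 2 -> 3 -> 1 (0 marks a never-written slot), so
of two valid entries the newer is the one exactly one step ahead in the cycle.
A sketch of only that comparison; the kernel additionally rejects equal or
impossible seq pairs as invalid:

	#include <stdint.h>

	/* return the index (0 or 1) of the OLDER entry */
	static int older_of(uint32_t seq0, uint32_t seq1)
	{
		/* is seq1 one step ahead of seq0 in the 1,2,3 cycle? */
		if ((seq0 % 3) + 1 == seq1)
			return 0;
		return 1;
	}

For instance, older_of(3, 1) is 0, because 1 follows 3 across the wraparound,
while older_of(2, 1) is 1.
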
361 static int __btt_log_write(struct arena_info *arena, u32 lane, in __btt_log_write() argument
365 u32 group_slot = arena->log_index[sub]; in __btt_log_write()
370 ns_off = arena->logoff + (lane * LOG_GRP_SIZE) + in __btt_log_write()
373 ret = arena_write_bytes(arena, ns_off, src, log_half, flags); in __btt_log_write()
379 return arena_write_bytes(arena, ns_off, src, log_half, flags); in __btt_log_write()
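
The two arena_write_bytes() calls in __btt_log_write() above split one 16-byte
log entry into two 8-byte halves. Per the on-media entry layout (lba, old_map,
new_map, seq), seq sits in the second half, so the slot only "commits" once
the second write lands; a tear between the halves never yields an entry that
looks both new and valid. The offset math, modeled in userspace (LOG_GRP_SIZE
of 64 -- four 16-byte slots per lane -- is assumed from the on-media format):

	#include <stdint.h>

	#define LOG_ENT_SIZE	16
	#define LOG_GRP_SIZE	64

	struct log_entry { uint32_t lba, old_map, new_map, seq; };

	static uint64_t log_slot_off(uint64_t logoff, uint32_t lane, uint32_t slot)
	{
		return logoff + (uint64_t)lane * LOG_GRP_SIZE +
			(uint64_t)slot * LOG_ENT_SIZE;
	}
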
382 static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub, in btt_flog_write() argument
387 ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC); in btt_flog_write()
392 arena->freelist[lane].sub = 1 - arena->freelist[lane].sub; in btt_flog_write()
393 if (++(arena->freelist[lane].seq) == 4) in btt_flog_write()
394 arena->freelist[lane].seq = 1; in btt_flog_write()
396 arena->freelist[lane].has_err = 1; in btt_flog_write()
397 arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map)); in btt_flog_write()
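
The freelist bookkeeping in btt_flog_write() above, in plain form: after a
successful flog write the lane alternates log slots (sub flips 0 <-> 1), seq
advances through the 1 -> 2 -> 3 -> 1 cycle without ever touching 0 (which
stays reserved for "uninitialized"), and the displaced old_map block becomes
the lane's next free block. A userspace sketch of the same state transition:

	#include <stdint.h>

	struct free_entry { uint32_t block; uint8_t sub, seq, has_err; };

	static void freelist_advance(struct free_entry *fe, uint32_t old_map_lba,
				     int old_map_had_error)
	{
		fe->sub = 1 - fe->sub;		/* next write uses the other slot */
		if (++fe->seq == 4)		/* 3 wraps to 1, skipping 0 */
			fe->seq = 1;
		if (old_map_had_error)
			fe->has_err = 1;	/* block needs clearing before reuse */
		fe->block = old_map_lba;	/* reclaimed block is now free */
	}
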
406 static int btt_map_init(struct arena_info *arena) in btt_map_init() argument
412 size_t mapsize = arena->logoff - arena->mapoff; in btt_map_init()
423 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512), in btt_map_init()
424 "arena->mapoff: %#llx is unaligned\n", arena->mapoff); in btt_map_init()
429 dev_WARN_ONCE(to_dev(arena), size < 512, in btt_map_init()
431 ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf, in btt_map_init()
450 static int btt_log_init(struct arena_info *arena) in btt_log_init() argument
452 size_t logsize = arena->info2off - arena->logoff; in btt_log_init()
467 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512), in btt_log_init()
468 "arena->logoff: %#llx is unaligned\n", arena->logoff); in btt_log_init()
473 dev_WARN_ONCE(to_dev(arena), size < 512, in btt_log_init()
475 ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf, in btt_log_init()
485 for (i = 0; i < arena->nfree; i++) { in btt_log_init()
487 ent.old_map = cpu_to_le32(arena->external_nlba + i); in btt_log_init()
488 ent.new_map = cpu_to_le32(arena->external_nlba + i); in btt_log_init()
490 ret = __btt_log_write(arena, i, 0, &ent, 0); in btt_log_init()
500 static u64 to_namespace_offset(struct arena_info *arena, u64 lba) in to_namespace_offset() argument
502 return arena->dataoff + ((u64)lba * arena->internal_lbasize); in to_namespace_offset()
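
to_namespace_offset() above is plain linear addressing into the data area:
byte offset = dataoff + lba * internal_lbasize. For example, with a
hypothetical dataoff of 4096 and 512-byte internal blocks, postmap LBA 10
resolves to byte 4096 + 10 * 512 = 9216.
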
505 static int arena_clear_freelist_error(struct arena_info *arena, u32 lane) in arena_clear_freelist_error() argument
509 if (arena->freelist[lane].has_err) { in arena_clear_freelist_error()
511 u32 lba = arena->freelist[lane].block; in arena_clear_freelist_error()
512 u64 nsoff = to_namespace_offset(arena, lba); in arena_clear_freelist_error()
513 unsigned long len = arena->sector_size; in arena_clear_freelist_error()
515 mutex_lock(&arena->err_lock); in arena_clear_freelist_error()
520 ret = arena_write_bytes(arena, nsoff, zero_page, in arena_clear_freelist_error()
527 arena->freelist[lane].has_err = 0; in arena_clear_freelist_error()
529 mutex_unlock(&arena->err_lock); in arena_clear_freelist_error()
534 static int btt_freelist_init(struct arena_info *arena) in btt_freelist_init() argument
540 arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry), in btt_freelist_init()
542 if (!arena->freelist) in btt_freelist_init()
545 for (i = 0; i < arena->nfree; i++) { in btt_freelist_init()
546 new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT); in btt_freelist_init()
555 arena->freelist[i].sub = 1 - new; in btt_freelist_init()
556 arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq)); in btt_freelist_init()
557 arena->freelist[i].block = log_oldmap; in btt_freelist_init()
565 arena->freelist[i].has_err = 1; in btt_freelist_init()
566 ret = arena_clear_freelist_error(arena, i); in btt_freelist_init()
568 dev_err_ratelimited(to_dev(arena), in btt_freelist_init()
577 ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry, in btt_freelist_init()
594 ret = btt_map_write(arena, le32_to_cpu(log_new.lba), in btt_freelist_init()
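
The btt_map_read()/btt_map_write() pair above is the crash-recovery check in
btt_freelist_init(): read the map slot named by the lane's newest log entry
and compare it against the log. A sketch of the decision, with flag bits
already stripped and error handling omitted:

	#include <stdint.h>

	/* nonzero when the flog won but the map write was lost, i.e. the map
	 * still holds old_map and must be replayed to new_map; other cases
	 * need no replay (their handling is elided here) */
	static int needs_map_replay(uint32_t map_entry, uint32_t log_old,
				    uint32_t log_new)
	{
		return (map_entry != log_new) && (map_entry == log_old);
	}
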
623 static int log_set_indices(struct arena_info *arena) in log_set_indices() argument
631 for (i = 0; i < arena->nfree; i++) { in log_set_indices()
632 ret = btt_log_group_read(arena, i, &log); in log_set_indices()
710 dev_err(to_dev(arena), "Found an unknown padding scheme\n"); in log_set_indices()
714 arena->log_index[0] = log_index[0]; in log_set_indices()
715 arena->log_index[1] = log_index[1]; in log_set_indices()
716 dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]); in log_set_indices()
717 dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]); in log_set_indices()
721 static int btt_rtt_init(struct arena_info *arena) in btt_rtt_init() argument
723 arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL); in btt_rtt_init()
724 if (arena->rtt == NULL) in btt_rtt_init()
730 static int btt_maplocks_init(struct arena_info *arena) in btt_maplocks_init() argument
734 arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock), in btt_maplocks_init()
736 if (!arena->map_locks) in btt_maplocks_init()
739 for (i = 0; i < arena->nfree; i++) in btt_maplocks_init()
740 spin_lock_init(&arena->map_locks[i].lock); in btt_maplocks_init()
748 struct arena_info *arena; in alloc_arena() local
752 arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL); in alloc_arena()
753 if (!arena) in alloc_arena()
755 arena->nd_btt = btt->nd_btt; in alloc_arena()
756 arena->sector_size = btt->sector_size; in alloc_arena()
757 mutex_init(&arena->err_lock); in alloc_arena()
760 return arena; in alloc_arena()
762 arena->size = size; in alloc_arena()
763 arena->external_lba_start = start; in alloc_arena()
764 arena->external_lbasize = btt->lbasize; in alloc_arena()
765 arena->internal_lbasize = roundup(arena->external_lbasize, in alloc_arena()
767 arena->nfree = BTT_DEFAULT_NFREE; in alloc_arena()
768 arena->version_major = btt->nd_btt->version_major; in alloc_arena()
769 arena->version_minor = btt->nd_btt->version_minor; in alloc_arena()
778 logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE); in alloc_arena()
782 arena->internal_nlba = div_u64(available - BTT_PG_SIZE, in alloc_arena()
783 arena->internal_lbasize + MAP_ENT_SIZE); in alloc_arena()
784 arena->external_nlba = arena->internal_nlba - arena->nfree; in alloc_arena()
786 mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE); in alloc_arena()
790 arena->infooff = arena_off; in alloc_arena()
791 arena->dataoff = arena->infooff + BTT_PG_SIZE; in alloc_arena()
792 arena->mapoff = arena->dataoff + datasize; in alloc_arena()
793 arena->logoff = arena->mapoff + mapsize; in alloc_arena()
794 arena->info2off = arena->logoff + logsize; in alloc_arena()
797 arena->log_index[0] = 0; in alloc_arena()
798 arena->log_index[1] = 1; in alloc_arena()
799 return arena; in alloc_arena()
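
The layout arithmetic in alloc_arena() above, as a userspace sketch. Each
internal block costs its data (internal_lbasize bytes) plus one 4-byte map
slot, hence the divisor; nfree blocks are held back from the externally
visible capacity, and the regions then chain info | data | map | log | info2.
Here "avail" stands for the arena size minus the two info pages and the log
(the reservation steps the listing elides), and the datasize computation is an
assumption about how the remainder is split:

	#include <stdint.h>

	#define BTT_PG_SIZE	4096
	#define MAP_ENT_SIZE	4

	static uint64_t ru(uint64_t x, uint64_t to)
	{
		return (x + to - 1) / to * to;	/* roundup() */
	}

	struct layout { uint64_t infooff, dataoff, mapoff, logoff, info2off; };

	static struct layout btt_layout(uint64_t arena_off, uint64_t avail,
					uint64_t logsize, uint32_t lbasize,
					uint32_t nfree)
	{
		/* the extra page in the divisor absorbs rounding slack */
		uint64_t internal_nlba = (avail - BTT_PG_SIZE) /
					 (lbasize + MAP_ENT_SIZE);
		uint64_t external_nlba = internal_nlba - nfree;
		uint64_t mapsize = ru(external_nlba * MAP_ENT_SIZE, BTT_PG_SIZE);
		uint64_t datasize = avail - mapsize;	/* assumed: data fills the rest */
		struct layout l = { .infooff = arena_off };

		l.dataoff  = l.infooff + BTT_PG_SIZE;
		l.mapoff   = l.dataoff + datasize;
		l.logoff   = l.mapoff + mapsize;
		l.info2off = l.logoff + logsize;
		return l;
	}
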
804 struct arena_info *arena, *next; in free_arenas() local
806 list_for_each_entry_safe(arena, next, &btt->arena_list, list) { in free_arenas()
807 list_del(&arena->list); in free_arenas()
808 kfree(arena->rtt); in free_arenas()
809 kfree(arena->map_locks); in free_arenas()
810 kfree(arena->freelist); in free_arenas()
811 debugfs_remove_recursive(arena->debugfs_dir); in free_arenas()
812 kfree(arena); in free_arenas()
820 static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super, in parse_arena_meta() argument
823 arena->internal_nlba = le32_to_cpu(super->internal_nlba); in parse_arena_meta()
824 arena->internal_lbasize = le32_to_cpu(super->internal_lbasize); in parse_arena_meta()
825 arena->external_nlba = le32_to_cpu(super->external_nlba); in parse_arena_meta()
826 arena->external_lbasize = le32_to_cpu(super->external_lbasize); in parse_arena_meta()
827 arena->nfree = le32_to_cpu(super->nfree); in parse_arena_meta()
828 arena->version_major = le16_to_cpu(super->version_major); in parse_arena_meta()
829 arena->version_minor = le16_to_cpu(super->version_minor); in parse_arena_meta()
831 arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off + in parse_arena_meta()
833 arena->infooff = arena_off; in parse_arena_meta()
834 arena->dataoff = arena_off + le64_to_cpu(super->dataoff); in parse_arena_meta()
835 arena->mapoff = arena_off + le64_to_cpu(super->mapoff); in parse_arena_meta()
836 arena->logoff = arena_off + le64_to_cpu(super->logoff); in parse_arena_meta()
837 arena->info2off = arena_off + le64_to_cpu(super->info2off); in parse_arena_meta()
839 arena->size = (le64_to_cpu(super->nextoff) > 0) in parse_arena_meta()
841 : (arena->info2off - arena->infooff + BTT_PG_SIZE); in parse_arena_meta()
843 arena->flags = le32_to_cpu(super->flags); in parse_arena_meta()
849 struct arena_info *arena; in discover_arenas() local
862 arena = alloc_arena(btt, 0, 0, 0); in discover_arenas()
863 if (!arena) { in discover_arenas()
868 arena->infooff = cur_off; in discover_arenas()
869 ret = btt_info_read(arena, super); in discover_arenas()
876 dev_info(to_dev(arena), "No existing arenas\n"); in discover_arenas()
879 dev_err(to_dev(arena), in discover_arenas()
886 arena->external_lba_start = cur_nlba; in discover_arenas()
887 parse_arena_meta(arena, super, cur_off); in discover_arenas()
889 ret = log_set_indices(arena); in discover_arenas()
891 dev_err(to_dev(arena), in discover_arenas()
896 ret = btt_freelist_init(arena); in discover_arenas()
900 ret = btt_rtt_init(arena); in discover_arenas()
904 ret = btt_maplocks_init(arena); in discover_arenas()
908 list_add_tail(&arena->list, &btt->arena_list); in discover_arenas()
910 remaining -= arena->size; in discover_arenas()
911 cur_off += arena->size; in discover_arenas()
912 cur_nlba += arena->external_nlba; in discover_arenas()
915 if (arena->nextoff == 0) in discover_arenas()
926 kfree(arena); in discover_arenas()
939 struct arena_info *arena; in create_arenas() local
946 arena = alloc_arena(btt, arena_size, btt->nlba, cur_off); in create_arenas()
947 if (!arena) { in create_arenas()
951 btt->nlba += arena->external_nlba; in create_arenas()
953 arena->nextoff = arena->size; in create_arenas()
955 arena->nextoff = 0; in create_arenas()
957 list_add_tail(&arena->list, &btt->arena_list); in create_arenas()
969 static int btt_arena_write_layout(struct arena_info *arena) in btt_arena_write_layout() argument
974 struct nd_btt *nd_btt = arena->nd_btt; in btt_arena_write_layout()
977 ret = btt_map_init(arena); in btt_arena_write_layout()
981 ret = btt_log_init(arena); in btt_arena_write_layout()
992 super->flags = cpu_to_le32(arena->flags); in btt_arena_write_layout()
993 super->version_major = cpu_to_le16(arena->version_major); in btt_arena_write_layout()
994 super->version_minor = cpu_to_le16(arena->version_minor); in btt_arena_write_layout()
995 super->external_lbasize = cpu_to_le32(arena->external_lbasize); in btt_arena_write_layout()
996 super->external_nlba = cpu_to_le32(arena->external_nlba); in btt_arena_write_layout()
997 super->internal_lbasize = cpu_to_le32(arena->internal_lbasize); in btt_arena_write_layout()
998 super->internal_nlba = cpu_to_le32(arena->internal_nlba); in btt_arena_write_layout()
999 super->nfree = cpu_to_le32(arena->nfree); in btt_arena_write_layout()
1001 super->nextoff = cpu_to_le64(arena->nextoff); in btt_arena_write_layout()
1006 super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff); in btt_arena_write_layout()
1007 super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff); in btt_arena_write_layout()
1008 super->logoff = cpu_to_le64(arena->logoff - arena->infooff); in btt_arena_write_layout()
1009 super->info2off = cpu_to_le64(arena->info2off - arena->infooff); in btt_arena_write_layout()
1015 ret = btt_info_write(arena, super); in btt_arena_write_layout()
1028 struct arena_info *arena; in btt_meta_init() local
1031 list_for_each_entry(arena, &btt->arena_list, list) { in btt_meta_init()
1032 ret = btt_arena_write_layout(arena); in btt_meta_init()
1036 ret = btt_freelist_init(arena); in btt_meta_init()
1040 ret = btt_rtt_init(arena); in btt_meta_init()
1044 ret = btt_maplocks_init(arena); in btt_meta_init()
1069 struct arena_info **arena) in lba_to_arena() argument
1076 *arena = arena_list; in lba_to_arena()
1090 static void lock_map(struct arena_info *arena, u32 premap) in lock_map() argument
1091 __acquires(&arena->map_locks[idx].lock) in lock_map()
1093 u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree; in lock_map()
1095 spin_lock(&arena->map_locks[idx].lock); in lock_map()
1098 static void unlock_map(struct arena_info *arena, u32 premap) in unlock_map() argument
1099 __releases(&arena->map_locks[idx].lock) in unlock_map()
1101 u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree; in unlock_map()
1103 spin_unlock(&arena->map_locks[idx].lock); in unlock_map()
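
The striping rule from lock_map()/unlock_map() above: a map entry's byte
offset is scaled down by the cache-line size and folded into the nfree lock
slots, so entries that share a cache line always hash to the same lock. With
64-byte lines (assumed below; it is arch-dependent in the kernel), premap LBAs
0..15 all land on stripe 0's line:

	#include <stdint.h>

	#define MAP_ENT_SIZE	4
	#define L1_CACHE_BYTES	64	/* assumed; arch-dependent in the kernel */

	static uint32_t map_lock_idx(uint32_t premap, uint32_t nfree)
	{
		return (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % nfree;
	}
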
1106 static int btt_data_read(struct arena_info *arena, struct page *page, in btt_data_read() argument
1110 u64 nsoff = to_namespace_offset(arena, lba); in btt_data_read()
1113 ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC); in btt_data_read()
1119 static int btt_data_write(struct arena_info *arena, u32 lba, in btt_data_write() argument
1123 u64 nsoff = to_namespace_offset(arena, lba); in btt_data_write()
1126 ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC); in btt_data_write()
1142 struct arena_info *arena, u32 postmap, int rw) in btt_rw_integrity() argument
1151 meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size; in btt_rw_integrity()
1168 ret = arena_write_bytes(arena, meta_nsoff, mem, cur_len, in btt_rw_integrity()
1171 ret = arena_read_bytes(arena, meta_nsoff, mem, cur_len, in btt_rw_integrity()
1189 struct arena_info *arena, u32 postmap, int rw) in btt_rw_integrity() argument
1201 struct arena_info *arena = NULL; in btt_read_pg() local
1209 ret = lba_to_arena(btt, sector, &premap, &arena); in btt_read_pg()
1215 ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag, in btt_read_pg()
1239 arena->rtt[lane] = RTT_VALID | postmap; in btt_read_pg()
1246 ret = btt_map_read(arena, premap, &new_map, &new_t, in btt_read_pg()
1260 ret = btt_data_read(arena, page, off, postmap, cur_len); in btt_read_pg()
1263 if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC)) in btt_read_pg()
1264 dev_warn_ratelimited(to_dev(arena), in btt_read_pg()
1271 ret = btt_rw_integrity(btt, bip, arena, postmap, READ); in btt_read_pg()
1276 arena->rtt[lane] = RTT_INVALID; in btt_read_pg()
1287 arena->rtt[lane] = RTT_INVALID; in btt_read_pg()
1298 static bool btt_is_badblock(struct btt *btt, struct arena_info *arena, in btt_is_badblock() argument
1301 u64 nsoff = adjust_initial_offset(arena->nd_btt, in btt_is_badblock()
1302 to_namespace_offset(arena, postmap)); in btt_is_badblock()
1305 return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize); in btt_is_badblock()
1313 struct arena_info *arena = NULL; in btt_write_pg() local
1325 ret = lba_to_arena(btt, sector, &premap, &arena); in btt_write_pg()
1330 if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) { in btt_write_pg()
1335 if (btt_is_badblock(btt, arena, arena->freelist[lane].block)) in btt_write_pg()
1336 arena->freelist[lane].has_err = 1; in btt_write_pg()
1338 if (mutex_is_locked(&arena->err_lock) in btt_write_pg()
1339 || arena->freelist[lane].has_err) { in btt_write_pg()
1342 ret = arena_clear_freelist_error(arena, lane); in btt_write_pg()
1350 new_postmap = arena->freelist[lane].block; in btt_write_pg()
1353 for (i = 0; i < arena->nfree; i++) in btt_write_pg()
1354 while (arena->rtt[i] == (RTT_VALID | new_postmap)) in btt_write_pg()
1358 if (new_postmap >= arena->internal_nlba) { in btt_write_pg()
1363 ret = btt_data_write(arena, new_postmap, page, off, cur_len); in btt_write_pg()
1368 ret = btt_rw_integrity(btt, bip, arena, new_postmap, in btt_write_pg()
1374 lock_map(arena, premap); in btt_write_pg()
1375 ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag, in btt_write_pg()
1379 if (old_postmap >= arena->internal_nlba) { in btt_write_pg()
1389 log.seq = cpu_to_le32(arena->freelist[lane].seq); in btt_write_pg()
1390 sub = arena->freelist[lane].sub; in btt_write_pg()
1391 ret = btt_flog_write(arena, lane, sub, &log); in btt_write_pg()
1395 ret = btt_map_write(arena, premap, new_postmap, 0, 0, in btt_write_pg()
1400 unlock_map(arena, premap); in btt_write_pg()
1404 ret = arena_clear_freelist_error(arena, lane); in btt_write_pg()
1417 unlock_map(arena, premap); in btt_write_pg()
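
Finally, a sketch of the read-tracking handshake visible across btt_read_pg()
and btt_write_pg() above: a reader publishes the postmap block it is reading
in its per-lane RTT slot, and a writer about to reuse a free block spins until
no reader still advertises it. The RTT_VALID bit position is assumed from the
driver's header; the model below shows the two sides side by side:

	#include <stdint.h>

	#define RTT_VALID	(1u << 31)	/* assumed flag position */
	#define RTT_INVALID	0

	/* reader side (btt_read_pg): publish before the data read... */
	static void rtt_publish(uint32_t *rtt, unsigned int lane, uint32_t postmap)
	{
		rtt[lane] = RTT_VALID | postmap;
	}

	/* ...and clear once the read is done */
	static void rtt_clear(uint32_t *rtt, unsigned int lane)
	{
		rtt[lane] = RTT_INVALID;
	}

	/* writer side (btt_write_pg): wait out readers of the reused block */
	static void rtt_wait(const uint32_t *rtt, unsigned int nfree,
			     uint32_t new_postmap)
	{
		for (unsigned int i = 0; i < nfree; i++)
			while (rtt[i] == (RTT_VALID | new_postmap))
				;	/* cpu_relax() in the kernel */
	}
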