/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include "btt.h"
#include "nd.h"

enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};

static int btt_major;

static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets are 4K from the base of the device */
	offset += SZ_4K;
	return nvdimm_read_bytes(ndns, offset, buf, n);
}

static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets are 4K from the base of the device */
	offset += SZ_4K;
	return nvdimm_write_bytes(ndns, offset, buf, n);
}

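/*
 * Every access to the backing namespace goes through the two helpers
 * above, which shift each arena-relative offset by SZ_4K: the first
 * 4K of the device is reserved and never belongs to an arena. For
 * example, reading an arena's primary info block:
 *
 *	struct btt_sb sb;
 *	arena_read_bytes(arena, arena->infooff, &sb, sizeof(sb));
 *
 * touches namespace bytes [4K + infooff, 4K + infooff + 4K), since
 * struct btt_sb is exactly SZ_4K (see the BUILD_BUG_ON in
 * nd_btt_init() at the bottom of this file).
 */
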
static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb));
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb));
}

static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	WARN_ON(!super);
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb));
}

/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	WARN_ON(lba >= arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE);
}

static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
			u32 z_flag, u32 e_flag)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping &= MAP_LBA_MASK;

	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we decide not to,
		 * to avoid confusion
		 */
		WARN_ONCE(1, "Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le);
}

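/*
 * Summary of the map entry encoding implemented by btt_map_write()
 * above and btt_map_read() below (Z and E are the trim and error
 * flag bits of the raw 32-bit entry; the rest is the postmap LBA):
 *
 *	Z E	meaning
 *	0 0	initial state: treated as identity, postmap == premap
 *	0 1	postmap is valid, but the block has a media error
 *	1 0	postmap is valid, but the block was trimmed/zeroed
 *	1 1	normal, valid mapping
 *
 * Note the inversion: a 'normal' entry is stored with both flag bits
 * set (MAP_ENT_NORMAL), so that the all-zeroes media state written by
 * btt_map_init() reads back as the identity mapping.
 */
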
static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
			int *trim, int *error)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	WARN_ON(lba >= arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = (raw_mapping & MAP_TRIM_MASK) >> MAP_TRIM_SHIFT;
	e_flag = (raw_mapping & MAP_ERR_MASK) >> MAP_ERR_SHIFT;
	ze = (z_flag << 1) + e_flag;
	postmap = raw_mapping & MAP_LBA_MASK;

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return ret;
}

static int btt_log_read_pair(struct arena_info *arena, u32 lane,
			struct log_entry *ent)
{
	WARN_ON(!ent);
	return arena_read_bytes(arena,
			arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
			2 * LOG_ENT_SIZE);
}

static struct dentry *debugfs_root;

static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
				int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If, for some reason, the parent bttN was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;

	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
				&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
				&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
				&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
}

static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
						debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}

/*
 * This function accepts two log entries, and uses the
 * sequence number to find the 'older' entry.
 * As a special case, if entry [0] carries a zero sequence number,
 * it is treated as the very first entry and its sequence number is
 * initialized to 1.
 * It returns which of the entries was the older one.
 *
 * TODO: The logic feels a bit kludgey; make it better.
 */
static int btt_log_get_old(struct log_entry *ent)
{
	int old;

	/*
	 * The very first time an entry is written it goes into [0]; on
	 * the next update, the logic below works out to put the new
	 * entry into [1].
	 */
	if (ent[0].seq == 0) {
		ent[0].seq = cpu_to_le32(1);
		return 0;
	}

	if (ent[0].seq == ent[1].seq)
		return -EINVAL;
	if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5)
		return -EINVAL;

	if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) {
		if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}

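/*
 * Example of the sequence number scheme: seq cycles through 1 -> 2 ->
 * 3 -> 1 (see btt_flog_write() below), and 0 means 'never written'.
 * For a lane whose two slots read back as seq = [2, 3], slot 0 is
 * older; for [3, 1] the counter has wrapped, so slot 1 (seq 1) is the
 * *newer* entry and slot 0 is old -- which is exactly what the
 * distance-of-one test above computes. The sum > 5 check rejects
 * pairs that cannot occur with two distinct values from {1, 2, 3}.
 */
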
static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_entry log[2];

	ret = btt_log_read_pair(arena, lane, log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(log);
	if (old_ent < 0 || old_ent > 1) {
		dev_info(to_dev(arena),
				"log corruption (%d): lane %d seq [%d, %d]\n",
				old_ent, lane, le32_to_cpu(log[0].seq),
				le32_to_cpu(log[1].seq));
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log[ret_ent], LOG_ENT_SIZE);

	return ret_ent;
}

/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent)
{
	int ret;
	/*
	 * Ignore the padding in log_entry for calculating log_half.
	 * The entry is 'committed' when we write the sequence number,
	 * and we want to ensure that it is the last thing written.
	 * We don't bother writing the padding as that would be extra
	 * media wear and write amplification
	 */
	unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2;
	u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE);
	void *src = ent;

	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half);
}

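/*
 * Assuming the log_entry layout in btt.h (four __le32 fields -- lba,
 * old_map, new_map, seq -- followed by two u64 of padding), log_half
 * works out to 8 bytes: the first write above covers { lba, old_map }
 * and the second covers { new_map, seq }. Since seq lands in the
 * second write, a power failure between the two halves leaves the old
 * sequence number intact, and the half-written entry is simply never
 * treated as committed.
 */
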
static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	arena->freelist[lane].block = le32_to_cpu(ent->old_map);

	return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
		cond_resched();
	}

 free:
	kfree(zerobuf);
	return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
static int btt_log_init(struct arena_info *arena)
{
	int ret;
	u32 i;
	struct log_entry log, zerolog;

	memset(&zerolog, 0, sizeof(zerolog));

	for (i = 0; i < arena->nfree; i++) {
		log.lba = cpu_to_le32(i);
		log.old_map = cpu_to_le32(arena->external_nlba + i);
		log.new_map = cpu_to_le32(arena->external_nlba + i);
		log.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &log);
		if (ret)
			return ret;
		ret = __btt_log_write(arena, i, 1, &zerolog);
		if (ret)
			return ret;
	}

	return 0;
}

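/*
 * After btt_log_init(), lane i's only valid log entry says "premap
 * LBA i maps to internal block external_nlba + i, and the old block
 * is the same" -- i.e. the nfree internal blocks past external_nlba
 * start out as the per-lane free blocks, and btt_freelist_init()
 * below recognizes old_map == new_map as an untouched entry.
 */
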
static int btt_freelist_init(struct arena_info *arena)
{
	int old, new, ret;
	u32 i, map_entry;
	struct log_entry log_new, log_old;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
		if (old < 0)
			return old;

		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = le32_to_cpu(log_new.old_map);

		/* This implies a newly created or untouched flog entry */
		if (log_new.old_map == log_new.new_map)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL);
		if (ret)
			return ret;
		if ((le32_to_cpu(log_new.new_map) != map_entry) &&
				(le32_to_cpu(log_new.old_map) == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}

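/*
 * The recovery rule applied above, for reference: if the newest flog
 * entry and the map agree (map == new_map), the last transaction
 * completed fully; if the map still holds old_map, the crash hit
 * between the flog write and the map write, and the map write is
 * re-done here. Any other map value is left alone and treated as not
 * needing a fixup.
 */
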
static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}

static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
					INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = 1;
	arena->version_minor = 1;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(2 * arena->nfree * sizeof(struct log_entry),
				BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;
	return arena;
}

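/*
 * Resulting arena layout, as computed above ('absolute' in the sense
 * of the code: relative to the start of the BTT storage space, which
 * arena_{read,write}_bytes then shift by 4K):
 *
 *	infooff		info block, BTT_PG_SIZE
 *	dataoff		data area, sized to hold internal_nlba blocks
 *	mapoff		map, external_nlba 4-byte entries (rounded up)
 *	logoff		log, 2 * nfree log entries (rounded up)
 *	info2off	backup info block, BTT_PG_SIZE
 */
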
static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}

/*
 * This function checks if the metadata layout is valid and error free
 */
static int arena_is_valid(struct arena_info *arena, struct btt_sb *super,
				u8 *uuid, u32 lbasize)
{
	u64 checksum;

	if (memcmp(super->uuid, uuid, 16))
		return 0;

	checksum = le64_to_cpu(super->checksum);
	super->checksum = 0;
	if (checksum != nd_btt_sb_checksum(super))
		return 0;
	super->checksum = cpu_to_le64(checksum);

	if (lbasize != le32_to_cpu(super->external_lbasize))
		return 0;

	/* TODO: figure out action for this */
	if ((le32_to_cpu(super->flags) & IB_FLAG_ERROR_MASK) != 0)
		dev_info(to_dev(arena), "Found arena with an error flag\n");

	return 1;
}

/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (super->nextoff > 0) ? (le64_to_cpu(super->nextoff)) :
			(arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}

static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	struct btt_sb *super;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena) {
			ret = -ENOMEM;
			goto out_super;
		}

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!arena_is_valid(arena, super, btt->nd_btt->uuid,
				btt->lbasize)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_info(to_dev(arena),
						"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	kfree(super);
	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
 out_super:
	kfree(super);
	return ret;
}

static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}

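/*
 * create_arenas() sizing, in short: a namespace larger than
 * ARENA_MAX_SIZE is carved into ARENA_MAX_SIZE chunks, a tail smaller
 * than ARENA_MIN_SIZE is silently dropped, and only the last arena
 * gets nextoff == 0 -- the terminator for the chain that
 * discover_arenas() walks on the next attach.
 */
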
/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena, u8 *uuid)
{
	int ret;
	struct btt_sb *super;

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
	memcpy(super->uuid, uuid, 16);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	super->flags = 0;
	super->checksum = cpu_to_le64(nd_btt_sb_checksum(super));

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena, btt->nd_btt->uuid);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}

static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}

/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
				struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}

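/*
 * The map locks are striped by cacheline rather than per-entry: two
 * premap LBAs whose 4-byte map entries share an L1 cacheline hash to
 * the same lock. E.g. with 64-byte cachelines, premap LBAs 0..15
 * share map_locks[0], 16..31 share map_locks[1], and so on, wrapping
 * modulo nfree.
 */
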
static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = kmap_atomic(bv.bv_page);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len);
		else
			ret = arena_read_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len);

		kunmap_atomic(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len);
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif

static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &t_flag,
						&e_flag);
			if (ret)
				goto out_rtt;

			if (postmap == new_map)
				break;

			postmap = new_map;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret)
			goto out_rtt;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

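/*
 * Read-side summary: the RTT (read tracking table) entry published in
 * the loop above tells concurrent writers "this postmap block is
 * being read", and btt_write_pg() below spins on it before reusing a
 * free block. The map is read a second time after publishing the RTT
 * entry; only if the translation is unchanged can the data read
 * proceed safely, otherwise we chase the new postmap and retry.
 */
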
static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

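/*
 * Write-side ordering, matching the loop above: (1) pick the lane's
 * free block and wait for any readers of it to drain from the RTT,
 * (2) write the new data (and integrity metadata) to that block,
 * (3) commit a flog entry recording old_map -> new_map, and only
 * then (4) update the map entry itself under the striped map lock.
 * A crash at any point either leaves the old mapping intact or is
 * repaired from the flog by btt_freelist_init() on the next attach.
 */
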
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			int rw, sector_t sector)
{
	int ret;

	if (rw == READ) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}

static void btt_make_request(struct request_queue *q, struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = q->queuedata;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0, rw;
	bool do_acct;

	/*
	 * bio_integrity_enabled also checks if the bio already has an
	 * integrity payload attached. If it does, we *don't* do a
	 * bio_integrity_prep here - the payload has been generated by
	 * another kernel subsystem, and we just pass it through.
	 */
	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		err = -EIO;
		goto out;
	}

	do_acct = nd_iostat_start(bio, &start);
	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		BUG_ON(len > PAGE_SIZE);
		/* Make sure len is in multiples of sector size. */
		/* XXX is this right? */
		BUG_ON(len < btt->sector_size);
		BUG_ON(len % btt->sector_size);

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				rw, iter.bi_sector);
		if (err) {
			dev_info(&btt->nd_btt->dev,
					"io error in %s sector %lld, len %d\n",
					(rw == READ) ? "READ" : "WRITE",
					(unsigned long long) iter.bi_sector, len);
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

out:
	bio_endio(bio, err);
}

static int btt_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, int rw)
{
	struct btt *btt = bdev->bd_disk->private_data;
	int rc;

	/* propagate I/O errors instead of unconditionally reporting success */
	rc = btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
	if (rc == 0)
		page_endio(page, rw & WRITE, 0);

	return rc;
}

static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}

static const struct block_device_operations btt_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		btt_rw_page,
	.getgeo =		btt_getgeo,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* create a new disk and request queue for btt */
	btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
	if (!btt->btt_queue)
		return -ENOMEM;

	btt->btt_disk = alloc_disk(0);
	if (!btt->btt_disk) {
		blk_cleanup_queue(btt->btt_queue);
		return -ENOMEM;
	}

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->driverfs_dev = &btt->nd_btt->dev;
	btt->btt_disk->major = btt_major;
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;
	btt->btt_disk->queue = btt->btt_queue;
	btt->btt_disk->flags = GENHD_FL_EXT_DEVT;

	blk_queue_make_request(btt->btt_queue, btt_make_request);
	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
	blk_queue_bounce_limit(btt->btt_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
	btt->btt_queue->queuedata = btt;

	set_capacity(btt->btt_disk, 0);
	add_disk(btt->btt_disk);
	if (btt_meta_size(btt)) {
		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));

		if (rc) {
			del_gendisk(btt->btt_disk);
			put_disk(btt->btt_disk);
			blk_cleanup_queue(btt->btt_queue);
			return rc;
		}
	}
	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	revalidate_disk(btt->btt_disk);

	return 0;
}

static void btt_blk_cleanup(struct btt *btt)
{
	blk_integrity_unregister(btt->btt_disk);
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
	blk_cleanup_queue(btt->btt_queue);
}

/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt:	device with BTT geometry and backing device info
 * @rawsize:	raw size in bytes of the backing device
 * @lbasize:	lba size of the backing device
 * @uuid:	A uuid for the backing device - this is stored on media
 * @nd_region:	parent region of the backing device
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
		u32 lbasize, u8 *uuid, struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct device *dev = &nd_btt->dev;

	btt = kzalloc(sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in discover_arenas: %d\n", ret);
		goto out_free;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_info(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		goto out_free;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			goto out_free;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			goto out_free;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		goto out_free;
	}

	btt_debugfs_init(btt);

	return btt;

 out_free:
	kfree(btt);
	return NULL;
}

/**
 * btt_fini - de-initialize a BTT
 * @btt:	the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
		kfree(btt);
	}
}

int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt *btt;
	size_t rawsize;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize)
		return -ENODEV;

	rawsize = nvdimm_namespace_capacity(ndns) - SZ_4K;
	if (rawsize < ARENA_MIN_SIZE)
		return -ENXIO;
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);

int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);

static int __init nd_btt_init(void)
{
	int rc;

	BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K);

	btt_major = register_blkdev(0, "btt");
	if (btt_major < 0)
		return btt_major;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root)) {
		rc = -ENXIO;
		goto err_debugfs;
	}

	return 0;

 err_debugfs:
	unregister_blkdev(btt_major, "btt");

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
	unregister_blkdev(btt_major, "btt");
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);