xref: /openbmc/linux/drivers/nvdimm/core.c (revision 1c2dd16a)
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);

void nvdimm_bus_lock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);

void nvdimm_bus_unlock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);

bool is_nvdimm_bus_locked(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);
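
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * bracket multi-step reconfiguration with the bus-wide reconfig_mutex so
 * sysfs writers and probe paths do not race. example_reconfigure() is a
 * hypothetical helper.
 */
static __maybe_unused void example_reconfigure(struct device *dev)
{
	nvdimm_bus_lock(dev);
	WARN_ON(!is_nvdimm_bus_locked(dev));
	/* ...steps that must appear atomic with respect to the bus... */
	nvdimm_bus_unlock(dev);
}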

struct nvdimm_map {
	struct nvdimm_bus *nvdimm_bus;
	struct list_head list;
	resource_size_t offset;
	unsigned long flags;
	size_t size;
	union {
		void *mem;
		void __iomem *iomem;
	};
	struct kref kref;
};

static struct nvdimm_map *find_nvdimm_map(struct device *dev,
		resource_size_t offset)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
		if (nvdimm_map->offset == offset)
			return nvdimm_map;
	return NULL;
}

static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
		resource_size_t offset, size_t size, unsigned long flags)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
	if (!nvdimm_map)
		return NULL;

	INIT_LIST_HEAD(&nvdimm_map->list);
	nvdimm_map->nvdimm_bus = nvdimm_bus;
	nvdimm_map->offset = offset;
	nvdimm_map->flags = flags;
	nvdimm_map->size = size;
	kref_init(&nvdimm_map->kref);

	if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
		dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
				&offset, size, dev_name(dev));
		goto err_request_region;
	}

	if (flags)
		nvdimm_map->mem = memremap(offset, size, flags);
	else
		nvdimm_map->iomem = ioremap(offset, size);

	/* ->mem and ->iomem share storage, so this checks either mapping */
	if (!nvdimm_map->mem)
		goto err_map;

	dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
			__func__);
	list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);

	return nvdimm_map;

 err_map:
	release_mem_region(offset, size);
 err_request_region:
	kfree(nvdimm_map);
	return NULL;
}

static void nvdimm_map_release(struct kref *kref)
{
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = container_of(kref, struct nvdimm_map, kref);
	nvdimm_bus = nvdimm_map->nvdimm_bus;

	dev_dbg(&nvdimm_bus->dev, "%s: %pa\n", __func__, &nvdimm_map->offset);
	list_del(&nvdimm_map->list);
	if (nvdimm_map->flags)
		memunmap(nvdimm_map->mem);
	else
		iounmap(nvdimm_map->iomem);
	release_mem_region(nvdimm_map->offset, nvdimm_map->size);
	kfree(nvdimm_map);
}

static void nvdimm_map_put(void *data)
{
	struct nvdimm_map *nvdimm_map = data;
	struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	kref_put(&nvdimm_map->kref, nvdimm_map_release);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

/**
 * devm_nvdimm_memremap - map a resource that is shared across regions
 * @dev: device that will own a reference to the shared mapping
 * @offset: physical base address of the mapping
 * @size: mapping size
 * @flags: memremap flags, or zero to perform an ioremap instead
 *
 * Returns the mapped address on success, or NULL on failure.
 */
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	struct nvdimm_map *nvdimm_map;

	nvdimm_bus_lock(dev);
	nvdimm_map = find_nvdimm_map(dev, offset);
	if (!nvdimm_map)
		nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
	else
		kref_get(&nvdimm_map->kref);
	nvdimm_bus_unlock(dev);

	if (!nvdimm_map)
		return NULL;

	if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
		return NULL;

	return nvdimm_map->mem;
}
EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);
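
/*
 * Usage sketch (illustrative, not part of the original file): a region
 * driver mapping a shared control window with write-back caching. A
 * flags value of 0 would request an ioremap() instead; the base and
 * size here are caller-provided placeholders.
 */
static __maybe_unused void *example_map_window(struct device *dev,
		resource_size_t base, size_t size)
{
	return devm_nvdimm_memremap(dev, base, size, MEMREMAP_WB);
}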

/**
 * nd_fletcher64 - 64-bit Fletcher checksum over an array of 32-bit words
 * @addr: buffer to checksum, read as an array of u32
 * @len: buffer length in bytes
 * @le: true if the buffer contents are little-endian on-media data
 */
u64 nd_fletcher64(void *addr, size_t len, bool le)
{
	u32 *buf = addr;
	u32 lo32 = 0;
	u64 hi32 = 0;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
		hi32 += lo32;
	}

	return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);
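
/*
 * Worked example (illustrative, not part of the original file): for a
 * buffer holding the two words { 1, 2 }, the loop yields lo32 = 1,
 * hi32 = 1 after the first word and lo32 = 3, hi32 = 4 after the
 * second, so nd_fletcher64() returns (4ULL << 32) | 3. The helper below
 * assumes little-endian on-media label data.
 */
static __maybe_unused u64 example_label_checksum(void *label, size_t len)
{
	return nd_fletcher64(label, len, true);
}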

struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);

struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return &nvdimm_bus->dev;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);

static bool is_uuid_sep(char sep)
{
	if (sep == '\n' || sep == '-' || sep == ':' || sep == '\0')
		return true;
	return false;
}

/* parse 16 hex byte pairs, each pair optionally followed by a separator */
static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
		size_t len)
{
	const char *str = buf;
	u8 uuid[16];
	int i;

	for (i = 0; i < 16; i++) {
		if (!isxdigit(str[0]) || !isxdigit(str[1])) {
			dev_dbg(dev, "%s: pos: %d buf[%zd]: %c buf[%zd]: %c\n",
					__func__, i, str - buf, str[0],
					str + 1 - buf, str[1]);
			return -EINVAL;
		}

		uuid[i] = (hex_to_bin(str[0]) << 4) | hex_to_bin(str[1]);
		str += 2;
		if (is_uuid_sep(*str))
			str++;
	}

	memcpy(uuid_out, uuid, sizeof(uuid));
	return 0;
}

/**
 * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
 * @dev: container device for the uuid property
 * @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 * @len: length of @buf
 *
 * Enforce that uuids can only be changed while the device is disabled
 * (driver detached).
 * LOCKING: expects device_lock() is held on entry
 */
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
		size_t len)
{
	u8 uuid[16];
	int rc;

	if (dev->driver)
		return -EBUSY;

	rc = nd_uuid_parse(dev, uuid, buf, len);
	if (rc)
		return rc;

	kfree(*uuid_out);
	*uuid_out = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!(*uuid_out))
		return -ENOMEM;

	return 0;
}
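
/*
 * Usage sketch (illustrative, not part of the original file): a sysfs
 * store handler accepting input such as
 * "e39c6fde-16a2-4d4a-8a06-b1e9a1e0b5a6" ('-' and ':' separators are
 * optional, per nd_uuid_parse() above). The example_dev container is
 * hypothetical.
 */
struct example_dev {				/* hypothetical */
	struct device dev;
	u8 *uuid;
};

static __maybe_unused ssize_t example_uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct example_dev *ex = container_of(dev, struct example_dev, dev);
	ssize_t rc;

	device_lock(dev);		/* nd_uuid_store() expects this held */
	rc = nd_uuid_store(dev, &ex->uuid, buf, len);
	device_unlock(dev);

	return rc ? rc : len;
}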

ssize_t nd_sector_size_show(unsigned long current_lbasize,
		const unsigned long *supported, char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; supported[i]; i++)
		if (current_lbasize == supported[i])
			len += sprintf(buf + len, "[%ld] ", supported[i]);
		else
			len += sprintf(buf + len, "%ld ", supported[i]);
	len += sprintf(buf + len, "\n");
	return len;
}

ssize_t nd_sector_size_store(struct device *dev, const char *buf,
		unsigned long *current_lbasize, const unsigned long *supported)
{
	unsigned long lbasize;
	int rc, i;

	if (dev->driver)
		return -EBUSY;

	rc = kstrtoul(buf, 0, &lbasize);
	if (rc)
		return rc;

	for (i = 0; supported[i]; i++)
		if (lbasize == supported[i])
			break;

	if (supported[i]) {
		*current_lbasize = lbasize;
		return 0;
	} else {
		return -EINVAL;
	}
}
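
/*
 * Usage sketch (illustrative, not part of the original file): the
 * supported-size table is zero-terminated, and the currently selected
 * size is bracketed in the show output. With cur == 4096 the sketch
 * below emits "512 [4096] \n".
 */
static __maybe_unused ssize_t example_sector_size_show(unsigned long cur,
		char *buf)
{
	static const unsigned long example_supported[] = { 512, 4096, 0 };

	return nd_sector_size_show(cur, example_supported, buf);
}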

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cmd, len = 0;
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct device *parent = nvdimm_bus->dev.parent;

	if (nd_desc->provider_name)
		return nd_desc->provider_name;
	else if (parent)
		return dev_name(parent);
	else
		return "unknown";
}

static ssize_t provider_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);

/* a device_lock() cycle flushes any probe/remove operation in flight */
static int flush_namespaces(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	return 0;
}

static int flush_regions_dimms(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	device_for_each_child(dev, NULL, flush_namespaces);
	return 0;
}

static ssize_t wait_probe_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc;

	if (nd_desc->flush_probe) {
		rc = nd_desc->flush_probe(nd_desc);
		if (rc)
			return rc;
	}
	nd_synchronize();
	device_for_each_child(dev, NULL, flush_regions_dimms);
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);

static struct attribute *nvdimm_bus_attributes[] = {
	&dev_attr_commands.attr,
	&dev_attr_wait_probe.attr,
	&dev_attr_provider.attr,
	NULL,
};

struct attribute_group nvdimm_bus_attribute_group = {
	.attrs = nvdimm_bus_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_bus_attribute_group);

static void set_badblock(struct badblocks *bb, sector_t s, int num)
{
	dev_dbg(bb->dev, "Found a poison range (0x%llx, 0x%llx)\n",
			(u64) s * 512, (u64) num * 512);
	/* this isn't an error as the hardware will still throw an exception */
	if (badblocks_set(bb, s, num, 1))
		dev_info_once(bb->dev, "%s: failed for sector %llx\n",
				__func__, (u64) s);
}

/**
 * __add_badblock_range() - Convert a physical address range to bad sectors
 * @bb:		badblocks instance to populate
 * @ns_offset:	namespace offset where the error range begins (in bytes)
 * @len:	number of bytes of poison to be added
 *
 * This assumes that the range provided with (ns_offset, len) is within
 * the bounds of physical addresses for this namespace, i.e. lies in the
 * interval [ns_start, ns_start + ns_size).
 */
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
	const unsigned int sector_size = 512;
	sector_t start_sector;
	u64 num_sectors;
	u32 rem;

	start_sector = div_u64(ns_offset, sector_size);
	num_sectors = div_u64_rem(len, sector_size, &rem);
	if (rem)
		num_sectors++;

	if (unlikely(num_sectors > (u64)INT_MAX)) {
		u64 remaining = num_sectors;
		sector_t s = start_sector;

		while (remaining) {
			int done = min_t(u64, remaining, INT_MAX);

			set_badblock(bb, s, done);
			remaining -= done;
			s += done;
		}
	} else
		set_badblock(bb, start_sector, num_sectors);
}
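
/*
 * Worked example: ns_offset = 0x200 and len = 0x300 give start_sector = 1
 * and num_sectors = 2 (768 bytes is one full sector plus a 256-byte
 * remainder, which rounds up), so sectors 1 and 2 are marked bad.
 */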

static void badblocks_populate(struct list_head *poison_list,
		struct badblocks *bb, const struct resource *res)
{
	struct nd_poison *pl;

	if (list_empty(poison_list))
		return;

	list_for_each_entry(pl, poison_list, list) {
		u64 pl_end = pl->start + pl->length - 1;

		/* Discard intervals with no intersection */
		if (pl_end < res->start)
			continue;
		if (pl->start > res->end)
			continue;
		/* Deal with any overlap after start of the namespace */
		if (pl->start >= res->start) {
			u64 start = pl->start;
			u64 len;

			if (pl_end <= res->end)
				len = pl->length;
			else
				len = res->start + resource_size(res)
					- pl->start;
			__add_badblock_range(bb, start - res->start, len);
			continue;
		}
		/* Deal with overlap for poison starting before the namespace */
		if (pl->start < res->start) {
			u64 len;

			if (pl_end < res->end)
				len = pl->start + pl->length - res->start;
			else
				len = resource_size(res);
			__add_badblock_range(bb, 0, len);
		}
	}
}

/**
 * nvdimm_badblocks_populate() - Convert a list of poison ranges to badblocks
 * @nd_region: parent region of the range to interrogate
 * @bb: badblocks instance to populate
 * @res: resource range to consider
 *
 * The poison list generated during bus initialization may contain
 * multiple, possibly overlapping physical address ranges.  Compare each
 * of these ranges to the resource range currently being initialized,
 * and add badblocks entries for all matching sub-ranges.
 */
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct resource *res)
{
	struct nvdimm_bus *nvdimm_bus;
	struct list_head *poison_list;

	if (!is_nd_pmem(&nd_region->dev)) {
		dev_WARN_ONCE(&nd_region->dev, 1,
				"%s only valid for pmem regions\n", __func__);
		return;
	}
	nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	poison_list = &nvdimm_bus->poison_list;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	badblocks_populate(poison_list, bb, res);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
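
/*
 * Usage sketch (illustrative, not part of the original file): a pmem
 * namespace driver populating a devm-managed badblocks instance at probe
 * time, where ns_res would describe the namespace's physical range.
 */
static __maybe_unused int example_probe_badblocks(struct device *dev,
		struct nd_region *nd_region, struct badblocks *bb,
		const struct resource *ns_res)
{
	int rc = devm_init_badblocks(dev, bb);

	if (rc)
		return rc;
	nvdimm_badblocks_populate(nd_region, bb, ns_res);
	return 0;
}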

static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length,
			gfp_t flags)
{
	struct nd_poison *pl;

	pl = kzalloc(sizeof(*pl), flags);
	if (!pl)
		return -ENOMEM;

	pl->start = addr;
	pl->length = length;
	list_add_tail(&pl->list, &nvdimm_bus->poison_list);

	return 0;
}

static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	struct nd_poison *pl;

	if (list_empty(&nvdimm_bus->poison_list))
		return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);

	/*
	 * There is a chance this is a duplicate, check for those first.
	 * This will be the common case as ARS_STATUS returns all known
	 * errors in the SPA space, and we can't query it per region
	 */
	list_for_each_entry(pl, &nvdimm_bus->poison_list, list)
		if (pl->start == addr) {
			/* If length has changed, update this list entry */
			if (pl->length != length)
				pl->length = length;
			return 0;
		}

	/*
	 * If not a duplicate or a simple length update, add the entry as is,
	 * as any overlapping ranges will get resolved when the list is consumed
	 * and converted to badblocks
	 */
	return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);
}

int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	int rc;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	rc = bus_add_poison(nvdimm_bus, addr, length);
	nvdimm_bus_unlock(&nvdimm_bus->dev);

	return rc;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);
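
/*
 * Usage sketch (illustrative, not part of the original file): a bus
 * provider feeding address-range-scrub results into the poison list one
 * range at a time. struct example_ars_record is hypothetical and merely
 * stands in for a provider-specific error record.
 */
struct example_ars_record {			/* hypothetical */
	u64 err_address;
	u64 length;
};

static __maybe_unused int example_report_poison(struct nvdimm_bus *bus,
		const struct example_ars_record *recs, int num)
{
	int i, rc;

	for (i = 0; i < num; i++) {
		rc = nvdimm_bus_add_poison(bus, recs[i].err_address,
				recs[i].length);
		if (rc)
			return rc;
	}
	return 0;
}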

void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus,
		phys_addr_t start, unsigned int len)
{
	struct list_head *poison_list = &nvdimm_bus->poison_list;
	u64 clr_end = start + len - 1;
	struct nd_poison *pl, *next;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	WARN_ON_ONCE(list_empty(poison_list));

	/*
	 * [start, clr_end] is the poison interval being cleared.
	 * [pl->start, pl_end] is the poison_list entry we're comparing
	 * the above interval against. The poison list entry may need
	 * to be modified (update either start or length), deleted, or
	 * split into two based on the overlap characteristics
	 */

	list_for_each_entry_safe(pl, next, poison_list, list) {
		u64 pl_end = pl->start + pl->length - 1;

		/* Skip intervals with no intersection */
		if (pl_end < start)
			continue;
		if (pl->start > clr_end)
			continue;
		/* Delete completely overlapped poison entries */
		if ((pl->start >= start) && (pl_end <= clr_end)) {
			list_del(&pl->list);
			kfree(pl);
			continue;
		}
		/* Adjust start point of partially cleared entries */
		if ((start <= pl->start) && (clr_end > pl->start)) {
			pl->length -= clr_end - pl->start + 1;
			pl->start = clr_end + 1;
			continue;
		}
		/* Adjust pl->length for partial clearing at the tail end */
		if ((pl->start < start) && (pl_end <= clr_end)) {
			/* pl->start remains the same */
			pl->length = start - pl->start;
			continue;
		}
		/*
		 * If clearing in the middle of an entry, we split it into
		 * two by modifying the current entry to represent one half of
		 * the split, and adding a new entry for the second half.
		 */
		if ((pl->start < start) && (pl_end > clr_end)) {
			u64 new_start = clr_end + 1;
			u64 new_len = pl_end - new_start + 1;

			/* Add new entry covering the right half */
			add_poison(nvdimm_bus, new_start, new_len, GFP_NOIO);
			/* Adjust this entry to cover the left half */
			pl->length = start - pl->start;
			continue;
		}
	}
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_clear_from_poison_list);
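
/*
 * Worked example: for a poison_list entry { start = 0x1000, length =
 * 0x1000 } (pl_end = 0x1fff), clearing start = 0x1800, len = 0x100
 * (clr_end = 0x18ff) hits the split case above: the entry is trimmed
 * to { 0x1000, 0x800 } and a new entry { 0x1900, 0x700 } is added to
 * cover the right half.
 */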

#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	struct blk_integrity bi;

	if (meta_size == 0)
		return 0;

	memset(&bi, 0, sizeof(bi));

	bi.tuple_size = meta_size;
	bi.tag_size = meta_size;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, 1);

	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#else /* CONFIG_BLK_DEV_INTEGRITY */
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#endif

static __init int libnvdimm_init(void)
{
	int rc;

	rc = nvdimm_bus_init();
	if (rc)
		return rc;
	rc = nvdimm_init();
	if (rc)
		goto err_dimm;
	rc = nd_region_init();
	if (rc)
		goto err_region;
	return 0;
 err_region:
	nvdimm_exit();
 err_dimm:
	nvdimm_bus_exit();
	return rc;
}

static __exit void libnvdimm_exit(void)
{
	WARN_ON(!list_empty(&nvdimm_bus_list));
	nd_region_exit();
	nvdimm_exit();
	nvdimm_bus_exit();
	nd_region_devs_exit();
	nvdimm_devs_exit();
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);
705