// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 */

#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

static void hwpoison_clear(struct pmem_device *pmem,
		phys_addr_t phys, unsigned int len)
{
	unsigned long pfn_start, pfn_end, pfn;

	/* only pmem in the linear map supports HWPoison */
	if (is_vmalloc_addr(pmem->virt_addr))
		return;

	pfn_start = PHYS_PFN(phys);
	pfn_end = pfn_start + PHYS_PFN(len);
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * Note, no need to hold a get_dev_pagemap() reference
		 * here since we're in the driver I/O path and
		 * outstanding I/O requests pin the dev_pagemap.
		 */
		if (test_and_clear_pmem_poison(page))
			clear_mce_nospec(pfn);
	}
}

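/*
 * Ask the bus provider to clear poison in a range of the namespace, then
 * reconcile kernel state: drop the HWPoison page flags, clear the
 * badblocks entries for any fully cleared sectors (notifying the sysfs
 * 'badblocks' dirent), and invalidate the CPU cache over the range.
 * Returns BLK_STS_IOERR if less than the requested length was cleared.
 */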
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	blk_status_t rc = BLK_STS_OK;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = BLK_STS_IOERR;
	if (cleared > 0 && cleared / 512) {
		hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
		cleared /= 512;
		dev_dbg(dev, "%#llx clear %ld sector%s\n",
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	arch_invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}

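/*
 * Copy @len bytes from @page (starting at @off) into persistent memory at
 * @pmem_addr using cache-flushing stores. The source is mapped with
 * kmap_atomic() one page at a time, so the copy may span multiple pages.
 */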
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
}

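/*
 * Machine-check-safe copy from persistent memory into @page. If
 * copy_mc_to_kernel() reports an uncopied remainder (i.e. it consumed
 * poison), the read is failed with BLK_STS_IOERR.
 */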
static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	unsigned long rem;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		rem = copy_mc_to_kernel(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rem)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
	return BLK_STS_OK;
}

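/*
 * Read @len bytes at @sector into @page. The sector is translated into an
 * offset within the pmem mapping (accounting for ->data_offset); ranges
 * that overlap known badblocks fail immediately with BLK_STS_IOERR.
 */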
static blk_status_t pmem_do_read(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		return BLK_STS_IOERR;

	rc = read_pmem(page, page_off, pmem_addr, len);
	flush_dcache_page(page);
	return rc;
}

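/*
 * Write @len bytes from @page to @sector. When the target range is marked
 * bad, the data is written, the poison is cleared, and the data is written
 * again; see the comment below for why both writes are needed.
 */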
static blk_status_t pmem_do_write(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	/*
	 * Note that we write the data both before and after
	 * clearing poison.  The write before clear poison
	 * handles situations where the latest written data is
	 * preserved and the clear poison operation simply marks
	 * the address range as valid without changing the data.
	 * In this case application software can assume that an
	 * interrupted write will either return the new good
	 * data or an error.
	 *
	 * However, if pmem_clear_poison() leaves the data in an
	 * indeterminate state we need to perform the write
	 * after clear poison.
	 */
	flush_dcache_page(page);
	write_pmem(pmem_addr, page, page_off, len);
	if (unlikely(bad_pmem)) {
		rc = pmem_clear_poison(pmem, pmem_off, len);
		write_pmem(pmem_addr, page, page_off, len);
	}

	return rc;
}

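/*
 * bio-based I/O entry point. REQ_PREFLUSH triggers an nvdimm_flush() of the
 * region before the payload is processed and REQ_FUA triggers one after;
 * each segment is completed synchronously via pmem_do_read()/pmem_do_write(),
 * with optional I/O accounting around the loop.
 */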
static blk_qc_t pmem_submit_bio(struct bio *bio)
{
	int ret = 0;
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = bio->bi_bdev->bd_disk->private_data;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_PREFLUSH)
		ret = nvdimm_flush(nd_region, bio);

	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		if (op_is_write(bio_op(bio)))
			rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		else
			rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	if (bio->bi_opf & REQ_FUA)
		ret = nvdimm_flush(nd_region, bio);

	if (ret)
		bio->bi_status = errno_to_blk_status(ret);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

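/*
 * Synchronous ->rw_page() entry point: transfer a single (possibly
 * compound) page to or from the device.
 */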
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	blk_status_t rc;

	if (op_is_write(op))
		rc = pmem_do_write(pmem, page, 0, sector, thp_size(page));
	else
		rc = pmem_do_read(pmem, page, 0, sector, thp_size(page));
	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, op_is_write(op), 0);

	return blk_status_to_errno(rc);
}

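/*
 * Translate @pgoff within the device into a kernel virtual address and pfn
 * for DAX. Returns -EIO if the requested range overlaps badblocks,
 * otherwise the number of pages that may be accessed contiguously
 * (clamped to the request whenever the device has any badblocks).
 */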
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.submit_bio =		pmem_submit_bio,
	.rw_page =		pmem_rw_page,
};

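/*
 * DAX ->zero_page_range() op: zero by writing ZERO_PAGE() through
 * pmem_do_write() so that badblocks handling and poison clearing apply to
 * the zeroed range.
 */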
static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				    size_t nr_pages)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return blk_status_to_errno(pmem_do_write(pmem, ZERO_PAGE(0), 0,
				   PFN_PHYS(pgoff) >> SECTOR_SHIFT,
				   PAGE_SIZE));
}

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

/*
 * Use the 'no check' versions of copy_from_iter_flushcache() and
 * copy_mc_to_iter() to bypass HARDENED_USERCOPY overhead. Bounds
 * checking, both file offset and device offset, is handled by
 * dax_iomap_actor()
 */
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_from_iter_flushcache(addr, bytes, i);
}

static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_mc_to_iter(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.dax_supported = generic_fsdax_supported,
	.copy_from_iter = pmem_copy_from_iter,
	.copy_to_iter = pmem_copy_to_iter,
	.zero_page_range = pmem_dax_zero_page_range,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

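/*
 * The dev_pagemap's reference counter is wired to the request_queue's
 * q_usage_counter (see pmem_attach_disk()): ->kill starts a queue freeze
 * and ->cleanup tears the queue down, so page pins drain together with
 * outstanding block I/O.
 */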
static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	struct request_queue *q =
		container_of(pgmap->ref, struct request_queue, q_usage_counter);

	blk_cleanup_queue(q);
}

static void pmem_release_queue(void *pgmap)
{
	pmem_pagemap_cleanup(pgmap);
}

static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
{
	struct request_queue *q =
		container_of(pgmap->ref, struct request_queue, q_usage_counter);

	blk_freeze_queue_start(q);
}

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
	put_disk(pmem->disk);
}

static const struct dev_pagemap_ops fsdax_pagemap_ops = {
	.kill			= pmem_pagemap_kill,
	.cleanup		= pmem_pagemap_cleanup,
};

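/*
 * Map the namespace and register it as a block device. Three mapping modes
 * are handled: a pfn namespace (struct page metadata described by a pfn
 * superblock) mapped with devm_memremap_pages(), a raw namespace that
 * should still be mapped with struct pages, and a plain devm_memremap()
 * fallback without struct pages (in which case QUEUE_FLAG_DAX is not set).
 * The function then sets queue limits, populates badblocks, allocates the
 * dax_device, and adds the gendisk.
 */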
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int nid = dev_to_node(dev), fua;
	struct resource *res = &nsio->res;
	struct range bb_range;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	struct device *gendev;
	struct gendisk *disk;
	void *addr;
	int rc;
	unsigned long flags = 0UL;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (rc)
		return rc;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_namespace_disable(dev, ndns);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue(dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	pmem->pgmap.ref = &q->q_usage_counter;
	if (is_nd_pfn(dev)) {
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			range_len(&pmem->pgmap.range);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
		bb_range.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		pmem->pgmap.range.start = res->start;
		pmem->pgmap.range.end = res->end;
		pmem->pgmap.nr_range = 1;
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
	} else {
		if (devm_add_action_or_reset(dev, pmem_release_queue,
					&pmem->pgmap))
			return -ENOMEM;
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);
		bb_range.start = res->start;
		bb_range.end = res->end;
	}

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, true, fua);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	if (pmem->pfn_flags & PFN_MAP)
		blk_queue_flag_set(QUEUE_FLAG_DAX, q);

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	pmem->disk = disk;

	disk->fops		= &pmem_fops;
	disk->queue		= q;
	disk->flags		= GENHD_FL_EXT_DEVT;
	disk->private_data	= pmem;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
	disk->bb = &pmem->bb;

	if (is_nvdimm_sync(nd_region))
		flags = DAXDEV_F_SYNC;
	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
	if (IS_ERR(dax_dev)) {
		put_disk(disk);
		return PTR_ERR(dax_dev);
	}
	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
	pmem->dax_dev = dax_dev;
	gendev = disk_to_dev(disk);
	gendev->groups = pmem_attribute_groups;

	device_add_disk(dev, disk, NULL);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	nvdimm_check_and_set_ro(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");

	return 0;
}

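/*
 * Decide how this namespace is claimed: BTT and pfn devices attach
 * directly, otherwise probe for BTT, pfn, and dax info blocks in turn
 * (returning -ENXIO when one is found so the matching personality can
 * claim the namespace) and fall back to attaching a raw pmem disk.
 */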
static int nd_pmem_probe(struct device *dev)
{
	int ret;
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (ret)
		return ret;

	ret = nd_btt_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;

	/*
	 * There are two failure conditions here: either there is no
	 * info reserve block, or we found a valid info reserve block
	 * but failed to initialize the pfn superblock.
	 *
	 * In the first case, treat the namespace as a raw pmem namespace
	 * and attach a disk.
	 *
	 * In the latter case, consider this a success and advance the
	 * namespace seed.
	 */
	ret = nd_pfn_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	ret = nd_dax_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	/* probe complete, attach handles namespace enabling */
	devm_namespace_disable(dev, ndns);

	return pmem_attach_disk(dev, ndns);
}

static void nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes nd_device_lock() context so that it
		 * does not race nd_pmem_notify().
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

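/*
 * On NVDIMM_REVALIDATE_POISON events, recompute the address range covered
 * by this device (accounting for BTT namespaces and pfn start_pad /
 * end_trunc), re-populate the badblocks list, and notify the sysfs
 * 'badblocks' file when one is registered.
 */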
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct badblocks *bb;
	struct range range;
	struct kernfs_node *bb_state;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	range.start = nsio->res.start + offset;
	range.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &range);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");