xref: /openbmc/linux/drivers/nvdimm/pmem.c (revision 4e95bc26)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"
#include "nd-core.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

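/*
 * Forget the per-page poison state for a range whose media errors have
 * just been successfully cleared, so the affected pages can be used
 * again.
 */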
static void hwpoison_clear(struct pmem_device *pmem,
		phys_addr_t phys, unsigned int len)
{
	unsigned long pfn_start, pfn_end, pfn;

	/* only pmem in the linear map supports HWPoison */
	if (is_vmalloc_addr(pmem->virt_addr))
		return;

	pfn_start = PHYS_PFN(phys);
	pfn_end = pfn_start + PHYS_PFN(len);
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * Note, no need to hold a get_dev_pagemap() reference
		 * here since we're in the driver I/O path and
		 * outstanding I/O requests pin the dev_pagemap.
		 */
		if (test_and_clear_pmem_poison(page))
			clear_mce_nospec(pfn);
	}
}

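/*
 * Ask the nvdimm bus provider to clear media errors in the given byte
 * range, then bring the driver's badblocks list, per-page poison
 * tracking, and CPU caches back in sync with the device.
 */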
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	blk_status_t rc = BLK_STS_OK;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = BLK_STS_IOERR;
	if (cleared > 0 && cleared / 512) {
		hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
		cleared /= 512;
		dev_dbg(dev, "%#llx clear %ld sector%s\n",
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	arch_invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}

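/*
 * Copy between a (possibly highmem) page and pmem one kmap chunk at a
 * time.  Writes go through memcpy_flushcache() so the data is pushed
 * toward the persistence domain; reads go through memcpy_mcsafe(),
 * which reports consumption of poisoned media as a short copy instead
 * of an unrecoverable machine check, and the remainder is turned into
 * BLK_STS_IOERR.
 */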
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
}

static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	unsigned long rem;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		rem = memcpy_mcsafe(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rem)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
	return BLK_STS_OK;
}

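/*
 * Service one bio_vec worth of I/O at @sector.  Reads from ranges known
 * to be bad fail immediately; writes always land in pmem and, when the
 * range was poisoned, are followed by a poison-clear and a second copy
 * of the data (see the comment in the write path below).
 */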
static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, unsigned int op,
			sector_t sector)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (!op_is_write(op)) {
		if (unlikely(bad_pmem))
			rc = BLK_STS_IOERR;
		else {
			rc = read_pmem(page, off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		write_pmem(pmem_addr, page, off, len);
		if (unlikely(bad_pmem)) {
			rc = pmem_clear_poison(pmem, pmem_off, len);
			write_pmem(pmem_addr, page, off, len);
		}
	}

	return rc;
}

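/*
 * bio submission entry point for the bio-based pmem block device.
 * REQ_PREFLUSH and REQ_FUA are honored by flushing the nvdimm region
 * before and/or after the data is copied.
 */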
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_PREFLUSH)
		nvdimm_flush(nd_region);

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_op(bio), iter.bi_sector);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio->bi_opf & REQ_FUA)
		nvdimm_flush(nd_region);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	blk_status_t rc;

	rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
			  0, op, sector);

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, op_is_write(op), 0);

	return blk_status_to_errno(rc);
}

/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

/*
 * Use the 'no check' versions of copy_from_iter_flushcache() and
 * copy_to_iter_mcsafe() to bypass HARDENED_USERCOPY overhead. Bounds
 * checking, both file offset and device offset, is handled by
 * dax_iomap_actor()
 */
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_from_iter_flushcache(addr, bytes, i);
}

static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_to_iter_mcsafe(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.dax_supported = generic_fsdax_supported,
	.copy_from_iter = pmem_copy_from_iter,
	.copy_to_iter = pmem_copy_to_iter,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

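/*
 * Teardown helpers for the request_queue.  Its q_usage_counter percpu_ref
 * doubles as the dev_pagemap reference, so the pagemap 'kill' callback
 * starts a queue freeze and the 'cleanup' callback releases the queue;
 * pmem_release_queue() is the devm-action form used when no pagemap is
 * created.
 */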
static void __pmem_release_queue(struct percpu_ref *ref)
{
	struct request_queue *q;

	q = container_of(ref, typeof(*q), q_usage_counter);
	blk_cleanup_queue(q);
}

static void pmem_release_queue(void *ref)
{
	__pmem_release_queue(ref);
}

static void pmem_freeze_queue(struct percpu_ref *ref)
{
	struct request_queue *q;

	q = container_of(ref, typeof(*q), q_usage_counter);
	blk_freeze_queue_start(q);
}

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
	put_disk(pmem->disk);
}

static void pmem_release_pgmap_ops(void *__pgmap)
{
	dev_pagemap_put_ops();
}

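/*
 * Filesystem-DAX page lifetime: the dax code waits for pages to go idle
 * before completing truncate; fsdax_pagefree() wakes those waiters when
 * a page is returned, and setup_pagemap_fsdax() installs it along with
 * the MEMORY_DEVICE_FS_DAX pagemap type.
 */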
static void fsdax_pagefree(struct page *page, void *data)
{
	wake_up_var(&page->_refcount);
}

static int setup_pagemap_fsdax(struct device *dev, struct dev_pagemap *pgmap)
{
	dev_pagemap_get_ops();
	if (devm_add_action_or_reset(dev, pmem_release_pgmap_ops, pgmap))
		return -ENOMEM;
	pgmap->type = MEMORY_DEVICE_FS_DAX;
	pgmap->page_free = fsdax_pagefree;

	return 0;
}

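/*
 * Set up the pmem block device and dax_device for a namespace: map the
 * device memory (with or without struct pages), create the gendisk and
 * request_queue, and seed the badblocks list from the region.
 */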
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int nid = dev_to_node(dev), fua;
	struct resource *res = &nsio->res;
	struct resource bb_res;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	struct device *gendev;
	struct gendisk *disk;
	void *addr;
	int rc;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	pmem->pgmap.ref = &q->q_usage_counter;
	pmem->pgmap.kill = pmem_freeze_queue;
	pmem->pgmap.cleanup = __pmem_release_queue;
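	/*
	 * Three mapping modes: pfn-mode namespaces get struct pages via
	 * devm_memremap_pages() with the layout described by the pfn info
	 * block; "map pages" namespaces get struct pages allocated from
	 * regular memory; otherwise the range is memremap()'d directly and
	 * page-backed (DAX) operation is unavailable.
	 */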
	if (is_nd_pfn(dev)) {
		if (setup_pagemap_fsdax(dev, &pmem->pgmap))
			return -ENOMEM;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			resource_size(&pmem->pgmap.res);
		pmem->pfn_flags |= PFN_MAP;
		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
		bb_res.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
		pmem->pgmap.altmap_valid = false;
		if (setup_pagemap_fsdax(dev, &pmem->pgmap))
			return -ENOMEM;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
	} else {
		if (devm_add_action_or_reset(dev, pmem_release_queue,
					&q->q_usage_counter))
			return -ENOMEM;
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);
		memcpy(&bb_res, &nsio->res, sizeof(bb_res));
	}

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, true, fua);
	blk_queue_make_request(q, pmem_make_request);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	if (pmem->pfn_flags & PFN_MAP)
		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
	q->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	pmem->disk = disk;

	disk->fops		= &pmem_fops;
	disk->queue		= q;
	disk->flags		= GENHD_FL_EXT_DEVT;
	disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
	disk->bb = &pmem->bb;

	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
	if (!dax_dev) {
		put_disk(disk);
		return -ENOMEM;
	}
	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
	pmem->dax_dev = dax_dev;

	gendev = disk_to_dev(disk);
	gendev->groups = pmem_attribute_groups;

	device_add_disk(dev, disk, NULL);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	revalidate_disk(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");

	return 0;
}

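/*
 * Probe dispatch: hand BTT namespaces to the btt driver, attach a disk
 * directly for pfn namespaces, and for raw namespaces first check
 * whether a btt, pfn, or dax info block claims the namespace before
 * falling back to a plain pmem disk.
 */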
static int nd_pmem_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	/* if we find a valid info-block we'll come back as that personality */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
			|| nd_dax_probe(dev, ndns) == 0)
		return -ENXIO;

	/* ...otherwise we're just a raw pmem device */
	return pmem_attach_disk(dev, ndns);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes device_lock() context to not race
		 * nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent));

	return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent));
}

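/*
 * Re-populate the badblocks list when the bus signals that the poison
 * list for this region may have changed, accounting for any btt/pfn
 * metadata offsets, and poke the sysfs 'badblocks' attribute so
 * userspace can re-read it.
 */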
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;
	struct badblocks *bb;
	struct kernfs_node *bb_state;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &res);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");