// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/ndctl.h>
#include <nd-core.h>
#include <linux/printk.h>
#include <linux/seq_buf.h>

#include "../watermark.h"
#include "nfit_test.h"
#include "ndtest.h"

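/*
 * Test topology constants: each emulated DIMM is DIMM_SIZE (32M) of
 * vmalloc()'d backing memory with a LABEL_SIZE (128K) label area.
 * NUM_INSTANCES platform devices are registered, each providing an
 * independent nvdimm bus, and a region maps at most NDTEST_MAX_MAPPING
 * DIMMs.
 */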
enum {
	DIMM_SIZE = SZ_32M,
	LABEL_SIZE = SZ_128K,
	NUM_INSTANCES = 2,
	NUM_DCR = 4,
	NDTEST_MAX_MAPPING = 6,
};

#define NDTEST_SCM_DIMM_CMD_MASK	   \
	((1ul << ND_CMD_GET_CONFIG_SIZE) | \
	 (1ul << ND_CMD_GET_CONFIG_DATA) | \
	 (1ul << ND_CMD_SET_CONFIG_DATA) | \
	 (1ul << ND_CMD_CALL))

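/*
 * Pack a fake firmware handle from the device topology:
 * node[27:16], socket[15:12], memory-controller[11:8], channel[7:4],
 * dimm[3:0].
 */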
#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm)			\
	(((node & 0xfff) << 16) | ((socket & 0xf) << 12)		\
	 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))

static DEFINE_SPINLOCK(ndtest_lock);
static struct ndtest_priv *instances[NUM_INSTANCES];
static struct class *ndtest_dimm_class;
static struct gen_pool *ndtest_pool;

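/* Five DIMMs spread across two sockets of node 0, all behind bus 0 */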
static struct ndtest_dimm dimm_group1[] = {
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
		.uuid_str = "1e5c75d2-b618-11ea-9aa3-507b9ddc0f72",
		.physical_id = 0,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
		.uuid_str = "1c4d43ac-b618-11ea-be80-507b9ddc0f72",
		.physical_id = 1,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
		.uuid_str = "a9f17ffc-b618-11ea-b36d-507b9ddc0f72",
		.physical_id = 2,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
		.uuid_str = "b6b83b22-b618-11ea-8aae-507b9ddc0f72",
		.physical_id = 3,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
		.uuid_str = "bf9baaee-b618-11ea-b181-507b9ddc0f72",
		.physical_id = 4,
		.num_formats = 2,
	},
};

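/*
 * A single DIMM on bus 1 with several health/error flags set, to
 * exercise the papr "flags" attribute and the NDD_UNARMED handling.
 */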
static struct ndtest_dimm dimm_group2[] = {
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
		.uuid_str = "ca0817e2-b618-11ea-9db3-507b9ddc0f72",
		.physical_id = 0,
		.num_formats = 1,
		.flags = PAPR_PMEM_UNARMED | PAPR_PMEM_EMPTY |
			 PAPR_PMEM_SAVE_FAILED | PAPR_PMEM_SHUTDOWN_DIRTY |
			 PAPR_PMEM_HEALTH_FATAL,
	},
};

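/*
 * Bus 0 mappings: region 0 interleaves the first 16M of DIMMs 0 and 1,
 * region 1 interleaves the second 16M of DIMMs 0-3, and regions 2-5
 * each map one full DIMM.
 */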
static struct ndtest_mapping region0_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = 0,
		.size = SZ_16M,
	},
	{
		.dimm = 1,
		.position = 1,
		.start = 0,
		.size = SZ_16M,
	}
};

static struct ndtest_mapping region1_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 1,
		.position = 1,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 2,
		.position = 2,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 3,
		.position = 3,
		.start = SZ_16M,
		.size = SZ_16M,
	},
};

static struct ndtest_mapping region2_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = 0,
		.size = DIMM_SIZE,
	},
};

static struct ndtest_mapping region3_mapping[] = {
	{
		.dimm = 1,
		.start = 0,
		.size = DIMM_SIZE,
	}
};

static struct ndtest_mapping region4_mapping[] = {
	{
		.dimm = 2,
		.start = 0,
		.size = DIMM_SIZE,
	}
};

static struct ndtest_mapping region5_mapping[] = {
	{
		.dimm = 3,
		.start = 0,
		.size = DIMM_SIZE,
	}
};

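/*
 * Regions exported by bus 0: two interleaved PMEM regions followed by
 * four single-DIMM regions declared with the legacy BLK type.
 */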
static struct ndtest_region bus0_regions[] = {
	{
		.type = ND_DEVICE_NAMESPACE_PMEM,
		.num_mappings = ARRAY_SIZE(region0_mapping),
		.mapping = region0_mapping,
		.size = DIMM_SIZE,
		.range_index = 1,
	},
	{
		.type = ND_DEVICE_NAMESPACE_PMEM,
		.num_mappings = ARRAY_SIZE(region1_mapping),
		.mapping = region1_mapping,
		.size = DIMM_SIZE * 2,
		.range_index = 2,
	},
	{
		.type = ND_DEVICE_NAMESPACE_BLK,
		.num_mappings = ARRAY_SIZE(region2_mapping),
		.mapping = region2_mapping,
		.size = DIMM_SIZE,
		.range_index = 3,
	},
	{
		.type = ND_DEVICE_NAMESPACE_BLK,
		.num_mappings = ARRAY_SIZE(region3_mapping),
		.mapping = region3_mapping,
		.size = DIMM_SIZE,
		.range_index = 4,
	},
	{
		.type = ND_DEVICE_NAMESPACE_BLK,
		.num_mappings = ARRAY_SIZE(region4_mapping),
		.mapping = region4_mapping,
		.size = DIMM_SIZE,
		.range_index = 5,
	},
	{
		.type = ND_DEVICE_NAMESPACE_BLK,
		.num_mappings = ARRAY_SIZE(region5_mapping),
		.mapping = region5_mapping,
		.size = DIMM_SIZE,
		.range_index = 6,
	},
};

static struct ndtest_mapping region6_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = 0,
		.size = DIMM_SIZE,
	},
};

static struct ndtest_region bus1_regions[] = {
	{
		.type = ND_DEVICE_NAMESPACE_IO,
		.num_mappings = ARRAY_SIZE(region6_mapping),
		.mapping = region6_mapping,
		.size = DIMM_SIZE,
		.range_index = 1,
	},
};

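/* Per-instance bus configuration, indexed by the platform device id */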
static struct ndtest_config bus_configs[NUM_INSTANCES] = {
	/* bus 0 */
	{
		.dimm_start = 0,
		.dimm_count = ARRAY_SIZE(dimm_group1),
		.dimms = dimm_group1,
		.regions = bus0_regions,
		.num_regions = ARRAY_SIZE(bus0_regions),
	},
	/* bus 1 */
	{
		.dimm_start = ARRAY_SIZE(dimm_group1),
		.dimm_count = ARRAY_SIZE(dimm_group2),
		.dimms = dimm_group2,
		.regions = bus1_regions,
		.num_regions = ARRAY_SIZE(bus1_regions),
	},
};

static inline struct ndtest_priv *to_ndtest_priv(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return container_of(pdev, struct ndtest_priv, pdev);
}

static int ndtest_config_get(struct ndtest_dimm *p, unsigned int buf_len,
			     struct nd_cmd_get_config_data_hdr *hdr)
{
	unsigned int len;

	if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
		return -EINVAL;

	hdr->status = 0;
	len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
	memcpy(hdr->out_buf, p->label_area + hdr->in_offset, len);

	return buf_len - len;
}

static int ndtest_config_set(struct ndtest_dimm *p, unsigned int buf_len,
			     struct nd_cmd_set_config_hdr *hdr)
{
	unsigned int len;

	if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
		return -EINVAL;

	len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
	memcpy(p->label_area + hdr->in_offset, hdr->in_buf, len);

	return buf_len - len;
}

static int ndtest_get_config_size(struct ndtest_dimm *dimm, unsigned int buf_len,
				  struct nd_cmd_get_config_size *size)
{
	size->status = 0;
	size->max_xfer = 8;
	size->config_size = dimm->config_size;

	return 0;
}

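/*
 * Bus ioctl backend: services the label configuration commands against
 * the DIMM's in-memory label_area and honours the fail_cmd /
 * fail_cmd_code error-injection knobs exposed in sysfs.
 */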
static int ndtest_ctl(struct nvdimm_bus_descriptor *nd_desc,
		      struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		      unsigned int buf_len, int *cmd_rc)
{
	struct ndtest_dimm *dimm;
	int _cmd_rc;

	if (!cmd_rc)
		cmd_rc = &_cmd_rc;

	*cmd_rc = 0;

	if (!nvdimm)
		return -EINVAL;

	dimm = nvdimm_provider_data(nvdimm);
	if (!dimm)
		return -EINVAL;

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		*cmd_rc = ndtest_get_config_size(dimm, buf_len, buf);
		break;
	case ND_CMD_GET_CONFIG_DATA:
		*cmd_rc = ndtest_config_get(dimm, buf_len, buf);
		break;
	case ND_CMD_SET_CONFIG_DATA:
		*cmd_rc = ndtest_config_set(dimm, buf_len, buf);
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Failures for a DIMM can be injected using fail_cmd and
	 * fail_cmd_code, see the device attributes below.
	 */
	if ((1 << cmd) & dimm->fail_cmd)
		return dimm->fail_cmd_code ? dimm->fail_cmd_code : -EIO;

	return 0;
}

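/*
 * Resource lookup hook registered with nfit_test_setup(): translate a
 * "physical" address handed out by ndtest_alloc_resource() back to its
 * tracking structure so the nfit_test mock infrastructure can find the
 * vmalloc() buffer backing it.
 */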
static struct nfit_test_resource *ndtest_resource_lookup(resource_size_t addr)
{
	int i;

	for (i = 0; i < NUM_INSTANCES; i++) {
		struct nfit_test_resource *n, *nfit_res = NULL;
		struct ndtest_priv *t = instances[i];

		if (!t)
			continue;
		spin_lock(&ndtest_lock);
		list_for_each_entry(n, &t->resources, list) {
			if (addr >= n->res.start && (addr < n->res.start
						+ resource_size(&n->res))) {
				nfit_res = n;
				break;
			} else if (addr >= (unsigned long) n->buf
					&& (addr < (unsigned long) n->buf
						+ resource_size(&n->res))) {
				nfit_res = n;
				break;
			}
		}
		spin_unlock(&ndtest_lock);
		if (nfit_res)
			return nfit_res;
	}

	pr_warn("Failed to get resource\n");

	return NULL;
}

static void ndtest_release_resource(void *data)
{
	struct nfit_test_resource *res = data;

	spin_lock(&ndtest_lock);
	list_del(&res->list);
	spin_unlock(&ndtest_lock);

	if (resource_size(&res->res) >= DIMM_SIZE)
		gen_pool_free(ndtest_pool, res->res.start,
				resource_size(&res->res));
	vfree(res->buf);
	kfree(res);
}

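/*
 * Allocate backing store for a simulated resource.  The data lives in
 * a vmalloc() buffer; allocations of at least DIMM_SIZE also get a
 * fake physical address carved out of ndtest_pool (above 4G) so they
 * look like distinct system-physical ranges, while smaller allocations
 * simply reuse the vmalloc address.
 */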
static void *ndtest_alloc_resource(struct ndtest_priv *p, size_t size,
				   dma_addr_t *dma)
{
	dma_addr_t __dma = 0;
	void *buf;
	struct nfit_test_resource *res;
	struct genpool_data_align data = {
		.align = SZ_128M,
	};

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	buf = vmalloc(size);
	if (!buf)
		goto buf_err;

	if (size >= DIMM_SIZE)
		__dma = gen_pool_alloc_algo(ndtest_pool, size,
					    gen_pool_first_fit_align, &data);
	else
		__dma = (unsigned long) buf;

	if (!__dma)
		goto buf_err;

	INIT_LIST_HEAD(&res->list);
	res->dev = &p->pdev.dev;
	res->buf = buf;
	res->res.start = __dma;
	res->res.end = __dma + size - 1;
	res->res.name = "NFIT";
	spin_lock_init(&res->lock);
	INIT_LIST_HEAD(&res->requests);
	spin_lock(&ndtest_lock);
	list_add(&res->list, &p->resources);
	spin_unlock(&ndtest_lock);

	if (dma)
		*dma = __dma;

	if (!devm_add_action(&p->pdev.dev, ndtest_release_resource, res))
		return res->buf;

	/* devm_add_action() failed, unlink the resource before freeing it */
	spin_lock(&ndtest_lock);
	list_del(&res->list);
	spin_unlock(&ndtest_lock);

buf_err:
	if (__dma && size >= DIMM_SIZE)
		gen_pool_free(ndtest_pool, __dma, size);
	vfree(buf);
	kfree(res);

	return NULL;
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct ndtest_region *region = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", region->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *ndtest_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static const struct attribute_group ndtest_region_attribute_group = {
	.name = "papr",
	.attrs = ndtest_region_attributes,
};

static const struct attribute_group *ndtest_region_attribute_groups[] = {
	&ndtest_region_attribute_group,
	NULL,
};

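/*
 * Register one nd_region for a bus.  PMEM regions get an interleave
 * set cookie derived from the first mapped DIMM's UUID and one mapping
 * per backing DIMM.
 */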
static int ndtest_create_region(struct ndtest_priv *p,
				struct ndtest_region *region)
{
	struct nd_mapping_desc mappings[NDTEST_MAX_MAPPING];
	struct nd_region_desc *ndr_desc, _ndr_desc;
	struct nd_interleave_set *nd_set;
	struct resource res;
	int i, ndimm = region->mapping[0].dimm;
	u64 uuid[2];

	memset(&res, 0, sizeof(res));
	memset(&mappings, 0, sizeof(mappings));
	memset(&_ndr_desc, 0, sizeof(_ndr_desc));
	ndr_desc = &_ndr_desc;

	if (!ndtest_alloc_resource(p, region->size, &res.start))
		return -ENOMEM;

	res.end = res.start + region->size - 1;
	ndr_desc->mapping = mappings;
	ndr_desc->res = &res;
	ndr_desc->provider_data = region;
	ndr_desc->attr_groups = ndtest_region_attribute_groups;

	if (uuid_parse(p->config->dimms[ndimm].uuid_str, (uuid_t *)uuid)) {
		pr_err("failed to parse UUID\n");
		return -ENXIO;
	}

	nd_set = devm_kzalloc(&p->pdev.dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	nd_set->cookie1 = cpu_to_le64(uuid[0]);
	nd_set->cookie2 = cpu_to_le64(uuid[1]);
	nd_set->altcookie = nd_set->cookie1;
	ndr_desc->nd_set = nd_set;

	if (region->type == ND_DEVICE_NAMESPACE_BLK) {
		/*
		 * The BLK aperture window driver and
		 * nvdimm_blk_region_create() have been removed from
		 * libnvdimm, so BLK-typed regions can no longer be
		 * registered; skip them.
		 */
		return 0;
	}

	for (i = 0; i < region->num_mappings; i++) {
		ndimm = region->mapping[i].dimm;
		mappings[i].start = region->mapping[i].start;
		mappings[i].size = region->mapping[i].size;
		mappings[i].position = region->mapping[i].position;
		mappings[i].nvdimm = p->config->dimms[ndimm].nvdimm;
	}

	ndr_desc->num_mappings = region->num_mappings;
	region->region = nvdimm_pmem_region_create(p->bus, ndr_desc);

	if (!region->region) {
		dev_err(&p->pdev.dev, "Error registering region %pR\n",
			ndr_desc->res);
		return -ENXIO;
	}

	return 0;
}

static int ndtest_init_regions(struct ndtest_priv *p)
{
	int i, ret = 0;

	for (i = 0; i < p->config->num_regions; i++) {
		ret = ndtest_create_region(p, &p->config->regions[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static void put_dimms(void *data)
{
	struct ndtest_priv *p = data;
	int i;

	for (i = 0; i < p->config->dimm_count; i++)
		if (p->config->dimms[i].dev) {
			device_unregister(p->config->dimms[i].dev);
			p->config->dimms[i].dev = NULL;
		}
}

static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);

	return sprintf(buf, "%#x\n", dimm->handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);

	return sprintf(buf, "%#x\n", dimm->fail_cmd);
}

static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t size)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
	unsigned long val;
	ssize_t rc;

	rc = kstrtoul(buf, 0, &val);
	if (rc)
		return rc;

	dimm->fail_cmd = val;

	return size;
}
static DEVICE_ATTR_RW(fail_cmd);

static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dimm->fail_cmd_code);
}

static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t size)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
	unsigned long val;
	ssize_t rc;

	rc = kstrtoul(buf, 0, &val);
	if (rc)
		return rc;

	dimm->fail_cmd_code = val;
	return size;
}
static DEVICE_ATTR_RW(fail_cmd_code);

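/*
 * Per-DIMM attributes exposed via the "nfit_test_dimm" class, e.g.
 * /sys/class/nfit_test_dimm/test_dimm0/.  A test can write a command
 * bitmask to fail_cmd (and, optionally, an errno to fail_cmd_code) to
 * make ndtest_ctl() fail those commands for the DIMM.
 */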
static struct attribute *dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_fail_cmd.attr,
	&dev_attr_fail_cmd_code.attr,
	NULL,
};

static struct attribute_group dimm_attribute_group = {
	.attrs = dimm_attributes,
};

static const struct attribute_group *dimm_attribute_groups[] = {
	&dimm_attribute_group,
	NULL,
};

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%#x\n", dimm->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "0x1234567\n");
}
static DEVICE_ATTR_RO(vendor);

static ssize_t id_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%04x-%02x-%04x-%08x", 0xabcd,
		       0xa, 2016, ~(dimm->handle));
}
static DEVICE_ATTR_RO(id);

static ssize_t nvdimm_handle_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%#x\n", dimm->handle);
}

static struct device_attribute dev_attr_nvdimm_show_handle = {
	.attr	= { .name = "handle", .mode = 0444 },
	.show	= nvdimm_handle_show,
};

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%04x\n", 0);
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t dirty_shutdown_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(dirty_shutdown);

static ssize_t formats_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%d\n", dimm->num_formats);
}
static DEVICE_ATTR_RO(formats);

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	if (dimm->num_formats > 1)
		return sprintf(buf, "0x201\n");

	return sprintf(buf, "0x101\n");
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "0x301\n");
}
static DEVICE_ATTR_RO(format1);

static umode_t ndtest_nvdimm_attr_visible(struct kobject *kobj,
					struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	if (a == &dev_attr_format1.attr && dimm->num_formats <= 1)
		return 0;

	return a->mode;
}

static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
	struct seq_buf s;
	u64 flags;

	flags = dimm->flags;

	seq_buf_init(&s, buf, PAGE_SIZE);
	if (flags & PAPR_PMEM_UNARMED_MASK)
		seq_buf_printf(&s, "not_armed ");

	if (flags & PAPR_PMEM_BAD_SHUTDOWN_MASK)
		seq_buf_printf(&s, "flush_fail ");

	if (flags & PAPR_PMEM_BAD_RESTORE_MASK)
		seq_buf_printf(&s, "restore_fail ");

	if (flags & PAPR_PMEM_SAVE_MASK)
		seq_buf_printf(&s, "save_fail ");

	if (flags & PAPR_PMEM_SMART_EVENT_MASK)
		seq_buf_printf(&s, "smart_notify ");

	if (seq_buf_used(&s))
		seq_buf_printf(&s, "\n");

	return seq_buf_used(&s);
}
static DEVICE_ATTR_RO(flags);

static struct attribute *ndtest_nvdimm_attributes[] = {
	&dev_attr_nvdimm_show_handle.attr,
	&dev_attr_vendor.attr,
	&dev_attr_id.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_dirty_shutdown.attr,
	&dev_attr_formats.attr,
	&dev_attr_format.attr,
	&dev_attr_format1.attr,
	&dev_attr_flags.attr,
	NULL,
};

static const struct attribute_group ndtest_nvdimm_attribute_group = {
	.name = "papr",
	.attrs = ndtest_nvdimm_attributes,
	.is_visible = ndtest_nvdimm_attr_visible,
};

static const struct attribute_group *ndtest_nvdimm_attribute_groups[] = {
	&ndtest_nvdimm_attribute_group,
	NULL,
};

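/*
 * Create the nvdimm object for one emulated DIMM and a companion
 * "test_dimmN" class device that carries the error-injection knobs.
 */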
static int ndtest_dimm_register(struct ndtest_priv *priv,
				struct ndtest_dimm *dimm, int id)
{
	struct device *dev = &priv->pdev.dev;
	unsigned long dimm_flags = dimm->flags;

	if (dimm->num_formats > 1)
		set_bit(NDD_LABELING, &dimm_flags);

	if (dimm->flags & PAPR_PMEM_UNARMED_MASK)
		set_bit(NDD_UNARMED, &dimm_flags);

	dimm->nvdimm = nvdimm_create(priv->bus, dimm,
				    ndtest_nvdimm_attribute_groups, dimm_flags,
				    NDTEST_SCM_DIMM_CMD_MASK, 0, NULL);
	if (!dimm->nvdimm) {
		dev_err(dev, "Error creating DIMM object for %pOF\n", priv->dn);
		return -ENXIO;
	}

	dimm->dev = device_create_with_groups(ndtest_dimm_class,
					     &priv->pdev.dev,
					     0, dimm, dimm_attribute_groups,
					     "test_dimm%d", id);
	/* device_create_with_groups() returns an ERR_PTR, never NULL */
	if (IS_ERR(dimm->dev)) {
		pr_err("Could not create dimm device attributes\n");
		dimm->dev = NULL;
		return -ENOMEM;
	}

	return 0;
}

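/*
 * Allocate label storage and backing memory for every DIMM described
 * by the bus config and register the corresponding nvdimm devices.
 */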
static int ndtest_nvdimm_init(struct ndtest_priv *p)
{
	struct ndtest_dimm *d;
	void *res;
	int i, id, rc;

	for (i = 0; i < p->config->dimm_count; i++) {
		d = &p->config->dimms[i];
		d->id = id = p->config->dimm_start + i;
		res = ndtest_alloc_resource(p, LABEL_SIZE, NULL);
		if (!res)
			return -ENOMEM;

		d->label_area = res;
		sprintf(d->label_area, "label%d", id);
		d->config_size = LABEL_SIZE;

		if (!ndtest_alloc_resource(p, d->size,
					   &p->dimm_dma[id]))
			return -ENOMEM;

		if (!ndtest_alloc_resource(p, LABEL_SIZE,
					   &p->label_dma[id]))
			return -ENOMEM;

		if (!ndtest_alloc_resource(p, LABEL_SIZE,
					   &p->dcr_dma[id]))
			return -ENOMEM;

		d->address = p->dimm_dma[id];

		rc = ndtest_dimm_register(p, d, id);
		if (rc)
			return rc;
	}

	return 0;
}

static ssize_t compatible_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "nvdimm_test");
}
static DEVICE_ATTR_RO(compatible);

static struct attribute *of_node_attributes[] = {
	&dev_attr_compatible.attr,
	NULL
};

static const struct attribute_group of_node_attribute_group = {
	.name = "of_node",
	.attrs = of_node_attributes,
};

static const struct attribute_group *ndtest_attribute_groups[] = {
	&of_node_attribute_group,
	NULL,
};

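/*
 * Register the nvdimm bus for one instance.  The bus device carries a
 * fake of_node/compatible attribute group reporting "nvdimm_test" so
 * the test provider can be identified from userspace.
 */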
static int ndtest_bus_register(struct ndtest_priv *p)
{
	p->config = &bus_configs[p->pdev.id];

	p->bus_desc.ndctl = ndtest_ctl;
	p->bus_desc.module = THIS_MODULE;
	p->bus_desc.provider_name = NULL;
	p->bus_desc.attr_groups = ndtest_attribute_groups;

	p->bus = nvdimm_bus_register(&p->pdev.dev, &p->bus_desc);
	if (!p->bus) {
		dev_err(&p->pdev.dev, "Error creating nvdimm bus %pOF\n", p->dn);
		return -ENOMEM;
	}

	return 0;
}

static int ndtest_remove(struct platform_device *pdev)
{
	struct ndtest_priv *p = to_ndtest_priv(&pdev->dev);

	nvdimm_bus_unregister(p->bus);
	return 0;
}

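/*
 * Bind an instance: register its nvdimm bus, size and allocate the
 * per-DIMM DMA address tables, then create the DIMMs and regions.
 */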
static int ndtest_probe(struct platform_device *pdev)
{
	struct ndtest_priv *p;
	unsigned int ndimms;
	int rc;

	p = to_ndtest_priv(&pdev->dev);
	if (ndtest_bus_register(p))
		return -ENOMEM;

	/*
	 * The DMA address tables are indexed by the global DIMM id
	 * (dimm_start + i), so they must cover the highest id used by
	 * this instance.
	 */
	ndimms = p->config->dimm_start + p->config->dimm_count;
	p->dcr_dma = devm_kcalloc(&p->pdev.dev, ndimms,
				 sizeof(dma_addr_t), GFP_KERNEL);
	p->label_dma = devm_kcalloc(&p->pdev.dev, ndimms,
				   sizeof(dma_addr_t), GFP_KERNEL);
	p->dimm_dma = devm_kcalloc(&p->pdev.dev, ndimms,
				  sizeof(dma_addr_t), GFP_KERNEL);
	if (!p->dcr_dma || !p->label_dma || !p->dimm_dma) {
		rc = -ENOMEM;
		goto err;
	}

	rc = ndtest_nvdimm_init(p);
	if (rc)
		goto err;

	rc = ndtest_init_regions(p);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(&pdev->dev, put_dimms, p);
	if (rc)
		goto err;

	platform_set_drvdata(pdev, p);

	return 0;

err:
	pr_err("%s:%d Failed nvdimm init\n", __func__, __LINE__);
	return rc;
}

static const struct platform_device_id ndtest_id[] = {
	{ KBUILD_MODNAME },
	{ },
};

static struct platform_driver ndtest_driver = {
	.probe = ndtest_probe,
	.remove = ndtest_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = ndtest_id,
};

static void ndtest_release(struct device *dev)
{
	struct ndtest_priv *p = to_ndtest_priv(dev);

	kfree(p);
}

static void cleanup_devices(void)
{
	int i;

	for (i = 0; i < NUM_INSTANCES; i++)
		if (instances[i])
			platform_device_unregister(&instances[i]->pdev);

	nfit_test_teardown();

	if (ndtest_pool)
		gen_pool_destroy(ndtest_pool);

	if (ndtest_dimm_class)
		class_destroy(ndtest_dimm_class);
}

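/*
 * Module init: check that the nvdimm/dax modules under test are the
 * instrumented versions (see ../watermark.h), hook up the resource
 * lookup used by the mock I/O helpers, create the DIMM class and a
 * gen_pool of fake physical addresses starting at 4G, then register
 * one platform device per instance and the driver that binds them.
 */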
static __init int ndtest_init(void)
{
	int rc, i;

	pmem_test();
	libnvdimm_test();
	device_dax_test();
	dax_pmem_test();

	nfit_test_setup(ndtest_resource_lookup, NULL);

	ndtest_dimm_class = class_create(THIS_MODULE, "nfit_test_dimm");
	if (IS_ERR(ndtest_dimm_class)) {
		rc = PTR_ERR(ndtest_dimm_class);
		/* don't hand an ERR_PTR to class_destroy() in cleanup */
		ndtest_dimm_class = NULL;
		goto err_register;
	}

	ndtest_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
	if (!ndtest_pool) {
		rc = -ENOMEM;
		goto err_register;
	}

	if (gen_pool_add(ndtest_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
		rc = -ENOMEM;
		goto err_register;
	}

	/* Each instance acts as an independent bus carrying multiple DIMMs */
	for (i = 0; i < NUM_INSTANCES; i++) {
		struct ndtest_priv *priv;
		struct platform_device *pdev;

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv) {
			rc = -ENOMEM;
			goto err_register;
		}

		INIT_LIST_HEAD(&priv->resources);
		pdev = &priv->pdev;
		pdev->name = KBUILD_MODNAME;
		pdev->id = i;
		pdev->dev.release = ndtest_release;
		rc = platform_device_register(pdev);
		if (rc) {
			put_device(&pdev->dev);
			goto err_register;
		}
		get_device(&pdev->dev);

		instances[i] = priv;
	}

	rc = platform_driver_register(&ndtest_driver);
	if (rc)
		goto err_register;

	return 0;

err_register:
	pr_err("Error registering platform device\n");
	cleanup_devices();

	return rc;
}

static __exit void ndtest_exit(void)
{
	cleanup_devices();
	platform_driver_unregister(&ndtest_driver);
}

module_init(ndtest_init);
module_exit(ndtest_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");