xref: /openbmc/linux/tools/testing/nvdimm/test/ndtest.c (revision 1aaba11d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3 
4 #include <linux/platform_device.h>
5 #include <linux/device.h>
6 #include <linux/module.h>
7 #include <linux/genalloc.h>
8 #include <linux/vmalloc.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/list_sort.h>
11 #include <linux/libnvdimm.h>
12 #include <linux/ndctl.h>
13 #include <nd-core.h>
14 #include <linux/printk.h>
15 #include <linux/seq_buf.h>
16 
17 #include "../watermark.h"
18 #include "nfit_test.h"
19 #include "ndtest.h"
20 
/* Geometry and sizing constants for the emulated nvdimm topology. */
enum {
	DIMM_SIZE = SZ_32M,		/* capacity of each emulated DIMM */
	LABEL_SIZE = SZ_128K,		/* label/config area per DIMM */
	NUM_INSTANCES = 2,		/* number of emulated buses */
	NUM_DCR = 4,			/* legacy sizing constant for dma tables */
	NDTEST_MAX_MAPPING = 6,		/* max DIMM mappings per region */
};
28 
/* DSM commands advertised by every emulated DIMM (label area + passthrough). */
#define NDTEST_SCM_DIMM_CMD_MASK	   \
	((1ul << ND_CMD_GET_CONFIG_SIZE) | \
	 (1ul << ND_CMD_GET_CONFIG_DATA) | \
	 (1ul << ND_CMD_SET_CONFIG_DATA) | \
	 (1ul << ND_CMD_CALL))

/* Pack a (node, socket, imc, chan, dimm) tuple into an NFIT-style handle. */
#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm)			\
	(((node & 0xfff) << 16) | ((socket & 0xf) << 12)		\
	 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
38 
/* Protects the per-instance resource lists below. */
static DEFINE_SPINLOCK(ndtest_lock);
/* One emulated bus per module instance. */
static struct ndtest_priv *instances[NUM_INSTANCES];
static struct class *ndtest_dimm_class;
/* Pool handing out fake "physical" addresses for DIMM-sized resources. */
static struct gen_pool *ndtest_pool;
43 
/* DIMMs backing bus 0: five healthy DIMMs, two label formats each. */
static struct ndtest_dimm dimm_group1[] = {
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
		.uuid_str = "1e5c75d2-b618-11ea-9aa3-507b9ddc0f72",
		.physical_id = 0,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
		.uuid_str = "1c4d43ac-b618-11ea-be80-507b9ddc0f72",
		.physical_id = 1,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
		.uuid_str = "a9f17ffc-b618-11ea-b36d-507b9ddc0f72",
		.physical_id = 2,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
		.uuid_str = "b6b83b22-b618-11ea-8aae-507b9ddc0f72",
		.physical_id = 3,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
		.uuid_str = "bf9baaee-b618-11ea-b181-507b9ddc0f72",
		.physical_id = 4,
		.num_formats = 2,
	},
};

/* Bus 1's single DIMM reports every PAPR failure flag, for error paths. */
static struct ndtest_dimm dimm_group2[] = {
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
		.uuid_str = "ca0817e2-b618-11ea-9db3-507b9ddc0f72",
		.physical_id = 0,
		.num_formats = 1,
		.flags = PAPR_PMEM_UNARMED | PAPR_PMEM_EMPTY |
			 PAPR_PMEM_SAVE_FAILED | PAPR_PMEM_SHUTDOWN_DIRTY |
			 PAPR_PMEM_HEALTH_FATAL,
	},
};
94 
/* Region 0 on bus 0: 2-way interleave across DIMMs 0 and 1. */
static struct ndtest_mapping region0_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = 0,
		.size = SZ_16M,
	},
	{
		.dimm = 1,
		.position = 1,
		.start = 0,
		.size = SZ_16M,
	}
};

/* Region 1 on bus 0: 4-way interleave across DIMMs 0-3, upper 16M each. */
static struct ndtest_mapping region1_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 1,
		.position = 1,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 2,
		.position = 2,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 3,
		.position = 3,
		.start = SZ_16M,
		.size = SZ_16M,
	},
};

static struct ndtest_region bus0_regions[] = {
	{
		.type = ND_DEVICE_NAMESPACE_PMEM,
		.num_mappings = ARRAY_SIZE(region0_mapping),
		.mapping = region0_mapping,
		.size = DIMM_SIZE,
		.range_index = 1,
	},
	{
		.type = ND_DEVICE_NAMESPACE_PMEM,
		.num_mappings = ARRAY_SIZE(region1_mapping),
		.mapping = region1_mapping,
		.size = DIMM_SIZE * 2,
		.range_index = 2,
	},
};

/* Bus 1: one whole-DIMM region backed by the unhealthy DIMM. */
static struct ndtest_mapping region6_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = 0,
		.size = DIMM_SIZE,
	},
};

static struct ndtest_region bus1_regions[] = {
	{
		.type = ND_DEVICE_NAMESPACE_IO,
		.num_mappings = ARRAY_SIZE(region6_mapping),
		.mapping = region6_mapping,
		.size = DIMM_SIZE,
		.range_index = 1,
	},
};

/* Full per-instance topology; indexed by platform device id in probe. */
static struct ndtest_config bus_configs[NUM_INSTANCES] = {
	/* bus 1 */
	{
		.dimm_start = 0,
		.dimm_count = ARRAY_SIZE(dimm_group1),
		.dimms = dimm_group1,
		.regions = bus0_regions,
		.num_regions = ARRAY_SIZE(bus0_regions),
	},
	/* bus 2 */
	{
		.dimm_start = ARRAY_SIZE(dimm_group1),
		.dimm_count = ARRAY_SIZE(dimm_group2),
		.dimms = dimm_group2,
		.regions = bus1_regions,
		.num_regions = ARRAY_SIZE(bus1_regions),
	},
};
191 
to_ndtest_priv(struct device * dev)192 static inline struct ndtest_priv *to_ndtest_priv(struct device *dev)
193 {
194 	struct platform_device *pdev = to_platform_device(dev);
195 
196 	return container_of(pdev, struct ndtest_priv, pdev);
197 }
198 
ndtest_config_get(struct ndtest_dimm * p,unsigned int buf_len,struct nd_cmd_get_config_data_hdr * hdr)199 static int ndtest_config_get(struct ndtest_dimm *p, unsigned int buf_len,
200 			     struct nd_cmd_get_config_data_hdr *hdr)
201 {
202 	unsigned int len;
203 
204 	if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
205 		return -EINVAL;
206 
207 	hdr->status = 0;
208 	len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
209 	memcpy(hdr->out_buf, p->label_area + hdr->in_offset, len);
210 
211 	return buf_len - len;
212 }
213 
ndtest_config_set(struct ndtest_dimm * p,unsigned int buf_len,struct nd_cmd_set_config_hdr * hdr)214 static int ndtest_config_set(struct ndtest_dimm *p, unsigned int buf_len,
215 			     struct nd_cmd_set_config_hdr *hdr)
216 {
217 	unsigned int len;
218 
219 	if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
220 		return -EINVAL;
221 
222 	len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
223 	memcpy(p->label_area + hdr->in_offset, hdr->in_buf, len);
224 
225 	return buf_len - len;
226 }
227 
ndtest_get_config_size(struct ndtest_dimm * dimm,unsigned int buf_len,struct nd_cmd_get_config_size * size)228 static int ndtest_get_config_size(struct ndtest_dimm *dimm, unsigned int buf_len,
229 				  struct nd_cmd_get_config_size *size)
230 {
231 	size->status = 0;
232 	size->max_xfer = 8;
233 	size->config_size = dimm->config_size;
234 
235 	return 0;
236 }
237 
ndtest_ctl(struct nvdimm_bus_descriptor * nd_desc,struct nvdimm * nvdimm,unsigned int cmd,void * buf,unsigned int buf_len,int * cmd_rc)238 static int ndtest_ctl(struct nvdimm_bus_descriptor *nd_desc,
239 		      struct nvdimm *nvdimm, unsigned int cmd, void *buf,
240 		      unsigned int buf_len, int *cmd_rc)
241 {
242 	struct ndtest_dimm *dimm;
243 	int _cmd_rc;
244 
245 	if (!cmd_rc)
246 		cmd_rc = &_cmd_rc;
247 
248 	*cmd_rc = 0;
249 
250 	if (!nvdimm)
251 		return -EINVAL;
252 
253 	dimm = nvdimm_provider_data(nvdimm);
254 	if (!dimm)
255 		return -EINVAL;
256 
257 	switch (cmd) {
258 	case ND_CMD_GET_CONFIG_SIZE:
259 		*cmd_rc = ndtest_get_config_size(dimm, buf_len, buf);
260 		break;
261 	case ND_CMD_GET_CONFIG_DATA:
262 		*cmd_rc = ndtest_config_get(dimm, buf_len, buf);
263 		break;
264 	case ND_CMD_SET_CONFIG_DATA:
265 		*cmd_rc = ndtest_config_set(dimm, buf_len, buf);
266 		break;
267 	default:
268 		return -EINVAL;
269 	}
270 
271 	/* Failures for a DIMM can be injected using fail_cmd and
272 	 * fail_cmd_code, see the device attributes below
273 	 */
274 	if ((1 << cmd) & dimm->fail_cmd)
275 		return dimm->fail_cmd_code ? dimm->fail_cmd_code : -EIO;
276 
277 	return 0;
278 }
279 
/*
 * Translate an address back to the nfit_test_resource covering it.  Both
 * the fake physical range (res) and the vmalloc backing buffer are
 * candidates, across every registered instance.
 */
static struct nfit_test_resource *ndtest_resource_lookup(resource_size_t addr)
{
	int i;

	for (i = 0; i < NUM_INSTANCES; i++) {
		struct ndtest_priv *t = instances[i];
		struct nfit_test_resource *n, *found = NULL;

		if (!t)
			continue;

		spin_lock(&ndtest_lock);
		list_for_each_entry(n, &t->resources, list) {
			resource_size_t size = resource_size(&n->res);

			if (addr >= n->res.start &&
			    addr < n->res.start + size) {
				found = n;
				break;
			}
			if (addr >= (unsigned long) n->buf &&
			    addr < (unsigned long) n->buf + size) {
				found = n;
				break;
			}
		}
		spin_unlock(&ndtest_lock);

		if (found)
			return found;
	}

	pr_warn("Failed to get resource\n");

	return NULL;
}
312 
ndtest_release_resource(void * data)313 static void ndtest_release_resource(void *data)
314 {
315 	struct nfit_test_resource *res  = data;
316 
317 	spin_lock(&ndtest_lock);
318 	list_del(&res->list);
319 	spin_unlock(&ndtest_lock);
320 
321 	if (resource_size(&res->res) >= DIMM_SIZE)
322 		gen_pool_free(ndtest_pool, res->res.start,
323 				resource_size(&res->res));
324 	vfree(res->buf);
325 	kfree(res);
326 }
327 
/*
 * Allocate a vmalloc()-backed buffer and register it as a fake NFIT
 * resource on @p.  DIMM-sized (and larger) allocations also reserve an
 * aligned fake "physical" range from the shared gen_pool; smaller ones
 * simply reuse the buffer's own address.  On success the backing buffer
 * is returned, the fake address is stored in *@dma (if non-NULL), and a
 * devm action is armed to tear everything down.  Returns NULL on error.
 *
 * Fixes over the previous version: vmalloc() failure is now detected
 * before the address is used, and a devm_add_action() failure unlinks
 * the resource from p->resources before freeing it (it used to leave a
 * dangling list entry behind).
 */
static void *ndtest_alloc_resource(struct ndtest_priv *p, size_t size,
				   dma_addr_t *dma)
{
	struct genpool_data_align data = {
		.align = SZ_128M,
	};
	struct nfit_test_resource *res;
	dma_addr_t __dma = 0;
	void *buf;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	buf = vmalloc(size);
	if (!buf)
		goto buf_err;

	if (size >= DIMM_SIZE)
		__dma = gen_pool_alloc_algo(ndtest_pool, size,
					    gen_pool_first_fit_align, &data);
	else
		__dma = (unsigned long) buf;

	if (!__dma)
		goto buf_err;

	INIT_LIST_HEAD(&res->list);
	res->dev = &p->pdev.dev;
	res->buf = buf;
	res->res.start = __dma;
	res->res.end = __dma + size - 1;
	res->res.name = "NFIT";
	spin_lock_init(&res->lock);
	INIT_LIST_HEAD(&res->requests);
	spin_lock(&ndtest_lock);
	list_add(&res->list, &p->resources);
	spin_unlock(&ndtest_lock);

	if (dma)
		*dma = __dma;

	if (!devm_add_action(&p->pdev.dev, ndtest_release_resource, res))
		return res->buf;

	/* devm_add_action() failed: take the resource back off the list. */
	spin_lock(&ndtest_lock);
	list_del(&res->list);
	spin_unlock(&ndtest_lock);
buf_err:
	if (__dma && size >= DIMM_SIZE)
		gen_pool_free(ndtest_pool, __dma, size);
	vfree(buf);
	kfree(res);

	return NULL;
}
379 
range_index_show(struct device * dev,struct device_attribute * attr,char * buf)380 static ssize_t range_index_show(struct device *dev,
381 		struct device_attribute *attr, char *buf)
382 {
383 	struct nd_region *nd_region = to_nd_region(dev);
384 	struct ndtest_region *region = nd_region_provider_data(nd_region);
385 
386 	return sprintf(buf, "%d\n", region->range_index);
387 }
388 static DEVICE_ATTR_RO(range_index);
389 
/* Region-level "papr" sysfs group carrying range_index. */
static struct attribute *ndtest_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static const struct attribute_group ndtest_region_attribute_group = {
	.name = "papr",
	.attrs = ndtest_region_attributes,
};

static const struct attribute_group *ndtest_region_attribute_groups[] = {
	&ndtest_region_attribute_group,
	NULL,
};
404 
ndtest_create_region(struct ndtest_priv * p,struct ndtest_region * region)405 static int ndtest_create_region(struct ndtest_priv *p,
406 				struct ndtest_region *region)
407 {
408 	struct nd_mapping_desc mappings[NDTEST_MAX_MAPPING];
409 	struct nd_region_desc *ndr_desc, _ndr_desc;
410 	struct nd_interleave_set *nd_set;
411 	struct resource res;
412 	int i, ndimm = region->mapping[0].dimm;
413 	u64 uuid[2];
414 
415 	memset(&res, 0, sizeof(res));
416 	memset(&mappings, 0, sizeof(mappings));
417 	memset(&_ndr_desc, 0, sizeof(_ndr_desc));
418 	ndr_desc = &_ndr_desc;
419 
420 	if (!ndtest_alloc_resource(p, region->size, &res.start))
421 		return -ENOMEM;
422 
423 	res.end = res.start + region->size - 1;
424 	ndr_desc->mapping = mappings;
425 	ndr_desc->res = &res;
426 	ndr_desc->provider_data = region;
427 	ndr_desc->attr_groups = ndtest_region_attribute_groups;
428 
429 	if (uuid_parse(p->config->dimms[ndimm].uuid_str, (uuid_t *)uuid)) {
430 		pr_err("failed to parse UUID\n");
431 		return -ENXIO;
432 	}
433 
434 	nd_set = devm_kzalloc(&p->pdev.dev, sizeof(*nd_set), GFP_KERNEL);
435 	if (!nd_set)
436 		return -ENOMEM;
437 
438 	nd_set->cookie1 = cpu_to_le64(uuid[0]);
439 	nd_set->cookie2 = cpu_to_le64(uuid[1]);
440 	nd_set->altcookie = nd_set->cookie1;
441 	ndr_desc->nd_set = nd_set;
442 
443 	for (i = 0; i < region->num_mappings; i++) {
444 		ndimm = region->mapping[i].dimm;
445 		mappings[i].start = region->mapping[i].start;
446 		mappings[i].size = region->mapping[i].size;
447 		mappings[i].position = region->mapping[i].position;
448 		mappings[i].nvdimm = p->config->dimms[ndimm].nvdimm;
449 	}
450 
451 	ndr_desc->num_mappings = region->num_mappings;
452 	region->region = nvdimm_pmem_region_create(p->bus, ndr_desc);
453 
454 	if (!region->region) {
455 		dev_err(&p->pdev.dev, "Error registering region %pR\n",
456 			ndr_desc->res);
457 		return -ENXIO;
458 	}
459 
460 	return 0;
461 }
462 
ndtest_init_regions(struct ndtest_priv * p)463 static int ndtest_init_regions(struct ndtest_priv *p)
464 {
465 	int i, ret = 0;
466 
467 	for (i = 0; i < p->config->num_regions; i++) {
468 		ret = ndtest_create_region(p, &p->config->regions[i]);
469 		if (ret)
470 			return ret;
471 	}
472 
473 	return 0;
474 }
475 
put_dimms(void * data)476 static void put_dimms(void *data)
477 {
478 	struct ndtest_priv *p = data;
479 	int i;
480 
481 	for (i = 0; i < p->config->dimm_count; i++)
482 		if (p->config->dimms[i].dev) {
483 			device_unregister(p->config->dimms[i].dev);
484 			p->config->dimms[i].dev = NULL;
485 		}
486 }
487 
handle_show(struct device * dev,struct device_attribute * attr,char * buf)488 static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
489 		char *buf)
490 {
491 	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
492 
493 	return sprintf(buf, "%#x\n", dimm->handle);
494 }
495 static DEVICE_ATTR_RO(handle);
496 
fail_cmd_show(struct device * dev,struct device_attribute * attr,char * buf)497 static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
498 		char *buf)
499 {
500 	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
501 
502 	return sprintf(buf, "%#x\n", dimm->fail_cmd);
503 }
504 
fail_cmd_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)505 static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
506 		const char *buf, size_t size)
507 {
508 	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
509 	unsigned long val;
510 	ssize_t rc;
511 
512 	rc = kstrtol(buf, 0, &val);
513 	if (rc)
514 		return rc;
515 
516 	dimm->fail_cmd = val;
517 
518 	return size;
519 }
520 static DEVICE_ATTR_RW(fail_cmd);
521 
fail_cmd_code_show(struct device * dev,struct device_attribute * attr,char * buf)522 static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr,
523 		char *buf)
524 {
525 	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
526 
527 	return sprintf(buf, "%d\n", dimm->fail_cmd_code);
528 }
529 
fail_cmd_code_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)530 static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr,
531 		const char *buf, size_t size)
532 {
533 	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
534 	unsigned long val;
535 	ssize_t rc;
536 
537 	rc = kstrtol(buf, 0, &val);
538 	if (rc)
539 		return rc;
540 
541 	dimm->fail_cmd_code = val;
542 	return size;
543 }
544 static DEVICE_ATTR_RW(fail_cmd_code);
545 
/* Attributes of the test_dimm%d class devices (not the nvdimm objects). */
static struct attribute *dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_fail_cmd.attr,
	&dev_attr_fail_cmd_code.attr,
	NULL,
};

static struct attribute_group dimm_attribute_group = {
	.attrs = dimm_attributes,
};

static const struct attribute_group *dimm_attribute_groups[] = {
	&dimm_attribute_group,
	NULL,
};
561 
phys_id_show(struct device * dev,struct device_attribute * attr,char * buf)562 static ssize_t phys_id_show(struct device *dev,
563 		struct device_attribute *attr, char *buf)
564 {
565 	struct nvdimm *nvdimm = to_nvdimm(dev);
566 	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
567 
568 	return sprintf(buf, "%#x\n", dimm->physical_id);
569 }
570 static DEVICE_ATTR_RO(phys_id);
571 
/* papr/vendor: fixed fake vendor id for all emulated DIMMs. */
static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "0x1234567\n");
}
static DEVICE_ATTR_RO(vendor);
578 
id_show(struct device * dev,struct device_attribute * attr,char * buf)579 static ssize_t id_show(struct device *dev,
580 		       struct device_attribute *attr, char *buf)
581 {
582 	struct nvdimm *nvdimm = to_nvdimm(dev);
583 	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
584 
585 	return sprintf(buf, "%04x-%02x-%04x-%08x", 0xabcd,
586 		       0xa, 2016, ~(dimm->handle));
587 }
588 static DEVICE_ATTR_RO(id);
589 
nvdimm_handle_show(struct device * dev,struct device_attribute * attr,char * buf)590 static ssize_t nvdimm_handle_show(struct device *dev,
591 				  struct device_attribute *attr, char *buf)
592 {
593 	struct nvdimm *nvdimm = to_nvdimm(dev);
594 	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
595 
596 	return sprintf(buf, "%#x\n", dimm->handle);
597 }
598 
599 static struct device_attribute dev_attr_nvdimm_show_handle =  {
600 	.attr	= { .name = "handle", .mode = 0444 },
601 	.show	= nvdimm_handle_show,
602 };
603 
/* papr/subsystem_vendor: always zero for the emulated DIMMs. */
static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%04x\n", 0);
}
static DEVICE_ATTR_RO(subsystem_vendor);
610 
/* papr/dirty_shutdown: fixed test value for the dirty-shutdown count. */
static ssize_t dirty_shutdown_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(dirty_shutdown);
617 
formats_show(struct device * dev,struct device_attribute * attr,char * buf)618 static ssize_t formats_show(struct device *dev,
619 		struct device_attribute *attr, char *buf)
620 {
621 	struct nvdimm *nvdimm = to_nvdimm(dev);
622 	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
623 
624 	return sprintf(buf, "%d\n", dimm->num_formats);
625 }
626 static DEVICE_ATTR_RO(formats);
627 
format_show(struct device * dev,struct device_attribute * attr,char * buf)628 static ssize_t format_show(struct device *dev,
629 		struct device_attribute *attr, char *buf)
630 {
631 	struct nvdimm *nvdimm = to_nvdimm(dev);
632 	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
633 
634 	if (dimm->num_formats > 1)
635 		return sprintf(buf, "0x201\n");
636 
637 	return sprintf(buf, "0x101\n");
638 }
639 static DEVICE_ATTR_RO(format);
640 
/* papr/format1: secondary format code (hidden unless num_formats > 1). */
static ssize_t format1_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "0x301\n");
}
static DEVICE_ATTR_RO(format1);
647 
ndtest_nvdimm_attr_visible(struct kobject * kobj,struct attribute * a,int n)648 static umode_t ndtest_nvdimm_attr_visible(struct kobject *kobj,
649 					struct attribute *a, int n)
650 {
651 	struct device *dev = container_of(kobj, struct device, kobj);
652 	struct nvdimm *nvdimm = to_nvdimm(dev);
653 	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
654 
655 	if (a == &dev_attr_format1.attr && dimm->num_formats <= 1)
656 		return 0;
657 
658 	return a->mode;
659 }
660 
flags_show(struct device * dev,struct device_attribute * attr,char * buf)661 static ssize_t flags_show(struct device *dev,
662 			  struct device_attribute *attr, char *buf)
663 {
664 	struct nvdimm *nvdimm = to_nvdimm(dev);
665 	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
666 	struct seq_buf s;
667 	u64 flags;
668 
669 	flags = dimm->flags;
670 
671 	seq_buf_init(&s, buf, PAGE_SIZE);
672 	if (flags & PAPR_PMEM_UNARMED_MASK)
673 		seq_buf_printf(&s, "not_armed ");
674 
675 	if (flags & PAPR_PMEM_BAD_SHUTDOWN_MASK)
676 		seq_buf_printf(&s, "flush_fail ");
677 
678 	if (flags & PAPR_PMEM_BAD_RESTORE_MASK)
679 		seq_buf_printf(&s, "restore_fail ");
680 
681 	if (flags & PAPR_PMEM_SAVE_MASK)
682 		seq_buf_printf(&s, "save_fail ");
683 
684 	if (flags & PAPR_PMEM_SMART_EVENT_MASK)
685 		seq_buf_printf(&s, "smart_notify ");
686 
687 
688 	if (seq_buf_used(&s))
689 		seq_buf_printf(&s, "\n");
690 
691 	return seq_buf_used(&s);
692 }
693 static DEVICE_ATTR_RO(flags);
694 
/* nvdimm-object "papr" sysfs group; format1 visibility is conditional. */
static struct attribute *ndtest_nvdimm_attributes[] = {
	&dev_attr_nvdimm_show_handle.attr,
	&dev_attr_vendor.attr,
	&dev_attr_id.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_dirty_shutdown.attr,
	&dev_attr_formats.attr,
	&dev_attr_format.attr,
	&dev_attr_format1.attr,
	&dev_attr_flags.attr,
	NULL,
};

static const struct attribute_group ndtest_nvdimm_attribute_group = {
	.name = "papr",
	.attrs = ndtest_nvdimm_attributes,
	.is_visible = ndtest_nvdimm_attr_visible,
};

static const struct attribute_group *ndtest_nvdimm_attribute_groups[] = {
	&ndtest_nvdimm_attribute_group,
	NULL,
};
719 
ndtest_dimm_register(struct ndtest_priv * priv,struct ndtest_dimm * dimm,int id)720 static int ndtest_dimm_register(struct ndtest_priv *priv,
721 				struct ndtest_dimm *dimm, int id)
722 {
723 	struct device *dev = &priv->pdev.dev;
724 	unsigned long dimm_flags = dimm->flags;
725 
726 	if (dimm->num_formats > 1)
727 		set_bit(NDD_LABELING, &dimm_flags);
728 
729 	if (dimm->flags & PAPR_PMEM_UNARMED_MASK)
730 		set_bit(NDD_UNARMED, &dimm_flags);
731 
732 	dimm->nvdimm = nvdimm_create(priv->bus, dimm,
733 				    ndtest_nvdimm_attribute_groups, dimm_flags,
734 				    NDTEST_SCM_DIMM_CMD_MASK, 0, NULL);
735 	if (!dimm->nvdimm) {
736 		dev_err(dev, "Error creating DIMM object for %pOF\n", priv->dn);
737 		return -ENXIO;
738 	}
739 
740 	dimm->dev = device_create_with_groups(ndtest_dimm_class,
741 					     &priv->pdev.dev,
742 					     0, dimm, dimm_attribute_groups,
743 					     "test_dimm%d", id);
744 	if (!dimm->dev) {
745 		pr_err("Could not create dimm device attributes\n");
746 		return -ENOMEM;
747 	}
748 
749 	return 0;
750 }
751 
ndtest_nvdimm_init(struct ndtest_priv * p)752 static int ndtest_nvdimm_init(struct ndtest_priv *p)
753 {
754 	struct ndtest_dimm *d;
755 	void *res;
756 	int i, id;
757 
758 	for (i = 0; i < p->config->dimm_count; i++) {
759 		d = &p->config->dimms[i];
760 		d->id = id = p->config->dimm_start + i;
761 		res = ndtest_alloc_resource(p, LABEL_SIZE, NULL);
762 		if (!res)
763 			return -ENOMEM;
764 
765 		d->label_area = res;
766 		sprintf(d->label_area, "label%d", id);
767 		d->config_size = LABEL_SIZE;
768 
769 		if (!ndtest_alloc_resource(p, d->size,
770 					   &p->dimm_dma[id]))
771 			return -ENOMEM;
772 
773 		if (!ndtest_alloc_resource(p, LABEL_SIZE,
774 					   &p->label_dma[id]))
775 			return -ENOMEM;
776 
777 		if (!ndtest_alloc_resource(p, LABEL_SIZE,
778 					   &p->dcr_dma[id]))
779 			return -ENOMEM;
780 
781 		d->address = p->dimm_dma[id];
782 
783 		ndtest_dimm_register(p, d, id);
784 	}
785 
786 	return 0;
787 }
788 
/* of_node/compatible: lets userspace tooling identify the fake bus. */
static ssize_t compatible_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "nvdimm_test");
}
static DEVICE_ATTR_RO(compatible);
795 
/* Emulated "of_node" sysfs group exposing the compatible string. */
static struct attribute *of_node_attributes[] = {
	&dev_attr_compatible.attr,
	NULL
};

static const struct attribute_group of_node_attribute_group = {
	.name = "of_node",
	.attrs = of_node_attributes,
};

static const struct attribute_group *ndtest_attribute_groups[] = {
	&of_node_attribute_group,
	NULL,
};
810 
ndtest_bus_register(struct ndtest_priv * p)811 static int ndtest_bus_register(struct ndtest_priv *p)
812 {
813 	p->config = &bus_configs[p->pdev.id];
814 
815 	p->bus_desc.ndctl = ndtest_ctl;
816 	p->bus_desc.module = THIS_MODULE;
817 	p->bus_desc.provider_name = NULL;
818 	p->bus_desc.attr_groups = ndtest_attribute_groups;
819 
820 	p->bus = nvdimm_bus_register(&p->pdev.dev, &p->bus_desc);
821 	if (!p->bus) {
822 		dev_err(&p->pdev.dev, "Error creating nvdimm bus %pOF\n", p->dn);
823 		return -ENOMEM;
824 	}
825 
826 	return 0;
827 }
828 
ndtest_remove(struct platform_device * pdev)829 static int ndtest_remove(struct platform_device *pdev)
830 {
831 	struct ndtest_priv *p = to_ndtest_priv(&pdev->dev);
832 
833 	nvdimm_bus_unregister(p->bus);
834 	return 0;
835 }
836 
ndtest_probe(struct platform_device * pdev)837 static int ndtest_probe(struct platform_device *pdev)
838 {
839 	struct ndtest_priv *p;
840 	int rc;
841 
842 	p = to_ndtest_priv(&pdev->dev);
843 	if (ndtest_bus_register(p))
844 		return -ENOMEM;
845 
846 	p->dcr_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
847 				 sizeof(dma_addr_t), GFP_KERNEL);
848 	p->label_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
849 				   sizeof(dma_addr_t), GFP_KERNEL);
850 	p->dimm_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
851 				  sizeof(dma_addr_t), GFP_KERNEL);
852 
853 	rc = ndtest_nvdimm_init(p);
854 	if (rc)
855 		goto err;
856 
857 	rc = ndtest_init_regions(p);
858 	if (rc)
859 		goto err;
860 
861 	rc = devm_add_action_or_reset(&pdev->dev, put_dimms, p);
862 	if (rc)
863 		goto err;
864 
865 	platform_set_drvdata(pdev, p);
866 
867 	return 0;
868 
869 err:
870 	pr_err("%s:%d Failed nvdimm init\n", __func__, __LINE__);
871 	return rc;
872 }
873 
/* Platform device ids matched by the driver (one per emulated bus). */
static const struct platform_device_id ndtest_id[] = {
	{ KBUILD_MODNAME },
	{ },
};

static struct platform_driver ndtest_driver = {
	.probe = ndtest_probe,
	.remove = ndtest_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = ndtest_id,
};
887 
/* Device release callback: the priv struct embeds the platform device. */
static void ndtest_release(struct device *dev)
{
	kfree(to_ndtest_priv(dev));
}
894 
cleanup_devices(void)895 static void cleanup_devices(void)
896 {
897 	int i;
898 
899 	for (i = 0; i < NUM_INSTANCES; i++)
900 		if (instances[i])
901 			platform_device_unregister(&instances[i]->pdev);
902 
903 	nfit_test_teardown();
904 
905 	if (ndtest_pool)
906 		gen_pool_destroy(ndtest_pool);
907 
908 
909 	if (ndtest_dimm_class)
910 		class_destroy(ndtest_dimm_class);
911 }
912 
/*
 * Module init: install the test resource-lookup hook, create the shared
 * class and fake-address pool, register NUM_INSTANCES platform devices
 * (one per emulated bus), and finally the driver that binds them.
 */
static __init int ndtest_init(void)
{
	int rc, i;

	/* Hook the nfit_test overrides into the real pmem/dax drivers. */
	pmem_test();
	libnvdimm_test();
	device_dax_test();
	dax_pmem_test();

	nfit_test_setup(ndtest_resource_lookup, NULL);

	ndtest_dimm_class = class_create("nfit_test_dimm");
	if (IS_ERR(ndtest_dimm_class)) {
		rc = PTR_ERR(ndtest_dimm_class);
		goto err_register;
	}

	/* Fake "physical" address pool: 4M granularity, range [4G, 8G). */
	ndtest_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
	if (!ndtest_pool) {
		rc = -ENOMEM;
		goto err_register;
	}

	if (gen_pool_add(ndtest_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
		rc = -ENOMEM;
		goto err_register;
	}

	/* Each instance can be taken as a bus, which can have multiple dimms */
	for (i = 0; i < NUM_INSTANCES; i++) {
		struct ndtest_priv *priv;
		struct platform_device *pdev;

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv) {
			rc = -ENOMEM;
			goto err_register;
		}

		INIT_LIST_HEAD(&priv->resources);
		pdev = &priv->pdev;
		pdev->name = KBUILD_MODNAME;
		pdev->id = i;
		pdev->dev.release = ndtest_release;
		rc = platform_device_register(pdev);
		if (rc) {
			/* The release callback (ndtest_release) frees priv. */
			put_device(&pdev->dev);
			goto err_register;
		}
		/* Extra ref so cleanup_devices() can always unregister. */
		get_device(&pdev->dev);

		instances[i] = priv;
	}

	rc = platform_driver_register(&ndtest_driver);
	if (rc)
		goto err_register;

	return 0;

err_register:
	pr_err("Error registering platform device\n");
	cleanup_devices();

	return rc;
}
979 
/* Module exit: unregister devices first, then the driver itself. */
static __exit void ndtest_exit(void)
{
	cleanup_devices();
	platform_driver_unregister(&ndtest_driver);
}
985 
/* Standard module plumbing. */
module_init(ndtest_init);
module_exit(ndtest_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
990