xref: /openbmc/linux/tools/testing/nvdimm/test/nfit.c (revision ca481398)
1 /*
2  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/platform_device.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/workqueue.h>
17 #include <linux/libnvdimm.h>
18 #include <linux/vmalloc.h>
19 #include <linux/device.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/ndctl.h>
23 #include <linux/sizes.h>
24 #include <linux/list.h>
25 #include <linux/slab.h>
26 #include <nd-core.h>
27 #include <nfit.h>
28 #include <nd.h>
29 #include "nfit_test.h"
30 
31 /*
32  * Generate an NFIT table to describe the following topology:
33  *
34  * BUS0: Interleaved PMEM regions, aliased with BLK regions
35  *
36  *                     (a)                       (b)            DIMM   BLK-REGION
37  *           +----------+--------------+----------+---------+
38  * +------+  |  blk2.0  |     pm0.0    |  blk2.1  |  pm1.0  |    0      region2
39  * | imc0 +--+- - - - - region0 - - - -+----------+         +
40  * +--+---+  |  blk3.0  |     pm0.0    |  blk3.1  |  pm1.0  |    1      region3
41  *    |      +----------+--------------v----------v         v
42  * +--+---+                            |                    |
43  * | cpu0 |                                    region1
44  * +--+---+                            |                    |
45  *    |      +-------------------------^----------^         ^
46  * +--+---+  |                 blk4.0             |  pm1.0  |    2      region4
47  * | imc1 +--+-------------------------+----------+         +
48  * +------+  |                 blk5.0             |  pm1.0  |    3      region5
49  *           +-------------------------+----------+-+-------+
50  *
51  * +--+---+
52  * | cpu1 |
53  * +--+---+                   (Hotplug DIMM)
54  *    |      +----------------------------------------------+
55  * +--+---+  |                 blk6.0/pm7.0                 |    4      region6/7
56  * | imc0 +--+----------------------------------------------+
57  * +------+
58  *
59  *
60  * *) In this layout we have four dimms and two memory controllers in one
61  *    socket.  Each unique interface (BLK or PMEM) to DPA space
62  *    is identified by a region device with a dynamically assigned id.
63  *
64  * *) The first portion of dimm0 and dimm1 are interleaved as REGION0.
65  *    A single PMEM namespace "pm0.0" is created using half of the
66  *    REGION0 SPA-range.  REGION0 spans dimm0 and dimm1.  PMEM namespaces
67  *    allocate from the bottom of a region.  The unallocated
68  *    portion of REGION0 aliases with REGION2 and REGION3.  That
69  *    unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
70  *    "blk3.0") starting at the base of each DIMM to offset (a) in those
71  *    DIMMs.  "pm0.0", "blk2.0" and "blk3.0" are free-form readable
72  *    names that can be assigned to a namespace.
73  *
74  * *) In the last portion of dimm0 and dimm1 we have an interleaved
75  *    SPA range, REGION1, that spans those two dimms as well as dimm2
76  *    and dimm3.  Some of REGION1 allocated to a PMEM namespace named
77  *    and dimm3.  Some of REGION1 is allocated to a PMEM namespace named
78  *    "pm1.0"; the rest is reclaimed as 4 BLK namespaces (one for each
79  *    "blk5.0".
80  *
81  * *) The portions of dimm2 and dimm3 that do not participate in the
82  *    REGION1 interleaved SPA range (i.e. the DPA addresses below offset
83  *    (b)) are also included in the "blk4.0" and "blk5.0" namespaces.
84  *    Note, that BLK namespaces need not be contiguous in DPA-space, and
85  *    can consume aliased capacity from multiple interleave sets.
86  *
87  * BUS1: Legacy NVDIMM (single contiguous range)
88  *
89  *  region2
90  * +---------------------+
91  * |---------------------|
92  * ||       pm2.0       ||
93  * |---------------------|
94  * +---------------------+
95  *
96  * *) An NFIT table may describe a simple system-physical-address range
97  *    with no BLK aliasing.  This type of region may optionally
98  *    reference an NVDIMM.
99  */
100 enum {
101 	NUM_PM  = 3,
102 	NUM_DCR = 5,
103 	NUM_HINTS = 8,
104 	NUM_BDW = NUM_DCR,
105 	NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
106 	NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */,
107 	DIMM_SIZE = SZ_32M,
108 	LABEL_SIZE = SZ_128K,
109 	SPA_VCD_SIZE = SZ_4M,
110 	SPA0_SIZE = DIMM_SIZE,
111 	SPA1_SIZE = DIMM_SIZE * 2,
112 	SPA2_SIZE = DIMM_SIZE,
113 	BDW_SIZE = 64 << 8,
114 	DCR_SIZE = 12,
115 	NUM_NFITS = 2, /* permit testing multiple NFITs per system */
116 };
117 
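/*
 * Backing store for a mocked BLK control region: an 8-byte block-data-window
 * address register and a 4-byte status register (together the 12-byte
 * DCR_SIZE advertised above), followed by the BDW_SIZE data aperture itself.
 */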
118 struct nfit_test_dcr {
119 	__le64 bdw_addr;
120 	__le32 bdw_status;
121 	__u8 aperature[BDW_SIZE];
122 };
123 
124 #define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
125 	(((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
126 	 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
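/*
 * Worked example (illustrative): NFIT_DIMM_HANDLE(0, 0, 1, 0, 1) packs
 * node 0, socket 0, imc 1, channel 0, dimm 1 into 0x101, i.e. handle[3]
 * below.
 */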
127 
128 static u32 handle[] = {
129 	[0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
130 	[1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
131 	[2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
132 	[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
133 	[4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
134 	[5] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
135 	[6] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 1),
136 };
137 
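/*
 * Per-DIMM bitmask of command functions to fail: bit N, set through the
 * "fail_cmd" sysfs attribute, forces function N to return -EIO in
 * nfit_test_ctl().
 */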
138 static unsigned long dimm_fail_cmd_flags[NUM_DCR];
139 
140 struct nfit_test {
141 	struct acpi_nfit_desc acpi_desc;
142 	struct platform_device pdev;
143 	struct list_head resources;
144 	void *nfit_buf;
145 	dma_addr_t nfit_dma;
146 	size_t nfit_size;
147 	int dcr_idx;
148 	int num_dcr;
149 	int num_pm;
150 	void **dimm;
151 	dma_addr_t *dimm_dma;
152 	void **flush;
153 	dma_addr_t *flush_dma;
154 	void **label;
155 	dma_addr_t *label_dma;
156 	void **spa_set;
157 	dma_addr_t *spa_set_dma;
158 	struct nfit_test_dcr **dcr;
159 	dma_addr_t *dcr_dma;
160 	int (*alloc)(struct nfit_test *t);
161 	void (*setup)(struct nfit_test *t);
162 	int setup_hotplug;
163 	union acpi_object **_fit;
164 	dma_addr_t _fit_dma;
165 	struct ars_state {
166 		struct nd_cmd_ars_status *ars_status;
167 		unsigned long deadline;
168 		spinlock_t lock;
169 	} ars_state;
170 	struct device *dimm_dev[NUM_DCR];
171 };
172 
173 static struct nfit_test *to_nfit_test(struct device *dev)
174 {
175 	struct platform_device *pdev = to_platform_device(dev);
176 
177 	return container_of(pdev, struct nfit_test, pdev);
178 }
179 
180 static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
181 		unsigned int buf_len)
182 {
183 	if (buf_len < sizeof(*nd_cmd))
184 		return -EINVAL;
185 
186 	nd_cmd->status = 0;
187 	nd_cmd->config_size = LABEL_SIZE;
188 	nd_cmd->max_xfer = SZ_4K;
189 
190 	return 0;
191 }
192 
193 static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
194 		*nd_cmd, unsigned int buf_len, void *label)
195 {
196 	unsigned int len, offset = nd_cmd->in_offset;
197 	int rc;
198 
199 	if (buf_len < sizeof(*nd_cmd))
200 		return -EINVAL;
201 	if (offset >= LABEL_SIZE)
202 		return -EINVAL;
203 	if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
204 		return -EINVAL;
205 
206 	nd_cmd->status = 0;
207 	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
208 	memcpy(nd_cmd->out_buf, label + offset, len);
209 	rc = buf_len - sizeof(*nd_cmd) - len;
210 
211 	return rc;
212 }
213 
214 static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
215 		unsigned int buf_len, void *label)
216 {
217 	unsigned int len, offset = nd_cmd->in_offset;
218 	u32 *status;
219 	int rc;
220 
221 	if (buf_len < sizeof(*nd_cmd))
222 		return -EINVAL;
223 	if (offset >= LABEL_SIZE)
224 		return -EINVAL;
225 	if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
226 		return -EINVAL;
227 
228 	status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
229 	*status = 0;
230 	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
231 	memcpy(label + offset, nd_cmd->in_buf, len);
232 	rc = buf_len - sizeof(*nd_cmd) - (len + 4);
233 
234 	return rc;
235 }
236 
237 #define NFIT_TEST_ARS_RECORDS 4
238 #define NFIT_TEST_CLEAR_ERR_UNIT 256
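/*
 * The mocked ARS status buffer holds up to 4 error records, and clear-error
 * requests must be aligned to the 256-byte clear-error unit advertised by
 * nfit_test_cmd_ars_cap().
 */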
239 
240 static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
241 		unsigned int buf_len)
242 {
243 	if (buf_len < sizeof(*nd_cmd))
244 		return -EINVAL;
245 
246 	nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
247 		+ NFIT_TEST_ARS_RECORDS * sizeof(struct nd_ars_record);
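	/*
	 * Extended status: the upper 16 bits of 'status' advertise the
	 * supported scrub types (persistent and volatile memory), the
	 * lower 16 bits report success.
	 */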
248 	nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
249 	nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT;
250 
251 	return 0;
252 }
253 
254 /*
255  * Initialize the ars_state to return an ars_result 1 second in the future with
256  * a 4K error range in the middle of the requested address range.
257  */
258 static void post_ars_status(struct ars_state *ars_state, u64 addr, u64 len)
259 {
260 	struct nd_cmd_ars_status *ars_status;
261 	struct nd_ars_record *ars_record;
262 
263 	ars_state->deadline = jiffies + 1*HZ;
264 	ars_status = ars_state->ars_status;
265 	ars_status->status = 0;
266 	ars_status->out_length = sizeof(struct nd_cmd_ars_status)
267 		+ sizeof(struct nd_ars_record);
268 	ars_status->address = addr;
269 	ars_status->length = len;
270 	ars_status->type = ND_ARS_PERSISTENT;
271 	ars_status->num_records = 1;
272 	ars_record = &ars_status->records[0];
273 	ars_record->handle = 0;
274 	ars_record->err_address = addr + len / 2;
275 	ars_record->length = SZ_4K;
276 }
277 
278 static int nfit_test_cmd_ars_start(struct ars_state *ars_state,
279 		struct nd_cmd_ars_start *ars_start, unsigned int buf_len,
280 		int *cmd_rc)
281 {
282 	if (buf_len < sizeof(*ars_start))
283 		return -EINVAL;
284 
285 	spin_lock(&ars_state->lock);
286 	if (time_before(jiffies, ars_state->deadline)) {
287 		ars_start->status = NFIT_ARS_START_BUSY;
288 		*cmd_rc = -EBUSY;
289 	} else {
290 		ars_start->status = 0;
291 		ars_start->scrub_time = 1;
292 		post_ars_status(ars_state, ars_start->address,
293 				ars_start->length);
294 		*cmd_rc = 0;
295 	}
296 	spin_unlock(&ars_state->lock);
297 
298 	return 0;
299 }
300 
301 static int nfit_test_cmd_ars_status(struct ars_state *ars_state,
302 		struct nd_cmd_ars_status *ars_status, unsigned int buf_len,
303 		int *cmd_rc)
304 {
305 	if (buf_len < ars_state->ars_status->out_length)
306 		return -EINVAL;
307 
308 	spin_lock(&ars_state->lock);
309 	if (time_before(jiffies, ars_state->deadline)) {
310 		memset(ars_status, 0, buf_len);
311 		ars_status->status = NFIT_ARS_STATUS_BUSY;
312 		ars_status->out_length = sizeof(*ars_status);
313 		*cmd_rc = -EBUSY;
314 	} else {
315 		memcpy(ars_status, ars_state->ars_status,
316 				ars_state->ars_status->out_length);
317 		*cmd_rc = 0;
318 	}
319 	spin_unlock(&ars_state->lock);
320 	return 0;
321 }
322 
323 static int nfit_test_cmd_clear_error(struct nd_cmd_clear_error *clear_err,
324 		unsigned int buf_len, int *cmd_rc)
325 {
326 	const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1;
327 	if (buf_len < sizeof(*clear_err))
328 		return -EINVAL;
329 
330 	if ((clear_err->address & mask) || (clear_err->length & mask))
331 		return -EINVAL;
332 
333 	/*
334 	 * Report 'all clear' success for all commands even though a new
335 	 * scrub will find errors again.  This is enough to have the
336 	 * error removed from the 'badblocks' tracking in the pmem
337 	 * driver.
338 	 */
339 	clear_err->status = 0;
340 	clear_err->cleared = clear_err->length;
341 	*cmd_rc = 0;
342 	return 0;
343 }
344 
345 static int nfit_test_cmd_smart(struct nd_cmd_smart *smart, unsigned int buf_len)
346 {
347 	static const struct nd_smart_payload smart_data = {
348 		.flags = ND_SMART_HEALTH_VALID | ND_SMART_TEMP_VALID
349 			| ND_SMART_SPARES_VALID | ND_SMART_ALARM_VALID
350 			| ND_SMART_USED_VALID | ND_SMART_SHUTDOWN_VALID,
351 		.health = ND_SMART_NON_CRITICAL_HEALTH,
352 		.temperature = 23 * 16,
353 		.spares = 75,
354 		.alarm_flags = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
355 		.life_used = 5,
356 		.shutdown_state = 0,
357 		.vendor_size = 0,
358 	};
359 
360 	if (buf_len < sizeof(*smart))
361 		return -EINVAL;
362 	memcpy(smart->data, &smart_data, sizeof(smart_data));
363 	return 0;
364 }
365 
366 static int nfit_test_cmd_smart_threshold(struct nd_cmd_smart_threshold *smart_t,
367 		unsigned int buf_len)
368 {
369 	static const struct nd_smart_threshold_payload smart_t_data = {
370 		.alarm_control = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
371 		.temperature = 40 * 16,
372 		.spares = 5,
373 	};
374 
375 	if (buf_len < sizeof(*smart_t))
376 		return -EINVAL;
377 	memcpy(smart_t->data, &smart_t_data, sizeof(smart_t_data));
378 	return 0;
379 }
380 
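/*
 * Mocked bus 'ndctl' callback: DIMM-scoped commands (label config, SMART)
 * are emulated against the per-DIMM backing buffers looked up by device
 * handle, while bus-scoped commands (ARS, clear-error) operate on the shared
 * ars_state.  ND_CMD_CALL packages are unwrapped to their embedded function
 * number before dispatch.
 */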
381 static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
382 		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
383 		unsigned int buf_len, int *cmd_rc)
384 {
385 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
386 	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
387 	unsigned int func = cmd;
388 	int i, rc = 0, __cmd_rc;
389 
390 	if (!cmd_rc)
391 		cmd_rc = &__cmd_rc;
392 	*cmd_rc = 0;
393 
394 	if (nvdimm) {
395 		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
396 		unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
397 
398 		if (!nfit_mem)
399 			return -ENOTTY;
400 
401 		if (cmd == ND_CMD_CALL) {
402 			struct nd_cmd_pkg *call_pkg = buf;
403 
404 			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
405 			buf = (void *) call_pkg->nd_payload;
406 			func = call_pkg->nd_command;
407 			if (call_pkg->nd_family != nfit_mem->family)
408 				return -ENOTTY;
409 		}
410 
411 		if (!test_bit(cmd, &cmd_mask)
412 				|| !test_bit(func, &nfit_mem->dsm_mask))
413 			return -ENOTTY;
414 
415 		/* lookup label space for the given dimm */
416 		for (i = 0; i < ARRAY_SIZE(handle); i++)
417 			if (__to_nfit_memdev(nfit_mem)->device_handle ==
418 					handle[i])
419 				break;
420 		if (i >= ARRAY_SIZE(handle))
421 			return -ENXIO;
422 
423 		if ((1 << func) & dimm_fail_cmd_flags[i])
424 			return -EIO;
425 
426 		switch (func) {
427 		case ND_CMD_GET_CONFIG_SIZE:
428 			rc = nfit_test_cmd_get_config_size(buf, buf_len);
429 			break;
430 		case ND_CMD_GET_CONFIG_DATA:
431 			rc = nfit_test_cmd_get_config_data(buf, buf_len,
432 				t->label[i - t->dcr_idx]);
433 			break;
434 		case ND_CMD_SET_CONFIG_DATA:
435 			rc = nfit_test_cmd_set_config_data(buf, buf_len,
436 				t->label[i - t->dcr_idx]);
437 			break;
438 		case ND_CMD_SMART:
439 			rc = nfit_test_cmd_smart(buf, buf_len);
440 			break;
441 		case ND_CMD_SMART_THRESHOLD:
442 			rc = nfit_test_cmd_smart_threshold(buf, buf_len);
443 			device_lock(&t->pdev.dev);
444 			__acpi_nvdimm_notify(t->dimm_dev[i], 0x81);
445 			device_unlock(&t->pdev.dev);
446 			break;
447 		default:
448 			return -ENOTTY;
449 		}
450 	} else {
451 		struct ars_state *ars_state = &t->ars_state;
452 
453 		if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask))
454 			return -ENOTTY;
455 
456 		switch (func) {
457 		case ND_CMD_ARS_CAP:
458 			rc = nfit_test_cmd_ars_cap(buf, buf_len);
459 			break;
460 		case ND_CMD_ARS_START:
461 			rc = nfit_test_cmd_ars_start(ars_state, buf, buf_len,
462 					cmd_rc);
463 			break;
464 		case ND_CMD_ARS_STATUS:
465 			rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len,
466 					cmd_rc);
467 			break;
468 		case ND_CMD_CLEAR_ERROR:
469 			rc = nfit_test_cmd_clear_error(buf, buf_len, cmd_rc);
470 			break;
471 		default:
472 			return -ENOTTY;
473 		}
474 	}
475 
476 	return rc;
477 }
478 
479 static DEFINE_SPINLOCK(nfit_test_lock);
480 static struct nfit_test *instances[NUM_NFITS];
481 
482 static void release_nfit_res(void *data)
483 {
484 	struct nfit_test_resource *nfit_res = data;
485 
486 	spin_lock(&nfit_test_lock);
487 	list_del(&nfit_res->list);
488 	spin_unlock(&nfit_test_lock);
489 
490 	vfree(nfit_res->buf);
491 	kfree(nfit_res);
492 }
493 
494 static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
495 		void *buf)
496 {
497 	struct device *dev = &t->pdev.dev;
498 	struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
499 			GFP_KERNEL);
500 	int rc;
501 
502 	if (!buf || !nfit_res)
503 		goto err;
504 	rc = devm_add_action(dev, release_nfit_res, nfit_res);
505 	if (rc)
506 		goto err;
507 	INIT_LIST_HEAD(&nfit_res->list);
508 	memset(buf, 0, size);
509 	nfit_res->dev = dev;
510 	nfit_res->buf = buf;
511 	nfit_res->res.start = *dma;
512 	nfit_res->res.end = *dma + size - 1;
513 	nfit_res->res.name = "NFIT";
514 	spin_lock_init(&nfit_res->lock);
515 	INIT_LIST_HEAD(&nfit_res->requests);
516 	spin_lock(&nfit_test_lock);
517 	list_add(&nfit_res->list, &t->resources);
518 	spin_unlock(&nfit_test_lock);
519 
520 	return nfit_res->buf;
521  err:
522 	if (buf)
523 		vfree(buf);
524 	kfree(nfit_res);
525 	return NULL;
526 }
527 
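/*
 * Allocate a vmalloc()'d backing buffer and use its kernel virtual address
 * as the fake DMA/system-physical address, so nfit_test_lookup() can resolve
 * either representation back to the same resource.
 */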
528 static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
529 {
530 	void *buf = vmalloc(size);
531 
532 	*dma = (unsigned long) buf;
533 	return __test_alloc(t, size, dma, buf);
534 }
535 
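/*
 * Resolve an address to one of the mocked resources, matching either the
 * advertised (fake) physical range or the underlying vmalloc buffer.  This
 * is the hook the nfit_test I/O-remap shims use to divert mapping and
 * request_region calls to test-owned memory (an assumption about the wider
 * test harness; the wrappers live outside this file).
 */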
536 static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
537 {
538 	int i;
539 
540 	for (i = 0; i < ARRAY_SIZE(instances); i++) {
541 		struct nfit_test_resource *n, *nfit_res = NULL;
542 		struct nfit_test *t = instances[i];
543 
544 		if (!t)
545 			continue;
546 		spin_lock(&nfit_test_lock);
547 		list_for_each_entry(n, &t->resources, list) {
548 			if (addr >= n->res.start && (addr < n->res.start
549 						+ resource_size(&n->res))) {
550 				nfit_res = n;
551 				break;
552 			} else if (addr >= (unsigned long) n->buf
553 					&& (addr < (unsigned long) n->buf
554 						+ resource_size(&n->res))) {
555 				nfit_res = n;
556 				break;
557 			}
558 		}
559 		spin_unlock(&nfit_test_lock);
560 		if (nfit_res)
561 			return nfit_res;
562 	}
563 
564 	return NULL;
565 }
566 
567 static int ars_state_init(struct device *dev, struct ars_state *ars_state)
568 {
569 	ars_state->ars_status = devm_kzalloc(dev,
570 			sizeof(struct nd_cmd_ars_status)
571 			+ sizeof(struct nd_ars_record) * NFIT_TEST_ARS_RECORDS,
572 			GFP_KERNEL);
573 	if (!ars_state->ars_status)
574 		return -ENOMEM;
575 	spin_lock_init(&ars_state->lock);
576 	return 0;
577 }
578 
579 static void put_dimms(void *data)
580 {
581 	struct device **dimm_dev = data;
582 	int i;
583 
584 	for (i = 0; i < NUM_DCR; i++)
585 		if (dimm_dev[i])
586 			device_unregister(dimm_dev[i]);
587 }
588 
589 static struct class *nfit_test_dimm;
590 
591 static int dimm_name_to_id(struct device *dev)
592 {
593 	int dimm;
594 
595 	if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1
596 			|| dimm >= NUM_DCR || dimm < 0)
597 		return -ENXIO;
598 	return dimm;
599 }
600 
601 
602 static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
603 		char *buf)
604 {
605 	int dimm = dimm_name_to_id(dev);
606 
607 	if (dimm < 0)
608 		return dimm;
609 
610 	return sprintf(buf, "%#x\n", handle[dimm]);
611 }
612 static DEVICE_ATTR_RO(handle);
613 
614 static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
615 		char *buf)
616 {
617 	int dimm = dimm_name_to_id(dev);
618 
619 	if (dimm < 0)
620 		return dimm;
621 
622 	return sprintf(buf, "%#lx\n", dimm_fail_cmd_flags[dimm]);
623 }
624 
625 static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
626 		const char *buf, size_t size)
627 {
628 	int dimm = dimm_name_to_id(dev);
629 	unsigned long val;
630 	ssize_t rc;
631 
632 	if (dimm < 0)
633 		return dimm;
634 
635 	rc = kstrtoul(buf, 0, &val);
636 	if (rc)
637 		return rc;
638 
639 	dimm_fail_cmd_flags[dimm] = val;
640 	return size;
641 }
642 static DEVICE_ATTR_RW(fail_cmd);
643 
644 static struct attribute *nfit_test_dimm_attributes[] = {
645 	&dev_attr_fail_cmd.attr,
646 	&dev_attr_handle.attr,
647 	NULL,
648 };
649 
650 static struct attribute_group nfit_test_dimm_attribute_group = {
651 	.attrs = nfit_test_dimm_attributes,
652 };
653 
654 static const struct attribute_group *nfit_test_dimm_attribute_groups[] = {
655 	&nfit_test_dimm_attribute_group,
656 	NULL,
657 };
658 
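/*
 * Size the NFIT buffer for the tables that nfit_test0_setup() emits (SPA
 * ranges, memory-map entries, BLK and PMEM control regions, data regions,
 * and flush-hint tables), then allocate the backing storage for the mocked
 * SPA ranges, DIMMs, labels, and flush-hint pages.
 */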
659 static int nfit_test0_alloc(struct nfit_test *t)
660 {
661 	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
662 			+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
663 			+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
664 			+ offsetof(struct acpi_nfit_control_region,
665 					window_size) * NUM_DCR
666 			+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
667 			+ (sizeof(struct acpi_nfit_flush_address)
668 					+ sizeof(u64) * NUM_HINTS) * NUM_DCR;
669 	int i;
670 
671 	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
672 	if (!t->nfit_buf)
673 		return -ENOMEM;
674 	t->nfit_size = nfit_size;
675 
676 	t->spa_set[0] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[0]);
677 	if (!t->spa_set[0])
678 		return -ENOMEM;
679 
680 	t->spa_set[1] = test_alloc(t, SPA1_SIZE, &t->spa_set_dma[1]);
681 	if (!t->spa_set[1])
682 		return -ENOMEM;
683 
684 	t->spa_set[2] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[2]);
685 	if (!t->spa_set[2])
686 		return -ENOMEM;
687 
688 	for (i = 0; i < t->num_dcr; i++) {
689 		t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
690 		if (!t->dimm[i])
691 			return -ENOMEM;
692 
693 		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
694 		if (!t->label[i])
695 			return -ENOMEM;
696 		sprintf(t->label[i], "label%d", i);
697 
698 		t->flush[i] = test_alloc(t, max(PAGE_SIZE,
699 					sizeof(u64) * NUM_HINTS),
700 				&t->flush_dma[i]);
701 		if (!t->flush[i])
702 			return -ENOMEM;
703 	}
704 
705 	for (i = 0; i < t->num_dcr; i++) {
706 		t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
707 		if (!t->dcr[i])
708 			return -ENOMEM;
709 	}
710 
711 	t->_fit = test_alloc(t, sizeof(union acpi_object **), &t->_fit_dma);
712 	if (!t->_fit)
713 		return -ENOMEM;
714 
715 	if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t->dimm_dev))
716 		return -ENOMEM;
717 	for (i = 0; i < NUM_DCR; i++) {
718 		t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm,
719 				&t->pdev.dev, 0, NULL,
720 				nfit_test_dimm_attribute_groups,
721 				"test_dimm%d", i);
722 		if (!t->dimm_dev[i])
723 			return -ENOMEM;
724 	}
725 
726 	return ars_state_init(&t->pdev.dev, &t->ars_state);
727 }
728 
729 static int nfit_test1_alloc(struct nfit_test *t)
730 {
731 	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * 2
732 		+ sizeof(struct acpi_nfit_memory_map) * 2
733 		+ offsetof(struct acpi_nfit_control_region, window_size) * 2;
734 	int i;
735 
736 	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
737 	if (!t->nfit_buf)
738 		return -ENOMEM;
739 	t->nfit_size = nfit_size;
740 
741 	t->spa_set[0] = test_alloc(t, SPA2_SIZE, &t->spa_set_dma[0]);
742 	if (!t->spa_set[0])
743 		return -ENOMEM;
744 
745 	for (i = 0; i < t->num_dcr; i++) {
746 		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
747 		if (!t->label[i])
748 			return -ENOMEM;
749 		sprintf(t->label[i], "label%d", i);
750 	}
751 
752 	t->spa_set[1] = test_alloc(t, SPA_VCD_SIZE, &t->spa_set_dma[1]);
753 	if (!t->spa_set[1])
754 		return -ENOMEM;
755 
756 	return ars_state_init(&t->pdev.dev, &t->ars_state);
757 }
758 
759 static void dcr_common_init(struct acpi_nfit_control_region *dcr)
760 {
761 	dcr->vendor_id = 0xabcd;
762 	dcr->device_id = 0;
763 	dcr->revision_id = 1;
764 	dcr->valid_fields = 1;
765 	dcr->manufacturing_location = 0xa;
766 	dcr->manufacturing_date = cpu_to_be16(2016);
767 }
768 
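/*
 * Populate the bus0 NFIT: 10 SPA ranges (2 PMEM, 4 DCR, 4 BDW), 14
 * memory-map entries, 4 BLK and 4 PMEM control regions, 4 block data
 * windows, and 4 flush-hint tables, plus an extra DIMM's worth of tables
 * when hotplug testing is enabled.
 */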
769 static void nfit_test0_setup(struct nfit_test *t)
770 {
771 	const int flush_hint_size = sizeof(struct acpi_nfit_flush_address)
772 		+ (sizeof(u64) * NUM_HINTS);
773 	struct acpi_nfit_desc *acpi_desc;
774 	struct acpi_nfit_memory_map *memdev;
775 	void *nfit_buf = t->nfit_buf;
776 	struct acpi_nfit_system_address *spa;
777 	struct acpi_nfit_control_region *dcr;
778 	struct acpi_nfit_data_region *bdw;
779 	struct acpi_nfit_flush_address *flush;
780 	unsigned int offset, i;
781 
782 	/*
783 	 * spa0 (interleave first half of dimm0 and dimm1, note storage
784 	 * does not actually alias the related block-data-window
785 	 * regions)
786 	 */
787 	spa = nfit_buf;
788 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
789 	spa->header.length = sizeof(*spa);
790 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
791 	spa->range_index = 0+1;
792 	spa->address = t->spa_set_dma[0];
793 	spa->length = SPA0_SIZE;
794 
795 	/*
796 	 * spa1 (interleave last half of the 4 DIMMS, note storage
797 	 * does not actually alias the related block-data-window
798 	 * regions)
799 	 */
800 	spa = nfit_buf + sizeof(*spa);
801 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
802 	spa->header.length = sizeof(*spa);
803 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
804 	spa->range_index = 1+1;
805 	spa->address = t->spa_set_dma[1];
806 	spa->length = SPA1_SIZE;
807 
808 	/* spa2 (dcr0) dimm0 */
809 	spa = nfit_buf + sizeof(*spa) * 2;
810 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
811 	spa->header.length = sizeof(*spa);
812 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
813 	spa->range_index = 2+1;
814 	spa->address = t->dcr_dma[0];
815 	spa->length = DCR_SIZE;
816 
817 	/* spa3 (dcr1) dimm1 */
818 	spa = nfit_buf + sizeof(*spa) * 3;
819 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
820 	spa->header.length = sizeof(*spa);
821 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
822 	spa->range_index = 3+1;
823 	spa->address = t->dcr_dma[1];
824 	spa->length = DCR_SIZE;
825 
826 	/* spa4 (dcr2) dimm2 */
827 	spa = nfit_buf + sizeof(*spa) * 4;
828 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
829 	spa->header.length = sizeof(*spa);
830 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
831 	spa->range_index = 4+1;
832 	spa->address = t->dcr_dma[2];
833 	spa->length = DCR_SIZE;
834 
835 	/* spa5 (dcr3) dimm3 */
836 	spa = nfit_buf + sizeof(*spa) * 5;
837 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
838 	spa->header.length = sizeof(*spa);
839 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
840 	spa->range_index = 5+1;
841 	spa->address = t->dcr_dma[3];
842 	spa->length = DCR_SIZE;
843 
844 	/* spa6 (bdw for dcr0) dimm0 */
845 	spa = nfit_buf + sizeof(*spa) * 6;
846 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
847 	spa->header.length = sizeof(*spa);
848 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
849 	spa->range_index = 6+1;
850 	spa->address = t->dimm_dma[0];
851 	spa->length = DIMM_SIZE;
852 
853 	/* spa7 (bdw for dcr1) dimm1 */
854 	spa = nfit_buf + sizeof(*spa) * 7;
855 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
856 	spa->header.length = sizeof(*spa);
857 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
858 	spa->range_index = 7+1;
859 	spa->address = t->dimm_dma[1];
860 	spa->length = DIMM_SIZE;
861 
862 	/* spa8 (bdw for dcr2) dimm2 */
863 	spa = nfit_buf + sizeof(*spa) * 8;
864 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
865 	spa->header.length = sizeof(*spa);
866 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
867 	spa->range_index = 8+1;
868 	spa->address = t->dimm_dma[2];
869 	spa->length = DIMM_SIZE;
870 
871 	/* spa9 (bdw for dcr3) dimm3 */
872 	spa = nfit_buf + sizeof(*spa) * 9;
873 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
874 	spa->header.length = sizeof(*spa);
875 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
876 	spa->range_index = 9+1;
877 	spa->address = t->dimm_dma[3];
878 	spa->length = DIMM_SIZE;
879 
880 	offset = sizeof(*spa) * 10;
881 	/* mem-region0 (spa0, dimm0) */
882 	memdev = nfit_buf + offset;
883 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
884 	memdev->header.length = sizeof(*memdev);
885 	memdev->device_handle = handle[0];
886 	memdev->physical_id = 0;
887 	memdev->region_id = 0;
888 	memdev->range_index = 0+1;
889 	memdev->region_index = 4+1;
890 	memdev->region_size = SPA0_SIZE/2;
891 	memdev->region_offset = 1;
892 	memdev->address = 0;
893 	memdev->interleave_index = 0;
894 	memdev->interleave_ways = 2;
895 
896 	/* mem-region1 (spa0, dimm1) */
897 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map);
898 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
899 	memdev->header.length = sizeof(*memdev);
900 	memdev->device_handle = handle[1];
901 	memdev->physical_id = 1;
902 	memdev->region_id = 0;
903 	memdev->range_index = 0+1;
904 	memdev->region_index = 5+1;
905 	memdev->region_size = SPA0_SIZE/2;
906 	memdev->region_offset = (1 << 8);
907 	memdev->address = 0;
908 	memdev->interleave_index = 0;
909 	memdev->interleave_ways = 2;
910 	memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
911 
912 	/* mem-region2 (spa1, dimm0) */
913 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
914 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
915 	memdev->header.length = sizeof(*memdev);
916 	memdev->device_handle = handle[0];
917 	memdev->physical_id = 0;
918 	memdev->region_id = 1;
919 	memdev->range_index = 1+1;
920 	memdev->region_index = 4+1;
921 	memdev->region_size = SPA1_SIZE/4;
922 	memdev->region_offset = (1 << 16);
923 	memdev->address = SPA0_SIZE/2;
924 	memdev->interleave_index = 0;
925 	memdev->interleave_ways = 4;
926 	memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
927 
928 	/* mem-region3 (spa1, dimm1) */
929 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
930 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
931 	memdev->header.length = sizeof(*memdev);
932 	memdev->device_handle = handle[1];
933 	memdev->physical_id = 1;
934 	memdev->region_id = 1;
935 	memdev->range_index = 1+1;
936 	memdev->region_index = 5+1;
937 	memdev->region_size = SPA1_SIZE/4;
938 	memdev->region_offset = (1 << 24);
939 	memdev->address = SPA0_SIZE/2;
940 	memdev->interleave_index = 0;
941 	memdev->interleave_ways = 4;
942 
943 	/* mem-region4 (spa1, dimm2) */
944 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4;
945 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
946 	memdev->header.length = sizeof(*memdev);
947 	memdev->device_handle = handle[2];
948 	memdev->physical_id = 2;
949 	memdev->region_id = 0;
950 	memdev->range_index = 1+1;
951 	memdev->region_index = 6+1;
952 	memdev->region_size = SPA1_SIZE/4;
953 	memdev->region_offset = (1ULL << 32);
954 	memdev->address = SPA0_SIZE/2;
955 	memdev->interleave_index = 0;
956 	memdev->interleave_ways = 4;
957 	memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
958 
959 	/* mem-region5 (spa1, dimm3) */
960 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
961 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
962 	memdev->header.length = sizeof(*memdev);
963 	memdev->device_handle = handle[3];
964 	memdev->physical_id = 3;
965 	memdev->region_id = 0;
966 	memdev->range_index = 1+1;
967 	memdev->region_index = 7+1;
968 	memdev->region_size = SPA1_SIZE/4;
969 	memdev->region_offset = (1ULL << 40);
970 	memdev->address = SPA0_SIZE/2;
971 	memdev->interleave_index = 0;
972 	memdev->interleave_ways = 4;
973 
974 	/* mem-region6 (spa/dcr0, dimm0) */
975 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6;
976 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
977 	memdev->header.length = sizeof(*memdev);
978 	memdev->device_handle = handle[0];
979 	memdev->physical_id = 0;
980 	memdev->region_id = 0;
981 	memdev->range_index = 2+1;
982 	memdev->region_index = 0+1;
983 	memdev->region_size = 0;
984 	memdev->region_offset = 0;
985 	memdev->address = 0;
986 	memdev->interleave_index = 0;
987 	memdev->interleave_ways = 1;
988 
989 	/* mem-region7 (spa/dcr1, dimm1) */
990 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7;
991 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
992 	memdev->header.length = sizeof(*memdev);
993 	memdev->device_handle = handle[1];
994 	memdev->physical_id = 1;
995 	memdev->region_id = 0;
996 	memdev->range_index = 3+1;
997 	memdev->region_index = 1+1;
998 	memdev->region_size = 0;
999 	memdev->region_offset = 0;
1000 	memdev->address = 0;
1001 	memdev->interleave_index = 0;
1002 	memdev->interleave_ways = 1;
1003 
1004 	/* mem-region8 (spa/dcr2, dimm2) */
1005 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8;
1006 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1007 	memdev->header.length = sizeof(*memdev);
1008 	memdev->device_handle = handle[2];
1009 	memdev->physical_id = 2;
1010 	memdev->region_id = 0;
1011 	memdev->range_index = 4+1;
1012 	memdev->region_index = 2+1;
1013 	memdev->region_size = 0;
1014 	memdev->region_offset = 0;
1015 	memdev->address = 0;
1016 	memdev->interleave_index = 0;
1017 	memdev->interleave_ways = 1;
1018 
1019 	/* mem-region9 (spa/dcr3, dimm3) */
1020 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9;
1021 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1022 	memdev->header.length = sizeof(*memdev);
1023 	memdev->device_handle = handle[3];
1024 	memdev->physical_id = 3;
1025 	memdev->region_id = 0;
1026 	memdev->range_index = 5+1;
1027 	memdev->region_index = 3+1;
1028 	memdev->region_size = 0;
1029 	memdev->region_offset = 0;
1030 	memdev->address = 0;
1031 	memdev->interleave_index = 0;
1032 	memdev->interleave_ways = 1;
1033 
1034 	/* mem-region10 (spa/bdw0, dimm0) */
1035 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10;
1036 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1037 	memdev->header.length = sizeof(*memdev);
1038 	memdev->device_handle = handle[0];
1039 	memdev->physical_id = 0;
1040 	memdev->region_id = 0;
1041 	memdev->range_index = 6+1;
1042 	memdev->region_index = 0+1;
1043 	memdev->region_size = 0;
1044 	memdev->region_offset = 0;
1045 	memdev->address = 0;
1046 	memdev->interleave_index = 0;
1047 	memdev->interleave_ways = 1;
1048 
1049 	/* mem-region11 (spa/bdw1, dimm1) */
1050 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11;
1051 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1052 	memdev->header.length = sizeof(*memdev);
1053 	memdev->device_handle = handle[1];
1054 	memdev->physical_id = 1;
1055 	memdev->region_id = 0;
1056 	memdev->range_index = 7+1;
1057 	memdev->region_index = 1+1;
1058 	memdev->region_size = 0;
1059 	memdev->region_offset = 0;
1060 	memdev->address = 0;
1061 	memdev->interleave_index = 0;
1062 	memdev->interleave_ways = 1;
1063 
1064 	/* mem-region12 (spa/bdw2, dimm2) */
1065 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12;
1066 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1067 	memdev->header.length = sizeof(*memdev);
1068 	memdev->device_handle = handle[2];
1069 	memdev->physical_id = 2;
1070 	memdev->region_id = 0;
1071 	memdev->range_index = 8+1;
1072 	memdev->region_index = 2+1;
1073 	memdev->region_size = 0;
1074 	memdev->region_offset = 0;
1075 	memdev->address = 0;
1076 	memdev->interleave_index = 0;
1077 	memdev->interleave_ways = 1;
1078 
1079 	/* mem-region13 (spa/bdw3, dimm3) */
1080 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13;
1081 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1082 	memdev->header.length = sizeof(*memdev);
1083 	memdev->device_handle = handle[3];
1084 	memdev->physical_id = 3;
1085 	memdev->region_id = 0;
1086 	memdev->range_index = 9+1;
1087 	memdev->region_index = 3+1;
1088 	memdev->region_size = 0;
1089 	memdev->region_offset = 0;
1090 	memdev->address = 0;
1091 	memdev->interleave_index = 0;
1092 	memdev->interleave_ways = 1;
1093 	memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
1094 
1095 	offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
1096 	/* dcr-descriptor0: blk */
1097 	dcr = nfit_buf + offset;
1098 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1099 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
1100 	dcr->region_index = 0+1;
1101 	dcr_common_init(dcr);
1102 	dcr->serial_number = ~handle[0];
1103 	dcr->code = NFIT_FIC_BLK;
1104 	dcr->windows = 1;
1105 	dcr->window_size = DCR_SIZE;
1106 	dcr->command_offset = 0;
1107 	dcr->command_size = 8;
1108 	dcr->status_offset = 8;
1109 	dcr->status_size = 4;
1110 
1111 	/* dcr-descriptor1: blk */
1112 	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region);
1113 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1114 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
1115 	dcr->region_index = 1+1;
1116 	dcr_common_init(dcr);
1117 	dcr->serial_number = ~handle[1];
1118 	dcr->code = NFIT_FIC_BLK;
1119 	dcr->windows = 1;
1120 	dcr->window_size = DCR_SIZE;
1121 	dcr->command_offset = 0;
1122 	dcr->command_size = 8;
1123 	dcr->status_offset = 8;
1124 	dcr->status_size = 4;
1125 
1126 	/* dcr-descriptor2: blk */
1127 	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2;
1128 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1129 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
1130 	dcr->region_index = 2+1;
1131 	dcr_common_init(dcr);
1132 	dcr->serial_number = ~handle[2];
1133 	dcr->code = NFIT_FIC_BLK;
1134 	dcr->windows = 1;
1135 	dcr->window_size = DCR_SIZE;
1136 	dcr->command_offset = 0;
1137 	dcr->command_size = 8;
1138 	dcr->status_offset = 8;
1139 	dcr->status_size = 4;
1140 
1141 	/* dcr-descriptor3: blk */
1142 	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3;
1143 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1144 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
1145 	dcr->region_index = 3+1;
1146 	dcr_common_init(dcr);
1147 	dcr->serial_number = ~handle[3];
1148 	dcr->code = NFIT_FIC_BLK;
1149 	dcr->windows = 1;
1150 	dcr->window_size = DCR_SIZE;
1151 	dcr->command_offset = 0;
1152 	dcr->command_size = 8;
1153 	dcr->status_offset = 8;
1154 	dcr->status_size = 4;
1155 
1156 	offset = offset + sizeof(struct acpi_nfit_control_region) * 4;
1157 	/* dcr-descriptor0: pmem */
1158 	dcr = nfit_buf + offset;
1159 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1160 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1161 			window_size);
1162 	dcr->region_index = 4+1;
1163 	dcr_common_init(dcr);
1164 	dcr->serial_number = ~handle[0];
1165 	dcr->code = NFIT_FIC_BYTEN;
1166 	dcr->windows = 0;
1167 
1168 	/* dcr-descriptor1: pmem */
1169 	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1170 			window_size);
1171 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1172 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1173 			window_size);
1174 	dcr->region_index = 5+1;
1175 	dcr_common_init(dcr);
1176 	dcr->serial_number = ~handle[1];
1177 	dcr->code = NFIT_FIC_BYTEN;
1178 	dcr->windows = 0;
1179 
1180 	/* dcr-descriptor2: pmem */
1181 	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1182 			window_size) * 2;
1183 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1184 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1185 			window_size);
1186 	dcr->region_index = 6+1;
1187 	dcr_common_init(dcr);
1188 	dcr->serial_number = ~handle[2];
1189 	dcr->code = NFIT_FIC_BYTEN;
1190 	dcr->windows = 0;
1191 
1192 	/* dcr-descriptor3: pmem */
1193 	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1194 			window_size) * 3;
1195 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1196 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1197 			window_size);
1198 	dcr->region_index = 7+1;
1199 	dcr_common_init(dcr);
1200 	dcr->serial_number = ~handle[3];
1201 	dcr->code = NFIT_FIC_BYTEN;
1202 	dcr->windows = 0;
1203 
1204 	offset = offset + offsetof(struct acpi_nfit_control_region,
1205 			window_size) * 4;
1206 	/* bdw0 (spa/dcr0, dimm0) */
1207 	bdw = nfit_buf + offset;
1208 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1209 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1210 	bdw->region_index = 0+1;
1211 	bdw->windows = 1;
1212 	bdw->offset = 0;
1213 	bdw->size = BDW_SIZE;
1214 	bdw->capacity = DIMM_SIZE;
1215 	bdw->start_address = 0;
1216 
1217 	/* bdw1 (spa/dcr1, dimm1) */
1218 	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region);
1219 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1220 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1221 	bdw->region_index = 1+1;
1222 	bdw->windows = 1;
1223 	bdw->offset = 0;
1224 	bdw->size = BDW_SIZE;
1225 	bdw->capacity = DIMM_SIZE;
1226 	bdw->start_address = 0;
1227 
1228 	/* bdw2 (spa/dcr2, dimm2) */
1229 	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2;
1230 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1231 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1232 	bdw->region_index = 2+1;
1233 	bdw->windows = 1;
1234 	bdw->offset = 0;
1235 	bdw->size = BDW_SIZE;
1236 	bdw->capacity = DIMM_SIZE;
1237 	bdw->start_address = 0;
1238 
1239 	/* bdw3 (spa/dcr3, dimm3) */
1240 	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3;
1241 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1242 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1243 	bdw->region_index = 3+1;
1244 	bdw->windows = 1;
1245 	bdw->offset = 0;
1246 	bdw->size = BDW_SIZE;
1247 	bdw->capacity = DIMM_SIZE;
1248 	bdw->start_address = 0;
1249 
1250 	offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
1251 	/* flush0 (dimm0) */
1252 	flush = nfit_buf + offset;
1253 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1254 	flush->header.length = flush_hint_size;
1255 	flush->device_handle = handle[0];
1256 	flush->hint_count = NUM_HINTS;
1257 	for (i = 0; i < NUM_HINTS; i++)
1258 		flush->hint_address[i] = t->flush_dma[0] + i * sizeof(u64);
1259 
1260 	/* flush1 (dimm1) */
1261 	flush = nfit_buf + offset + flush_hint_size * 1;
1262 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1263 	flush->header.length = flush_hint_size;
1264 	flush->device_handle = handle[1];
1265 	flush->hint_count = NUM_HINTS;
1266 	for (i = 0; i < NUM_HINTS; i++)
1267 		flush->hint_address[i] = t->flush_dma[1] + i * sizeof(u64);
1268 
1269 	/* flush2 (dimm2) */
1270 	flush = nfit_buf + offset + flush_hint_size * 2;
1271 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1272 	flush->header.length = flush_hint_size;
1273 	flush->device_handle = handle[2];
1274 	flush->hint_count = NUM_HINTS;
1275 	for (i = 0; i < NUM_HINTS; i++)
1276 		flush->hint_address[i] = t->flush_dma[2] + i * sizeof(u64);
1277 
1278 	/* flush3 (dimm3) */
1279 	flush = nfit_buf + offset + flush_hint_size * 3;
1280 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1281 	flush->header.length = flush_hint_size;
1282 	flush->device_handle = handle[3];
1283 	flush->hint_count = NUM_HINTS;
1284 	for (i = 0; i < NUM_HINTS; i++)
1285 		flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);
1286 
1287 	if (t->setup_hotplug) {
1288 		offset = offset + flush_hint_size * 4;
1289 		/* dcr-descriptor4: blk */
1290 		dcr = nfit_buf + offset;
1291 		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1292 		dcr->header.length = sizeof(struct acpi_nfit_control_region);
1293 		dcr->region_index = 8+1;
1294 		dcr_common_init(dcr);
1295 		dcr->serial_number = ~handle[4];
1296 		dcr->code = NFIT_FIC_BLK;
1297 		dcr->windows = 1;
1298 		dcr->window_size = DCR_SIZE;
1299 		dcr->command_offset = 0;
1300 		dcr->command_size = 8;
1301 		dcr->status_offset = 8;
1302 		dcr->status_size = 4;
1303 
1304 		offset = offset + sizeof(struct acpi_nfit_control_region);
1305 		/* dcr-descriptor4: pmem */
1306 		dcr = nfit_buf + offset;
1307 		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1308 		dcr->header.length = offsetof(struct acpi_nfit_control_region,
1309 				window_size);
1310 		dcr->region_index = 9+1;
1311 		dcr_common_init(dcr);
1312 		dcr->serial_number = ~handle[4];
1313 		dcr->code = NFIT_FIC_BYTEN;
1314 		dcr->windows = 0;
1315 
1316 		offset = offset + offsetof(struct acpi_nfit_control_region,
1317 				window_size);
1318 		/* bdw4 (spa/dcr4, dimm4) */
1319 		bdw = nfit_buf + offset;
1320 		bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1321 		bdw->header.length = sizeof(struct acpi_nfit_data_region);
1322 		bdw->region_index = 8+1;
1323 		bdw->windows = 1;
1324 		bdw->offset = 0;
1325 		bdw->size = BDW_SIZE;
1326 		bdw->capacity = DIMM_SIZE;
1327 		bdw->start_address = 0;
1328 
1329 		offset = offset + sizeof(struct acpi_nfit_data_region);
1330 		/* spa10 (dcr4) dimm4 */
1331 		spa = nfit_buf + offset;
1332 		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1333 		spa->header.length = sizeof(*spa);
1334 		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1335 		spa->range_index = 10+1;
1336 		spa->address = t->dcr_dma[4];
1337 		spa->length = DCR_SIZE;
1338 
1339 		/*
1340 		 * spa11 (single-dimm interleave for hotplug, note storage
1341 		 * does not actually alias the related block-data-window
1342 		 * regions)
1343 		 */
1344 		spa = nfit_buf + offset + sizeof(*spa);
1345 		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1346 		spa->header.length = sizeof(*spa);
1347 		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1348 		spa->range_index = 11+1;
1349 		spa->address = t->spa_set_dma[2];
1350 		spa->length = SPA0_SIZE;
1351 
1352 		/* spa12 (bdw for dcr4) dimm4 */
1353 		spa = nfit_buf + offset + sizeof(*spa) * 2;
1354 		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1355 		spa->header.length = sizeof(*spa);
1356 		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1357 		spa->range_index = 12+1;
1358 		spa->address = t->dimm_dma[4];
1359 		spa->length = DIMM_SIZE;
1360 
1361 		offset = offset + sizeof(*spa) * 3;
1362 		/* mem-region14 (spa/dcr4, dimm4) */
1363 		memdev = nfit_buf + offset;
1364 		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1365 		memdev->header.length = sizeof(*memdev);
1366 		memdev->device_handle = handle[4];
1367 		memdev->physical_id = 4;
1368 		memdev->region_id = 0;
1369 		memdev->range_index = 10+1;
1370 		memdev->region_index = 8+1;
1371 		memdev->region_size = 0;
1372 		memdev->region_offset = 0;
1373 		memdev->address = 0;
1374 		memdev->interleave_index = 0;
1375 		memdev->interleave_ways = 1;
1376 
1377 		/* mem-region15 (spa0, dimm4) */
1378 		memdev = nfit_buf + offset +
1379 				sizeof(struct acpi_nfit_memory_map);
1380 		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1381 		memdev->header.length = sizeof(*memdev);
1382 		memdev->device_handle = handle[4];
1383 		memdev->physical_id = 4;
1384 		memdev->region_id = 0;
1385 		memdev->range_index = 11+1;
1386 		memdev->region_index = 9+1;
1387 		memdev->region_size = SPA0_SIZE;
1388 		memdev->region_offset = (1ULL << 48);
1389 		memdev->address = 0;
1390 		memdev->interleave_index = 0;
1391 		memdev->interleave_ways = 1;
1392 		memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
1393 
1394 		/* mem-region16 (spa/bdw4, dimm4) */
1395 		memdev = nfit_buf + offset +
1396 				sizeof(struct acpi_nfit_memory_map) * 2;
1397 		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1398 		memdev->header.length = sizeof(*memdev);
1399 		memdev->device_handle = handle[4];
1400 		memdev->physical_id = 4;
1401 		memdev->region_id = 0;
1402 		memdev->range_index = 12+1;
1403 		memdev->region_index = 8+1;
1404 		memdev->region_size = 0;
1405 		memdev->region_offset = 0;
1406 		memdev->address = 0;
1407 		memdev->interleave_index = 0;
1408 		memdev->interleave_ways = 1;
1409 
1410 		offset = offset + sizeof(struct acpi_nfit_memory_map) * 3;
1411 		/* flush4 (dimm4) */
1412 		flush = nfit_buf + offset;
1413 		flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1414 		flush->header.length = flush_hint_size;
1415 		flush->device_handle = handle[4];
1416 		flush->hint_count = NUM_HINTS;
1417 		for (i = 0; i < NUM_HINTS; i++)
1418 			flush->hint_address[i] = t->flush_dma[4]
1419 				+ i * sizeof(u64);
1420 	}
1421 
1422 	post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA0_SIZE);
1423 
1424 	acpi_desc = &t->acpi_desc;
1425 	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
1426 	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
1427 	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
1428 	set_bit(ND_CMD_SMART, &acpi_desc->dimm_cmd_force_en);
1429 	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
1430 	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
1431 	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
1432 	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
1433 	set_bit(ND_CMD_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
1434 }
1435 
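/*
 * Populate the bus1 NFIT: a plain PMEM range plus a virtual-CD range, one
 * DIMM flagged with save/restore/flush failures and 'not armed', and a
 * second DIMM whose memory map is marked failed, to exercise the nfit
 * driver's error handling.
 */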
1436 static void nfit_test1_setup(struct nfit_test *t)
1437 {
1438 	size_t offset;
1439 	void *nfit_buf = t->nfit_buf;
1440 	struct acpi_nfit_memory_map *memdev;
1441 	struct acpi_nfit_control_region *dcr;
1442 	struct acpi_nfit_system_address *spa;
1443 	struct acpi_nfit_desc *acpi_desc;
1444 
1445 	offset = 0;
1446 	/* spa0 (flat range with no bdw aliasing) */
1447 	spa = nfit_buf + offset;
1448 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1449 	spa->header.length = sizeof(*spa);
1450 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1451 	spa->range_index = 0+1;
1452 	spa->address = t->spa_set_dma[0];
1453 	spa->length = SPA2_SIZE;
1454 
1455 	/* virtual cd region */
1456 	spa = nfit_buf + sizeof(*spa);
1457 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1458 	spa->header.length = sizeof(*spa);
1459 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
1460 	spa->range_index = 0;
1461 	spa->address = t->spa_set_dma[1];
1462 	spa->length = SPA_VCD_SIZE;
1463 
1464 	offset += sizeof(*spa) * 2;
1465 	/* mem-region0 (spa0, dimm0) */
1466 	memdev = nfit_buf + offset;
1467 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1468 	memdev->header.length = sizeof(*memdev);
1469 	memdev->device_handle = handle[5];
1470 	memdev->physical_id = 0;
1471 	memdev->region_id = 0;
1472 	memdev->range_index = 0+1;
1473 	memdev->region_index = 0+1;
1474 	memdev->region_size = SPA2_SIZE;
1475 	memdev->region_offset = 0;
1476 	memdev->address = 0;
1477 	memdev->interleave_index = 0;
1478 	memdev->interleave_ways = 1;
1479 	memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
1480 		| ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
1481 		| ACPI_NFIT_MEM_NOT_ARMED;
1482 
1483 	offset += sizeof(*memdev);
1484 	/* dcr-descriptor0 */
1485 	dcr = nfit_buf + offset;
1486 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1487 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1488 			window_size);
1489 	dcr->region_index = 0+1;
1490 	dcr_common_init(dcr);
1491 	dcr->serial_number = ~handle[5];
1492 	dcr->code = NFIT_FIC_BYTE;
1493 	dcr->windows = 0;
1494 
1495 	offset += dcr->header.length;
1496 	memdev = nfit_buf + offset;
1497 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1498 	memdev->header.length = sizeof(*memdev);
1499 	memdev->device_handle = handle[6];
1500 	memdev->physical_id = 0;
1501 	memdev->region_id = 0;
1502 	memdev->range_index = 0;
1503 	memdev->region_index = 0+2;
1504 	memdev->region_size = SPA2_SIZE;
1505 	memdev->region_offset = 0;
1506 	memdev->address = 0;
1507 	memdev->interleave_index = 0;
1508 	memdev->interleave_ways = 1;
1509 	memdev->flags = ACPI_NFIT_MEM_MAP_FAILED;
1510 
1511 	/* dcr-descriptor1 */
1512 	offset += sizeof(*memdev);
1513 	dcr = nfit_buf + offset;
1514 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1515 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1516 			window_size);
1517 	dcr->region_index = 0+2;
1518 	dcr_common_init(dcr);
1519 	dcr->serial_number = ~handle[6];
1520 	dcr->code = NFIT_FIC_BYTE;
1521 	dcr->windows = 0;
1522 
1523 	post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA2_SIZE);
1524 
1525 	acpi_desc = &t->acpi_desc;
1526 	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
1527 	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
1528 	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
1529 	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
1530 }
1531 
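/*
 * Minimal BLK aperture I/O: since the mocked block-data-window is ordinary
 * memory, reads and writes reduce to memcpy() against the mapped aperture,
 * with the same lane acquire/release discipline as the real driver.
 */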
1532 static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
1533 		void *iobuf, u64 len, int rw)
1534 {
1535 	struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
1536 	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
1537 	struct nd_region *nd_region = &ndbr->nd_region;
1538 	unsigned int lane;
1539 
1540 	lane = nd_region_acquire_lane(nd_region);
1541 	if (rw)
1542 		memcpy(mmio->addr.base + dpa, iobuf, len);
1543 	else {
1544 		memcpy(iobuf, mmio->addr.base + dpa, len);
1545 
1546 		/* give us some coverage of the arch_invalidate_pmem() API */
1547 		arch_invalidate_pmem(mmio->addr.base + dpa, len);
1548 	}
1549 	nd_region_release_lane(nd_region, lane);
1550 
1551 	return 0;
1552 }
1553 
1554 static unsigned long nfit_ctl_handle;
1555 
1556 static union acpi_object *result;
1557 
1558 static union acpi_object *nfit_test_evaluate_dsm(acpi_handle handle,
1559 		const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4)
1560 {
1561 	if (handle != &nfit_ctl_handle)
1562 		return ERR_PTR(-ENXIO);
1563 
1564 	return result;
1565 }
1566 
1567 static int setup_result(void *buf, size_t size)
1568 {
1569 	result = kmalloc(sizeof(union acpi_object) + size, GFP_KERNEL);
1570 	if (!result)
1571 		return -ENOMEM;
1572 	result->package.type = ACPI_TYPE_BUFFER;
1573 	result->buffer.pointer = (void *) (result + 1);
1574 	result->buffer.length = size;
1575 	memcpy(result->buffer.pointer, buf, size);
1576 	memset(buf, 0, size);
1577 	return 0;
1578 }
1579 
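/*
 * Unit test for acpi_nfit_ctl(): nfit_test_evaluate_dsm() stands in for the
 * ACPI _DSM evaluation and hands back the canned buffer staged by
 * setup_result(), letting the test drive the command marshalling and
 * status-translation paths with synthetic descriptors and DIMMs.
 */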
1580 static int nfit_ctl_test(struct device *dev)
1581 {
1582 	int rc, cmd_rc;
1583 	struct nvdimm *nvdimm;
1584 	struct acpi_device *adev;
1585 	struct nfit_mem *nfit_mem;
1586 	struct nd_ars_record *record;
1587 	struct acpi_nfit_desc *acpi_desc;
1588 	const u64 test_val = 0x0123456789abcdefULL;
1589 	unsigned long mask, cmd_size, offset;
1590 	union {
1591 		struct nd_cmd_get_config_size cfg_size;
1592 		struct nd_cmd_ars_status ars_stat;
1593 		struct nd_cmd_ars_cap ars_cap;
1594 		char buf[sizeof(struct nd_cmd_ars_status)
1595 			+ sizeof(struct nd_ars_record)];
1596 	} cmds;
1597 
1598 	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
1599 	if (!adev)
1600 		return -ENOMEM;
1601 	*adev = (struct acpi_device) {
1602 		.handle = &nfit_ctl_handle,
1603 		.dev = {
1604 			.init_name = "test-adev",
1605 		},
1606 	};
1607 
1608 	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
1609 	if (!acpi_desc)
1610 		return -ENOMEM;
1611 	*acpi_desc = (struct acpi_nfit_desc) {
1612 		.nd_desc = {
1613 			.cmd_mask = 1UL << ND_CMD_ARS_CAP
1614 				| 1UL << ND_CMD_ARS_START
1615 				| 1UL << ND_CMD_ARS_STATUS
1616 				| 1UL << ND_CMD_CLEAR_ERROR,
1617 			.module = THIS_MODULE,
1618 			.provider_name = "ACPI.NFIT",
1619 			.ndctl = acpi_nfit_ctl,
1620 		},
1621 		.dev = &adev->dev,
1622 	};
1623 
1624 	nfit_mem = devm_kzalloc(dev, sizeof(*nfit_mem), GFP_KERNEL);
1625 	if (!nfit_mem)
1626 		return -ENOMEM;
1627 
1628 	mask = 1UL << ND_CMD_SMART | 1UL << ND_CMD_SMART_THRESHOLD
1629 		| 1UL << ND_CMD_DIMM_FLAGS | 1UL << ND_CMD_GET_CONFIG_SIZE
1630 		| 1UL << ND_CMD_GET_CONFIG_DATA | 1UL << ND_CMD_SET_CONFIG_DATA
1631 		| 1UL << ND_CMD_VENDOR;
1632 	*nfit_mem = (struct nfit_mem) {
1633 		.adev = adev,
1634 		.family = NVDIMM_FAMILY_INTEL,
1635 		.dsm_mask = mask,
1636 	};
1637 
1638 	nvdimm = devm_kzalloc(dev, sizeof(*nvdimm), GFP_KERNEL);
1639 	if (!nvdimm)
1640 		return -ENOMEM;
1641 	*nvdimm = (struct nvdimm) {
1642 		.provider_data = nfit_mem,
1643 		.cmd_mask = mask,
1644 		.dev = {
1645 			.init_name = "test-dimm",
1646 		},
1647 	};
1648 
1649 
1650 	/* basic checkout of a typical 'get config size' command */
1651 	cmd_size = sizeof(cmds.cfg_size);
1652 	cmds.cfg_size = (struct nd_cmd_get_config_size) {
1653 		.status = 0,
1654 		.config_size = SZ_128K,
1655 		.max_xfer = SZ_4K,
1656 	};
1657 	rc = setup_result(cmds.buf, cmd_size);
1658 	if (rc)
1659 		return rc;
1660 	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
1661 			cmds.buf, cmd_size, &cmd_rc);
1662 
1663 	if (rc < 0 || cmd_rc || cmds.cfg_size.status != 0
1664 			|| cmds.cfg_size.config_size != SZ_128K
1665 			|| cmds.cfg_size.max_xfer != SZ_4K) {
1666 		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
1667 				__func__, __LINE__, rc, cmd_rc);
1668 		return -EIO;
1669 	}
1670 
1671 
1672 	/* test ars_status with zero output */
1673 	cmd_size = offsetof(struct nd_cmd_ars_status, address);
1674 	cmds.ars_stat = (struct nd_cmd_ars_status) {
1675 		.out_length = 0,
1676 	};
1677 	rc = setup_result(cmds.buf, cmd_size);
1678 	if (rc)
1679 		return rc;
1680 	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
1681 			cmds.buf, cmd_size, &cmd_rc);
1682 
1683 	if (rc < 0 || cmd_rc) {
1684 		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
1685 				__func__, __LINE__, rc, cmd_rc);
1686 		return -EIO;
1687 	}
1688 
1689 
1690 	/* test ars_cap with benign extended status */
1691 	cmd_size = sizeof(cmds.ars_cap);
1692 	cmds.ars_cap = (struct nd_cmd_ars_cap) {
1693 		.status = ND_ARS_PERSISTENT << 16,
1694 	};
1695 	offset = offsetof(struct nd_cmd_ars_cap, status);
1696 	rc = setup_result(cmds.buf + offset, cmd_size - offset);
1697 	if (rc)
1698 		return rc;
1699 	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_CAP,
1700 			cmds.buf, cmd_size, &cmd_rc);
1701 
1702 	if (rc < 0 || cmd_rc) {
1703 		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
1704 				__func__, __LINE__, rc, cmd_rc);
1705 		return -EIO;
1706 	}
1707 
1708 
1709 	/* test ars_status with 'status' trimmed from 'out_length' */
1710 	cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
1711 	cmds.ars_stat = (struct nd_cmd_ars_status) {
1712 		.out_length = cmd_size - 4,
1713 	};
1714 	record = &cmds.ars_stat.records[0];
1715 	*record = (struct nd_ars_record) {
1716 		.length = test_val,
1717 	};
1718 	rc = setup_result(cmds.buf, cmd_size);
1719 	if (rc)
1720 		return rc;
1721 	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
1722 			cmds.buf, cmd_size, &cmd_rc);
1723 
1724 	if (rc < 0 || cmd_rc || record->length != test_val) {
1725 		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
1726 				__func__, __LINE__, rc, cmd_rc);
1727 		return -EIO;
1728 	}
1729 
1730 
1731 	/* test ars_status with 'Output (Size)' including 'status' */
1732 	cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
1733 	cmds.ars_stat = (struct nd_cmd_ars_status) {
1734 		.out_length = cmd_size,
1735 	};
1736 	record = &cmds.ars_stat.records[0];
1737 	*record = (struct nd_ars_record) {
1738 		.length = test_val,
1739 	};
1740 	rc = setup_result(cmds.buf, cmd_size);
1741 	if (rc)
1742 		return rc;
1743 	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
1744 			cmds.buf, cmd_size, &cmd_rc);
1745 
1746 	if (rc < 0 || cmd_rc || record->length != test_val) {
1747 		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
1748 				__func__, __LINE__, rc, cmd_rc);
1749 		return -EIO;
1750 	}
1751 
1752 
1753 	/* test that a non-zero extended status for get_config_size is treated as failure */
1754 	cmd_size = sizeof(cmds.cfg_size);
1755 	cmds.cfg_size = (struct nd_cmd_get_config_size) {
1756 		.status = 1 << 16,
1757 	};
1758 	rc = setup_result(cmds.buf, cmd_size);
1759 	if (rc)
1760 		return rc;
1761 	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
1762 			cmds.buf, cmd_size, &cmd_rc);
1763 
1764 	if (rc < 0 || cmd_rc >= 0) {
1765 		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
1766 				__func__, __LINE__, rc, cmd_rc);
1767 		return -EIO;
1768 	}
1769 
1770 	return 0;
1771 }
1772 
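/*
 * nfit_test_probe() - allocate the per-instance backing resources, generate
 * the NFIT tables via the instance's setup() routine, and register an
 * nvdimm bus on top of them.  Instance 0 additionally re-runs setup with
 * the hotplug DIMM enabled and simulates an ACPI notification.
 */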
1773 static int nfit_test_probe(struct platform_device *pdev)
1774 {
1775 	struct nvdimm_bus_descriptor *nd_desc;
1776 	struct acpi_nfit_desc *acpi_desc;
1777 	struct device *dev = &pdev->dev;
1778 	struct nfit_test *nfit_test;
1779 	struct nfit_mem *nfit_mem;
1780 	union acpi_object *obj;
1781 	int rc;
1782 
1783 	if (strcmp(dev_name(&pdev->dev), "nfit_test.0") == 0) {
1784 		rc = nfit_ctl_test(&pdev->dev);
1785 		if (rc)
1786 			return rc;
1787 	}
1788 
1789 	nfit_test = to_nfit_test(&pdev->dev);
1790 
1791 	/* common alloc */
1792 	if (nfit_test->num_dcr) {
1793 		int num = nfit_test->num_dcr;
1794 
1795 		nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
1796 				GFP_KERNEL);
1797 		nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
1798 				GFP_KERNEL);
1799 		nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
1800 				GFP_KERNEL);
1801 		nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
1802 				GFP_KERNEL);
1803 		nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
1804 				GFP_KERNEL);
1805 		nfit_test->label_dma = devm_kcalloc(dev, num,
1806 				sizeof(dma_addr_t), GFP_KERNEL);
1807 		nfit_test->dcr = devm_kcalloc(dev, num,
1808 				sizeof(struct nfit_test_dcr *), GFP_KERNEL);
1809 		nfit_test->dcr_dma = devm_kcalloc(dev, num,
1810 				sizeof(dma_addr_t), GFP_KERNEL);
1811 		if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
1812 				&& nfit_test->label_dma && nfit_test->dcr
1813 				&& nfit_test->dcr_dma && nfit_test->flush
1814 				&& nfit_test->flush_dma)
1815 			/* pass */;
1816 		else
1817 			return -ENOMEM;
1818 	}
1819 
1820 	if (nfit_test->num_pm) {
1821 		int num = nfit_test->num_pm;
1822 
1823 		nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
1824 				GFP_KERNEL);
1825 		nfit_test->spa_set_dma = devm_kcalloc(dev, num,
1826 				sizeof(dma_addr_t), GFP_KERNEL);
1827 		if (nfit_test->spa_set && nfit_test->spa_set_dma)
1828 			/* pass */;
1829 		else
1830 			return -ENOMEM;
1831 	}
1832 
1833 	/* per-nfit specific alloc */
1834 	if (nfit_test->alloc(nfit_test))
1835 		return -ENOMEM;
1836 
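	/* generate the NFIT and register the nvdimm bus that consumes it */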
1837 	nfit_test->setup(nfit_test);
1838 	acpi_desc = &nfit_test->acpi_desc;
1839 	acpi_nfit_desc_init(acpi_desc, &pdev->dev);
1840 	acpi_desc->blk_do_io = nfit_test_blk_do_io;
1841 	nd_desc = &acpi_desc->nd_desc;
1842 	nd_desc->provider_name = NULL;
1843 	nd_desc->module = THIS_MODULE;
1844 	nd_desc->ndctl = nfit_test_ctl;
1845 
1846 	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
1847 			nfit_test->nfit_size);
1848 	if (rc)
1849 		return rc;
1850 
1851 	rc = devm_add_action_or_reset(&pdev->dev, acpi_nfit_shutdown, acpi_desc);
1852 	if (rc)
1853 		return rc;
1854 
1855 	if (nfit_test->setup != nfit_test0_setup)
1856 		return 0;
1857 
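	/* for nfit_test.0, regenerate the tables with the hotplug DIMM present */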
1858 	nfit_test->setup_hotplug = 1;
1859 	nfit_test->setup(nfit_test);
1860 
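	/* publish the updated tables as a _FIT buffer and signal the change */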
1861 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
1862 	if (!obj)
1863 		return -ENOMEM;
1864 	obj->type = ACPI_TYPE_BUFFER;
1865 	obj->buffer.length = nfit_test->nfit_size;
1866 	obj->buffer.pointer = nfit_test->nfit_buf;
1867 	*(nfit_test->_fit) = obj;
1868 	__acpi_nfit_notify(&pdev->dev, nfit_test, 0x80);
1869 
1870 	/* associate dimm devices with nfit_mem data for notification testing */
1871 	mutex_lock(&acpi_desc->init_mutex);
1872 	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
1873 		u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle;
1874 		int i;
1875 
1876 		for (i = 0; i < NUM_DCR; i++)
1877 			if (nfit_handle == handle[i])
1878 				dev_set_drvdata(nfit_test->dimm_dev[i],
1879 						nfit_mem);
1880 	}
1881 	mutex_unlock(&acpi_desc->init_mutex);
1882 
1883 	return 0;
1884 }
1885 
1886 static int nfit_test_remove(struct platform_device *pdev)
1887 {
1888 	return 0;
1889 }
1890 
1891 static void nfit_test_release(struct device *dev)
1892 {
1893 	struct nfit_test *nfit_test = to_nfit_test(dev);
1894 
1895 	kfree(nfit_test);
1896 }
1897 
1898 static const struct platform_device_id nfit_test_id[] = {
1899 	{ KBUILD_MODNAME },
1900 	{ },
1901 };
1902 
1903 static struct platform_driver nfit_test_driver = {
1904 	.probe = nfit_test_probe,
1905 	.remove = nfit_test_remove,
1906 	.driver = {
1907 		.name = KBUILD_MODNAME,
1908 	},
1909 	.id_table = nfit_test_id,
1910 };
1911 
1912 static __init int nfit_test_init(void)
1913 {
1914 	int rc, i;
1915 
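	/* redirect ACPI handle lookup and DSM evaluation to the test stubs */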
1916 	nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);
1917 
1918 	nfit_test_dimm = class_create(THIS_MODULE, "nfit_test_dimm");
1919 	if (IS_ERR(nfit_test_dimm)) {
1920 		rc = PTR_ERR(nfit_test_dimm);
1921 		goto err_register;
1922 	}
1923 
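	/* instantiate the test platform devices: nfit_test.0 and nfit_test.1 */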
1924 	for (i = 0; i < NUM_NFITS; i++) {
1925 		struct nfit_test *nfit_test;
1926 		struct platform_device *pdev;
1927 
1928 		nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
1929 		if (!nfit_test) {
1930 			rc = -ENOMEM;
1931 			goto err_register;
1932 		}
1933 		INIT_LIST_HEAD(&nfit_test->resources);
1934 		switch (i) {
1935 		case 0:
1936 			nfit_test->num_pm = NUM_PM;
1937 			nfit_test->dcr_idx = 0;
1938 			nfit_test->num_dcr = NUM_DCR;
1939 			nfit_test->alloc = nfit_test0_alloc;
1940 			nfit_test->setup = nfit_test0_setup;
1941 			break;
1942 		case 1:
1943 			nfit_test->num_pm = 2;
1944 			nfit_test->dcr_idx = NUM_DCR;
1945 			nfit_test->num_dcr = 2;
1946 			nfit_test->alloc = nfit_test1_alloc;
1947 			nfit_test->setup = nfit_test1_setup;
1948 			break;
1949 		default:
1950 			rc = -EINVAL;
1951 			goto err_register;
1952 		}
1953 		pdev = &nfit_test->pdev;
1954 		pdev->name = KBUILD_MODNAME;
1955 		pdev->id = i;
1956 		pdev->dev.release = nfit_test_release;
1957 		rc = platform_device_register(pdev);
1958 		if (rc) {
1959 			put_device(&pdev->dev);
1960 			goto err_register;
1961 		}
1962 		get_device(&pdev->dev);
1963 
1964 		rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1965 		if (rc)
1966 			goto err_register;
1967 
1968 		instances[i] = nfit_test;
1969 	}
1970 
1971 	rc = platform_driver_register(&nfit_test_driver);
1972 	if (rc)
1973 		goto err_register;
1974 	return 0;
1975 
1976  err_register:
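	/* unwind: unregister any registered devices, then drop the extra refs */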
1977 	for (i = 0; i < NUM_NFITS; i++)
1978 		if (instances[i])
1979 			platform_device_unregister(&instances[i]->pdev);
1980 	nfit_test_teardown();
1981 	for (i = 0; i < NUM_NFITS; i++)
1982 		if (instances[i])
1983 			put_device(&instances[i]->pdev.dev);
1984 
1985 	return rc;
1986 }
1987 
1988 static __exit void nfit_test_exit(void)
1989 {
1990 	int i;
1991 
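	/* mirror init: unregister devices and driver, then release references */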
1992 	for (i = 0; i < NUM_NFITS; i++)
1993 		platform_device_unregister(&instances[i]->pdev);
1994 	platform_driver_unregister(&nfit_test_driver);
1995 	nfit_test_teardown();
1996 
1997 	for (i = 0; i < NUM_NFITS; i++)
1998 		put_device(&instances[i]->pdev.dev);
1999 	class_destroy(nfit_test_dimm);
2000 }
2001 
2002 module_init(nfit_test_init);
2003 module_exit(nfit_test_exit);
2004 MODULE_LICENSE("GPL v2");
2005 MODULE_AUTHOR("Intel Corporation");
2006