xref: /openbmc/linux/tools/testing/nvdimm/test/nfit.c (revision 68198dca)
1 /*
2  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/platform_device.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/workqueue.h>
17 #include <linux/libnvdimm.h>
18 #include <linux/vmalloc.h>
19 #include <linux/device.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/ndctl.h>
23 #include <linux/sizes.h>
24 #include <linux/list.h>
25 #include <linux/slab.h>
26 #include <nd-core.h>
27 #include <nfit.h>
28 #include <nd.h>
29 #include "nfit_test.h"
30 
31 /*
32  * Generate an NFIT table to describe the following topology:
33  *
34  * BUS0: Interleaved PMEM regions, aliased with BLK regions
35  *
36  *                     (a)                       (b)            DIMM   BLK-REGION
37  *           +----------+--------------+----------+---------+
38  * +------+  |  blk2.0  |     pm0.0    |  blk2.1  |  pm1.0  |    0      region2
39  * | imc0 +--+- - - - - region0 - - - -+----------+         +
40  * +--+---+  |  blk3.0  |     pm0.0    |  blk3.1  |  pm1.0  |    1      region3
41  *    |      +----------+--------------v----------v         v
42  * +--+---+                            |                    |
43  * | cpu0 |                                    region1
44  * +--+---+                            |                    |
45  *    |      +-------------------------^----------^         ^
46  * +--+---+  |                 blk4.0             |  pm1.0  |    2      region4
47  * | imc1 +--+-------------------------+----------+         +
48  * +------+  |                 blk5.0             |  pm1.0  |    3      region5
49  *           +-------------------------+----------+-+-------+
50  *
51  * +--+---+
52  * | cpu1 |
53  * +--+---+                   (Hotplug DIMM)
54  *    |      +----------------------------------------------+
55  * +--+---+  |                 blk6.0/pm7.0                 |    4      region6/7
56  * | imc0 +--+----------------------------------------------+
57  * +------+
58  *
59  *
60  * *) In this layout we have four dimms and two memory controllers in one
61  *    socket.  Each unique interface (BLK or PMEM) to DPA space
62  *    is identified by a region device with a dynamically assigned id.
63  *
64  * *) The first portion of dimm0 and dimm1 is interleaved as REGION0.
65  *    A single PMEM namespace "pm0.0" is created using half of the
66  *    REGION0 SPA-range.  REGION0 spans dimm0 and dimm1.  PMEM namespaces
67  *    allocate from the bottom of a region.  The unallocated
68  *    portion of REGION0 aliases with REGION2 and REGION3.  That
69  *    unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
70  *    "blk3.0") starting at the base of each DIMM to offset (a) in those
71  *    DIMMs.  "pm0.0", "blk2.0" and "blk3.0" are free-form readable
72  *    names that can be assigned to a namespace.
73  *
74  * *) In the last portion of dimm0 and dimm1 we have an interleaved
75  *    SPA range, REGION1, that spans those two dimms as well as dimm2
76  *    and dimm3.  Some of REGION1 is allocated to a PMEM namespace named
77  *    "pm1.0"; the rest is reclaimed in 4 BLK namespaces (one for each
78  *    dimm in the interleave set): "blk2.1", "blk3.1", "blk4.0", and
79  *    "blk5.0".
80  *
81  * *) The portions of dimm2 and dimm3 that do not participate in the
82  *    REGION1 interleaved SPA range (i.e. the DPA addresses below offset
83  *    (b)) are also included in the "blk4.0" and "blk5.0" namespaces.
84  *    Note that BLK namespaces need not be contiguous in DPA-space, and
85  *    can consume aliased capacity from multiple interleave sets.
86  *
87  * BUS1: Legacy NVDIMM (single contiguous range)
88  *
89  *  region2
90  * +---------------------+
91  * |---------------------|
92  * ||       pm2.0       ||
93  * |---------------------|
94  * +---------------------+
95  *
96  * *) An NFIT table may describe a simple system-physical-address range
97  *    with no BLK aliasing.  This type of region may optionally
98  *    reference an NVDIMM.
99  */
100 enum {
101 	NUM_PM  = 3,
102 	NUM_DCR = 5,
103 	NUM_HINTS = 8,
104 	NUM_BDW = NUM_DCR,
105 	NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
106 	NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */ + 1 /* spa11 iset */,
107 	DIMM_SIZE = SZ_32M,
108 	LABEL_SIZE = SZ_128K,
109 	SPA_VCD_SIZE = SZ_4M,
110 	SPA0_SIZE = DIMM_SIZE,
111 	SPA1_SIZE = DIMM_SIZE*2,
112 	SPA2_SIZE = DIMM_SIZE,
113 	BDW_SIZE = 64 << 8,
114 	DCR_SIZE = 12,
115 	NUM_NFITS = 2, /* permit testing multiple NFITs per system */
116 };
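
/*
 * For reference, the constants above work out as follows: NUM_SPA is
 * 3 PMEM ranges + 5 control regions + 5 block data windows = 13 SPA
 * entries, BDW_SIZE is 64 << 8 = 16K (the size of the aperture in
 * struct nfit_test_dcr below), and DCR_SIZE is 12 bytes: an 8-byte
 * command register plus a 4-byte status register, matching the
 * command/status offsets and sizes programmed into the BLK control
 * regions in nfit_test0_setup().
 */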
117 
118 struct nfit_test_dcr {
119 	__le64 bdw_addr;
120 	__le32 bdw_status;
121 	__u8 aperature[BDW_SIZE];
122 };
123 
124 #define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
125 	(((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
126 	 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
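
/*
 * Worked example of the handle encoding: NFIT_DIMM_HANDLE(0, 0, 1, 0, 1)
 * packs node 0, socket 0, imc 1, channel 0, dimm 1 into 0x101 (handle[3]
 * below), while the DIMMs on the second test bus use node 1, e.g.
 * NFIT_DIMM_HANDLE(1, 0, 0, 0, 0) == 0x10000 (handle[5]).
 */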
127 
128 static u32 handle[] = {
129 	[0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
130 	[1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
131 	[2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
132 	[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
133 	[4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
134 	[5] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
135 	[6] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 1),
136 };
137 
138 static unsigned long dimm_fail_cmd_flags[NUM_DCR];
139 
140 struct nfit_test {
141 	struct acpi_nfit_desc acpi_desc;
142 	struct platform_device pdev;
143 	struct list_head resources;
144 	void *nfit_buf;
145 	dma_addr_t nfit_dma;
146 	size_t nfit_size;
147 	int dcr_idx;
148 	int num_dcr;
149 	int num_pm;
150 	void **dimm;
151 	dma_addr_t *dimm_dma;
152 	void **flush;
153 	dma_addr_t *flush_dma;
154 	void **label;
155 	dma_addr_t *label_dma;
156 	void **spa_set;
157 	dma_addr_t *spa_set_dma;
158 	struct nfit_test_dcr **dcr;
159 	dma_addr_t *dcr_dma;
160 	int (*alloc)(struct nfit_test *t);
161 	void (*setup)(struct nfit_test *t);
162 	int setup_hotplug;
163 	union acpi_object **_fit;
164 	dma_addr_t _fit_dma;
165 	struct ars_state {
166 		struct nd_cmd_ars_status *ars_status;
167 		unsigned long deadline;
168 		spinlock_t lock;
169 	} ars_state;
170 	struct device *dimm_dev[NUM_DCR];
171 	struct badrange badrange;
172 	struct work_struct work;
173 };
174 
175 static struct workqueue_struct *nfit_wq;
176 
177 static struct nfit_test *to_nfit_test(struct device *dev)
178 {
179 	struct platform_device *pdev = to_platform_device(dev);
180 
181 	return container_of(pdev, struct nfit_test, pdev);
182 }
183 
184 static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
185 		unsigned int buf_len)
186 {
187 	if (buf_len < sizeof(*nd_cmd))
188 		return -EINVAL;
189 
190 	nd_cmd->status = 0;
191 	nd_cmd->config_size = LABEL_SIZE;
192 	nd_cmd->max_xfer = SZ_4K;
193 
194 	return 0;
195 }
196 
197 static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
198 		*nd_cmd, unsigned int buf_len, void *label)
199 {
200 	unsigned int len, offset = nd_cmd->in_offset;
201 	int rc;
202 
203 	if (buf_len < sizeof(*nd_cmd))
204 		return -EINVAL;
205 	if (offset >= LABEL_SIZE)
206 		return -EINVAL;
207 	if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
208 		return -EINVAL;
209 
210 	nd_cmd->status = 0;
211 	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
212 	memcpy(nd_cmd->out_buf, label + offset, len);
213 	rc = buf_len - sizeof(*nd_cmd) - len;
214 
215 	return rc;
216 }
217 
218 static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
219 		unsigned int buf_len, void *label)
220 {
221 	unsigned int len, offset = nd_cmd->in_offset;
222 	u32 *status;
223 	int rc;
224 
225 	if (buf_len < sizeof(*nd_cmd))
226 		return -EINVAL;
227 	if (offset >= LABEL_SIZE)
228 		return -EINVAL;
229 	if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
230 		return -EINVAL;
231 
232 	status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
233 	*status = 0;
234 	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
235 	memcpy(label + offset, nd_cmd->in_buf, len);
236 	rc = buf_len - sizeof(*nd_cmd) - (len + 4);
237 
238 	return rc;
239 }
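
/*
 * Note on the label commands above: get_config_size advertises a
 * LABEL_SIZE (128K) config area with a 4K max transfer, and the
 * get/set_config_data handlers simply window into the per-dimm label
 * buffer allocated in nfit_test{0,1}_alloc().  For a write, the 4-byte
 * status dword trails the payload, hence the "+ 4" in both the length
 * check and the residual byte count that is returned.
 */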
240 
241 #define NFIT_TEST_CLEAR_ERR_UNIT 256
242 
243 static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
244 		unsigned int buf_len)
245 {
246 	int ars_recs;
247 
248 	if (buf_len < sizeof(*nd_cmd))
249 		return -EINVAL;
250 
251 	/* for testing, only store up to n records that fit within 4k */
252 	ars_recs = SZ_4K / sizeof(struct nd_ars_record);
253 
254 	nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
255 		+ ars_recs * sizeof(struct nd_ars_record);
256 	nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
257 	nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT;
258 
259 	return 0;
260 }
261 
262 static void post_ars_status(struct ars_state *ars_state,
263 		struct badrange *badrange, u64 addr, u64 len)
264 {
265 	struct nd_cmd_ars_status *ars_status;
266 	struct nd_ars_record *ars_record;
267 	struct badrange_entry *be;
268 	u64 end = addr + len - 1;
269 	int i = 0;
270 
271 	ars_state->deadline = jiffies + 1*HZ;
272 	ars_status = ars_state->ars_status;
273 	ars_status->status = 0;
274 	ars_status->address = addr;
275 	ars_status->length = len;
276 	ars_status->type = ND_ARS_PERSISTENT;
277 
278 	spin_lock(&badrange->lock);
279 	list_for_each_entry(be, &badrange->list, list) {
280 		u64 be_end = be->start + be->length - 1;
281 		u64 rstart, rend;
282 
283 		/* skip entries outside the range */
284 		if (be_end < addr || be->start > end)
285 			continue;
286 
287 		rstart = (be->start < addr) ? addr : be->start;
288 		rend = (be_end < end) ? be_end : end;
289 		ars_record = &ars_status->records[i];
290 		ars_record->handle = 0;
291 		ars_record->err_address = rstart;
292 		ars_record->length = rend - rstart + 1;
293 		i++;
294 	}
295 	spin_unlock(&badrange->lock);
296 	ars_status->num_records = i;
297 	ars_status->out_length = sizeof(struct nd_cmd_ars_status)
298 		+ i * sizeof(struct nd_ars_record);
299 }
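
/*
 * post_ars_status() converts any injected badrange entries overlapping
 * the scrubbed window into ARS records, clipped to the window: for
 * example, an entry starting 0x100 below @addr with length 0x300 is
 * reported as a single record at @addr with length 0x200.  It also arms
 * a one second (1*HZ) deadline during which the ARS start/status
 * handlers below report BUSY, exercising the kernel's scrub polling.
 */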
300 
301 static int nfit_test_cmd_ars_start(struct nfit_test *t,
302 		struct ars_state *ars_state,
303 		struct nd_cmd_ars_start *ars_start, unsigned int buf_len,
304 		int *cmd_rc)
305 {
306 	if (buf_len < sizeof(*ars_start))
307 		return -EINVAL;
308 
309 	spin_lock(&ars_state->lock);
310 	if (time_before(jiffies, ars_state->deadline)) {
311 		ars_start->status = NFIT_ARS_START_BUSY;
312 		*cmd_rc = -EBUSY;
313 	} else {
314 		ars_start->status = 0;
315 		ars_start->scrub_time = 1;
316 		post_ars_status(ars_state, &t->badrange, ars_start->address,
317 				ars_start->length);
318 		*cmd_rc = 0;
319 	}
320 	spin_unlock(&ars_state->lock);
321 
322 	return 0;
323 }
324 
325 static int nfit_test_cmd_ars_status(struct ars_state *ars_state,
326 		struct nd_cmd_ars_status *ars_status, unsigned int buf_len,
327 		int *cmd_rc)
328 {
329 	if (buf_len < ars_state->ars_status->out_length)
330 		return -EINVAL;
331 
332 	spin_lock(&ars_state->lock);
333 	if (time_before(jiffies, ars_state->deadline)) {
334 		memset(ars_status, 0, buf_len);
335 		ars_status->status = NFIT_ARS_STATUS_BUSY;
336 		ars_status->out_length = sizeof(*ars_status);
337 		*cmd_rc = -EBUSY;
338 	} else {
339 		memcpy(ars_status, ars_state->ars_status,
340 				ars_state->ars_status->out_length);
341 		*cmd_rc = 0;
342 	}
343 	spin_unlock(&ars_state->lock);
344 	return 0;
345 }
346 
347 static int nfit_test_cmd_clear_error(struct nfit_test *t,
348 		struct nd_cmd_clear_error *clear_err,
349 		unsigned int buf_len, int *cmd_rc)
350 {
351 	const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1;
352 	if (buf_len < sizeof(*clear_err))
353 		return -EINVAL;
354 
355 	if ((clear_err->address & mask) || (clear_err->length & mask))
356 		return -EINVAL;
357 
358 	badrange_forget(&t->badrange, clear_err->address, clear_err->length);
359 	clear_err->status = 0;
360 	clear_err->cleared = clear_err->length;
361 	*cmd_rc = 0;
362 	return 0;
363 }
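
/*
 * Clear-error emulation: the address and length must be aligned to
 * NFIT_TEST_CLEAR_ERR_UNIT (256 bytes), and "clearing" simply drops the
 * range from the badrange list so a subsequent scrub no longer reports
 * it.  The full requested length is always reported back as cleared.
 */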
364 
365 struct region_search_spa {
366 	u64 addr;
367 	struct nd_region *region;
368 };
369 
370 static int is_region_device(struct device *dev)
371 {
372 	return !strncmp(dev->kobj.name, "region", 6);
373 }
374 
375 static int nfit_test_search_region_spa(struct device *dev, void *data)
376 {
377 	struct region_search_spa *ctx = data;
378 	struct nd_region *nd_region;
379 	resource_size_t ndr_end;
380 
381 	if (!is_region_device(dev))
382 		return 0;
383 
384 	nd_region = to_nd_region(dev);
385 	ndr_end = nd_region->ndr_start + nd_region->ndr_size;
386 
387 	if (ctx->addr >= nd_region->ndr_start && ctx->addr < ndr_end) {
388 		ctx->region = nd_region;
389 		return 1;
390 	}
391 
392 	return 0;
393 }
394 
395 static int nfit_test_search_spa(struct nvdimm_bus *bus,
396 		struct nd_cmd_translate_spa *spa)
397 {
398 	int ret;
399 	struct nd_region *nd_region = NULL;
400 	struct nvdimm *nvdimm = NULL;
401 	struct nd_mapping *nd_mapping = NULL;
402 	struct region_search_spa ctx = {
403 		.addr = spa->spa,
404 		.region = NULL,
405 	};
406 	u64 dpa;
407 
408 	ret = device_for_each_child(&bus->dev, &ctx,
409 				nfit_test_search_region_spa);
410 
411 	if (!ret)
412 		return -ENODEV;
413 
414 	nd_region = ctx.region;
415 
416 	dpa = ctx.addr - nd_region->ndr_start;
417 
418 	/*
419 	 * last dimm is selected for test
420 	 */
421 	nd_mapping = &nd_region->mapping[nd_region->ndr_mappings - 1];
422 	nvdimm = nd_mapping->nvdimm;
423 
424 	spa->devices[0].nfit_device_handle = handle[nvdimm->id];
425 	spa->num_nvdimms = 1;
426 	spa->devices[0].dpa = dpa;
427 
428 	return 0;
429 }
430 
431 static int nfit_test_cmd_translate_spa(struct nvdimm_bus *bus,
432 		struct nd_cmd_translate_spa *spa, unsigned int buf_len)
433 {
434 	if (buf_len < spa->translate_length)
435 		return -EINVAL;
436 
437 	if (nfit_test_search_spa(bus, spa) < 0 || !spa->num_nvdimms)
438 		spa->status = 2;
439 
440 	return 0;
441 }
442 
443 static int nfit_test_cmd_smart(struct nd_cmd_smart *smart, unsigned int buf_len)
444 {
445 	static const struct nd_smart_payload smart_data = {
446 		.flags = ND_SMART_HEALTH_VALID | ND_SMART_TEMP_VALID
447 			| ND_SMART_SPARES_VALID | ND_SMART_ALARM_VALID
448 			| ND_SMART_USED_VALID | ND_SMART_SHUTDOWN_VALID,
449 		.health = ND_SMART_NON_CRITICAL_HEALTH,
450 		.temperature = 23 * 16,
451 		.spares = 75,
452 		.alarm_flags = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
453 		.life_used = 5,
454 		.shutdown_state = 0,
455 		.vendor_size = 0,
456 	};
457 
458 	if (buf_len < sizeof(*smart))
459 		return -EINVAL;
460 	memcpy(smart->data, &smart_data, sizeof(smart_data));
461 	return 0;
462 }
463 
464 static int nfit_test_cmd_smart_threshold(struct nd_cmd_smart_threshold *smart_t,
465 		unsigned int buf_len)
466 {
467 	static const struct nd_smart_threshold_payload smart_t_data = {
468 		.alarm_control = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
469 		.temperature = 40 * 16,
470 		.spares = 5,
471 	};
472 
473 	if (buf_len < sizeof(*smart_t))
474 		return -EINVAL;
475 	memcpy(smart_t->data, &smart_t_data, sizeof(smart_t_data));
476 	return 0;
477 }
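
/*
 * The SMART payloads above are canned values: a non-critical health
 * state, 75% spare capacity against a 5% alarm threshold, and 5%
 * lifetime used.  The temperature fields appear to use a 1/16 degree C
 * fixed-point encoding, so 23 * 16 reads back as 23C with an alarm
 * threshold of 40C.
 */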
478 
479 static void uc_error_notify(struct work_struct *work)
480 {
481 	struct nfit_test *t = container_of(work, typeof(*t), work);
482 
483 	__acpi_nfit_notify(&t->pdev.dev, t, NFIT_NOTIFY_UC_MEMORY_ERROR);
484 }
485 
486 static int nfit_test_cmd_ars_error_inject(struct nfit_test *t,
487 		struct nd_cmd_ars_err_inj *err_inj, unsigned int buf_len)
488 {
489 	int rc;
490 
491 	if (buf_len != sizeof(*err_inj)) {
492 		rc = -EINVAL;
493 		goto err;
494 	}
495 
496 	if (err_inj->err_inj_spa_range_length <= 0) {
497 		rc = -EINVAL;
498 		goto err;
499 	}
500 
501 	rc =  badrange_add(&t->badrange, err_inj->err_inj_spa_range_base,
502 			err_inj->err_inj_spa_range_length);
503 	if (rc < 0)
504 		goto err;
505 
506 	if (err_inj->err_inj_options & (1 << ND_ARS_ERR_INJ_OPT_NOTIFY))
507 		queue_work(nfit_wq, &t->work);
508 
509 	err_inj->status = 0;
510 	return 0;
511 
512 err:
513 	err_inj->status = NFIT_ARS_INJECT_INVALID;
514 	return rc;
515 }
516 
517 static int nfit_test_cmd_ars_inject_clear(struct nfit_test *t,
518 		struct nd_cmd_ars_err_inj_clr *err_clr, unsigned int buf_len)
519 {
520 	int rc;
521 
522 	if (buf_len != sizeof(*err_clr)) {
523 		rc = -EINVAL;
524 		goto err;
525 	}
526 
527 	if (err_clr->err_inj_clr_spa_range_length <= 0) {
528 		rc = -EINVAL;
529 		goto err;
530 	}
531 
532 	badrange_forget(&t->badrange, err_clr->err_inj_clr_spa_range_base,
533 			err_clr->err_inj_clr_spa_range_length);
534 
535 	err_clr->status = 0;
536 	return 0;
537 
538 err:
539 	err_clr->status = NFIT_ARS_INJECT_INVALID;
540 	return rc;
541 }
542 
543 static int nfit_test_cmd_ars_inject_status(struct nfit_test *t,
544 		struct nd_cmd_ars_err_inj_stat *err_stat,
545 		unsigned int buf_len)
546 {
547 	struct badrange_entry *be;
548 	int max = SZ_4K / sizeof(struct nd_error_stat_query_record);
549 	int i = 0;
550 
551 	err_stat->status = 0;
552 	spin_lock(&t->badrange.lock);
553 	list_for_each_entry(be, &t->badrange.list, list) {
554 		err_stat->record[i].err_inj_stat_spa_range_base = be->start;
555 		err_stat->record[i].err_inj_stat_spa_range_length = be->length;
556 		i++;
557 		if (i >= max)
558 			break;
559 	}
560 	spin_unlock(&t->badrange.lock);
561 	err_stat->inj_err_rec_count = i;
562 
563 	return 0;
564 }
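
/*
 * Together the three handlers above emulate a vendor error-injection
 * interface: NFIT_CMD_ARS_INJECT_SET records a poison range in
 * t->badrange (optionally scheduling an NFIT_NOTIFY_UC_MEMORY_ERROR
 * notification via nfit_wq), NFIT_CMD_ARS_INJECT_CLEAR forgets it, and
 * NFIT_CMD_ARS_INJECT_GET lists up to a page worth of currently
 * injected records.  A later scrub of an overlapping SPA range then
 * reports those ranges as bad (see post_ars_status() above).
 */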
565 
566 static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
567 		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
568 		unsigned int buf_len, int *cmd_rc)
569 {
570 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
571 	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
572 	unsigned int func = cmd;
573 	int i, rc = 0, __cmd_rc;
574 
575 	if (!cmd_rc)
576 		cmd_rc = &__cmd_rc;
577 	*cmd_rc = 0;
578 
579 	if (nvdimm) {
580 		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
581 		unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
582 
583 		if (!nfit_mem)
584 			return -ENOTTY;
585 
586 		if (cmd == ND_CMD_CALL) {
587 			struct nd_cmd_pkg *call_pkg = buf;
588 
589 			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
590 			buf = (void *) call_pkg->nd_payload;
591 			func = call_pkg->nd_command;
592 			if (call_pkg->nd_family != nfit_mem->family)
593 				return -ENOTTY;
594 		}
595 
596 		if (!test_bit(cmd, &cmd_mask)
597 				|| !test_bit(func, &nfit_mem->dsm_mask))
598 			return -ENOTTY;
599 
600 		/* lookup label space for the given dimm */
601 		for (i = 0; i < ARRAY_SIZE(handle); i++)
602 			if (__to_nfit_memdev(nfit_mem)->device_handle ==
603 					handle[i])
604 				break;
605 		if (i >= ARRAY_SIZE(handle))
606 			return -ENXIO;
607 
608 		if ((1 << func) & dimm_fail_cmd_flags[i])
609 			return -EIO;
610 
611 		switch (func) {
612 		case ND_CMD_GET_CONFIG_SIZE:
613 			rc = nfit_test_cmd_get_config_size(buf, buf_len);
614 			break;
615 		case ND_CMD_GET_CONFIG_DATA:
616 			rc = nfit_test_cmd_get_config_data(buf, buf_len,
617 				t->label[i - t->dcr_idx]);
618 			break;
619 		case ND_CMD_SET_CONFIG_DATA:
620 			rc = nfit_test_cmd_set_config_data(buf, buf_len,
621 				t->label[i - t->dcr_idx]);
622 			break;
623 		case ND_CMD_SMART:
624 			rc = nfit_test_cmd_smart(buf, buf_len);
625 			break;
626 		case ND_CMD_SMART_THRESHOLD:
627 			rc = nfit_test_cmd_smart_threshold(buf, buf_len);
628 			device_lock(&t->pdev.dev);
629 			__acpi_nvdimm_notify(t->dimm_dev[i], 0x81);
630 			device_unlock(&t->pdev.dev);
631 			break;
632 		default:
633 			return -ENOTTY;
634 		}
635 	} else {
636 		struct ars_state *ars_state = &t->ars_state;
637 		struct nd_cmd_pkg *call_pkg = buf;
638 
639 		if (!nd_desc)
640 			return -ENOTTY;
641 
642 		if (cmd == ND_CMD_CALL) {
643 			func = call_pkg->nd_command;
644 
645 			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
646 			buf = (void *) call_pkg->nd_payload;
647 
648 			switch (func) {
649 			case NFIT_CMD_TRANSLATE_SPA:
650 				rc = nfit_test_cmd_translate_spa(
651 					acpi_desc->nvdimm_bus, buf, buf_len);
652 				return rc;
653 			case NFIT_CMD_ARS_INJECT_SET:
654 				rc = nfit_test_cmd_ars_error_inject(t, buf,
655 					buf_len);
656 				return rc;
657 			case NFIT_CMD_ARS_INJECT_CLEAR:
658 				rc = nfit_test_cmd_ars_inject_clear(t, buf,
659 					buf_len);
660 				return rc;
661 			case NFIT_CMD_ARS_INJECT_GET:
662 				rc = nfit_test_cmd_ars_inject_status(t, buf,
663 					buf_len);
664 				return rc;
665 			default:
666 				return -ENOTTY;
667 			}
668 		}
669 
670 		if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask))
671 			return -ENOTTY;
672 
673 		switch (func) {
674 		case ND_CMD_ARS_CAP:
675 			rc = nfit_test_cmd_ars_cap(buf, buf_len);
676 			break;
677 		case ND_CMD_ARS_START:
678 			rc = nfit_test_cmd_ars_start(t, ars_state, buf,
679 					buf_len, cmd_rc);
680 			break;
681 		case ND_CMD_ARS_STATUS:
682 			rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len,
683 					cmd_rc);
684 			break;
685 		case ND_CMD_CLEAR_ERROR:
686 			rc = nfit_test_cmd_clear_error(t, buf, buf_len, cmd_rc);
687 			break;
688 		default:
689 			return -ENOTTY;
690 		}
691 	}
692 
693 	return rc;
694 }
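
/*
 * nfit_test_ctl() implements the nvdimm_bus_descriptor ->ndctl hook for
 * the test bus: per-dimm commands (label config, SMART) are routed by
 * device handle to that dimm's backing store, while bus-level commands
 * cover ARS, clear-error and, via ND_CMD_CALL, the NFIT_CMD_* injection
 * calls.  The dimm_fail_cmd_flags[] check lets user space force
 * failures, e.g. writing the value (1 << ND_CMD_GET_CONFIG_DATA) to a
 * test_dimmN device's "fail_cmd" attribute makes label reads on that
 * dimm fail with -EIO until the flag is cleared.
 */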
695 
696 static DEFINE_SPINLOCK(nfit_test_lock);
697 static struct nfit_test *instances[NUM_NFITS];
698 
699 static void release_nfit_res(void *data)
700 {
701 	struct nfit_test_resource *nfit_res = data;
702 
703 	spin_lock(&nfit_test_lock);
704 	list_del(&nfit_res->list);
705 	spin_unlock(&nfit_test_lock);
706 
707 	vfree(nfit_res->buf);
708 	kfree(nfit_res);
709 }
710 
711 static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
712 		void *buf)
713 {
714 	struct device *dev = &t->pdev.dev;
715 	struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
716 			GFP_KERNEL);
717 	int rc;
718 
719 	if (!buf || !nfit_res)
720 		goto err;
721 	rc = devm_add_action(dev, release_nfit_res, nfit_res);
722 	if (rc)
723 		goto err;
724 	INIT_LIST_HEAD(&nfit_res->list);
725 	memset(buf, 0, size);
726 	nfit_res->dev = dev;
727 	nfit_res->buf = buf;
728 	nfit_res->res.start = *dma;
729 	nfit_res->res.end = *dma + size - 1;
730 	nfit_res->res.name = "NFIT";
731 	spin_lock_init(&nfit_res->lock);
732 	INIT_LIST_HEAD(&nfit_res->requests);
733 	spin_lock(&nfit_test_lock);
734 	list_add(&nfit_res->list, &t->resources);
735 	spin_unlock(&nfit_test_lock);
736 
737 	return nfit_res->buf;
738  err:
739 	if (buf)
740 		vfree(buf);
741 	kfree(nfit_res);
742 	return NULL;
743 }
744 
745 static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
746 {
747 	void *buf = vmalloc(size);
748 
749 	*dma = (unsigned long) buf;
750 	return __test_alloc(t, size, dma, buf);
751 }
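
/*
 * test_alloc() hands out vmalloc()'d buffers whose kernel virtual
 * address doubles as the fake "physical" address advertised in the
 * NFIT, which is why nfit_test_lookup() below accepts an address in
 * either space and resolves it to the same nfit_test_resource.
 */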
752 
753 static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
754 {
755 	int i;
756 
757 	for (i = 0; i < ARRAY_SIZE(instances); i++) {
758 		struct nfit_test_resource *n, *nfit_res = NULL;
759 		struct nfit_test *t = instances[i];
760 
761 		if (!t)
762 			continue;
763 		spin_lock(&nfit_test_lock);
764 		list_for_each_entry(n, &t->resources, list) {
765 			if (addr >= n->res.start && (addr < n->res.start
766 						+ resource_size(&n->res))) {
767 				nfit_res = n;
768 				break;
769 			} else if (addr >= (unsigned long) n->buf
770 					&& (addr < (unsigned long) n->buf
771 						+ resource_size(&n->res))) {
772 				nfit_res = n;
773 				break;
774 			}
775 		}
776 		spin_unlock(&nfit_test_lock);
777 		if (nfit_res)
778 			return nfit_res;
779 	}
780 
781 	return NULL;
782 }
783 
784 static int ars_state_init(struct device *dev, struct ars_state *ars_state)
785 {
786 	/* for testing, only store up to n records that fit within 4k */
787 	ars_state->ars_status = devm_kzalloc(dev,
788 			sizeof(struct nd_cmd_ars_status) + SZ_4K, GFP_KERNEL);
789 	if (!ars_state->ars_status)
790 		return -ENOMEM;
791 	spin_lock_init(&ars_state->lock);
792 	return 0;
793 }
794 
795 static void put_dimms(void *data)
796 {
797 	struct device **dimm_dev = data;
798 	int i;
799 
800 	for (i = 0; i < NUM_DCR; i++)
801 		if (dimm_dev[i])
802 			device_unregister(dimm_dev[i]);
803 }
804 
805 static struct class *nfit_test_dimm;
806 
807 static int dimm_name_to_id(struct device *dev)
808 {
809 	int dimm;
810 
811 	if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1
812 			|| dimm >= NUM_DCR || dimm < 0)
813 		return -ENXIO;
814 	return dimm;
815 }
816 
817 
818 static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
819 		char *buf)
820 {
821 	int dimm = dimm_name_to_id(dev);
822 
823 	if (dimm < 0)
824 		return dimm;
825 
826 	return sprintf(buf, "%#x\n", handle[dimm]);
827 }
828 static DEVICE_ATTR_RO(handle);
829 
830 static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
831 		char *buf)
832 {
833 	int dimm = dimm_name_to_id(dev);
834 
835 	if (dimm < 0)
836 		return dimm;
837 
838 	return sprintf(buf, "%#lx\n", dimm_fail_cmd_flags[dimm]);
839 }
840 
841 static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
842 		const char *buf, size_t size)
843 {
844 	int dimm = dimm_name_to_id(dev);
845 	unsigned long val;
846 	ssize_t rc;
847 
848 	if (dimm < 0)
849 		return dimm;
850 
851 	rc = kstrtoul(buf, 0, &val);
852 	if (rc)
853 		return rc;
854 
855 	dimm_fail_cmd_flags[dimm] = val;
856 	return size;
857 }
858 static DEVICE_ATTR_RW(fail_cmd);
859 
860 static struct attribute *nfit_test_dimm_attributes[] = {
861 	&dev_attr_fail_cmd.attr,
862 	&dev_attr_handle.attr,
863 	NULL,
864 };
865 
866 static struct attribute_group nfit_test_dimm_attribute_group = {
867 	.attrs = nfit_test_dimm_attributes,
868 };
869 
870 static const struct attribute_group *nfit_test_dimm_attribute_groups[] = {
871 	&nfit_test_dimm_attribute_group,
872 	NULL,
873 };
874 
875 static int nfit_test0_alloc(struct nfit_test *t)
876 {
877 	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
878 			+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
879 			+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
880 			+ offsetof(struct acpi_nfit_control_region,
881 					window_size) * NUM_DCR
882 			+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
883 			+ (sizeof(struct acpi_nfit_flush_address)
884 					+ sizeof(u64) * NUM_HINTS) * NUM_DCR;
885 	int i;
886 
887 	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
888 	if (!t->nfit_buf)
889 		return -ENOMEM;
890 	t->nfit_size = nfit_size;
891 
892 	t->spa_set[0] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[0]);
893 	if (!t->spa_set[0])
894 		return -ENOMEM;
895 
896 	t->spa_set[1] = test_alloc(t, SPA1_SIZE, &t->spa_set_dma[1]);
897 	if (!t->spa_set[1])
898 		return -ENOMEM;
899 
900 	t->spa_set[2] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[2]);
901 	if (!t->spa_set[2])
902 		return -ENOMEM;
903 
904 	for (i = 0; i < t->num_dcr; i++) {
905 		t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
906 		if (!t->dimm[i])
907 			return -ENOMEM;
908 
909 		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
910 		if (!t->label[i])
911 			return -ENOMEM;
912 		sprintf(t->label[i], "label%d", i);
913 
914 		t->flush[i] = test_alloc(t, max(PAGE_SIZE,
915 					sizeof(u64) * NUM_HINTS),
916 				&t->flush_dma[i]);
917 		if (!t->flush[i])
918 			return -ENOMEM;
919 	}
920 
921 	for (i = 0; i < t->num_dcr; i++) {
922 		t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
923 		if (!t->dcr[i])
924 			return -ENOMEM;
925 	}
926 
927 	t->_fit = test_alloc(t, sizeof(union acpi_object **), &t->_fit_dma);
928 	if (!t->_fit)
929 		return -ENOMEM;
930 
931 	if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t->dimm_dev))
932 		return -ENOMEM;
933 	for (i = 0; i < NUM_DCR; i++) {
934 		t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm,
935 				&t->pdev.dev, 0, NULL,
936 				nfit_test_dimm_attribute_groups,
937 				"test_dimm%d", i);
938 		if (!t->dimm_dev[i])
939 			return -ENOMEM;
940 	}
941 
942 	return ars_state_init(&t->pdev.dev, &t->ars_state);
943 }
944 
945 static int nfit_test1_alloc(struct nfit_test *t)
946 {
947 	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * 2
948 		+ sizeof(struct acpi_nfit_memory_map) * 2
949 		+ offsetof(struct acpi_nfit_control_region, window_size) * 2;
950 	int i;
951 
952 	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
953 	if (!t->nfit_buf)
954 		return -ENOMEM;
955 	t->nfit_size = nfit_size;
956 
957 	t->spa_set[0] = test_alloc(t, SPA2_SIZE, &t->spa_set_dma[0]);
958 	if (!t->spa_set[0])
959 		return -ENOMEM;
960 
961 	for (i = 0; i < t->num_dcr; i++) {
962 		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
963 		if (!t->label[i])
964 			return -ENOMEM;
965 		sprintf(t->label[i], "label%d", i);
966 	}
967 
968 	t->spa_set[1] = test_alloc(t, SPA_VCD_SIZE, &t->spa_set_dma[1]);
969 	if (!t->spa_set[1])
970 		return -ENOMEM;
971 
972 	return ars_state_init(&t->pdev.dev, &t->ars_state);
973 }
974 
975 static void dcr_common_init(struct acpi_nfit_control_region *dcr)
976 {
977 	dcr->vendor_id = 0xabcd;
978 	dcr->device_id = 0;
979 	dcr->revision_id = 1;
980 	dcr->valid_fields = 1;
981 	dcr->manufacturing_location = 0xa;
982 	dcr->manufacturing_date = cpu_to_be16(2016);
983 }
984 
985 static void nfit_test0_setup(struct nfit_test *t)
986 {
987 	const int flush_hint_size = sizeof(struct acpi_nfit_flush_address)
988 		+ (sizeof(u64) * NUM_HINTS);
989 	struct acpi_nfit_desc *acpi_desc;
990 	struct acpi_nfit_memory_map *memdev;
991 	void *nfit_buf = t->nfit_buf;
992 	struct acpi_nfit_system_address *spa;
993 	struct acpi_nfit_control_region *dcr;
994 	struct acpi_nfit_data_region *bdw;
995 	struct acpi_nfit_flush_address *flush;
996 	unsigned int offset, i;
997 
998 	/*
999 	 * spa0 (interleave first half of dimm0 and dimm1, note storage
1000 	 * does not actually alias the related block-data-window
1001 	 * regions)
1002 	 */
1003 	spa = nfit_buf;
1004 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1005 	spa->header.length = sizeof(*spa);
1006 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1007 	spa->range_index = 0+1;
1008 	spa->address = t->spa_set_dma[0];
1009 	spa->length = SPA0_SIZE;
1010 
1011 	/*
1012 	 * spa1 (interleave last half of the 4 DIMMS, note storage
1013 	 * does not actually alias the related block-data-window
1014 	 * regions)
1015 	 */
1016 	spa = nfit_buf + sizeof(*spa);
1017 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1018 	spa->header.length = sizeof(*spa);
1019 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1020 	spa->range_index = 1+1;
1021 	spa->address = t->spa_set_dma[1];
1022 	spa->length = SPA1_SIZE;
1023 
1024 	/* spa2 (dcr0) dimm0 */
1025 	spa = nfit_buf + sizeof(*spa) * 2;
1026 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1027 	spa->header.length = sizeof(*spa);
1028 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1029 	spa->range_index = 2+1;
1030 	spa->address = t->dcr_dma[0];
1031 	spa->length = DCR_SIZE;
1032 
1033 	/* spa3 (dcr1) dimm1 */
1034 	spa = nfit_buf + sizeof(*spa) * 3;
1035 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1036 	spa->header.length = sizeof(*spa);
1037 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1038 	spa->range_index = 3+1;
1039 	spa->address = t->dcr_dma[1];
1040 	spa->length = DCR_SIZE;
1041 
1042 	/* spa4 (dcr2) dimm2 */
1043 	spa = nfit_buf + sizeof(*spa) * 4;
1044 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1045 	spa->header.length = sizeof(*spa);
1046 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1047 	spa->range_index = 4+1;
1048 	spa->address = t->dcr_dma[2];
1049 	spa->length = DCR_SIZE;
1050 
1051 	/* spa5 (dcr3) dimm3 */
1052 	spa = nfit_buf + sizeof(*spa) * 5;
1053 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1054 	spa->header.length = sizeof(*spa);
1055 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1056 	spa->range_index = 5+1;
1057 	spa->address = t->dcr_dma[3];
1058 	spa->length = DCR_SIZE;
1059 
1060 	/* spa6 (bdw for dcr0) dimm0 */
1061 	spa = nfit_buf + sizeof(*spa) * 6;
1062 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1063 	spa->header.length = sizeof(*spa);
1064 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1065 	spa->range_index = 6+1;
1066 	spa->address = t->dimm_dma[0];
1067 	spa->length = DIMM_SIZE;
1068 
1069 	/* spa7 (bdw for dcr1) dimm1 */
1070 	spa = nfit_buf + sizeof(*spa) * 7;
1071 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1072 	spa->header.length = sizeof(*spa);
1073 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1074 	spa->range_index = 7+1;
1075 	spa->address = t->dimm_dma[1];
1076 	spa->length = DIMM_SIZE;
1077 
1078 	/* spa8 (bdw for dcr2) dimm2 */
1079 	spa = nfit_buf + sizeof(*spa) * 8;
1080 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1081 	spa->header.length = sizeof(*spa);
1082 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1083 	spa->range_index = 8+1;
1084 	spa->address = t->dimm_dma[2];
1085 	spa->length = DIMM_SIZE;
1086 
1087 	/* spa9 (bdw for dcr3) dimm3 */
1088 	spa = nfit_buf + sizeof(*spa) * 9;
1089 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1090 	spa->header.length = sizeof(*spa);
1091 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1092 	spa->range_index = 9+1;
1093 	spa->address = t->dimm_dma[3];
1094 	spa->length = DIMM_SIZE;
1095 
1096 	offset = sizeof(*spa) * 10;
1097 	/* mem-region0 (spa0, dimm0) */
1098 	memdev = nfit_buf + offset;
1099 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1100 	memdev->header.length = sizeof(*memdev);
1101 	memdev->device_handle = handle[0];
1102 	memdev->physical_id = 0;
1103 	memdev->region_id = 0;
1104 	memdev->range_index = 0+1;
1105 	memdev->region_index = 4+1;
1106 	memdev->region_size = SPA0_SIZE/2;
1107 	memdev->region_offset = 1;
1108 	memdev->address = 0;
1109 	memdev->interleave_index = 0;
1110 	memdev->interleave_ways = 2;
1111 
1112 	/* mem-region1 (spa0, dimm1) */
1113 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map);
1114 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1115 	memdev->header.length = sizeof(*memdev);
1116 	memdev->device_handle = handle[1];
1117 	memdev->physical_id = 1;
1118 	memdev->region_id = 0;
1119 	memdev->range_index = 0+1;
1120 	memdev->region_index = 5+1;
1121 	memdev->region_size = SPA0_SIZE/2;
1122 	memdev->region_offset = (1 << 8);
1123 	memdev->address = 0;
1124 	memdev->interleave_index = 0;
1125 	memdev->interleave_ways = 2;
1126 	memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
1127 
1128 	/* mem-region2 (spa1, dimm0) */
1129 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
1130 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1131 	memdev->header.length = sizeof(*memdev);
1132 	memdev->device_handle = handle[0];
1133 	memdev->physical_id = 0;
1134 	memdev->region_id = 1;
1135 	memdev->range_index = 1+1;
1136 	memdev->region_index = 4+1;
1137 	memdev->region_size = SPA1_SIZE/4;
1138 	memdev->region_offset = (1 << 16);
1139 	memdev->address = SPA0_SIZE/2;
1140 	memdev->interleave_index = 0;
1141 	memdev->interleave_ways = 4;
1142 	memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
1143 
1144 	/* mem-region3 (spa1, dimm1) */
1145 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
1146 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1147 	memdev->header.length = sizeof(*memdev);
1148 	memdev->device_handle = handle[1];
1149 	memdev->physical_id = 1;
1150 	memdev->region_id = 1;
1151 	memdev->range_index = 1+1;
1152 	memdev->region_index = 5+1;
1153 	memdev->region_size = SPA1_SIZE/4;
1154 	memdev->region_offset = (1 << 24);
1155 	memdev->address = SPA0_SIZE/2;
1156 	memdev->interleave_index = 0;
1157 	memdev->interleave_ways = 4;
1158 
1159 	/* mem-region4 (spa1, dimm2) */
1160 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4;
1161 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1162 	memdev->header.length = sizeof(*memdev);
1163 	memdev->device_handle = handle[2];
1164 	memdev->physical_id = 2;
1165 	memdev->region_id = 0;
1166 	memdev->range_index = 1+1;
1167 	memdev->region_index = 6+1;
1168 	memdev->region_size = SPA1_SIZE/4;
1169 	memdev->region_offset = (1ULL << 32);
1170 	memdev->address = SPA0_SIZE/2;
1171 	memdev->interleave_index = 0;
1172 	memdev->interleave_ways = 4;
1173 	memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
1174 
1175 	/* mem-region5 (spa1, dimm3) */
1176 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
1177 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1178 	memdev->header.length = sizeof(*memdev);
1179 	memdev->device_handle = handle[3];
1180 	memdev->physical_id = 3;
1181 	memdev->region_id = 0;
1182 	memdev->range_index = 1+1;
1183 	memdev->region_index = 7+1;
1184 	memdev->region_size = SPA1_SIZE/4;
1185 	memdev->region_offset = (1ULL << 40);
1186 	memdev->address = SPA0_SIZE/2;
1187 	memdev->interleave_index = 0;
1188 	memdev->interleave_ways = 4;
1189 
1190 	/* mem-region6 (spa/dcr0, dimm0) */
1191 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6;
1192 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1193 	memdev->header.length = sizeof(*memdev);
1194 	memdev->device_handle = handle[0];
1195 	memdev->physical_id = 0;
1196 	memdev->region_id = 0;
1197 	memdev->range_index = 2+1;
1198 	memdev->region_index = 0+1;
1199 	memdev->region_size = 0;
1200 	memdev->region_offset = 0;
1201 	memdev->address = 0;
1202 	memdev->interleave_index = 0;
1203 	memdev->interleave_ways = 1;
1204 
1205 	/* mem-region7 (spa/dcr1, dimm1) */
1206 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7;
1207 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1208 	memdev->header.length = sizeof(*memdev);
1209 	memdev->device_handle = handle[1];
1210 	memdev->physical_id = 1;
1211 	memdev->region_id = 0;
1212 	memdev->range_index = 3+1;
1213 	memdev->region_index = 1+1;
1214 	memdev->region_size = 0;
1215 	memdev->region_offset = 0;
1216 	memdev->address = 0;
1217 	memdev->interleave_index = 0;
1218 	memdev->interleave_ways = 1;
1219 
1220 	/* mem-region8 (spa/dcr2, dimm2) */
1221 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8;
1222 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1223 	memdev->header.length = sizeof(*memdev);
1224 	memdev->device_handle = handle[2];
1225 	memdev->physical_id = 2;
1226 	memdev->region_id = 0;
1227 	memdev->range_index = 4+1;
1228 	memdev->region_index = 2+1;
1229 	memdev->region_size = 0;
1230 	memdev->region_offset = 0;
1231 	memdev->address = 0;
1232 	memdev->interleave_index = 0;
1233 	memdev->interleave_ways = 1;
1234 
1235 	/* mem-region9 (spa/dcr3, dimm3) */
1236 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9;
1237 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1238 	memdev->header.length = sizeof(*memdev);
1239 	memdev->device_handle = handle[3];
1240 	memdev->physical_id = 3;
1241 	memdev->region_id = 0;
1242 	memdev->range_index = 5+1;
1243 	memdev->region_index = 3+1;
1244 	memdev->region_size = 0;
1245 	memdev->region_offset = 0;
1246 	memdev->address = 0;
1247 	memdev->interleave_index = 0;
1248 	memdev->interleave_ways = 1;
1249 
1250 	/* mem-region10 (spa/bdw0, dimm0) */
1251 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10;
1252 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1253 	memdev->header.length = sizeof(*memdev);
1254 	memdev->device_handle = handle[0];
1255 	memdev->physical_id = 0;
1256 	memdev->region_id = 0;
1257 	memdev->range_index = 6+1;
1258 	memdev->region_index = 0+1;
1259 	memdev->region_size = 0;
1260 	memdev->region_offset = 0;
1261 	memdev->address = 0;
1262 	memdev->interleave_index = 0;
1263 	memdev->interleave_ways = 1;
1264 
1265 	/* mem-region11 (spa/bdw1, dimm1) */
1266 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11;
1267 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1268 	memdev->header.length = sizeof(*memdev);
1269 	memdev->device_handle = handle[1];
1270 	memdev->physical_id = 1;
1271 	memdev->region_id = 0;
1272 	memdev->range_index = 7+1;
1273 	memdev->region_index = 1+1;
1274 	memdev->region_size = 0;
1275 	memdev->region_offset = 0;
1276 	memdev->address = 0;
1277 	memdev->interleave_index = 0;
1278 	memdev->interleave_ways = 1;
1279 
1280 	/* mem-region12 (spa/bdw2, dimm2) */
1281 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12;
1282 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1283 	memdev->header.length = sizeof(*memdev);
1284 	memdev->device_handle = handle[2];
1285 	memdev->physical_id = 2;
1286 	memdev->region_id = 0;
1287 	memdev->range_index = 8+1;
1288 	memdev->region_index = 2+1;
1289 	memdev->region_size = 0;
1290 	memdev->region_offset = 0;
1291 	memdev->address = 0;
1292 	memdev->interleave_index = 0;
1293 	memdev->interleave_ways = 1;
1294 
1295 	/* mem-region13 (spa/bdw3, dimm3) */
1296 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13;
1297 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1298 	memdev->header.length = sizeof(*memdev);
1299 	memdev->device_handle = handle[3];
1300 	memdev->physical_id = 3;
1301 	memdev->region_id = 0;
1302 	memdev->range_index = 9+1;
1303 	memdev->region_index = 3+1;
1304 	memdev->region_size = 0;
1305 	memdev->region_offset = 0;
1306 	memdev->address = 0;
1307 	memdev->interleave_index = 0;
1308 	memdev->interleave_ways = 1;
1309 	memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
1310 
1311 	offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
1312 	/* dcr-descriptor0: blk */
1313 	dcr = nfit_buf + offset;
1314 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1315 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
1316 	dcr->region_index = 0+1;
1317 	dcr_common_init(dcr);
1318 	dcr->serial_number = ~handle[0];
1319 	dcr->code = NFIT_FIC_BLK;
1320 	dcr->windows = 1;
1321 	dcr->window_size = DCR_SIZE;
1322 	dcr->command_offset = 0;
1323 	dcr->command_size = 8;
1324 	dcr->status_offset = 8;
1325 	dcr->status_size = 4;
1326 
1327 	/* dcr-descriptor1: blk */
1328 	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region);
1329 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1330 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
1331 	dcr->region_index = 1+1;
1332 	dcr_common_init(dcr);
1333 	dcr->serial_number = ~handle[1];
1334 	dcr->code = NFIT_FIC_BLK;
1335 	dcr->windows = 1;
1336 	dcr->window_size = DCR_SIZE;
1337 	dcr->command_offset = 0;
1338 	dcr->command_size = 8;
1339 	dcr->status_offset = 8;
1340 	dcr->status_size = 4;
1341 
1342 	/* dcr-descriptor2: blk */
1343 	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2;
1344 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1345 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
1346 	dcr->region_index = 2+1;
1347 	dcr_common_init(dcr);
1348 	dcr->serial_number = ~handle[2];
1349 	dcr->code = NFIT_FIC_BLK;
1350 	dcr->windows = 1;
1351 	dcr->window_size = DCR_SIZE;
1352 	dcr->command_offset = 0;
1353 	dcr->command_size = 8;
1354 	dcr->status_offset = 8;
1355 	dcr->status_size = 4;
1356 
1357 	/* dcr-descriptor3: blk */
1358 	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3;
1359 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1360 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
1361 	dcr->region_index = 3+1;
1362 	dcr_common_init(dcr);
1363 	dcr->serial_number = ~handle[3];
1364 	dcr->code = NFIT_FIC_BLK;
1365 	dcr->windows = 1;
1366 	dcr->window_size = DCR_SIZE;
1367 	dcr->command_offset = 0;
1368 	dcr->command_size = 8;
1369 	dcr->status_offset = 8;
1370 	dcr->status_size = 4;
1371 
1372 	offset = offset + sizeof(struct acpi_nfit_control_region) * 4;
1373 	/* dcr-descriptor0: pmem */
1374 	dcr = nfit_buf + offset;
1375 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1376 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1377 			window_size);
1378 	dcr->region_index = 4+1;
1379 	dcr_common_init(dcr);
1380 	dcr->serial_number = ~handle[0];
1381 	dcr->code = NFIT_FIC_BYTEN;
1382 	dcr->windows = 0;
1383 
1384 	/* dcr-descriptor1: pmem */
1385 	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1386 			window_size);
1387 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1388 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1389 			window_size);
1390 	dcr->region_index = 5+1;
1391 	dcr_common_init(dcr);
1392 	dcr->serial_number = ~handle[1];
1393 	dcr->code = NFIT_FIC_BYTEN;
1394 	dcr->windows = 0;
1395 
1396 	/* dcr-descriptor2: pmem */
1397 	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1398 			window_size) * 2;
1399 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1400 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1401 			window_size);
1402 	dcr->region_index = 6+1;
1403 	dcr_common_init(dcr);
1404 	dcr->serial_number = ~handle[2];
1405 	dcr->code = NFIT_FIC_BYTEN;
1406 	dcr->windows = 0;
1407 
1408 	/* dcr-descriptor3: pmem */
1409 	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1410 			window_size) * 3;
1411 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1412 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1413 			window_size);
1414 	dcr->region_index = 7+1;
1415 	dcr_common_init(dcr);
1416 	dcr->serial_number = ~handle[3];
1417 	dcr->code = NFIT_FIC_BYTEN;
1418 	dcr->windows = 0;
1419 
1420 	offset = offset + offsetof(struct acpi_nfit_control_region,
1421 			window_size) * 4;
1422 	/* bdw0 (spa/dcr0, dimm0) */
1423 	bdw = nfit_buf + offset;
1424 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1425 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1426 	bdw->region_index = 0+1;
1427 	bdw->windows = 1;
1428 	bdw->offset = 0;
1429 	bdw->size = BDW_SIZE;
1430 	bdw->capacity = DIMM_SIZE;
1431 	bdw->start_address = 0;
1432 
1433 	/* bdw1 (spa/dcr1, dimm1) */
1434 	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region);
1435 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1436 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1437 	bdw->region_index = 1+1;
1438 	bdw->windows = 1;
1439 	bdw->offset = 0;
1440 	bdw->size = BDW_SIZE;
1441 	bdw->capacity = DIMM_SIZE;
1442 	bdw->start_address = 0;
1443 
1444 	/* bdw2 (spa/dcr2, dimm2) */
1445 	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2;
1446 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1447 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1448 	bdw->region_index = 2+1;
1449 	bdw->windows = 1;
1450 	bdw->offset = 0;
1451 	bdw->size = BDW_SIZE;
1452 	bdw->capacity = DIMM_SIZE;
1453 	bdw->start_address = 0;
1454 
1455 	/* bdw3 (spa/dcr3, dimm3) */
1456 	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3;
1457 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1458 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1459 	bdw->region_index = 3+1;
1460 	bdw->windows = 1;
1461 	bdw->offset = 0;
1462 	bdw->size = BDW_SIZE;
1463 	bdw->capacity = DIMM_SIZE;
1464 	bdw->start_address = 0;
1465 
1466 	offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
1467 	/* flush0 (dimm0) */
1468 	flush = nfit_buf + offset;
1469 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1470 	flush->header.length = flush_hint_size;
1471 	flush->device_handle = handle[0];
1472 	flush->hint_count = NUM_HINTS;
1473 	for (i = 0; i < NUM_HINTS; i++)
1474 		flush->hint_address[i] = t->flush_dma[0] + i * sizeof(u64);
1475 
1476 	/* flush1 (dimm1) */
1477 	flush = nfit_buf + offset + flush_hint_size * 1;
1478 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1479 	flush->header.length = flush_hint_size;
1480 	flush->device_handle = handle[1];
1481 	flush->hint_count = NUM_HINTS;
1482 	for (i = 0; i < NUM_HINTS; i++)
1483 		flush->hint_address[i] = t->flush_dma[1] + i * sizeof(u64);
1484 
1485 	/* flush2 (dimm2) */
1486 	flush = nfit_buf + offset + flush_hint_size * 2;
1487 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1488 	flush->header.length = flush_hint_size;
1489 	flush->device_handle = handle[2];
1490 	flush->hint_count = NUM_HINTS;
1491 	for (i = 0; i < NUM_HINTS; i++)
1492 		flush->hint_address[i] = t->flush_dma[2] + i * sizeof(u64);
1493 
1494 	/* flush3 (dimm3) */
1495 	flush = nfit_buf + offset + flush_hint_size * 3;
1496 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1497 	flush->header.length = flush_hint_size;
1498 	flush->device_handle = handle[3];
1499 	flush->hint_count = NUM_HINTS;
1500 	for (i = 0; i < NUM_HINTS; i++)
1501 		flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);
1502 
1503 	if (t->setup_hotplug) {
1504 		offset = offset + flush_hint_size * 4;
1505 		/* dcr-descriptor4: blk */
1506 		dcr = nfit_buf + offset;
1507 		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1508 		dcr->header.length = sizeof(struct acpi_nfit_control_region);
1509 		dcr->region_index = 8+1;
1510 		dcr_common_init(dcr);
1511 		dcr->serial_number = ~handle[4];
1512 		dcr->code = NFIT_FIC_BLK;
1513 		dcr->windows = 1;
1514 		dcr->window_size = DCR_SIZE;
1515 		dcr->command_offset = 0;
1516 		dcr->command_size = 8;
1517 		dcr->status_offset = 8;
1518 		dcr->status_size = 4;
1519 
1520 		offset = offset + sizeof(struct acpi_nfit_control_region);
1521 		/* dcr-descriptor4: pmem */
1522 		dcr = nfit_buf + offset;
1523 		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1524 		dcr->header.length = offsetof(struct acpi_nfit_control_region,
1525 				window_size);
1526 		dcr->region_index = 9+1;
1527 		dcr_common_init(dcr);
1528 		dcr->serial_number = ~handle[4];
1529 		dcr->code = NFIT_FIC_BYTEN;
1530 		dcr->windows = 0;
1531 
1532 		offset = offset + offsetof(struct acpi_nfit_control_region,
1533 				window_size);
1534 		/* bdw4 (spa/dcr4, dimm4) */
1535 		bdw = nfit_buf + offset;
1536 		bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1537 		bdw->header.length = sizeof(struct acpi_nfit_data_region);
1538 		bdw->region_index = 8+1;
1539 		bdw->windows = 1;
1540 		bdw->offset = 0;
1541 		bdw->size = BDW_SIZE;
1542 		bdw->capacity = DIMM_SIZE;
1543 		bdw->start_address = 0;
1544 
1545 		offset = offset + sizeof(struct acpi_nfit_data_region);
1546 		/* spa10 (dcr4) dimm4 */
1547 		spa = nfit_buf + offset;
1548 		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1549 		spa->header.length = sizeof(*spa);
1550 		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1551 		spa->range_index = 10+1;
1552 		spa->address = t->dcr_dma[4];
1553 		spa->length = DCR_SIZE;
1554 
1555 		/*
1556 		 * spa11 (single-dimm interleave for hotplug, note storage
1557 		 * does not actually alias the related block-data-window
1558 		 * regions)
1559 		 */
1560 		spa = nfit_buf + offset + sizeof(*spa);
1561 		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1562 		spa->header.length = sizeof(*spa);
1563 		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1564 		spa->range_index = 11+1;
1565 		spa->address = t->spa_set_dma[2];
1566 		spa->length = SPA0_SIZE;
1567 
1568 		/* spa12 (bdw for dcr4) dimm4 */
1569 		spa = nfit_buf + offset + sizeof(*spa) * 2;
1570 		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1571 		spa->header.length = sizeof(*spa);
1572 		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1573 		spa->range_index = 12+1;
1574 		spa->address = t->dimm_dma[4];
1575 		spa->length = DIMM_SIZE;
1576 
1577 		offset = offset + sizeof(*spa) * 3;
1578 		/* mem-region14 (spa/dcr4, dimm4) */
1579 		memdev = nfit_buf + offset;
1580 		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1581 		memdev->header.length = sizeof(*memdev);
1582 		memdev->device_handle = handle[4];
1583 		memdev->physical_id = 4;
1584 		memdev->region_id = 0;
1585 		memdev->range_index = 10+1;
1586 		memdev->region_index = 8+1;
1587 		memdev->region_size = 0;
1588 		memdev->region_offset = 0;
1589 		memdev->address = 0;
1590 		memdev->interleave_index = 0;
1591 		memdev->interleave_ways = 1;
1592 
1593 		/* mem-region15 (spa0, dimm4) */
1594 		memdev = nfit_buf + offset +
1595 				sizeof(struct acpi_nfit_memory_map);
1596 		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1597 		memdev->header.length = sizeof(*memdev);
1598 		memdev->device_handle = handle[4];
1599 		memdev->physical_id = 4;
1600 		memdev->region_id = 0;
1601 		memdev->range_index = 11+1;
1602 		memdev->region_index = 9+1;
1603 		memdev->region_size = SPA0_SIZE;
1604 		memdev->region_offset = (1ULL << 48);
1605 		memdev->address = 0;
1606 		memdev->interleave_index = 0;
1607 		memdev->interleave_ways = 1;
1608 		memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
1609 
1610 		/* mem-region16 (spa/bdw4, dimm4) */
1611 		memdev = nfit_buf + offset +
1612 				sizeof(struct acpi_nfit_memory_map) * 2;
1613 		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1614 		memdev->header.length = sizeof(*memdev);
1615 		memdev->device_handle = handle[4];
1616 		memdev->physical_id = 4;
1617 		memdev->region_id = 0;
1618 		memdev->range_index = 12+1;
1619 		memdev->region_index = 8+1;
1620 		memdev->region_size = 0;
1621 		memdev->region_offset = 0;
1622 		memdev->address = 0;
1623 		memdev->interleave_index = 0;
1624 		memdev->interleave_ways = 1;
1625 
1626 		offset = offset + sizeof(struct acpi_nfit_memory_map) * 3;
1627 		/* flush4 (dimm4) */
1628 		flush = nfit_buf + offset;
1629 		flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1630 		flush->header.length = flush_hint_size;
1631 		flush->device_handle = handle[4];
1632 		flush->hint_count = NUM_HINTS;
1633 		for (i = 0; i < NUM_HINTS; i++)
1634 			flush->hint_address[i] = t->flush_dma[4]
1635 				+ i * sizeof(u64);
1636 	}
1637 
1638 	post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
1639 			SPA0_SIZE);
1640 
1641 	acpi_desc = &t->acpi_desc;
1642 	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
1643 	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
1644 	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
1645 	set_bit(ND_CMD_SMART, &acpi_desc->dimm_cmd_force_en);
1646 	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
1647 	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
1648 	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
1649 	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
1650 	set_bit(ND_CMD_CALL, &acpi_desc->bus_cmd_force_en);
1651 	set_bit(ND_CMD_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
1652 	set_bit(NFIT_CMD_TRANSLATE_SPA, &acpi_desc->bus_nfit_cmd_force_en);
1653 	set_bit(NFIT_CMD_ARS_INJECT_SET, &acpi_desc->bus_nfit_cmd_force_en);
1654 	set_bit(NFIT_CMD_ARS_INJECT_CLEAR, &acpi_desc->bus_nfit_cmd_force_en);
1655 	set_bit(NFIT_CMD_ARS_INJECT_GET, &acpi_desc->bus_nfit_cmd_force_en);
1656 }
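
/*
 * To recap, nfit_test0_setup() emits the table in the order the offset
 * is advanced above: 10 SPA ranges, 14 memory-map entries, 4 BLK plus
 * 4 PMEM control regions, 4 block data windows and 4 flush hint tables,
 * with the hotplug branch appending one more BLK/PMEM control region
 * pair, a data window, 3 SPA ranges, 3 memory-map entries and a flush
 * table for dimm4.  The force_en bits at the end then mark the label,
 * SMART, ARS and injection commands as supported, presumably so they
 * are exposed without a real _DSM behind them.
 */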
1657 
1658 static void nfit_test1_setup(struct nfit_test *t)
1659 {
1660 	size_t offset;
1661 	void *nfit_buf = t->nfit_buf;
1662 	struct acpi_nfit_memory_map *memdev;
1663 	struct acpi_nfit_control_region *dcr;
1664 	struct acpi_nfit_system_address *spa;
1665 	struct acpi_nfit_desc *acpi_desc;
1666 
1667 	offset = 0;
1668 	/* spa0 (flat range with no bdw aliasing) */
1669 	spa = nfit_buf + offset;
1670 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1671 	spa->header.length = sizeof(*spa);
1672 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1673 	spa->range_index = 0+1;
1674 	spa->address = t->spa_set_dma[0];
1675 	spa->length = SPA2_SIZE;
1676 
1677 	/* virtual cd region */
1678 	spa = nfit_buf + sizeof(*spa);
1679 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1680 	spa->header.length = sizeof(*spa);
1681 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
1682 	spa->range_index = 0;
1683 	spa->address = t->spa_set_dma[1];
1684 	spa->length = SPA_VCD_SIZE;
1685 
1686 	offset += sizeof(*spa) * 2;
1687 	/* mem-region0 (spa0, dimm0) */
1688 	memdev = nfit_buf + offset;
1689 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1690 	memdev->header.length = sizeof(*memdev);
1691 	memdev->device_handle = handle[5];
1692 	memdev->physical_id = 0;
1693 	memdev->region_id = 0;
1694 	memdev->range_index = 0+1;
1695 	memdev->region_index = 0+1;
1696 	memdev->region_size = SPA2_SIZE;
1697 	memdev->region_offset = 0;
1698 	memdev->address = 0;
1699 	memdev->interleave_index = 0;
1700 	memdev->interleave_ways = 1;
1701 	memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
1702 		| ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
1703 		| ACPI_NFIT_MEM_NOT_ARMED;
1704 
1705 	offset += sizeof(*memdev);
1706 	/* dcr-descriptor0 */
1707 	dcr = nfit_buf + offset;
1708 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1709 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1710 			window_size);
1711 	dcr->region_index = 0+1;
1712 	dcr_common_init(dcr);
1713 	dcr->serial_number = ~handle[5];
1714 	dcr->code = NFIT_FIC_BYTE;
1715 	dcr->windows = 0;
1716 
1717 	offset += dcr->header.length;
1718 	memdev = nfit_buf + offset;
1719 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1720 	memdev->header.length = sizeof(*memdev);
1721 	memdev->device_handle = handle[6];
1722 	memdev->physical_id = 0;
1723 	memdev->region_id = 0;
1724 	memdev->range_index = 0;
1725 	memdev->region_index = 0+2;
1726 	memdev->region_size = SPA2_SIZE;
1727 	memdev->region_offset = 0;
1728 	memdev->address = 0;
1729 	memdev->interleave_index = 0;
1730 	memdev->interleave_ways = 1;
1731 	memdev->flags = ACPI_NFIT_MEM_MAP_FAILED;
1732 
1733 	/* dcr-descriptor1 */
1734 	offset += sizeof(*memdev);
1735 	dcr = nfit_buf + offset;
1736 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1737 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1738 			window_size);
1739 	dcr->region_index = 0+2;
1740 	dcr_common_init(dcr);
1741 	dcr->serial_number = ~handle[6];
1742 	dcr->code = NFIT_FIC_BYTE;
1743 	dcr->windows = 0;
1744 
1745 	post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
1746 			SPA2_SIZE);
1747 
1748 	acpi_desc = &t->acpi_desc;
1749 	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
1750 	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
1751 	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
1752 	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
1753 }
1754 
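/*
 * Emulated BLK aperture I/O: the block window is ordinary test memory,
 * so a transfer is just a memcpy done while holding a region lane.
 * Reads additionally call arch_invalidate_pmem() purely for coverage.
 */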
1755 static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
1756 		void *iobuf, u64 len, int rw)
1757 {
1758 	struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
1759 	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
1760 	struct nd_region *nd_region = &ndbr->nd_region;
1761 	unsigned int lane;
1762 
1763 	lane = nd_region_acquire_lane(nd_region);
1764 	if (rw)
1765 		memcpy(mmio->addr.base + dpa, iobuf, len);
1766 	else {
1767 		memcpy(iobuf, mmio->addr.base + dpa, len);
1768 
1769 		/* give us some coverage of the arch_invalidate_pmem() API */
1770 		arch_invalidate_pmem(mmio->addr.base + dpa, len);
1771 	}
1772 	nd_region_release_lane(nd_region, lane);
1773 
1774 	return 0;
1775 }
1776 
1777 static unsigned long nfit_ctl_handle;
1778 
1779 static union acpi_object *result;
1780 
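/*
 * Stand-in for ACPI _DSM evaluation: reject any handle other than the
 * test handle, otherwise hand back whatever canned buffer
 * setup_result() last prepared.
 */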
1781 static union acpi_object *nfit_test_evaluate_dsm(acpi_handle handle,
1782 		const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4)
1783 {
1784 	if (handle != &nfit_ctl_handle)
1785 		return ERR_PTR(-ENXIO);
1786 
1787 	return result;
1788 }
1789 
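/*
 * Package @buf as an ACPI_TYPE_BUFFER object for the mock DSM to
 * return, then zero the caller's copy so the test can verify that
 * acpi_nfit_ctl() copies the payload back out.
 */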
1790 static int setup_result(void *buf, size_t size)
1791 {
	/* drop any result left over from a previous call */
	kfree(result);
1792 	result = kmalloc(sizeof(union acpi_object) + size, GFP_KERNEL);
1793 	if (!result)
1794 		return -ENOMEM;
1795 	result->package.type = ACPI_TYPE_BUFFER;
1796 	result->buffer.pointer = (void *) (result + 1);
1797 	result->buffer.length = size;
1798 	memcpy(result->buffer.pointer, buf, size);
1799 	memset(buf, 0, size);
1800 	return 0;
1801 }
1802 
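/*
 * Exercise acpi_nfit_ctl() marshaling against a stubbed ACPI device:
 * fabricate an acpi_device, acpi_nfit_desc, nfit_mem and nvdimm, then
 * walk through representative DIMM and bus commands, checking both the
 * translated return codes and the payloads copied back to the caller.
 */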
1803 static int nfit_ctl_test(struct device *dev)
1804 {
1805 	int rc, cmd_rc;
1806 	struct nvdimm *nvdimm;
1807 	struct acpi_device *adev;
1808 	struct nfit_mem *nfit_mem;
1809 	struct nd_ars_record *record;
1810 	struct acpi_nfit_desc *acpi_desc;
1811 	const u64 test_val = 0x0123456789abcdefULL;
1812 	unsigned long mask, cmd_size, offset;
1813 	union {
1814 		struct nd_cmd_get_config_size cfg_size;
1815 		struct nd_cmd_clear_error clear_err;
1816 		struct nd_cmd_ars_status ars_stat;
1817 		struct nd_cmd_ars_cap ars_cap;
1818 		char buf[sizeof(struct nd_cmd_ars_status)
1819 			+ sizeof(struct nd_ars_record)];
1820 	} cmds;
1821 
1822 	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
1823 	if (!adev)
1824 		return -ENOMEM;
1825 	*adev = (struct acpi_device) {
1826 		.handle = &nfit_ctl_handle,
1827 		.dev = {
1828 			.init_name = "test-adev",
1829 		},
1830 	};
1831 
1832 	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
1833 	if (!acpi_desc)
1834 		return -ENOMEM;
1835 	*acpi_desc = (struct acpi_nfit_desc) {
1836 		.nd_desc = {
1837 			.cmd_mask = 1UL << ND_CMD_ARS_CAP
1838 				| 1UL << ND_CMD_ARS_START
1839 				| 1UL << ND_CMD_ARS_STATUS
1840 				| 1UL << ND_CMD_CLEAR_ERROR
1841 				| 1UL << ND_CMD_CALL,
1842 			.module = THIS_MODULE,
1843 			.provider_name = "ACPI.NFIT",
1844 			.ndctl = acpi_nfit_ctl,
1845 			.bus_dsm_mask = 1UL << NFIT_CMD_TRANSLATE_SPA
1846 				| 1UL << NFIT_CMD_ARS_INJECT_SET
1847 				| 1UL << NFIT_CMD_ARS_INJECT_CLEAR
1848 				| 1UL << NFIT_CMD_ARS_INJECT_GET,
1849 		},
1850 		.dev = &adev->dev,
1851 	};
1852 
1853 	nfit_mem = devm_kzalloc(dev, sizeof(*nfit_mem), GFP_KERNEL);
1854 	if (!nfit_mem)
1855 		return -ENOMEM;
1856 
1857 	mask = 1UL << ND_CMD_SMART | 1UL << ND_CMD_SMART_THRESHOLD
1858 		| 1UL << ND_CMD_DIMM_FLAGS | 1UL << ND_CMD_GET_CONFIG_SIZE
1859 		| 1UL << ND_CMD_GET_CONFIG_DATA | 1UL << ND_CMD_SET_CONFIG_DATA
1860 		| 1UL << ND_CMD_VENDOR;
1861 	*nfit_mem = (struct nfit_mem) {
1862 		.adev = adev,
1863 		.family = NVDIMM_FAMILY_INTEL,
1864 		.dsm_mask = mask,
1865 	};
1866 
1867 	nvdimm = devm_kzalloc(dev, sizeof(*nvdimm), GFP_KERNEL);
1868 	if (!nvdimm)
1869 		return -ENOMEM;
1870 	*nvdimm = (struct nvdimm) {
1871 		.provider_data = nfit_mem,
1872 		.cmd_mask = mask,
1873 		.dev = {
1874 			.init_name = "test-dimm",
1875 		},
1876 	};
1877 
1878 
1879 	/* basic checkout of a typical 'get config size' command */
1880 	cmd_size = sizeof(cmds.cfg_size);
1881 	cmds.cfg_size = (struct nd_cmd_get_config_size) {
1882 		.status = 0,
1883 		.config_size = SZ_128K,
1884 		.max_xfer = SZ_4K,
1885 	};
1886 	rc = setup_result(cmds.buf, cmd_size);
1887 	if (rc)
1888 		return rc;
1889 	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
1890 			cmds.buf, cmd_size, &cmd_rc);
1891 
1892 	if (rc < 0 || cmd_rc || cmds.cfg_size.status != 0
1893 			|| cmds.cfg_size.config_size != SZ_128K
1894 			|| cmds.cfg_size.max_xfer != SZ_4K) {
1895 		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
1896 				__func__, __LINE__, rc, cmd_rc);
1897 		return -EIO;
1898 	}
1899 
1900 
1901 	/* test ars_status with zero output */
1902 	cmd_size = offsetof(struct nd_cmd_ars_status, address);
1903 	cmds.ars_stat = (struct nd_cmd_ars_status) {
1904 		.out_length = 0,
1905 	};
1906 	rc = setup_result(cmds.buf, cmd_size);
1907 	if (rc)
1908 		return rc;
1909 	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
1910 			cmds.buf, cmd_size, &cmd_rc);
1911 
1912 	if (rc < 0 || cmd_rc) {
1913 		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
1914 				__func__, __LINE__, rc, cmd_rc);
1915 		return -EIO;
1916 	}
1917 
1918 
1919 	/* test ars_cap with benign extended status */
1920 	cmd_size = sizeof(cmds.ars_cap);
1921 	cmds.ars_cap = (struct nd_cmd_ars_cap) {
1922 		.status = ND_ARS_PERSISTENT << 16,
1923 	};
1924 	offset = offsetof(struct nd_cmd_ars_cap, status);
1925 	rc = setup_result(cmds.buf + offset, cmd_size - offset);
1926 	if (rc)
1927 		return rc;
1928 	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_CAP,
1929 			cmds.buf, cmd_size, &cmd_rc);
1930 
1931 	if (rc < 0 || cmd_rc) {
1932 		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
1933 				__func__, __LINE__, rc, cmd_rc);
1934 		return -EIO;
1935 	}
1936 
1937 
1938 	/* test ars_status with 'status' trimmed from 'out_length' */
1939 	cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
1940 	cmds.ars_stat = (struct nd_cmd_ars_status) {
1941 		.out_length = cmd_size - 4,
1942 	};
1943 	record = &cmds.ars_stat.records[0];
1944 	*record = (struct nd_ars_record) {
1945 		.length = test_val,
1946 	};
1947 	rc = setup_result(cmds.buf, cmd_size);
1948 	if (rc)
1949 		return rc;
1950 	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
1951 			cmds.buf, cmd_size, &cmd_rc);
1952 
1953 	if (rc < 0 || cmd_rc || record->length != test_val) {
1954 		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
1955 				__func__, __LINE__, rc, cmd_rc);
1956 		return -EIO;
1957 	}
1958 
1959 
1960 	/* test ars_status with 'Output (Size)' including 'status' */
1961 	cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
1962 	cmds.ars_stat = (struct nd_cmd_ars_status) {
1963 		.out_length = cmd_size,
1964 	};
1965 	record = &cmds.ars_stat.records[0];
1966 	*record = (struct nd_ars_record) {
1967 		.length = test_val,
1968 	};
1969 	rc = setup_result(cmds.buf, cmd_size);
1970 	if (rc)
1971 		return rc;
1972 	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
1973 			cmds.buf, cmd_size, &cmd_rc);
1974 
1975 	if (rc < 0 || cmd_rc || record->length != test_val) {
1976 		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
1977 				__func__, __LINE__, rc, cmd_rc);
1978 		return -EIO;
1979 	}
1980 
1981 
1982 	/* test that a non-zero extended status for get_config_size results in failure */
1983 	cmd_size = sizeof(cmds.cfg_size);
1984 	cmds.cfg_size = (struct nd_cmd_get_config_size) {
1985 		.status = 1 << 16,
1986 	};
1987 	rc = setup_result(cmds.buf, cmd_size);
1988 	if (rc)
1989 		return rc;
1990 	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
1991 			cmds.buf, cmd_size, &cmd_rc);
1992 
1993 	if (rc < 0 || cmd_rc >= 0) {
1994 		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
1995 				__func__, __LINE__, rc, cmd_rc);
1996 		return -EIO;
1997 	}
1998 
1999 	/* test clear error */
2000 	cmd_size = sizeof(cmds.clear_err);
2001 	cmds.clear_err = (struct nd_cmd_clear_error) {
2002 		.length = 512,
2003 		.cleared = 512,
2004 	};
2005 	rc = setup_result(cmds.buf, cmd_size);
2006 	if (rc)
2007 		return rc;
2008 	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_CLEAR_ERROR,
2009 			cmds.buf, cmd_size, &cmd_rc);
2010 	if (rc < 0 || cmd_rc) {
2011 		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
2012 				__func__, __LINE__, rc, cmd_rc);
2013 		return -EIO;
2014 	}
2015 
2016 	return 0;
2017 }
2018 
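/*
 * Build one emulated NFIT bus: run the ctl self-test for instance 0,
 * allocate the per-DIMM and per-SPA backing resources, generate the
 * NFIT via the instance's setup() routine and register it with the
 * ACPI NFIT core.  Instance 0 is then re-run in hotplug mode and a
 * 0x80 notification is injected to cover the FIT-update path.
 */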
2019 static int nfit_test_probe(struct platform_device *pdev)
2020 {
2021 	struct nvdimm_bus_descriptor *nd_desc;
2022 	struct acpi_nfit_desc *acpi_desc;
2023 	struct device *dev = &pdev->dev;
2024 	struct nfit_test *nfit_test;
2025 	struct nfit_mem *nfit_mem;
2026 	union acpi_object *obj;
2027 	int rc;
2028 
2029 	if (strcmp(dev_name(&pdev->dev), "nfit_test.0") == 0) {
2030 		rc = nfit_ctl_test(&pdev->dev);
2031 		if (rc)
2032 			return rc;
2033 	}
2034 
2035 	nfit_test = to_nfit_test(&pdev->dev);
2036 
2037 	/* common alloc */
2038 	if (nfit_test->num_dcr) {
2039 		int num = nfit_test->num_dcr;
2040 
2041 		nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
2042 				GFP_KERNEL);
2043 		nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
2044 				GFP_KERNEL);
2045 		nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
2046 				GFP_KERNEL);
2047 		nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
2048 				GFP_KERNEL);
2049 		nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
2050 				GFP_KERNEL);
2051 		nfit_test->label_dma = devm_kcalloc(dev, num,
2052 				sizeof(dma_addr_t), GFP_KERNEL);
2053 		nfit_test->dcr = devm_kcalloc(dev, num,
2054 				sizeof(struct nfit_test_dcr *), GFP_KERNEL);
2055 		nfit_test->dcr_dma = devm_kcalloc(dev, num,
2056 				sizeof(dma_addr_t), GFP_KERNEL);
2057 		if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
2058 				&& nfit_test->label_dma && nfit_test->dcr
2059 				&& nfit_test->dcr_dma && nfit_test->flush
2060 				&& nfit_test->flush_dma)
2061 			/* pass */;
2062 		else
2063 			return -ENOMEM;
2064 	}
2065 
2066 	if (nfit_test->num_pm) {
2067 		int num = nfit_test->num_pm;
2068 
2069 		nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
2070 				GFP_KERNEL);
2071 		nfit_test->spa_set_dma = devm_kcalloc(dev, num,
2072 				sizeof(dma_addr_t), GFP_KERNEL);
2073 		if (nfit_test->spa_set && nfit_test->spa_set_dma)
2074 			/* pass */;
2075 		else
2076 			return -ENOMEM;
2077 	}
2078 
2079 	/* per-nfit specific alloc */
2080 	if (nfit_test->alloc(nfit_test))
2081 		return -ENOMEM;
2082 
2083 	nfit_test->setup(nfit_test);
2084 	acpi_desc = &nfit_test->acpi_desc;
2085 	acpi_nfit_desc_init(acpi_desc, &pdev->dev);
2086 	acpi_desc->blk_do_io = nfit_test_blk_do_io;
2087 	nd_desc = &acpi_desc->nd_desc;
2088 	nd_desc->provider_name = NULL;
2089 	nd_desc->module = THIS_MODULE;
2090 	nd_desc->ndctl = nfit_test_ctl;
2091 
2092 	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
2093 			nfit_test->nfit_size);
2094 	if (rc)
2095 		return rc;
2096 
2097 	rc = devm_add_action_or_reset(&pdev->dev, acpi_nfit_shutdown, acpi_desc);
2098 	if (rc)
2099 		return rc;
2100 
2101 	if (nfit_test->setup != nfit_test0_setup)
2102 		return 0;
2103 
2104 	nfit_test->setup_hotplug = 1;
2105 	nfit_test->setup(nfit_test);
2106 
2107 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
2108 	if (!obj)
2109 		return -ENOMEM;
2110 	obj->type = ACPI_TYPE_BUFFER;
2111 	obj->buffer.length = nfit_test->nfit_size;
2112 	obj->buffer.pointer = nfit_test->nfit_buf;
2113 	*(nfit_test->_fit) = obj;
2114 	__acpi_nfit_notify(&pdev->dev, nfit_test, 0x80);
2115 
2116 	/* associate dimm devices with nfit_mem data for notification testing */
2117 	mutex_lock(&acpi_desc->init_mutex);
2118 	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
2119 		u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle;
2120 		int i;
2121 
2122 		for (i = 0; i < NUM_DCR; i++)
2123 			if (nfit_handle == handle[i])
2124 				dev_set_drvdata(nfit_test->dimm_dev[i],
2125 						nfit_mem);
2126 	}
2127 	mutex_unlock(&acpi_desc->init_mutex);
2128 
2129 	return 0;
2130 }
2131 
2132 static int nfit_test_remove(struct platform_device *pdev)
2133 {
2134 	return 0;
2135 }
2136 
2137 static void nfit_test_release(struct device *dev)
2138 {
2139 	struct nfit_test *nfit_test = to_nfit_test(dev);
2140 
2141 	kfree(nfit_test);
2142 }
2143 
2144 static const struct platform_device_id nfit_test_id[] = {
2145 	{ KBUILD_MODNAME },
2146 	{ },
2147 };
2148 
2149 static struct platform_driver nfit_test_driver = {
2150 	.probe = nfit_test_probe,
2151 	.remove = nfit_test_remove,
2152 	.driver = {
2153 		.name = KBUILD_MODNAME,
2154 	},
2155 	.id_table = nfit_test_id,
2156 };
2157 
2158 static __init int nfit_test_init(void)
2159 {
2160 	int rc, i;
2161 
2162 	nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);
2163 
2164 	nfit_wq = create_singlethread_workqueue("nfit");
2165 	if (!nfit_wq)
2166 		return -ENOMEM;
2167 
2168 	nfit_test_dimm = class_create(THIS_MODULE, "nfit_test_dimm");
2169 	if (IS_ERR(nfit_test_dimm)) {
2170 		rc = PTR_ERR(nfit_test_dimm);
2171 		goto err_register;
2172 	}
2173 
2174 	for (i = 0; i < NUM_NFITS; i++) {
2175 		struct nfit_test *nfit_test;
2176 		struct platform_device *pdev;
2177 
2178 		nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
2179 		if (!nfit_test) {
2180 			rc = -ENOMEM;
2181 			goto err_register;
2182 		}
2183 		INIT_LIST_HEAD(&nfit_test->resources);
2184 		badrange_init(&nfit_test->badrange);
2185 		switch (i) {
2186 		case 0:
2187 			nfit_test->num_pm = NUM_PM;
2188 			nfit_test->dcr_idx = 0;
2189 			nfit_test->num_dcr = NUM_DCR;
2190 			nfit_test->alloc = nfit_test0_alloc;
2191 			nfit_test->setup = nfit_test0_setup;
2192 			break;
2193 		case 1:
2194 			nfit_test->num_pm = 2;
2195 			nfit_test->dcr_idx = NUM_DCR;
2196 			nfit_test->num_dcr = 2;
2197 			nfit_test->alloc = nfit_test1_alloc;
2198 			nfit_test->setup = nfit_test1_setup;
2199 			break;
2200 		default:
2201 			rc = -EINVAL;
2202 			goto err_register;
2203 		}
2204 		pdev = &nfit_test->pdev;
2205 		pdev->name = KBUILD_MODNAME;
2206 		pdev->id = i;
2207 		pdev->dev.release = nfit_test_release;
2208 		rc = platform_device_register(pdev);
2209 		if (rc) {
2210 			put_device(&pdev->dev);
2211 			goto err_register;
2212 		}
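		/*
		 * Take an extra reference so the nfit_test that embeds this
		 * pdev stays allocated until the final put_device() in the
		 * error and exit paths.
		 */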
2213 		get_device(&pdev->dev);
2214 
2215 		rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2216 		if (rc)
2217 			goto err_register;
2218 
2219 		instances[i] = nfit_test;
2220 		INIT_WORK(&nfit_test->work, uc_error_notify);
2221 	}
2222 
2223 	rc = platform_driver_register(&nfit_test_driver);
2224 	if (rc)
2225 		goto err_register;
2226 	return 0;
2227 
2228  err_register:
2229 	destroy_workqueue(nfit_wq);
2230 	for (i = 0; i < NUM_NFITS; i++)
2231 		if (instances[i])
2232 			platform_device_unregister(&instances[i]->pdev);
2233 	nfit_test_teardown();
2234 	for (i = 0; i < NUM_NFITS; i++)
2235 		if (instances[i])
2236 			put_device(&instances[i]->pdev.dev);
2237 
2238 	return rc;
2239 }
2240 
2241 static __exit void nfit_test_exit(void)
2242 {
2243 	int i;
2244 
2245 	flush_workqueue(nfit_wq);
2246 	destroy_workqueue(nfit_wq);
2247 	for (i = 0; i < NUM_NFITS; i++)
2248 		platform_device_unregister(&instances[i]->pdev);
2249 	platform_driver_unregister(&nfit_test_driver);
2250 	nfit_test_teardown();
2251 
2252 	for (i = 0; i < NUM_NFITS; i++)
2253 		put_device(&instances[i]->pdev.dev);
2254 	class_destroy(nfit_test_dimm);
2255 }
2256 
2257 module_init(nfit_test_init);
2258 module_exit(nfit_test_exit);
2259 MODULE_LICENSE("GPL v2");
2260 MODULE_AUTHOR("Intel Corporation");
2261