xref: /openbmc/linux/tools/testing/nvdimm/test/nfit.c (revision 92a76f6d)
1 /*
2  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/platform_device.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/libnvdimm.h>
17 #include <linux/vmalloc.h>
18 #include <linux/device.h>
19 #include <linux/module.h>
20 #include <linux/mutex.h>
21 #include <linux/ndctl.h>
22 #include <linux/sizes.h>
23 #include <linux/list.h>
24 #include <linux/slab.h>
25 #include <nfit.h>
26 #include <nd.h>
27 #include "nfit_test.h"
28 
29 /*
30  * Generate an NFIT table to describe the following topology:
31  *
32  * BUS0: Interleaved PMEM regions, aliased with BLK regions
33  *
34  *                     (a)                       (b)            DIMM   BLK-REGION
35  *           +----------+--------------+----------+---------+
36  * +------+  |  blk2.0  |     pm0.0    |  blk2.1  |  pm1.0  |    0      region2
37  * | imc0 +--+- - - - - region0 - - - -+----------+         +
38  * +--+---+  |  blk3.0  |     pm0.0    |  blk3.1  |  pm1.0  |    1      region3
39  *    |      +----------+--------------v----------v         v
40  * +--+---+                            |                    |
41  * | cpu0 |                                    region1
42  * +--+---+                            |                    |
43  *    |      +-------------------------^----------^         ^
44  * +--+---+  |                 blk4.0             |  pm1.0  |    2      region4
45  * | imc1 +--+-------------------------+----------+         +
46  * +------+  |                 blk5.0             |  pm1.0  |    3      region5
47  *           +-------------------------+----------+-+-------+
48  *
49  * +--+---+
50  * | cpu1 |
51  * +--+---+                   (Hotplug DIMM)
52  *    |      +----------------------------------------------+
53  * +--+---+  |                 blk6.0/pm7.0                 |    4      region6/7
54  * | imc0 +--+----------------------------------------------+
55  * +------+
56  *
57  *
58  * *) In this layout we have four dimms and two memory controllers in one
59  *    socket.  Each unique interface (BLK or PMEM) to DPA space
60  *    is identified by a region device with a dynamically assigned id.
61  *
62  *) The first portions of dimm0 and dimm1 are interleaved as REGION0.
63  *    A single PMEM namespace "pm0.0" is created using half of the
64  *    REGION0 SPA-range.  REGION0 spans dimm0 and dimm1.  PMEM namespaces
65  *    allocate from the bottom of a region.  The unallocated
66  *    portion of REGION0 aliases with REGION2 and REGION3.  That
67  *    unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
68  *    "blk3.0") starting at the base of each DIMM to offset (a) in those
69  *    DIMMs.  "pm0.0", "blk2.0" and "blk3.0" are free-form readable
70  *    names that can be assigned to a namespace.
71  *
72  * *) In the last portion of dimm0 and dimm1 we have an interleaved
73  *    SPA range, REGION1, that spans those two dimms as well as dimm2
74  *    and dimm3.  Some of REGION1 is allocated to a PMEM namespace named
75  *    "pm1.0"; the rest is reclaimed as 4 BLK namespaces (one for each
76  *    dimm in the interleave set): "blk2.1", "blk3.1", "blk4.0", and
77  *    "blk5.0".
78  *
79  *) The portions of dimm2 and dimm3 that do not participate in the
80  *    REGION1 interleaved SPA range (i.e. the DPA addresses below offset
81  *    (b)) are also included in the "blk4.0" and "blk5.0" namespaces.
82  *    Note, that BLK namespaces need not be contiguous in DPA-space, and
83  *    can consume aliased capacity from multiple interleave sets.
84  *
85  * BUS1: Legacy NVDIMM (single contiguous range)
86  *
87  *  region2
88  * +---------------------+
89  * |---------------------|
90  * ||       pm2.0       ||
91  * |---------------------|
92  * +---------------------+
93  *
94  * *) An NFIT table may describe a simple system-physical-address range
95  *    with no BLK aliasing.  This type of region may optionally
96  *    reference an NVDIMM.
97  */
98 enum {
99 	NUM_PM  = 3,
100 	NUM_DCR = 5,
101 	NUM_BDW = NUM_DCR,
102 	NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
103 	NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */ + 1 /* spa11 iset */,
104 	DIMM_SIZE = SZ_32M,
105 	LABEL_SIZE = SZ_128K,
106 	SPA0_SIZE = DIMM_SIZE,
107 	SPA1_SIZE = DIMM_SIZE*2,
108 	SPA2_SIZE = DIMM_SIZE,
109 	BDW_SIZE = 64 << 8,
110 	DCR_SIZE = 12,
111 	NUM_NFITS = 2, /* permit testing multiple NFITs per system */
112 };
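
/*
 * Quick arithmetic for the constants above (illustrative note, not part
 * of the original source): NUM_SPA is 3 PMEM + 5 DCR + 5 BDW = 13
 * address ranges, BDW_SIZE is 64 << 8 = 16K per block aperture, and
 * DCR_SIZE is the 12 bytes needed for the 8-byte command register plus
 * the 4-byte status register described in the control regions below.
 */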
113 
114 struct nfit_test_dcr {
115 	__le64 bdw_addr;
116 	__le32 bdw_status;
117 	__u8 aperature[BDW_SIZE];
118 };
119 
120 #define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
121 	(((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
122 	 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
123 
124 static u32 handle[NUM_DCR] = {
125 	[0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
126 	[1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
127 	[2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
128 	[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
129 	[4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
130 };
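
/*
 * Decoding sketch for the handle format above (illustrative, not part
 * of the original test): handle[2] == 0x100 decodes to imc 1 on socket
 * 0, and handle[4] == 0x1000 is the hotplug dimm on socket 1.  The
 * helper names below are assumptions, not existing kernel APIs.
 */
static inline unsigned int nfit_test_handle_to_socket(u32 device_handle)
{
	return (device_handle >> 12) & 0xf;
}

static inline unsigned int nfit_test_handle_to_imc(u32 device_handle)
{
	return (device_handle >> 8) & 0xf;
}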
131 
132 struct nfit_test {
133 	struct acpi_nfit_desc acpi_desc;
134 	struct platform_device pdev;
135 	struct list_head resources;
136 	void *nfit_buf;
137 	dma_addr_t nfit_dma;
138 	size_t nfit_size;
139 	int num_dcr;
140 	int num_pm;
141 	void **dimm;
142 	dma_addr_t *dimm_dma;
143 	void **flush;
144 	dma_addr_t *flush_dma;
145 	void **label;
146 	dma_addr_t *label_dma;
147 	void **spa_set;
148 	dma_addr_t *spa_set_dma;
149 	struct nfit_test_dcr **dcr;
150 	dma_addr_t *dcr_dma;
151 	int (*alloc)(struct nfit_test *t);
152 	void (*setup)(struct nfit_test *t);
153 	int setup_hotplug;
154 	struct ars_state {
155 		struct nd_cmd_ars_status *ars_status;
156 		unsigned long deadline;
157 		spinlock_t lock;
158 	} ars_state;
159 };
160 
161 static struct nfit_test *to_nfit_test(struct device *dev)
162 {
163 	struct platform_device *pdev = to_platform_device(dev);
164 
165 	return container_of(pdev, struct nfit_test, pdev);
166 }
167 
168 static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
169 		unsigned int buf_len)
170 {
171 	if (buf_len < sizeof(*nd_cmd))
172 		return -EINVAL;
173 
174 	nd_cmd->status = 0;
175 	nd_cmd->config_size = LABEL_SIZE;
176 	nd_cmd->max_xfer = SZ_4K;
177 
178 	return 0;
179 }
180 
181 static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
182 		*nd_cmd, unsigned int buf_len, void *label)
183 {
184 	unsigned int len, offset = nd_cmd->in_offset;
185 	int rc;
186 
187 	if (buf_len < sizeof(*nd_cmd))
188 		return -EINVAL;
189 	if (offset >= LABEL_SIZE)
190 		return -EINVAL;
191 	if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
192 		return -EINVAL;
193 
194 	nd_cmd->status = 0;
195 	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
196 	memcpy(nd_cmd->out_buf, label + offset, len);
197 	rc = buf_len - sizeof(*nd_cmd) - len;
198 
199 	return rc;
200 }
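
/*
 * Worked example of the return convention above (illustrative): the
 * return value is the unfilled tail of the output payload, so a read of
 * in_length = 512 label bytes with buf_len = sizeof(*nd_cmd) + 512
 * copies 512 bytes and returns 0, while an oversized buffer returns the
 * leftover byte count.
 */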
201 
202 static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
203 		unsigned int buf_len, void *label)
204 {
205 	unsigned int len, offset = nd_cmd->in_offset;
206 	u32 *status;
207 	int rc;
208 
209 	if (buf_len < sizeof(*nd_cmd))
210 		return -EINVAL;
211 	if (offset >= LABEL_SIZE)
212 		return -EINVAL;
213 	if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
214 		return -EINVAL;
215 
216 	status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
217 	*status = 0;
218 	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
219 	memcpy(label + offset, nd_cmd->in_buf, len);
220 	rc = buf_len - sizeof(*nd_cmd) - (len + 4);
221 
222 	return rc;
223 }
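
/*
 * Buffer layout assumed by the write path above (illustrative sketch):
 *
 *   [nd_cmd_set_config_hdr][in_length bytes of label data][u32 status]
 *
 * which is why the length check reserves an extra 4 bytes and the
 * status word is located at nd_cmd + sizeof(*nd_cmd) + in_length.
 */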
224 
225 #define NFIT_TEST_ARS_RECORDS 4
226 #define NFIT_TEST_CLEAR_ERR_UNIT 256
227 
228 static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
229 		unsigned int buf_len)
230 {
231 	if (buf_len < sizeof(*nd_cmd))
232 		return -EINVAL;
233 
234 	nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
235 		+ NFIT_TEST_ARS_RECORDS * sizeof(struct nd_ars_record);
236 	nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
237 	nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT;
238 
239 	return 0;
240 }
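
/*
 * Note (illustrative): the low 16 bits of 'status' carry the firmware
 * status code and the upper 16 bits the extended status, so the shift
 * above advertises both persistent and volatile scrub support while
 * reporting success.
 */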
241 
242 /*
243  * Initialize the ars_state to return an ars_result 1 second in the future with
244  * a 4K error range in the middle of the requested address range.
245  */
246 static void post_ars_status(struct ars_state *ars_state, u64 addr, u64 len)
247 {
248 	struct nd_cmd_ars_status *ars_status;
249 	struct nd_ars_record *ars_record;
250 
251 	ars_state->deadline = jiffies + 1*HZ;
252 	ars_status = ars_state->ars_status;
253 	ars_status->status = 0;
254 	ars_status->out_length = sizeof(struct nd_cmd_ars_status)
255 		+ sizeof(struct nd_ars_record);
256 	ars_status->address = addr;
257 	ars_status->length = len;
258 	ars_status->type = ND_ARS_PERSISTENT;
259 	ars_status->num_records = 1;
260 	ars_record = &ars_status->records[0];
261 	ars_record->handle = 0;
262 	ars_record->err_address = addr + len / 2;
263 	ars_record->length = SZ_4K;
264 }
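
/*
 * Worked example (illustrative): for the post_ars_status() call made
 * from nfit_test0_setup(), addr is t->spa_set_dma[0] and len is
 * SPA0_SIZE (32M), so the single record reports a 4K error at
 * addr + 16M once the one-second deadline has expired.
 */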
265 
266 static int nfit_test_cmd_ars_start(struct ars_state *ars_state,
267 		struct nd_cmd_ars_start *ars_start, unsigned int buf_len,
268 		int *cmd_rc)
269 {
270 	if (buf_len < sizeof(*ars_start))
271 		return -EINVAL;
272 
273 	spin_lock(&ars_state->lock);
274 	if (time_before(jiffies, ars_state->deadline)) {
275 		ars_start->status = NFIT_ARS_START_BUSY;
276 		*cmd_rc = -EBUSY;
277 	} else {
278 		ars_start->status = 0;
279 		ars_start->scrub_time = 1;
280 		post_ars_status(ars_state, ars_start->address,
281 				ars_start->length);
282 		*cmd_rc = 0;
283 	}
284 	spin_unlock(&ars_state->lock);
285 
286 	return 0;
287 }
288 
289 static int nfit_test_cmd_ars_status(struct ars_state *ars_state,
290 		struct nd_cmd_ars_status *ars_status, unsigned int buf_len,
291 		int *cmd_rc)
292 {
293 	if (buf_len < ars_state->ars_status->out_length)
294 		return -EINVAL;
295 
296 	spin_lock(&ars_state->lock);
297 	if (time_before(jiffies, ars_state->deadline)) {
298 		memset(ars_status, 0, buf_len);
299 		ars_status->status = NFIT_ARS_STATUS_BUSY;
300 		ars_status->out_length = sizeof(*ars_status);
301 		*cmd_rc = -EBUSY;
302 	} else {
303 		memcpy(ars_status, ars_state->ars_status,
304 				ars_state->ars_status->out_length);
305 		*cmd_rc = 0;
306 	}
307 	spin_unlock(&ars_state->lock);
308 	return 0;
309 }
310 
311 static int nfit_test_cmd_clear_error(struct nd_cmd_clear_error *clear_err,
312 		unsigned int buf_len, int *cmd_rc)
313 {
314 	const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1;
315 	if (buf_len < sizeof(*clear_err))
316 		return -EINVAL;
317 
318 	if ((clear_err->address & mask) || (clear_err->length & mask))
319 		return -EINVAL;
320 
321 	/*
322 	 * Report 'all clear' success for all commands even though a new
323 	 * scrub will find errors again.  This is enough to have the
324 	 * error removed from the 'badblocks' tracking in the pmem
325 	 * driver.
326 	 */
327 	clear_err->status = 0;
328 	clear_err->cleared = clear_err->length;
329 	*cmd_rc = 0;
330 	return 0;
331 }
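
/*
 * Example (illustrative): with NFIT_TEST_CLEAR_ERR_UNIT = 256 the mask
 * is 0xff, so clear-error requests must supply a 256-byte-aligned
 * address and length or they are rejected with -EINVAL.
 */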
332 
333 static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
334 		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
335 		unsigned int buf_len, int *cmd_rc)
336 {
337 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
338 	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
339 	int i, rc = 0, __cmd_rc;
340 
341 	if (!cmd_rc)
342 		cmd_rc = &__cmd_rc;
343 	*cmd_rc = 0;
344 
345 	if (nvdimm) {
346 		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
347 
348 		if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask))
349 			return -ENOTTY;
350 
351 		/* lookup label space for the given dimm */
352 		for (i = 0; i < ARRAY_SIZE(handle); i++)
353 			if (__to_nfit_memdev(nfit_mem)->device_handle ==
354 					handle[i])
355 				break;
356 		if (i >= ARRAY_SIZE(handle))
357 			return -ENXIO;
358 
359 		switch (cmd) {
360 		case ND_CMD_GET_CONFIG_SIZE:
361 			rc = nfit_test_cmd_get_config_size(buf, buf_len);
362 			break;
363 		case ND_CMD_GET_CONFIG_DATA:
364 			rc = nfit_test_cmd_get_config_data(buf, buf_len,
365 				t->label[i]);
366 			break;
367 		case ND_CMD_SET_CONFIG_DATA:
368 			rc = nfit_test_cmd_set_config_data(buf, buf_len,
369 				t->label[i]);
370 			break;
371 		default:
372 			return -ENOTTY;
373 		}
374 	} else {
375 		struct ars_state *ars_state = &t->ars_state;
376 
377 		if (!nd_desc || !test_bit(cmd, &nd_desc->dsm_mask))
378 			return -ENOTTY;
379 
380 		switch (cmd) {
381 		case ND_CMD_ARS_CAP:
382 			rc = nfit_test_cmd_ars_cap(buf, buf_len);
383 			break;
384 		case ND_CMD_ARS_START:
385 			rc = nfit_test_cmd_ars_start(ars_state, buf, buf_len,
386 					cmd_rc);
387 			break;
388 		case ND_CMD_ARS_STATUS:
389 			rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len,
390 					cmd_rc);
391 			break;
392 		case ND_CMD_CLEAR_ERROR:
393 			rc = nfit_test_cmd_clear_error(buf, buf_len, cmd_rc);
394 			break;
395 		default:
396 			return -ENOTTY;
397 		}
398 	}
399 
400 	return rc;
401 }
402 
403 static DEFINE_SPINLOCK(nfit_test_lock);
404 static struct nfit_test *instances[NUM_NFITS];
405 
406 static void release_nfit_res(void *data)
407 {
408 	struct nfit_test_resource *nfit_res = data;
409 	struct resource *res = nfit_res->res;
410 
411 	spin_lock(&nfit_test_lock);
412 	list_del(&nfit_res->list);
413 	spin_unlock(&nfit_test_lock);
414 
415 	if (is_vmalloc_addr(nfit_res->buf))
416 		vfree(nfit_res->buf);
417 	else
418 		dma_free_coherent(nfit_res->dev, resource_size(res),
419 				nfit_res->buf, res->start);
420 	kfree(res);
421 	kfree(nfit_res);
422 }
423 
424 static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
425 		void *buf)
426 {
427 	struct device *dev = &t->pdev.dev;
428 	struct resource *res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
429 	struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
430 			GFP_KERNEL);
431 	int rc;
432 
433 	if (!res || !buf || !nfit_res)
434 		goto err;
435 	rc = devm_add_action(dev, release_nfit_res, nfit_res);
436 	if (rc)
437 		goto err;
438 	INIT_LIST_HEAD(&nfit_res->list);
439 	memset(buf, 0, size);
440 	nfit_res->dev = dev;
441 	nfit_res->buf = buf;
442 	nfit_res->res = res;
443 	res->start = *dma;
444 	res->end = *dma + size - 1;
445 	res->name = "NFIT";
446 	spin_lock(&nfit_test_lock);
447 	list_add(&nfit_res->list, &t->resources);
448 	spin_unlock(&nfit_test_lock);
449 
450 	return nfit_res->buf;
451  err:
452 	if (buf && !is_vmalloc_addr(buf))
453 		dma_free_coherent(dev, size, buf, *dma);
454 	else if (buf)
455 		vfree(buf);
456 	kfree(res);
457 	kfree(nfit_res);
458 	return NULL;
459 }
460 
461 static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
462 {
463 	void *buf = vmalloc(size);
464 
465 	*dma = (unsigned long) buf;
466 	return __test_alloc(t, size, dma, buf);
467 }
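
/*
 * Note on the trick above (illustrative): for vmalloc()-backed buffers
 * the fake "physical" address returned in *dma is simply the kernel
 * virtual address of the buffer, which is why nfit_test_lookup() below
 * matches an address against either the resource range or the buf
 * pointer.
 */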
468 
469 static void *test_alloc_coherent(struct nfit_test *t, size_t size,
470 		dma_addr_t *dma)
471 {
472 	struct device *dev = &t->pdev.dev;
473 	void *buf = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
474 
475 	return __test_alloc(t, size, dma, buf);
476 }
477 
478 static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
479 {
480 	int i;
481 
482 	for (i = 0; i < ARRAY_SIZE(instances); i++) {
483 		struct nfit_test_resource *n, *nfit_res = NULL;
484 		struct nfit_test *t = instances[i];
485 
486 		if (!t)
487 			continue;
488 		spin_lock(&nfit_test_lock);
489 		list_for_each_entry(n, &t->resources, list) {
490 			if (addr >= n->res->start && (addr < n->res->start
491 						+ resource_size(n->res))) {
492 				nfit_res = n;
493 				break;
494 			} else if (addr >= (unsigned long) n->buf
495 					&& (addr < (unsigned long) n->buf
496 						+ resource_size(n->res))) {
497 				nfit_res = n;
498 				break;
499 			}
500 		}
501 		spin_unlock(&nfit_test_lock);
502 		if (nfit_res)
503 			return nfit_res;
504 	}
505 
506 	return NULL;
507 }
508 
509 static int ars_state_init(struct device *dev, struct ars_state *ars_state)
510 {
511 	ars_state->ars_status = devm_kzalloc(dev,
512 			sizeof(struct nd_cmd_ars_status)
513 			+ sizeof(struct nd_ars_record) * NFIT_TEST_ARS_RECORDS,
514 			GFP_KERNEL);
515 	if (!ars_state->ars_status)
516 		return -ENOMEM;
517 	spin_lock_init(&ars_state->lock);
518 	return 0;
519 }
520 
521 static int nfit_test0_alloc(struct nfit_test *t)
522 {
523 	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
524 			+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
525 			+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
526 			+ offsetof(struct acpi_nfit_control_region,
527 					window_size) * NUM_DCR
528 			+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
529 			+ sizeof(struct acpi_nfit_flush_address) * NUM_DCR;
530 	int i;
531 
532 	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
533 	if (!t->nfit_buf)
534 		return -ENOMEM;
535 	t->nfit_size = nfit_size;
536 
537 	t->spa_set[0] = test_alloc_coherent(t, SPA0_SIZE, &t->spa_set_dma[0]);
538 	if (!t->spa_set[0])
539 		return -ENOMEM;
540 
541 	t->spa_set[1] = test_alloc_coherent(t, SPA1_SIZE, &t->spa_set_dma[1]);
542 	if (!t->spa_set[1])
543 		return -ENOMEM;
544 
545 	t->spa_set[2] = test_alloc_coherent(t, SPA0_SIZE, &t->spa_set_dma[2]);
546 	if (!t->spa_set[2])
547 		return -ENOMEM;
548 
549 	for (i = 0; i < NUM_DCR; i++) {
550 		t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
551 		if (!t->dimm[i])
552 			return -ENOMEM;
553 
554 		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
555 		if (!t->label[i])
556 			return -ENOMEM;
557 		sprintf(t->label[i], "label%d", i);
558 
559 		t->flush[i] = test_alloc(t, 8, &t->flush_dma[i]);
560 		if (!t->flush[i])
561 			return -ENOMEM;
562 	}
563 
564 	for (i = 0; i < NUM_DCR; i++) {
565 		t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
566 		if (!t->dcr[i])
567 			return -ENOMEM;
568 	}
569 
570 	return ars_state_init(&t->pdev.dev, &t->ars_state);
571 }
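
/*
 * Note (illustrative): the nfit_size budget above must cover everything
 * nfit_test0_setup() emits, including the extra control-region, BDW,
 * SPA, memory-map and flush entries appended on the second pass when
 * setup_hotplug is set.
 */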
572 
573 static int nfit_test1_alloc(struct nfit_test *t)
574 {
575 	size_t nfit_size = sizeof(struct acpi_nfit_system_address)
576 		+ sizeof(struct acpi_nfit_memory_map)
577 		+ offsetof(struct acpi_nfit_control_region, window_size);
578 
579 	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
580 	if (!t->nfit_buf)
581 		return -ENOMEM;
582 	t->nfit_size = nfit_size;
583 
584 	t->spa_set[0] = test_alloc_coherent(t, SPA2_SIZE, &t->spa_set_dma[0]);
585 	if (!t->spa_set[0])
586 		return -ENOMEM;
587 
588 	return ars_state_init(&t->pdev.dev, &t->ars_state);
589 }
590 
591 static void nfit_test0_setup(struct nfit_test *t)
592 {
593 	struct acpi_nfit_desc *acpi_desc;
594 	struct acpi_nfit_memory_map *memdev;
595 	void *nfit_buf = t->nfit_buf;
596 	struct acpi_nfit_system_address *spa;
597 	struct acpi_nfit_control_region *dcr;
598 	struct acpi_nfit_data_region *bdw;
599 	struct acpi_nfit_flush_address *flush;
600 	unsigned int offset;
601 
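	/*
	 * Overall layout written below (illustrative summary): 10 SPA
	 * entries, 14 memory-map entries, 4 BLK control regions, 4 PMEM
	 * control regions, 4 block-data-windows and 4 flush tables, with
	 * one more of each (plus 3 SPAs and 3 memory-maps) appended when
	 * setup_hotplug is set.
	 */
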
602 	/*
603 	 * spa0 (interleave first half of dimm0 and dimm1, note storage
604 	 * does not actually alias the related block-data-window
605 	 * regions)
606 	 */
607 	spa = nfit_buf;
608 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
609 	spa->header.length = sizeof(*spa);
610 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
611 	spa->range_index = 0+1;
612 	spa->address = t->spa_set_dma[0];
613 	spa->length = SPA0_SIZE;
614 
615 	/*
616 	 * spa1 (interleave last half of the 4 DIMMS, note storage
617 	 * does not actually alias the related block-data-window
618 	 * regions)
619 	 */
620 	spa = nfit_buf + sizeof(*spa);
621 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
622 	spa->header.length = sizeof(*spa);
623 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
624 	spa->range_index = 1+1;
625 	spa->address = t->spa_set_dma[1];
626 	spa->length = SPA1_SIZE;
627 
628 	/* spa2 (dcr0) dimm0 */
629 	spa = nfit_buf + sizeof(*spa) * 2;
630 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
631 	spa->header.length = sizeof(*spa);
632 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
633 	spa->range_index = 2+1;
634 	spa->address = t->dcr_dma[0];
635 	spa->length = DCR_SIZE;
636 
637 	/* spa3 (dcr1) dimm1 */
638 	spa = nfit_buf + sizeof(*spa) * 3;
639 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
640 	spa->header.length = sizeof(*spa);
641 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
642 	spa->range_index = 3+1;
643 	spa->address = t->dcr_dma[1];
644 	spa->length = DCR_SIZE;
645 
646 	/* spa4 (dcr2) dimm2 */
647 	spa = nfit_buf + sizeof(*spa) * 4;
648 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
649 	spa->header.length = sizeof(*spa);
650 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
651 	spa->range_index = 4+1;
652 	spa->address = t->dcr_dma[2];
653 	spa->length = DCR_SIZE;
654 
655 	/* spa5 (dcr3) dimm3 */
656 	spa = nfit_buf + sizeof(*spa) * 5;
657 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
658 	spa->header.length = sizeof(*spa);
659 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
660 	spa->range_index = 5+1;
661 	spa->address = t->dcr_dma[3];
662 	spa->length = DCR_SIZE;
663 
664 	/* spa6 (bdw for dcr0) dimm0 */
665 	spa = nfit_buf + sizeof(*spa) * 6;
666 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
667 	spa->header.length = sizeof(*spa);
668 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
669 	spa->range_index = 6+1;
670 	spa->address = t->dimm_dma[0];
671 	spa->length = DIMM_SIZE;
672 
673 	/* spa7 (bdw for dcr1) dimm1 */
674 	spa = nfit_buf + sizeof(*spa) * 7;
675 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
676 	spa->header.length = sizeof(*spa);
677 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
678 	spa->range_index = 7+1;
679 	spa->address = t->dimm_dma[1];
680 	spa->length = DIMM_SIZE;
681 
682 	/* spa8 (bdw for dcr2) dimm2 */
683 	spa = nfit_buf + sizeof(*spa) * 8;
684 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
685 	spa->header.length = sizeof(*spa);
686 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
687 	spa->range_index = 8+1;
688 	spa->address = t->dimm_dma[2];
689 	spa->length = DIMM_SIZE;
690 
691 	/* spa9 (bdw for dcr3) dimm3 */
692 	spa = nfit_buf + sizeof(*spa) * 9;
693 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
694 	spa->header.length = sizeof(*spa);
695 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
696 	spa->range_index = 9+1;
697 	spa->address = t->dimm_dma[3];
698 	spa->length = DIMM_SIZE;
699 
700 	offset = sizeof(*spa) * 10;
701 	/* mem-region0 (spa0, dimm0) */
702 	memdev = nfit_buf + offset;
703 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
704 	memdev->header.length = sizeof(*memdev);
705 	memdev->device_handle = handle[0];
706 	memdev->physical_id = 0;
707 	memdev->region_id = 0;
708 	memdev->range_index = 0+1;
709 	memdev->region_index = 4+1;
710 	memdev->region_size = SPA0_SIZE/2;
711 	memdev->region_offset = t->spa_set_dma[0];
712 	memdev->address = 0;
713 	memdev->interleave_index = 0;
714 	memdev->interleave_ways = 2;
715 
716 	/* mem-region1 (spa0, dimm1) */
717 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map);
718 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
719 	memdev->header.length = sizeof(*memdev);
720 	memdev->device_handle = handle[1];
721 	memdev->physical_id = 1;
722 	memdev->region_id = 0;
723 	memdev->range_index = 0+1;
724 	memdev->region_index = 5+1;
725 	memdev->region_size = SPA0_SIZE/2;
726 	memdev->region_offset = t->spa_set_dma[0] + SPA0_SIZE/2;
727 	memdev->address = 0;
728 	memdev->interleave_index = 0;
729 	memdev->interleave_ways = 2;
730 
731 	/* mem-region2 (spa1, dimm0) */
732 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
733 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
734 	memdev->header.length = sizeof(*memdev);
735 	memdev->device_handle = handle[0];
736 	memdev->physical_id = 0;
737 	memdev->region_id = 1;
738 	memdev->range_index = 1+1;
739 	memdev->region_index = 4+1;
740 	memdev->region_size = SPA1_SIZE/4;
741 	memdev->region_offset = t->spa_set_dma[1];
742 	memdev->address = SPA0_SIZE/2;
743 	memdev->interleave_index = 0;
744 	memdev->interleave_ways = 4;
745 
746 	/* mem-region3 (spa1, dimm1) */
747 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
748 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
749 	memdev->header.length = sizeof(*memdev);
750 	memdev->device_handle = handle[1];
751 	memdev->physical_id = 1;
752 	memdev->region_id = 1;
753 	memdev->range_index = 1+1;
754 	memdev->region_index = 5+1;
755 	memdev->region_size = SPA1_SIZE/4;
756 	memdev->region_offset = t->spa_set_dma[1] + SPA1_SIZE/4;
757 	memdev->address = SPA0_SIZE/2;
758 	memdev->interleave_index = 0;
759 	memdev->interleave_ways = 4;
760 
761 	/* mem-region4 (spa1, dimm2) */
762 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4;
763 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
764 	memdev->header.length = sizeof(*memdev);
765 	memdev->device_handle = handle[2];
766 	memdev->physical_id = 2;
767 	memdev->region_id = 0;
768 	memdev->range_index = 1+1;
769 	memdev->region_index = 6+1;
770 	memdev->region_size = SPA1_SIZE/4;
771 	memdev->region_offset = t->spa_set_dma[1] + 2*SPA1_SIZE/4;
772 	memdev->address = SPA0_SIZE/2;
773 	memdev->interleave_index = 0;
774 	memdev->interleave_ways = 4;
775 
776 	/* mem-region5 (spa1, dimm3) */
777 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
778 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
779 	memdev->header.length = sizeof(*memdev);
780 	memdev->device_handle = handle[3];
781 	memdev->physical_id = 3;
782 	memdev->region_id = 0;
783 	memdev->range_index = 1+1;
784 	memdev->region_index = 7+1;
785 	memdev->region_size = SPA1_SIZE/4;
786 	memdev->region_offset = t->spa_set_dma[1] + 3*SPA1_SIZE/4;
787 	memdev->address = SPA0_SIZE/2;
788 	memdev->interleave_index = 0;
789 	memdev->interleave_ways = 4;
790 
791 	/* mem-region6 (spa/dcr0, dimm0) */
792 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6;
793 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
794 	memdev->header.length = sizeof(*memdev);
795 	memdev->device_handle = handle[0];
796 	memdev->physical_id = 0;
797 	memdev->region_id = 0;
798 	memdev->range_index = 2+1;
799 	memdev->region_index = 0+1;
800 	memdev->region_size = 0;
801 	memdev->region_offset = 0;
802 	memdev->address = 0;
803 	memdev->interleave_index = 0;
804 	memdev->interleave_ways = 1;
805 
806 	/* mem-region7 (spa/dcr1, dimm1) */
807 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7;
808 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
809 	memdev->header.length = sizeof(*memdev);
810 	memdev->device_handle = handle[1];
811 	memdev->physical_id = 1;
812 	memdev->region_id = 0;
813 	memdev->range_index = 3+1;
814 	memdev->region_index = 1+1;
815 	memdev->region_size = 0;
816 	memdev->region_offset = 0;
817 	memdev->address = 0;
818 	memdev->interleave_index = 0;
819 	memdev->interleave_ways = 1;
820 
821 	/* mem-region8 (spa/dcr2, dimm2) */
822 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8;
823 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
824 	memdev->header.length = sizeof(*memdev);
825 	memdev->device_handle = handle[2];
826 	memdev->physical_id = 2;
827 	memdev->region_id = 0;
828 	memdev->range_index = 4+1;
829 	memdev->region_index = 2+1;
830 	memdev->region_size = 0;
831 	memdev->region_offset = 0;
832 	memdev->address = 0;
833 	memdev->interleave_index = 0;
834 	memdev->interleave_ways = 1;
835 
836 	/* mem-region9 (spa/dcr3, dimm3) */
837 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9;
838 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
839 	memdev->header.length = sizeof(*memdev);
840 	memdev->device_handle = handle[3];
841 	memdev->physical_id = 3;
842 	memdev->region_id = 0;
843 	memdev->range_index = 5+1;
844 	memdev->region_index = 3+1;
845 	memdev->region_size = 0;
846 	memdev->region_offset = 0;
847 	memdev->address = 0;
848 	memdev->interleave_index = 0;
849 	memdev->interleave_ways = 1;
850 
851 	/* mem-region10 (spa/bdw0, dimm0) */
852 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10;
853 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
854 	memdev->header.length = sizeof(*memdev);
855 	memdev->device_handle = handle[0];
856 	memdev->physical_id = 0;
857 	memdev->region_id = 0;
858 	memdev->range_index = 6+1;
859 	memdev->region_index = 0+1;
860 	memdev->region_size = 0;
861 	memdev->region_offset = 0;
862 	memdev->address = 0;
863 	memdev->interleave_index = 0;
864 	memdev->interleave_ways = 1;
865 
866 	/* mem-region11 (spa/bdw1, dimm1) */
867 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11;
868 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
869 	memdev->header.length = sizeof(*memdev);
870 	memdev->device_handle = handle[1];
871 	memdev->physical_id = 1;
872 	memdev->region_id = 0;
873 	memdev->range_index = 7+1;
874 	memdev->region_index = 1+1;
875 	memdev->region_size = 0;
876 	memdev->region_offset = 0;
877 	memdev->address = 0;
878 	memdev->interleave_index = 0;
879 	memdev->interleave_ways = 1;
880 
881 	/* mem-region12 (spa/bdw2, dimm2) */
882 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12;
883 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
884 	memdev->header.length = sizeof(*memdev);
885 	memdev->device_handle = handle[2];
886 	memdev->physical_id = 2;
887 	memdev->region_id = 0;
888 	memdev->range_index = 8+1;
889 	memdev->region_index = 2+1;
890 	memdev->region_size = 0;
891 	memdev->region_offset = 0;
892 	memdev->address = 0;
893 	memdev->interleave_index = 0;
894 	memdev->interleave_ways = 1;
895 
896 	/* mem-region13 (spa/bdw3, dimm3) */
897 	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13;
898 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
899 	memdev->header.length = sizeof(*memdev);
900 	memdev->device_handle = handle[3];
901 	memdev->physical_id = 3;
902 	memdev->region_id = 0;
903 	memdev->range_index = 9+1;
904 	memdev->region_index = 3+1;
905 	memdev->region_size = 0;
906 	memdev->region_offset = 0;
907 	memdev->address = 0;
908 	memdev->interleave_index = 0;
909 	memdev->interleave_ways = 1;
910 
911 	offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
912 	/* dcr-descriptor0: blk */
913 	dcr = nfit_buf + offset;
914 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
915 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
916 	dcr->region_index = 0+1;
917 	dcr->vendor_id = 0xabcd;
918 	dcr->device_id = 0;
919 	dcr->revision_id = 1;
920 	dcr->serial_number = ~handle[0];
921 	dcr->code = NFIT_FIC_BLK;
922 	dcr->windows = 1;
923 	dcr->window_size = DCR_SIZE;
924 	dcr->command_offset = 0;
925 	dcr->command_size = 8;
926 	dcr->status_offset = 8;
927 	dcr->status_size = 4;
928 
929 	/* dcr-descriptor1: blk */
930 	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region);
931 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
932 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
933 	dcr->region_index = 1+1;
934 	dcr->vendor_id = 0xabcd;
935 	dcr->device_id = 0;
936 	dcr->revision_id = 1;
937 	dcr->serial_number = ~handle[1];
938 	dcr->code = NFIT_FIC_BLK;
939 	dcr->windows = 1;
940 	dcr->window_size = DCR_SIZE;
941 	dcr->command_offset = 0;
942 	dcr->command_size = 8;
943 	dcr->status_offset = 8;
944 	dcr->status_size = 4;
945 
946 	/* dcr-descriptor2: blk */
947 	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2;
948 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
949 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
950 	dcr->region_index = 2+1;
951 	dcr->vendor_id = 0xabcd;
952 	dcr->device_id = 0;
953 	dcr->revision_id = 1;
954 	dcr->serial_number = ~handle[2];
955 	dcr->code = NFIT_FIC_BLK;
956 	dcr->windows = 1;
957 	dcr->window_size = DCR_SIZE;
958 	dcr->command_offset = 0;
959 	dcr->command_size = 8;
960 	dcr->status_offset = 8;
961 	dcr->status_size = 4;
962 
963 	/* dcr-descriptor3: blk */
964 	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3;
965 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
966 	dcr->header.length = sizeof(struct acpi_nfit_control_region);
967 	dcr->region_index = 3+1;
968 	dcr->vendor_id = 0xabcd;
969 	dcr->device_id = 0;
970 	dcr->revision_id = 1;
971 	dcr->serial_number = ~handle[3];
972 	dcr->code = NFIT_FIC_BLK;
973 	dcr->windows = 1;
974 	dcr->window_size = DCR_SIZE;
975 	dcr->command_offset = 0;
976 	dcr->command_size = 8;
977 	dcr->status_offset = 8;
978 	dcr->status_size = 4;
979 
980 	offset = offset + sizeof(struct acpi_nfit_control_region) * 4;
981 	/* dcr-descriptor0: pmem */
982 	dcr = nfit_buf + offset;
983 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
984 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
985 			window_size);
986 	dcr->region_index = 4+1;
987 	dcr->vendor_id = 0xabcd;
988 	dcr->device_id = 0;
989 	dcr->revision_id = 1;
990 	dcr->serial_number = ~handle[0];
991 	dcr->code = NFIT_FIC_BYTEN;
992 	dcr->windows = 0;
993 
994 	/* dcr-descriptor1: pmem */
995 	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
996 			window_size);
997 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
998 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
999 			window_size);
1000 	dcr->region_index = 5+1;
1001 	dcr->vendor_id = 0xabcd;
1002 	dcr->device_id = 0;
1003 	dcr->revision_id = 1;
1004 	dcr->serial_number = ~handle[1];
1005 	dcr->code = NFIT_FIC_BYTEN;
1006 	dcr->windows = 0;
1007 
1008 	/* dcr-descriptor2: pmem */
1009 	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1010 			window_size) * 2;
1011 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1012 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1013 			window_size);
1014 	dcr->region_index = 6+1;
1015 	dcr->vendor_id = 0xabcd;
1016 	dcr->device_id = 0;
1017 	dcr->revision_id = 1;
1018 	dcr->serial_number = ~handle[2];
1019 	dcr->code = NFIT_FIC_BYTEN;
1020 	dcr->windows = 0;
1021 
1022 	/* dcr-descriptor3: pmem */
1023 	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1024 			window_size) * 3;
1025 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1026 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1027 			window_size);
1028 	dcr->region_index = 7+1;
1029 	dcr->vendor_id = 0xabcd;
1030 	dcr->device_id = 0;
1031 	dcr->revision_id = 1;
1032 	dcr->serial_number = ~handle[3];
1033 	dcr->code = NFIT_FIC_BYTEN;
1034 	dcr->windows = 0;
1035 
1036 	offset = offset + offsetof(struct acpi_nfit_control_region,
1037 			window_size) * 4;
1038 	/* bdw0 (spa/dcr0, dimm0) */
1039 	bdw = nfit_buf + offset;
1040 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1041 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1042 	bdw->region_index = 0+1;
1043 	bdw->windows = 1;
1044 	bdw->offset = 0;
1045 	bdw->size = BDW_SIZE;
1046 	bdw->capacity = DIMM_SIZE;
1047 	bdw->start_address = 0;
1048 
1049 	/* bdw1 (spa/dcr1, dimm1) */
1050 	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region);
1051 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1052 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1053 	bdw->region_index = 1+1;
1054 	bdw->windows = 1;
1055 	bdw->offset = 0;
1056 	bdw->size = BDW_SIZE;
1057 	bdw->capacity = DIMM_SIZE;
1058 	bdw->start_address = 0;
1059 
1060 	/* bdw2 (spa/dcr2, dimm2) */
1061 	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2;
1062 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1063 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1064 	bdw->region_index = 2+1;
1065 	bdw->windows = 1;
1066 	bdw->offset = 0;
1067 	bdw->size = BDW_SIZE;
1068 	bdw->capacity = DIMM_SIZE;
1069 	bdw->start_address = 0;
1070 
1071 	/* bdw3 (spa/dcr3, dimm3) */
1072 	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3;
1073 	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1074 	bdw->header.length = sizeof(struct acpi_nfit_data_region);
1075 	bdw->region_index = 3+1;
1076 	bdw->windows = 1;
1077 	bdw->offset = 0;
1078 	bdw->size = BDW_SIZE;
1079 	bdw->capacity = DIMM_SIZE;
1080 	bdw->start_address = 0;
1081 
1082 	offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
1083 	/* flush0 (dimm0) */
1084 	flush = nfit_buf + offset;
1085 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1086 	flush->header.length = sizeof(struct acpi_nfit_flush_address);
1087 	flush->device_handle = handle[0];
1088 	flush->hint_count = 1;
1089 	flush->hint_address[0] = t->flush_dma[0];
1090 
1091 	/* flush1 (dimm1) */
1092 	flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 1;
1093 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1094 	flush->header.length = sizeof(struct acpi_nfit_flush_address);
1095 	flush->device_handle = handle[1];
1096 	flush->hint_count = 1;
1097 	flush->hint_address[0] = t->flush_dma[1];
1098 
1099 	/* flush2 (dimm2) */
1100 	flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 2;
1101 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1102 	flush->header.length = sizeof(struct acpi_nfit_flush_address);
1103 	flush->device_handle = handle[2];
1104 	flush->hint_count = 1;
1105 	flush->hint_address[0] = t->flush_dma[2];
1106 
1107 	/* flush3 (dimm3) */
1108 	flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 3;
1109 	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1110 	flush->header.length = sizeof(struct acpi_nfit_flush_address);
1111 	flush->device_handle = handle[3];
1112 	flush->hint_count = 1;
1113 	flush->hint_address[0] = t->flush_dma[3];
1114 
1115 	if (t->setup_hotplug) {
1116 		offset = offset + sizeof(struct acpi_nfit_flush_address) * 4;
1117 		/* dcr-descriptor4: blk */
1118 		dcr = nfit_buf + offset;
1119 		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1120 		dcr->header.length = sizeof(struct acpi_nfit_control_region);
1121 		dcr->region_index = 8+1;
1122 		dcr->vendor_id = 0xabcd;
1123 		dcr->device_id = 0;
1124 		dcr->revision_id = 1;
1125 		dcr->serial_number = ~handle[4];
1126 		dcr->code = NFIT_FIC_BLK;
1127 		dcr->windows = 1;
1128 		dcr->window_size = DCR_SIZE;
1129 		dcr->command_offset = 0;
1130 		dcr->command_size = 8;
1131 		dcr->status_offset = 8;
1132 		dcr->status_size = 4;
1133 
1134 		offset = offset + sizeof(struct acpi_nfit_control_region);
1135 		/* dcr-descriptor4: pmem */
1136 		dcr = nfit_buf + offset;
1137 		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1138 		dcr->header.length = offsetof(struct acpi_nfit_control_region,
1139 				window_size);
1140 		dcr->region_index = 9+1;
1141 		dcr->vendor_id = 0xabcd;
1142 		dcr->device_id = 0;
1143 		dcr->revision_id = 1;
1144 		dcr->serial_number = ~handle[4];
1145 		dcr->code = NFIT_FIC_BYTEN;
1146 		dcr->windows = 0;
1147 
1148 		offset = offset + offsetof(struct acpi_nfit_control_region,
1149 				window_size);
1150 		/* bdw4 (spa/dcr4, dimm4) */
1151 		bdw = nfit_buf + offset;
1152 		bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1153 		bdw->header.length = sizeof(struct acpi_nfit_data_region);
1154 		bdw->region_index = 8+1;
1155 		bdw->windows = 1;
1156 		bdw->offset = 0;
1157 		bdw->size = BDW_SIZE;
1158 		bdw->capacity = DIMM_SIZE;
1159 		bdw->start_address = 0;
1160 
1161 		offset = offset + sizeof(struct acpi_nfit_data_region);
1162 		/* spa10 (dcr4) dimm4 */
1163 		spa = nfit_buf + offset;
1164 		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1165 		spa->header.length = sizeof(*spa);
1166 		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1167 		spa->range_index = 10+1;
1168 		spa->address = t->dcr_dma[4];
1169 		spa->length = DCR_SIZE;
1170 
1171 		/*
1172 		 * spa11 (single-dimm interleave for hotplug, note storage
1173 		 * does not actually alias the related block-data-window
1174 		 * regions)
1175 		 */
1176 		spa = nfit_buf + offset + sizeof(*spa);
1177 		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1178 		spa->header.length = sizeof(*spa);
1179 		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1180 		spa->range_index = 11+1;
1181 		spa->address = t->spa_set_dma[2];
1182 		spa->length = SPA0_SIZE;
1183 
1184 		/* spa12 (bdw for dcr4) dimm4 */
1185 		spa = nfit_buf + offset + sizeof(*spa) * 2;
1186 		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1187 		spa->header.length = sizeof(*spa);
1188 		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1189 		spa->range_index = 12+1;
1190 		spa->address = t->dimm_dma[4];
1191 		spa->length = DIMM_SIZE;
1192 
1193 		offset = offset + sizeof(*spa) * 3;
1194 		/* mem-region14 (spa/dcr4, dimm4) */
1195 		memdev = nfit_buf + offset;
1196 		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1197 		memdev->header.length = sizeof(*memdev);
1198 		memdev->device_handle = handle[4];
1199 		memdev->physical_id = 4;
1200 		memdev->region_id = 0;
1201 		memdev->range_index = 10+1;
1202 		memdev->region_index = 8+1;
1203 		memdev->region_size = 0;
1204 		memdev->region_offset = 0;
1205 		memdev->address = 0;
1206 		memdev->interleave_index = 0;
1207 		memdev->interleave_ways = 1;
1208 
1209 		/* mem-region15 (spa0, dimm4) */
1210 		memdev = nfit_buf + offset +
1211 				sizeof(struct acpi_nfit_memory_map);
1212 		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1213 		memdev->header.length = sizeof(*memdev);
1214 		memdev->device_handle = handle[4];
1215 		memdev->physical_id = 4;
1216 		memdev->region_id = 0;
1217 		memdev->range_index = 11+1;
1218 		memdev->region_index = 9+1;
1219 		memdev->region_size = SPA0_SIZE;
1220 		memdev->region_offset = t->spa_set_dma[2];
1221 		memdev->address = 0;
1222 		memdev->interleave_index = 0;
1223 		memdev->interleave_ways = 1;
1224 
1225 		/* mem-region16 (spa/bdw4, dimm4) */
1226 		memdev = nfit_buf + offset +
1227 				sizeof(struct acpi_nfit_memory_map) * 2;
1228 		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1229 		memdev->header.length = sizeof(*memdev);
1230 		memdev->device_handle = handle[4];
1231 		memdev->physical_id = 4;
1232 		memdev->region_id = 0;
1233 		memdev->range_index = 12+1;
1234 		memdev->region_index = 8+1;
1235 		memdev->region_size = 0;
1236 		memdev->region_offset = 0;
1237 		memdev->address = 0;
1238 		memdev->interleave_index = 0;
1239 		memdev->interleave_ways = 1;
1240 
1241 		offset = offset + sizeof(struct acpi_nfit_memory_map) * 3;
1242 		/* flush4 (dimm4) */
1243 		flush = nfit_buf + offset;
1244 		flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1245 		flush->header.length = sizeof(struct acpi_nfit_flush_address);
1246 		flush->device_handle = handle[4];
1247 		flush->hint_count = 1;
1248 		flush->hint_address[0] = t->flush_dma[4];
1249 	}
1250 
1251 	post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA0_SIZE);
1252 
1253 	acpi_desc = &t->acpi_desc;
1254 	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_dsm_force_en);
1255 	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
1256 	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
1257 	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_dsm_force_en);
1258 	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_dsm_force_en);
1259 	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_dsm_force_en);
1260 	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_dsm_force_en);
1261 }
1262 
1263 static void nfit_test1_setup(struct nfit_test *t)
1264 {
1265 	size_t offset;
1266 	void *nfit_buf = t->nfit_buf;
1267 	struct acpi_nfit_memory_map *memdev;
1268 	struct acpi_nfit_control_region *dcr;
1269 	struct acpi_nfit_system_address *spa;
1270 	struct acpi_nfit_desc *acpi_desc;
1271 
1272 	offset = 0;
1273 	/* spa0 (flat range with no bdw aliasing) */
1274 	spa = nfit_buf + offset;
1275 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1276 	spa->header.length = sizeof(*spa);
1277 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1278 	spa->range_index = 0+1;
1279 	spa->address = t->spa_set_dma[0];
1280 	spa->length = SPA2_SIZE;
1281 
1282 	offset += sizeof(*spa);
1283 	/* mem-region0 (spa0, dimm0) */
1284 	memdev = nfit_buf + offset;
1285 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1286 	memdev->header.length = sizeof(*memdev);
1287 	memdev->device_handle = 0;
1288 	memdev->physical_id = 0;
1289 	memdev->region_id = 0;
1290 	memdev->range_index = 0+1;
1291 	memdev->region_index = 0+1;
1292 	memdev->region_size = SPA2_SIZE;
1293 	memdev->region_offset = 0;
1294 	memdev->address = 0;
1295 	memdev->interleave_index = 0;
1296 	memdev->interleave_ways = 1;
1297 	memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
1298 		| ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
1299 		| ACPI_NFIT_MEM_NOT_ARMED;
1300 
1301 	offset += sizeof(*memdev);
1302 	/* dcr-descriptor0 */
1303 	dcr = nfit_buf + offset;
1304 	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1305 	dcr->header.length = offsetof(struct acpi_nfit_control_region,
1306 			window_size);
1307 	dcr->region_index = 0+1;
1308 	dcr->vendor_id = 0xabcd;
1309 	dcr->device_id = 0;
1310 	dcr->revision_id = 1;
1311 	dcr->serial_number = ~0;
1312 	dcr->code = NFIT_FIC_BYTE;
1313 	dcr->windows = 0;
1314 
1315 	post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA2_SIZE);
1316 
1317 	acpi_desc = &t->acpi_desc;
1318 	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_dsm_force_en);
1319 	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_dsm_force_en);
1320 	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_dsm_force_en);
1321 	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_dsm_force_en);
1322 }
1323 
1324 static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
1325 		void *iobuf, u64 len, int rw)
1326 {
1327 	struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
1328 	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
1329 	struct nd_region *nd_region = &ndbr->nd_region;
1330 	unsigned int lane;
1331 
1332 	lane = nd_region_acquire_lane(nd_region);
1333 	if (rw)
1334 		memcpy(mmio->addr.base + dpa, iobuf, len);
1335 	else {
1336 		memcpy(iobuf, mmio->addr.base + dpa, len);
1337 
1338 		/* give us some coverage of the mmio_flush_range() API */
1339 		mmio_flush_range(mmio->addr.base + dpa, len);
1340 	}
1341 	nd_region_release_lane(nd_region, lane);
1342 
1343 	return 0;
1344 }
1345 
1346 static int nfit_test_probe(struct platform_device *pdev)
1347 {
1348 	struct nvdimm_bus_descriptor *nd_desc;
1349 	struct acpi_nfit_desc *acpi_desc;
1350 	struct device *dev = &pdev->dev;
1351 	struct nfit_test *nfit_test;
1352 	int rc;
1353 
1354 	nfit_test = to_nfit_test(&pdev->dev);
1355 
1356 	/* common alloc */
1357 	if (nfit_test->num_dcr) {
1358 		int num = nfit_test->num_dcr;
1359 
1360 		nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
1361 				GFP_KERNEL);
1362 		nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
1363 				GFP_KERNEL);
1364 		nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
1365 				GFP_KERNEL);
1366 		nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
1367 				GFP_KERNEL);
1368 		nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
1369 				GFP_KERNEL);
1370 		nfit_test->label_dma = devm_kcalloc(dev, num,
1371 				sizeof(dma_addr_t), GFP_KERNEL);
1372 		nfit_test->dcr = devm_kcalloc(dev, num,
1373 				sizeof(struct nfit_test_dcr *), GFP_KERNEL);
1374 		nfit_test->dcr_dma = devm_kcalloc(dev, num,
1375 				sizeof(dma_addr_t), GFP_KERNEL);
1376 		if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
1377 				&& nfit_test->label_dma && nfit_test->dcr
1378 				&& nfit_test->dcr_dma && nfit_test->flush
1379 				&& nfit_test->flush_dma)
1380 			/* pass */;
1381 		else
1382 			return -ENOMEM;
1383 	}
1384 
1385 	if (nfit_test->num_pm) {
1386 		int num = nfit_test->num_pm;
1387 
1388 		nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
1389 				GFP_KERNEL);
1390 		nfit_test->spa_set_dma = devm_kcalloc(dev, num,
1391 				sizeof(dma_addr_t), GFP_KERNEL);
1392 		if (nfit_test->spa_set && nfit_test->spa_set_dma)
1393 			/* pass */;
1394 		else
1395 			return -ENOMEM;
1396 	}
1397 
1398 	/* per-nfit specific alloc */
1399 	if (nfit_test->alloc(nfit_test))
1400 		return -ENOMEM;
1401 
1402 	nfit_test->setup(nfit_test);
1403 	acpi_desc = &nfit_test->acpi_desc;
1404 	acpi_nfit_desc_init(acpi_desc, &pdev->dev);
1405 	acpi_desc->nfit = nfit_test->nfit_buf;
1406 	acpi_desc->blk_do_io = nfit_test_blk_do_io;
1407 	nd_desc = &acpi_desc->nd_desc;
1408 	nd_desc->provider_name = NULL;
1409 	nd_desc->ndctl = nfit_test_ctl;
1410 	acpi_desc->nvdimm_bus = nvdimm_bus_register(&pdev->dev, nd_desc);
1411 	if (!acpi_desc->nvdimm_bus)
1412 		return -ENXIO;
1413 
1414 	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_size);
1415 	if (rc) {
1416 		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
1417 		return rc;
1418 	}
1419 
1420 	if (nfit_test->setup != nfit_test0_setup)
1421 		return 0;
1422 
1423 	nfit_test->setup_hotplug = 1;
1424 	nfit_test->setup(nfit_test);
1425 
1426 	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_size);
1427 	if (rc) {
1428 		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
1429 		return rc;
1430 	}
1431 
1432 	return 0;
1433 }
1434 
1435 static int nfit_test_remove(struct platform_device *pdev)
1436 {
1437 	struct nfit_test *nfit_test = to_nfit_test(&pdev->dev);
1438 	struct acpi_nfit_desc *acpi_desc = &nfit_test->acpi_desc;
1439 
1440 	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
1441 
1442 	return 0;
1443 }
1444 
1445 static void nfit_test_release(struct device *dev)
1446 {
1447 	struct nfit_test *nfit_test = to_nfit_test(dev);
1448 
1449 	kfree(nfit_test);
1450 }
1451 
1452 static const struct platform_device_id nfit_test_id[] = {
1453 	{ KBUILD_MODNAME },
1454 	{ },
1455 };
1456 
1457 static struct platform_driver nfit_test_driver = {
1458 	.probe = nfit_test_probe,
1459 	.remove = nfit_test_remove,
1460 	.driver = {
1461 		.name = KBUILD_MODNAME,
1462 	},
1463 	.id_table = nfit_test_id,
1464 };
1465 
1466 #ifdef CONFIG_CMA_SIZE_MBYTES
1467 #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
1468 #else
1469 #define CMA_SIZE_MBYTES 0
1470 #endif
1471 
1472 static __init int nfit_test_init(void)
1473 {
1474 	int rc, i;
1475 
1476 	nfit_test_setup(nfit_test_lookup);
1477 
1478 	for (i = 0; i < NUM_NFITS; i++) {
1479 		struct nfit_test *nfit_test;
1480 		struct platform_device *pdev;
1481 		static int once;
1482 
1483 		nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
1484 		if (!nfit_test) {
1485 			rc = -ENOMEM;
1486 			goto err_register;
1487 		}
1488 		INIT_LIST_HEAD(&nfit_test->resources);
1489 		switch (i) {
1490 		case 0:
1491 			nfit_test->num_pm = NUM_PM;
1492 			nfit_test->num_dcr = NUM_DCR;
1493 			nfit_test->alloc = nfit_test0_alloc;
1494 			nfit_test->setup = nfit_test0_setup;
1495 			break;
1496 		case 1:
1497 			nfit_test->num_pm = 1;
1498 			nfit_test->alloc = nfit_test1_alloc;
1499 			nfit_test->setup = nfit_test1_setup;
1500 			break;
1501 		default:
1502 			rc = -EINVAL;
1503 			goto err_register;
1504 		}
1505 		pdev = &nfit_test->pdev;
1506 		pdev->name = KBUILD_MODNAME;
1507 		pdev->id = i;
1508 		pdev->dev.release = nfit_test_release;
1509 		rc = platform_device_register(pdev);
1510 		if (rc) {
1511 			put_device(&pdev->dev);
1512 			goto err_register;
1513 		}
1514 
1515 		rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1516 		if (rc)
1517 			goto err_register;
1518 
1519 		instances[i] = nfit_test;
1520 
1521 		if (!once++) {
1522 			dma_addr_t dma;
1523 			void *buf;
1524 
1525 			buf = dma_alloc_coherent(&pdev->dev, SZ_128M, &dma,
1526 					GFP_KERNEL);
1527 			if (!buf) {
1528 				rc = -ENOMEM;
1529 				dev_warn(&pdev->dev, "need 128M of free cma\n");
1530 				goto err_register;
1531 			}
1532 			dma_free_coherent(&pdev->dev, SZ_128M, buf, dma);
1533 		}
1534 	}
1535 
1536 	rc = platform_driver_register(&nfit_test_driver);
1537 	if (rc)
1538 		goto err_register;
1539 	return 0;
1540 
1541  err_register:
1542 	for (i = 0; i < NUM_NFITS; i++)
1543 		if (instances[i])
1544 			platform_device_unregister(&instances[i]->pdev);
1545 	nfit_test_teardown();
1546 	return rc;
1547 }
1548 
1549 static __exit void nfit_test_exit(void)
1550 {
1551 	int i;
1552 
1553 	platform_driver_unregister(&nfit_test_driver);
1554 	for (i = 0; i < NUM_NFITS; i++)
1555 		platform_device_unregister(&instances[i]->pdev);
1556 	nfit_test_teardown();
1557 }
1558 
1559 module_init(nfit_test_init);
1560 module_exit(nfit_test_exit);
1561 MODULE_LICENSE("GPL v2");
1562 MODULE_AUTHOR("Intel Corporation");
1563