/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/libnvdimm.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sizes.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <nfit.h>
#include <nd.h>
#include "nfit_test.h"

/*
 * Generate an NFIT table to describe the following topology:
 *
 * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
 *
 *                     (a)                       (b)           DIMM   BLK-REGION
 *           +----------+--------------+----------+---------+
 * +------+  |  blk2.0  |    pm0.0     |  blk2.1  |  pm1.0  |   0      region2
 * | imc0 +--+- - - - - region0 - - - -+----------+         +
 * +--+---+  |  blk3.0  |    pm0.0     |  blk3.1  |  pm1.0  |   1      region3
 *    |      +----------+--------------v----------v         v
 * +--+---+                            |                     |
 * | cpu0 |                                    region1
 * +--+---+                            |                     |
 *    |      +-------------------------^----------^          ^
 * +--+---+  |                 blk4.0             |  pm1.0   |   2      region4
 * | imc1 +--+-------------------------+----------+          +
 * +------+  |                 blk5.0             |  pm1.0   |   3      region5
 *           +-------------------------+----------+-+-------+
 *
 * +--+---+
 * | cpu1 |
 * +--+---+                   (Hotplug DIMM)
 *    |      +----------------------------------------------+
 * +--+---+  |                 blk6.0/pm7.0                  |   4      region6/7
 * | imc0 +--+----------------------------------------------+
 * +------+
 *
 *
 * *) In this layout we have four dimms and two memory controllers in one
 *    socket.  Each unique interface (BLK or PMEM) to DPA space
 *    is identified by a region device with a dynamically assigned id.
 *
 * *) The first portion of dimm0 and dimm1 is interleaved as REGION0.
 *    A single PMEM namespace "pm0.0" is created using half of the
 *    REGION0 SPA-range.  REGION0 spans dimm0 and dimm1.  PMEM namespaces
 *    allocate from the bottom of a region.  The unallocated
 *    portion of REGION0 aliases with REGION2 and REGION3.  That
 *    unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
 *    "blk3.0") starting at the base of each DIMM to offset (a) in those
 *    DIMMs.  "pm0.0", "blk2.0" and "blk3.0" are free-form readable
 *    names that can be assigned to a namespace.
 *
 * *) In the last portion of dimm0 and dimm1 we have an interleaved
 *    SPA range, REGION1, that spans those two dimms as well as dimm2
 *    and dimm3.  Some of REGION1 is allocated to a PMEM namespace named
 *    "pm1.0", the rest is reclaimed in 4 BLK namespaces (one for each
 *    dimm in the interleave set), "blk2.1", "blk3.1", "blk4.0", and
 *    "blk5.0".
 *
 * *) The portion of dimm2 and dimm3 that does not participate in the
 *    REGION1 interleaved SPA range (i.e. the DPA address below offset
 *    (b)) is also included in the "blk4.0" and "blk5.0" namespaces.
 *    Note that BLK namespaces need not be contiguous in DPA-space, and
 *    can consume aliased capacity from multiple interleave sets.
 *
 * BUS1: Legacy NVDIMM (single contiguous range)
 *
 *  region2
 * +---------------------+
 * |---------------------|
 * ||       pm2.0       ||
 * |---------------------|
 * +---------------------+
 *
 * *) An NFIT table may describe a simple system-physical-address range
 *    with no BLK aliasing.  This type of region may optionally
 *    reference an NVDIMM.
 */
enum {
	NUM_PM = 3,
	NUM_DCR = 5,
	NUM_HINTS = 8,
	NUM_BDW = NUM_DCR,
	NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
	NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */,
	DIMM_SIZE = SZ_32M,
	LABEL_SIZE = SZ_128K,
	SPA_VCD_SIZE = SZ_4M,
	SPA0_SIZE = DIMM_SIZE,
	SPA1_SIZE = DIMM_SIZE*2,
	SPA2_SIZE = DIMM_SIZE,
	BDW_SIZE = 64 << 8,
	DCR_SIZE = 12,
	NUM_NFITS = 2, /* permit testing multiple NFITs per system */
};

struct nfit_test_dcr {
	__le64 bdw_addr;
	__le32 bdw_status;
	__u8 aperature[BDW_SIZE];
};

#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
	(((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
	| ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))

static u32 handle[NUM_DCR] = {
	[0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
	[1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
	[2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
	[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
	[4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
};

static unsigned long dimm_fail_cmd_flags[NUM_DCR];

struct nfit_test {
	struct acpi_nfit_desc acpi_desc;
	struct platform_device pdev;
	struct list_head resources;
	void *nfit_buf;
	dma_addr_t nfit_dma;
	size_t nfit_size;
	int num_dcr;
	int num_pm;
	void **dimm;
	dma_addr_t *dimm_dma;
	void **flush;
	dma_addr_t *flush_dma;
	void **label;
	dma_addr_t *label_dma;
	void **spa_set;
	dma_addr_t *spa_set_dma;
	struct nfit_test_dcr **dcr;
	dma_addr_t *dcr_dma;
	int (*alloc)(struct nfit_test *t);
	void (*setup)(struct nfit_test *t);
	int setup_hotplug;
	union acpi_object **_fit;
	dma_addr_t _fit_dma;
	struct ars_state {
		struct nd_cmd_ars_status *ars_status;
		unsigned long deadline;
		spinlock_t lock;
	} ars_state;
	struct device *dimm_dev[NUM_DCR];
};

static struct nfit_test *to_nfit_test(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return container_of(pdev, struct nfit_test, pdev);
}

static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
		unsigned int buf_len)
{
	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	nd_cmd->status = 0;
	nd_cmd->config_size = LABEL_SIZE;
	nd_cmd->max_xfer = SZ_4K;

	return 0;
}

static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
		*nd_cmd, unsigned int buf_len, void *label)
{
	unsigned int len, offset = nd_cmd->in_offset;
	int rc;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	if (offset >= LABEL_SIZE)
		return -EINVAL;
	if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
		return -EINVAL;

	nd_cmd->status = 0;
	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
	memcpy(nd_cmd->out_buf, label + offset, len);
	rc = buf_len - sizeof(*nd_cmd) - len;

	return rc;
}

static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
		unsigned int buf_len, void *label)
{
	unsigned int len, offset = nd_cmd->in_offset;
	u32 *status;
	int
rc; 216 217 if (buf_len < sizeof(*nd_cmd)) 218 return -EINVAL; 219 if (offset >= LABEL_SIZE) 220 return -EINVAL; 221 if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len) 222 return -EINVAL; 223 224 status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd); 225 *status = 0; 226 len = min(nd_cmd->in_length, LABEL_SIZE - offset); 227 memcpy(label + offset, nd_cmd->in_buf, len); 228 rc = buf_len - sizeof(*nd_cmd) - (len + 4); 229 230 return rc; 231 } 232 233 #define NFIT_TEST_ARS_RECORDS 4 234 #define NFIT_TEST_CLEAR_ERR_UNIT 256 235 236 static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd, 237 unsigned int buf_len) 238 { 239 if (buf_len < sizeof(*nd_cmd)) 240 return -EINVAL; 241 242 nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status) 243 + NFIT_TEST_ARS_RECORDS * sizeof(struct nd_ars_record); 244 nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16; 245 nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT; 246 247 return 0; 248 } 249 250 /* 251 * Initialize the ars_state to return an ars_result 1 second in the future with 252 * a 4K error range in the middle of the requested address range. 253 */ 254 static void post_ars_status(struct ars_state *ars_state, u64 addr, u64 len) 255 { 256 struct nd_cmd_ars_status *ars_status; 257 struct nd_ars_record *ars_record; 258 259 ars_state->deadline = jiffies + 1*HZ; 260 ars_status = ars_state->ars_status; 261 ars_status->status = 0; 262 ars_status->out_length = sizeof(struct nd_cmd_ars_status) 263 + sizeof(struct nd_ars_record); 264 ars_status->address = addr; 265 ars_status->length = len; 266 ars_status->type = ND_ARS_PERSISTENT; 267 ars_status->num_records = 1; 268 ars_record = &ars_status->records[0]; 269 ars_record->handle = 0; 270 ars_record->err_address = addr + len / 2; 271 ars_record->length = SZ_4K; 272 } 273 274 static int nfit_test_cmd_ars_start(struct ars_state *ars_state, 275 struct nd_cmd_ars_start *ars_start, unsigned int buf_len, 276 int *cmd_rc) 277 { 278 if (buf_len < sizeof(*ars_start)) 279 return -EINVAL; 280 281 spin_lock(&ars_state->lock); 282 if (time_before(jiffies, ars_state->deadline)) { 283 ars_start->status = NFIT_ARS_START_BUSY; 284 *cmd_rc = -EBUSY; 285 } else { 286 ars_start->status = 0; 287 ars_start->scrub_time = 1; 288 post_ars_status(ars_state, ars_start->address, 289 ars_start->length); 290 *cmd_rc = 0; 291 } 292 spin_unlock(&ars_state->lock); 293 294 return 0; 295 } 296 297 static int nfit_test_cmd_ars_status(struct ars_state *ars_state, 298 struct nd_cmd_ars_status *ars_status, unsigned int buf_len, 299 int *cmd_rc) 300 { 301 if (buf_len < ars_state->ars_status->out_length) 302 return -EINVAL; 303 304 spin_lock(&ars_state->lock); 305 if (time_before(jiffies, ars_state->deadline)) { 306 memset(ars_status, 0, buf_len); 307 ars_status->status = NFIT_ARS_STATUS_BUSY; 308 ars_status->out_length = sizeof(*ars_status); 309 *cmd_rc = -EBUSY; 310 } else { 311 memcpy(ars_status, ars_state->ars_status, 312 ars_state->ars_status->out_length); 313 *cmd_rc = 0; 314 } 315 spin_unlock(&ars_state->lock); 316 return 0; 317 } 318 319 static int nfit_test_cmd_clear_error(struct nd_cmd_clear_error *clear_err, 320 unsigned int buf_len, int *cmd_rc) 321 { 322 const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1; 323 if (buf_len < sizeof(*clear_err)) 324 return -EINVAL; 325 326 if ((clear_err->address & mask) || (clear_err->length & mask)) 327 return -EINVAL; 328 329 /* 330 * Report 'all clear' success for all commands even though a new 331 * scrub will find errors again. 
This is enough to have the 332 * error removed from the 'badblocks' tracking in the pmem 333 * driver. 334 */ 335 clear_err->status = 0; 336 clear_err->cleared = clear_err->length; 337 *cmd_rc = 0; 338 return 0; 339 } 340 341 static int nfit_test_cmd_smart(struct nd_cmd_smart *smart, unsigned int buf_len) 342 { 343 static const struct nd_smart_payload smart_data = { 344 .flags = ND_SMART_HEALTH_VALID | ND_SMART_TEMP_VALID 345 | ND_SMART_SPARES_VALID | ND_SMART_ALARM_VALID 346 | ND_SMART_USED_VALID | ND_SMART_SHUTDOWN_VALID, 347 .health = ND_SMART_NON_CRITICAL_HEALTH, 348 .temperature = 23 * 16, 349 .spares = 75, 350 .alarm_flags = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP, 351 .life_used = 5, 352 .shutdown_state = 0, 353 .vendor_size = 0, 354 }; 355 356 if (buf_len < sizeof(*smart)) 357 return -EINVAL; 358 memcpy(smart->data, &smart_data, sizeof(smart_data)); 359 return 0; 360 } 361 362 static int nfit_test_cmd_smart_threshold(struct nd_cmd_smart_threshold *smart_t, 363 unsigned int buf_len) 364 { 365 static const struct nd_smart_threshold_payload smart_t_data = { 366 .alarm_control = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP, 367 .temperature = 40 * 16, 368 .spares = 5, 369 }; 370 371 if (buf_len < sizeof(*smart_t)) 372 return -EINVAL; 373 memcpy(smart_t->data, &smart_t_data, sizeof(smart_t_data)); 374 return 0; 375 } 376 377 static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc, 378 struct nvdimm *nvdimm, unsigned int cmd, void *buf, 379 unsigned int buf_len, int *cmd_rc) 380 { 381 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 382 struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc); 383 unsigned int func = cmd; 384 int i, rc = 0, __cmd_rc; 385 386 if (!cmd_rc) 387 cmd_rc = &__cmd_rc; 388 *cmd_rc = 0; 389 390 if (nvdimm) { 391 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 392 unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm); 393 394 if (!nfit_mem) 395 return -ENOTTY; 396 397 if (cmd == ND_CMD_CALL) { 398 struct nd_cmd_pkg *call_pkg = buf; 399 400 buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out; 401 buf = (void *) call_pkg->nd_payload; 402 func = call_pkg->nd_command; 403 if (call_pkg->nd_family != nfit_mem->family) 404 return -ENOTTY; 405 } 406 407 if (!test_bit(cmd, &cmd_mask) 408 || !test_bit(func, &nfit_mem->dsm_mask)) 409 return -ENOTTY; 410 411 /* lookup label space for the given dimm */ 412 for (i = 0; i < ARRAY_SIZE(handle); i++) 413 if (__to_nfit_memdev(nfit_mem)->device_handle == 414 handle[i]) 415 break; 416 if (i >= ARRAY_SIZE(handle)) 417 return -ENXIO; 418 419 if ((1 << func) & dimm_fail_cmd_flags[i]) 420 return -EIO; 421 422 switch (func) { 423 case ND_CMD_GET_CONFIG_SIZE: 424 rc = nfit_test_cmd_get_config_size(buf, buf_len); 425 break; 426 case ND_CMD_GET_CONFIG_DATA: 427 rc = nfit_test_cmd_get_config_data(buf, buf_len, 428 t->label[i]); 429 break; 430 case ND_CMD_SET_CONFIG_DATA: 431 rc = nfit_test_cmd_set_config_data(buf, buf_len, 432 t->label[i]); 433 break; 434 case ND_CMD_SMART: 435 rc = nfit_test_cmd_smart(buf, buf_len); 436 break; 437 case ND_CMD_SMART_THRESHOLD: 438 rc = nfit_test_cmd_smart_threshold(buf, buf_len); 439 device_lock(&t->pdev.dev); 440 __acpi_nvdimm_notify(t->dimm_dev[i], 0x81); 441 device_unlock(&t->pdev.dev); 442 break; 443 default: 444 return -ENOTTY; 445 } 446 } else { 447 struct ars_state *ars_state = &t->ars_state; 448 449 if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask)) 450 return -ENOTTY; 451 452 switch (func) { 453 case ND_CMD_ARS_CAP: 454 rc = nfit_test_cmd_ars_cap(buf, buf_len); 455 
break; 456 case ND_CMD_ARS_START: 457 rc = nfit_test_cmd_ars_start(ars_state, buf, buf_len, 458 cmd_rc); 459 break; 460 case ND_CMD_ARS_STATUS: 461 rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len, 462 cmd_rc); 463 break; 464 case ND_CMD_CLEAR_ERROR: 465 rc = nfit_test_cmd_clear_error(buf, buf_len, cmd_rc); 466 break; 467 default: 468 return -ENOTTY; 469 } 470 } 471 472 return rc; 473 } 474 475 static DEFINE_SPINLOCK(nfit_test_lock); 476 static struct nfit_test *instances[NUM_NFITS]; 477 478 static void release_nfit_res(void *data) 479 { 480 struct nfit_test_resource *nfit_res = data; 481 482 spin_lock(&nfit_test_lock); 483 list_del(&nfit_res->list); 484 spin_unlock(&nfit_test_lock); 485 486 vfree(nfit_res->buf); 487 kfree(nfit_res); 488 } 489 490 static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma, 491 void *buf) 492 { 493 struct device *dev = &t->pdev.dev; 494 struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res), 495 GFP_KERNEL); 496 int rc; 497 498 if (!buf || !nfit_res) 499 goto err; 500 rc = devm_add_action(dev, release_nfit_res, nfit_res); 501 if (rc) 502 goto err; 503 INIT_LIST_HEAD(&nfit_res->list); 504 memset(buf, 0, size); 505 nfit_res->dev = dev; 506 nfit_res->buf = buf; 507 nfit_res->res.start = *dma; 508 nfit_res->res.end = *dma + size - 1; 509 nfit_res->res.name = "NFIT"; 510 spin_lock_init(&nfit_res->lock); 511 INIT_LIST_HEAD(&nfit_res->requests); 512 spin_lock(&nfit_test_lock); 513 list_add(&nfit_res->list, &t->resources); 514 spin_unlock(&nfit_test_lock); 515 516 return nfit_res->buf; 517 err: 518 if (buf) 519 vfree(buf); 520 kfree(nfit_res); 521 return NULL; 522 } 523 524 static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma) 525 { 526 void *buf = vmalloc(size); 527 528 *dma = (unsigned long) buf; 529 return __test_alloc(t, size, dma, buf); 530 } 531 532 static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr) 533 { 534 int i; 535 536 for (i = 0; i < ARRAY_SIZE(instances); i++) { 537 struct nfit_test_resource *n, *nfit_res = NULL; 538 struct nfit_test *t = instances[i]; 539 540 if (!t) 541 continue; 542 spin_lock(&nfit_test_lock); 543 list_for_each_entry(n, &t->resources, list) { 544 if (addr >= n->res.start && (addr < n->res.start 545 + resource_size(&n->res))) { 546 nfit_res = n; 547 break; 548 } else if (addr >= (unsigned long) n->buf 549 && (addr < (unsigned long) n->buf 550 + resource_size(&n->res))) { 551 nfit_res = n; 552 break; 553 } 554 } 555 spin_unlock(&nfit_test_lock); 556 if (nfit_res) 557 return nfit_res; 558 } 559 560 return NULL; 561 } 562 563 static int ars_state_init(struct device *dev, struct ars_state *ars_state) 564 { 565 ars_state->ars_status = devm_kzalloc(dev, 566 sizeof(struct nd_cmd_ars_status) 567 + sizeof(struct nd_ars_record) * NFIT_TEST_ARS_RECORDS, 568 GFP_KERNEL); 569 if (!ars_state->ars_status) 570 return -ENOMEM; 571 spin_lock_init(&ars_state->lock); 572 return 0; 573 } 574 575 static void put_dimms(void *data) 576 { 577 struct device **dimm_dev = data; 578 int i; 579 580 for (i = 0; i < NUM_DCR; i++) 581 if (dimm_dev[i]) 582 device_unregister(dimm_dev[i]); 583 } 584 585 static struct class *nfit_test_dimm; 586 587 static int dimm_name_to_id(struct device *dev) 588 { 589 int dimm; 590 591 if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1 592 || dimm >= NUM_DCR || dimm < 0) 593 return -ENXIO; 594 return dimm; 595 } 596 597 598 static ssize_t handle_show(struct device *dev, struct device_attribute *attr, 599 char *buf) 600 { 601 int dimm = dimm_name_to_id(dev); 
602 603 if (dimm < 0) 604 return dimm; 605 606 return sprintf(buf, "%#x", handle[dimm]); 607 } 608 DEVICE_ATTR_RO(handle); 609 610 static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr, 611 char *buf) 612 { 613 int dimm = dimm_name_to_id(dev); 614 615 if (dimm < 0) 616 return dimm; 617 618 return sprintf(buf, "%#lx\n", dimm_fail_cmd_flags[dimm]); 619 } 620 621 static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr, 622 const char *buf, size_t size) 623 { 624 int dimm = dimm_name_to_id(dev); 625 unsigned long val; 626 ssize_t rc; 627 628 if (dimm < 0) 629 return dimm; 630 631 rc = kstrtol(buf, 0, &val); 632 if (rc) 633 return rc; 634 635 dimm_fail_cmd_flags[dimm] = val; 636 return size; 637 } 638 static DEVICE_ATTR_RW(fail_cmd); 639 640 static struct attribute *nfit_test_dimm_attributes[] = { 641 &dev_attr_fail_cmd.attr, 642 &dev_attr_handle.attr, 643 NULL, 644 }; 645 646 static struct attribute_group nfit_test_dimm_attribute_group = { 647 .attrs = nfit_test_dimm_attributes, 648 }; 649 650 static const struct attribute_group *nfit_test_dimm_attribute_groups[] = { 651 &nfit_test_dimm_attribute_group, 652 NULL, 653 }; 654 655 static int nfit_test0_alloc(struct nfit_test *t) 656 { 657 size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA 658 + sizeof(struct acpi_nfit_memory_map) * NUM_MEM 659 + sizeof(struct acpi_nfit_control_region) * NUM_DCR 660 + offsetof(struct acpi_nfit_control_region, 661 window_size) * NUM_DCR 662 + sizeof(struct acpi_nfit_data_region) * NUM_BDW 663 + (sizeof(struct acpi_nfit_flush_address) 664 + sizeof(u64) * NUM_HINTS) * NUM_DCR; 665 int i; 666 667 t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma); 668 if (!t->nfit_buf) 669 return -ENOMEM; 670 t->nfit_size = nfit_size; 671 672 t->spa_set[0] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[0]); 673 if (!t->spa_set[0]) 674 return -ENOMEM; 675 676 t->spa_set[1] = test_alloc(t, SPA1_SIZE, &t->spa_set_dma[1]); 677 if (!t->spa_set[1]) 678 return -ENOMEM; 679 680 t->spa_set[2] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[2]); 681 if (!t->spa_set[2]) 682 return -ENOMEM; 683 684 for (i = 0; i < NUM_DCR; i++) { 685 t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]); 686 if (!t->dimm[i]) 687 return -ENOMEM; 688 689 t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]); 690 if (!t->label[i]) 691 return -ENOMEM; 692 sprintf(t->label[i], "label%d", i); 693 694 t->flush[i] = test_alloc(t, max(PAGE_SIZE, 695 sizeof(u64) * NUM_HINTS), 696 &t->flush_dma[i]); 697 if (!t->flush[i]) 698 return -ENOMEM; 699 } 700 701 for (i = 0; i < NUM_DCR; i++) { 702 t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]); 703 if (!t->dcr[i]) 704 return -ENOMEM; 705 } 706 707 t->_fit = test_alloc(t, sizeof(union acpi_object **), &t->_fit_dma); 708 if (!t->_fit) 709 return -ENOMEM; 710 711 if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t->dimm_dev)) 712 return -ENOMEM; 713 for (i = 0; i < NUM_DCR; i++) { 714 t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm, 715 &t->pdev.dev, 0, NULL, 716 nfit_test_dimm_attribute_groups, 717 "test_dimm%d", i); 718 if (!t->dimm_dev[i]) 719 return -ENOMEM; 720 } 721 722 return ars_state_init(&t->pdev.dev, &t->ars_state); 723 } 724 725 static int nfit_test1_alloc(struct nfit_test *t) 726 { 727 size_t nfit_size = sizeof(struct acpi_nfit_system_address) * 2 728 + sizeof(struct acpi_nfit_memory_map) 729 + offsetof(struct acpi_nfit_control_region, window_size); 730 731 t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma); 732 if (!t->nfit_buf) 733 
return -ENOMEM; 734 t->nfit_size = nfit_size; 735 736 t->spa_set[0] = test_alloc(t, SPA2_SIZE, &t->spa_set_dma[0]); 737 if (!t->spa_set[0]) 738 return -ENOMEM; 739 740 t->spa_set[1] = test_alloc(t, SPA_VCD_SIZE, &t->spa_set_dma[1]); 741 if (!t->spa_set[1]) 742 return -ENOMEM; 743 744 return ars_state_init(&t->pdev.dev, &t->ars_state); 745 } 746 747 static void dcr_common_init(struct acpi_nfit_control_region *dcr) 748 { 749 dcr->vendor_id = 0xabcd; 750 dcr->device_id = 0; 751 dcr->revision_id = 1; 752 dcr->valid_fields = 1; 753 dcr->manufacturing_location = 0xa; 754 dcr->manufacturing_date = cpu_to_be16(2016); 755 } 756 757 static void nfit_test0_setup(struct nfit_test *t) 758 { 759 const int flush_hint_size = sizeof(struct acpi_nfit_flush_address) 760 + (sizeof(u64) * NUM_HINTS); 761 struct acpi_nfit_desc *acpi_desc; 762 struct acpi_nfit_memory_map *memdev; 763 void *nfit_buf = t->nfit_buf; 764 struct acpi_nfit_system_address *spa; 765 struct acpi_nfit_control_region *dcr; 766 struct acpi_nfit_data_region *bdw; 767 struct acpi_nfit_flush_address *flush; 768 unsigned int offset, i; 769 770 /* 771 * spa0 (interleave first half of dimm0 and dimm1, note storage 772 * does not actually alias the related block-data-window 773 * regions) 774 */ 775 spa = nfit_buf; 776 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 777 spa->header.length = sizeof(*spa); 778 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16); 779 spa->range_index = 0+1; 780 spa->address = t->spa_set_dma[0]; 781 spa->length = SPA0_SIZE; 782 783 /* 784 * spa1 (interleave last half of the 4 DIMMS, note storage 785 * does not actually alias the related block-data-window 786 * regions) 787 */ 788 spa = nfit_buf + sizeof(*spa); 789 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 790 spa->header.length = sizeof(*spa); 791 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16); 792 spa->range_index = 1+1; 793 spa->address = t->spa_set_dma[1]; 794 spa->length = SPA1_SIZE; 795 796 /* spa2 (dcr0) dimm0 */ 797 spa = nfit_buf + sizeof(*spa) * 2; 798 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 799 spa->header.length = sizeof(*spa); 800 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); 801 spa->range_index = 2+1; 802 spa->address = t->dcr_dma[0]; 803 spa->length = DCR_SIZE; 804 805 /* spa3 (dcr1) dimm1 */ 806 spa = nfit_buf + sizeof(*spa) * 3; 807 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 808 spa->header.length = sizeof(*spa); 809 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); 810 spa->range_index = 3+1; 811 spa->address = t->dcr_dma[1]; 812 spa->length = DCR_SIZE; 813 814 /* spa4 (dcr2) dimm2 */ 815 spa = nfit_buf + sizeof(*spa) * 4; 816 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 817 spa->header.length = sizeof(*spa); 818 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); 819 spa->range_index = 4+1; 820 spa->address = t->dcr_dma[2]; 821 spa->length = DCR_SIZE; 822 823 /* spa5 (dcr3) dimm3 */ 824 spa = nfit_buf + sizeof(*spa) * 5; 825 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 826 spa->header.length = sizeof(*spa); 827 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); 828 spa->range_index = 5+1; 829 spa->address = t->dcr_dma[3]; 830 spa->length = DCR_SIZE; 831 832 /* spa6 (bdw for dcr0) dimm0 */ 833 spa = nfit_buf + sizeof(*spa) * 6; 834 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 835 spa->header.length = sizeof(*spa); 836 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); 837 spa->range_index = 6+1; 838 spa->address = t->dimm_dma[0]; 839 spa->length = DIMM_SIZE; 840 
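	/*
	 * spa7..spa9 below follow the same pattern as spa6: one
	 * block-data-window aperture range per remaining DIMM, backed
	 * by the matching t->dimm_dma[] allocation.
	 */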
841 /* spa7 (bdw for dcr1) dimm1 */ 842 spa = nfit_buf + sizeof(*spa) * 7; 843 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 844 spa->header.length = sizeof(*spa); 845 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); 846 spa->range_index = 7+1; 847 spa->address = t->dimm_dma[1]; 848 spa->length = DIMM_SIZE; 849 850 /* spa8 (bdw for dcr2) dimm2 */ 851 spa = nfit_buf + sizeof(*spa) * 8; 852 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 853 spa->header.length = sizeof(*spa); 854 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); 855 spa->range_index = 8+1; 856 spa->address = t->dimm_dma[2]; 857 spa->length = DIMM_SIZE; 858 859 /* spa9 (bdw for dcr3) dimm3 */ 860 spa = nfit_buf + sizeof(*spa) * 9; 861 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 862 spa->header.length = sizeof(*spa); 863 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); 864 spa->range_index = 9+1; 865 spa->address = t->dimm_dma[3]; 866 spa->length = DIMM_SIZE; 867 868 offset = sizeof(*spa) * 10; 869 /* mem-region0 (spa0, dimm0) */ 870 memdev = nfit_buf + offset; 871 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 872 memdev->header.length = sizeof(*memdev); 873 memdev->device_handle = handle[0]; 874 memdev->physical_id = 0; 875 memdev->region_id = 0; 876 memdev->range_index = 0+1; 877 memdev->region_index = 4+1; 878 memdev->region_size = SPA0_SIZE/2; 879 memdev->region_offset = t->spa_set_dma[0]; 880 memdev->address = 0; 881 memdev->interleave_index = 0; 882 memdev->interleave_ways = 2; 883 884 /* mem-region1 (spa0, dimm1) */ 885 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map); 886 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 887 memdev->header.length = sizeof(*memdev); 888 memdev->device_handle = handle[1]; 889 memdev->physical_id = 1; 890 memdev->region_id = 0; 891 memdev->range_index = 0+1; 892 memdev->region_index = 5+1; 893 memdev->region_size = SPA0_SIZE/2; 894 memdev->region_offset = t->spa_set_dma[0] + SPA0_SIZE/2; 895 memdev->address = 0; 896 memdev->interleave_index = 0; 897 memdev->interleave_ways = 2; 898 899 /* mem-region2 (spa1, dimm0) */ 900 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2; 901 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 902 memdev->header.length = sizeof(*memdev); 903 memdev->device_handle = handle[0]; 904 memdev->physical_id = 0; 905 memdev->region_id = 1; 906 memdev->range_index = 1+1; 907 memdev->region_index = 4+1; 908 memdev->region_size = SPA1_SIZE/4; 909 memdev->region_offset = t->spa_set_dma[1]; 910 memdev->address = SPA0_SIZE/2; 911 memdev->interleave_index = 0; 912 memdev->interleave_ways = 4; 913 914 /* mem-region3 (spa1, dimm1) */ 915 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3; 916 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 917 memdev->header.length = sizeof(*memdev); 918 memdev->device_handle = handle[1]; 919 memdev->physical_id = 1; 920 memdev->region_id = 1; 921 memdev->range_index = 1+1; 922 memdev->region_index = 5+1; 923 memdev->region_size = SPA1_SIZE/4; 924 memdev->region_offset = t->spa_set_dma[1] + SPA1_SIZE/4; 925 memdev->address = SPA0_SIZE/2; 926 memdev->interleave_index = 0; 927 memdev->interleave_ways = 4; 928 929 /* mem-region4 (spa1, dimm2) */ 930 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4; 931 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 932 memdev->header.length = sizeof(*memdev); 933 memdev->device_handle = handle[2]; 934 memdev->physical_id = 2; 935 memdev->region_id = 0; 936 memdev->range_index = 1+1; 937 
memdev->region_index = 6+1; 938 memdev->region_size = SPA1_SIZE/4; 939 memdev->region_offset = t->spa_set_dma[1] + 2*SPA1_SIZE/4; 940 memdev->address = SPA0_SIZE/2; 941 memdev->interleave_index = 0; 942 memdev->interleave_ways = 4; 943 944 /* mem-region5 (spa1, dimm3) */ 945 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5; 946 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 947 memdev->header.length = sizeof(*memdev); 948 memdev->device_handle = handle[3]; 949 memdev->physical_id = 3; 950 memdev->region_id = 0; 951 memdev->range_index = 1+1; 952 memdev->region_index = 7+1; 953 memdev->region_size = SPA1_SIZE/4; 954 memdev->region_offset = t->spa_set_dma[1] + 3*SPA1_SIZE/4; 955 memdev->address = SPA0_SIZE/2; 956 memdev->interleave_index = 0; 957 memdev->interleave_ways = 4; 958 959 /* mem-region6 (spa/dcr0, dimm0) */ 960 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6; 961 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 962 memdev->header.length = sizeof(*memdev); 963 memdev->device_handle = handle[0]; 964 memdev->physical_id = 0; 965 memdev->region_id = 0; 966 memdev->range_index = 2+1; 967 memdev->region_index = 0+1; 968 memdev->region_size = 0; 969 memdev->region_offset = 0; 970 memdev->address = 0; 971 memdev->interleave_index = 0; 972 memdev->interleave_ways = 1; 973 974 /* mem-region7 (spa/dcr1, dimm1) */ 975 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7; 976 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 977 memdev->header.length = sizeof(*memdev); 978 memdev->device_handle = handle[1]; 979 memdev->physical_id = 1; 980 memdev->region_id = 0; 981 memdev->range_index = 3+1; 982 memdev->region_index = 1+1; 983 memdev->region_size = 0; 984 memdev->region_offset = 0; 985 memdev->address = 0; 986 memdev->interleave_index = 0; 987 memdev->interleave_ways = 1; 988 989 /* mem-region8 (spa/dcr2, dimm2) */ 990 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8; 991 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 992 memdev->header.length = sizeof(*memdev); 993 memdev->device_handle = handle[2]; 994 memdev->physical_id = 2; 995 memdev->region_id = 0; 996 memdev->range_index = 4+1; 997 memdev->region_index = 2+1; 998 memdev->region_size = 0; 999 memdev->region_offset = 0; 1000 memdev->address = 0; 1001 memdev->interleave_index = 0; 1002 memdev->interleave_ways = 1; 1003 1004 /* mem-region9 (spa/dcr3, dimm3) */ 1005 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9; 1006 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1007 memdev->header.length = sizeof(*memdev); 1008 memdev->device_handle = handle[3]; 1009 memdev->physical_id = 3; 1010 memdev->region_id = 0; 1011 memdev->range_index = 5+1; 1012 memdev->region_index = 3+1; 1013 memdev->region_size = 0; 1014 memdev->region_offset = 0; 1015 memdev->address = 0; 1016 memdev->interleave_index = 0; 1017 memdev->interleave_ways = 1; 1018 1019 /* mem-region10 (spa/bdw0, dimm0) */ 1020 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10; 1021 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1022 memdev->header.length = sizeof(*memdev); 1023 memdev->device_handle = handle[0]; 1024 memdev->physical_id = 0; 1025 memdev->region_id = 0; 1026 memdev->range_index = 6+1; 1027 memdev->region_index = 0+1; 1028 memdev->region_size = 0; 1029 memdev->region_offset = 0; 1030 memdev->address = 0; 1031 memdev->interleave_index = 0; 1032 memdev->interleave_ways = 1; 1033 1034 /* mem-region11 (spa/bdw1, dimm1) */ 1035 memdev = nfit_buf + offset + 
sizeof(struct acpi_nfit_memory_map) * 11; 1036 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1037 memdev->header.length = sizeof(*memdev); 1038 memdev->device_handle = handle[1]; 1039 memdev->physical_id = 1; 1040 memdev->region_id = 0; 1041 memdev->range_index = 7+1; 1042 memdev->region_index = 1+1; 1043 memdev->region_size = 0; 1044 memdev->region_offset = 0; 1045 memdev->address = 0; 1046 memdev->interleave_index = 0; 1047 memdev->interleave_ways = 1; 1048 1049 /* mem-region12 (spa/bdw2, dimm2) */ 1050 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12; 1051 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1052 memdev->header.length = sizeof(*memdev); 1053 memdev->device_handle = handle[2]; 1054 memdev->physical_id = 2; 1055 memdev->region_id = 0; 1056 memdev->range_index = 8+1; 1057 memdev->region_index = 2+1; 1058 memdev->region_size = 0; 1059 memdev->region_offset = 0; 1060 memdev->address = 0; 1061 memdev->interleave_index = 0; 1062 memdev->interleave_ways = 1; 1063 1064 /* mem-region13 (spa/dcr3, dimm3) */ 1065 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13; 1066 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1067 memdev->header.length = sizeof(*memdev); 1068 memdev->device_handle = handle[3]; 1069 memdev->physical_id = 3; 1070 memdev->region_id = 0; 1071 memdev->range_index = 9+1; 1072 memdev->region_index = 3+1; 1073 memdev->region_size = 0; 1074 memdev->region_offset = 0; 1075 memdev->address = 0; 1076 memdev->interleave_index = 0; 1077 memdev->interleave_ways = 1; 1078 1079 offset = offset + sizeof(struct acpi_nfit_memory_map) * 14; 1080 /* dcr-descriptor0: blk */ 1081 dcr = nfit_buf + offset; 1082 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1083 dcr->header.length = sizeof(struct acpi_nfit_control_region); 1084 dcr->region_index = 0+1; 1085 dcr_common_init(dcr); 1086 dcr->serial_number = ~handle[0]; 1087 dcr->code = NFIT_FIC_BLK; 1088 dcr->windows = 1; 1089 dcr->window_size = DCR_SIZE; 1090 dcr->command_offset = 0; 1091 dcr->command_size = 8; 1092 dcr->status_offset = 8; 1093 dcr->status_size = 4; 1094 1095 /* dcr-descriptor1: blk */ 1096 dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region); 1097 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1098 dcr->header.length = sizeof(struct acpi_nfit_control_region); 1099 dcr->region_index = 1+1; 1100 dcr_common_init(dcr); 1101 dcr->serial_number = ~handle[1]; 1102 dcr->code = NFIT_FIC_BLK; 1103 dcr->windows = 1; 1104 dcr->window_size = DCR_SIZE; 1105 dcr->command_offset = 0; 1106 dcr->command_size = 8; 1107 dcr->status_offset = 8; 1108 dcr->status_size = 4; 1109 1110 /* dcr-descriptor2: blk */ 1111 dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2; 1112 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1113 dcr->header.length = sizeof(struct acpi_nfit_control_region); 1114 dcr->region_index = 2+1; 1115 dcr_common_init(dcr); 1116 dcr->serial_number = ~handle[2]; 1117 dcr->code = NFIT_FIC_BLK; 1118 dcr->windows = 1; 1119 dcr->window_size = DCR_SIZE; 1120 dcr->command_offset = 0; 1121 dcr->command_size = 8; 1122 dcr->status_offset = 8; 1123 dcr->status_size = 4; 1124 1125 /* dcr-descriptor3: blk */ 1126 dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3; 1127 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1128 dcr->header.length = sizeof(struct acpi_nfit_control_region); 1129 dcr->region_index = 3+1; 1130 dcr_common_init(dcr); 1131 dcr->serial_number = ~handle[3]; 1132 dcr->code = NFIT_FIC_BLK; 1133 dcr->windows = 1; 1134 
dcr->window_size = DCR_SIZE; 1135 dcr->command_offset = 0; 1136 dcr->command_size = 8; 1137 dcr->status_offset = 8; 1138 dcr->status_size = 4; 1139 1140 offset = offset + sizeof(struct acpi_nfit_control_region) * 4; 1141 /* dcr-descriptor0: pmem */ 1142 dcr = nfit_buf + offset; 1143 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1144 dcr->header.length = offsetof(struct acpi_nfit_control_region, 1145 window_size); 1146 dcr->region_index = 4+1; 1147 dcr_common_init(dcr); 1148 dcr->serial_number = ~handle[0]; 1149 dcr->code = NFIT_FIC_BYTEN; 1150 dcr->windows = 0; 1151 1152 /* dcr-descriptor1: pmem */ 1153 dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region, 1154 window_size); 1155 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1156 dcr->header.length = offsetof(struct acpi_nfit_control_region, 1157 window_size); 1158 dcr->region_index = 5+1; 1159 dcr_common_init(dcr); 1160 dcr->serial_number = ~handle[1]; 1161 dcr->code = NFIT_FIC_BYTEN; 1162 dcr->windows = 0; 1163 1164 /* dcr-descriptor2: pmem */ 1165 dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region, 1166 window_size) * 2; 1167 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1168 dcr->header.length = offsetof(struct acpi_nfit_control_region, 1169 window_size); 1170 dcr->region_index = 6+1; 1171 dcr_common_init(dcr); 1172 dcr->serial_number = ~handle[2]; 1173 dcr->code = NFIT_FIC_BYTEN; 1174 dcr->windows = 0; 1175 1176 /* dcr-descriptor3: pmem */ 1177 dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region, 1178 window_size) * 3; 1179 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1180 dcr->header.length = offsetof(struct acpi_nfit_control_region, 1181 window_size); 1182 dcr->region_index = 7+1; 1183 dcr_common_init(dcr); 1184 dcr->serial_number = ~handle[3]; 1185 dcr->code = NFIT_FIC_BYTEN; 1186 dcr->windows = 0; 1187 1188 offset = offset + offsetof(struct acpi_nfit_control_region, 1189 window_size) * 4; 1190 /* bdw0 (spa/dcr0, dimm0) */ 1191 bdw = nfit_buf + offset; 1192 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION; 1193 bdw->header.length = sizeof(struct acpi_nfit_data_region); 1194 bdw->region_index = 0+1; 1195 bdw->windows = 1; 1196 bdw->offset = 0; 1197 bdw->size = BDW_SIZE; 1198 bdw->capacity = DIMM_SIZE; 1199 bdw->start_address = 0; 1200 1201 /* bdw1 (spa/dcr1, dimm1) */ 1202 bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region); 1203 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION; 1204 bdw->header.length = sizeof(struct acpi_nfit_data_region); 1205 bdw->region_index = 1+1; 1206 bdw->windows = 1; 1207 bdw->offset = 0; 1208 bdw->size = BDW_SIZE; 1209 bdw->capacity = DIMM_SIZE; 1210 bdw->start_address = 0; 1211 1212 /* bdw2 (spa/dcr2, dimm2) */ 1213 bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2; 1214 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION; 1215 bdw->header.length = sizeof(struct acpi_nfit_data_region); 1216 bdw->region_index = 2+1; 1217 bdw->windows = 1; 1218 bdw->offset = 0; 1219 bdw->size = BDW_SIZE; 1220 bdw->capacity = DIMM_SIZE; 1221 bdw->start_address = 0; 1222 1223 /* bdw3 (spa/dcr3, dimm3) */ 1224 bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3; 1225 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION; 1226 bdw->header.length = sizeof(struct acpi_nfit_data_region); 1227 bdw->region_index = 3+1; 1228 bdw->windows = 1; 1229 bdw->offset = 0; 1230 bdw->size = BDW_SIZE; 1231 bdw->capacity = DIMM_SIZE; 1232 bdw->start_address = 0; 1233 1234 offset = offset + sizeof(struct acpi_nfit_data_region) * 4; 1235 /* flush0 (dimm0) */ 1236 
flush = nfit_buf + offset; 1237 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS; 1238 flush->header.length = flush_hint_size; 1239 flush->device_handle = handle[0]; 1240 flush->hint_count = NUM_HINTS; 1241 for (i = 0; i < NUM_HINTS; i++) 1242 flush->hint_address[i] = t->flush_dma[0] + i * sizeof(u64); 1243 1244 /* flush1 (dimm1) */ 1245 flush = nfit_buf + offset + flush_hint_size * 1; 1246 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS; 1247 flush->header.length = flush_hint_size; 1248 flush->device_handle = handle[1]; 1249 flush->hint_count = NUM_HINTS; 1250 for (i = 0; i < NUM_HINTS; i++) 1251 flush->hint_address[i] = t->flush_dma[1] + i * sizeof(u64); 1252 1253 /* flush2 (dimm2) */ 1254 flush = nfit_buf + offset + flush_hint_size * 2; 1255 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS; 1256 flush->header.length = flush_hint_size; 1257 flush->device_handle = handle[2]; 1258 flush->hint_count = NUM_HINTS; 1259 for (i = 0; i < NUM_HINTS; i++) 1260 flush->hint_address[i] = t->flush_dma[2] + i * sizeof(u64); 1261 1262 /* flush3 (dimm3) */ 1263 flush = nfit_buf + offset + flush_hint_size * 3; 1264 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS; 1265 flush->header.length = flush_hint_size; 1266 flush->device_handle = handle[3]; 1267 flush->hint_count = NUM_HINTS; 1268 for (i = 0; i < NUM_HINTS; i++) 1269 flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64); 1270 1271 if (t->setup_hotplug) { 1272 offset = offset + flush_hint_size * 4; 1273 /* dcr-descriptor4: blk */ 1274 dcr = nfit_buf + offset; 1275 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1276 dcr->header.length = sizeof(struct acpi_nfit_control_region); 1277 dcr->region_index = 8+1; 1278 dcr_common_init(dcr); 1279 dcr->serial_number = ~handle[4]; 1280 dcr->code = NFIT_FIC_BLK; 1281 dcr->windows = 1; 1282 dcr->window_size = DCR_SIZE; 1283 dcr->command_offset = 0; 1284 dcr->command_size = 8; 1285 dcr->status_offset = 8; 1286 dcr->status_size = 4; 1287 1288 offset = offset + sizeof(struct acpi_nfit_control_region); 1289 /* dcr-descriptor4: pmem */ 1290 dcr = nfit_buf + offset; 1291 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1292 dcr->header.length = offsetof(struct acpi_nfit_control_region, 1293 window_size); 1294 dcr->region_index = 9+1; 1295 dcr_common_init(dcr); 1296 dcr->serial_number = ~handle[4]; 1297 dcr->code = NFIT_FIC_BYTEN; 1298 dcr->windows = 0; 1299 1300 offset = offset + offsetof(struct acpi_nfit_control_region, 1301 window_size); 1302 /* bdw4 (spa/dcr4, dimm4) */ 1303 bdw = nfit_buf + offset; 1304 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION; 1305 bdw->header.length = sizeof(struct acpi_nfit_data_region); 1306 bdw->region_index = 8+1; 1307 bdw->windows = 1; 1308 bdw->offset = 0; 1309 bdw->size = BDW_SIZE; 1310 bdw->capacity = DIMM_SIZE; 1311 bdw->start_address = 0; 1312 1313 offset = offset + sizeof(struct acpi_nfit_data_region); 1314 /* spa10 (dcr4) dimm4 */ 1315 spa = nfit_buf + offset; 1316 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 1317 spa->header.length = sizeof(*spa); 1318 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); 1319 spa->range_index = 10+1; 1320 spa->address = t->dcr_dma[4]; 1321 spa->length = DCR_SIZE; 1322 1323 /* 1324 * spa11 (single-dimm interleave for hotplug, note storage 1325 * does not actually alias the related block-data-window 1326 * regions) 1327 */ 1328 spa = nfit_buf + offset + sizeof(*spa); 1329 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 1330 spa->header.length = sizeof(*spa); 1331 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16); 1332 
spa->range_index = 11+1; 1333 spa->address = t->spa_set_dma[2]; 1334 spa->length = SPA0_SIZE; 1335 1336 /* spa12 (bdw for dcr4) dimm4 */ 1337 spa = nfit_buf + offset + sizeof(*spa) * 2; 1338 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 1339 spa->header.length = sizeof(*spa); 1340 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); 1341 spa->range_index = 12+1; 1342 spa->address = t->dimm_dma[4]; 1343 spa->length = DIMM_SIZE; 1344 1345 offset = offset + sizeof(*spa) * 3; 1346 /* mem-region14 (spa/dcr4, dimm4) */ 1347 memdev = nfit_buf + offset; 1348 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1349 memdev->header.length = sizeof(*memdev); 1350 memdev->device_handle = handle[4]; 1351 memdev->physical_id = 4; 1352 memdev->region_id = 0; 1353 memdev->range_index = 10+1; 1354 memdev->region_index = 8+1; 1355 memdev->region_size = 0; 1356 memdev->region_offset = 0; 1357 memdev->address = 0; 1358 memdev->interleave_index = 0; 1359 memdev->interleave_ways = 1; 1360 1361 /* mem-region15 (spa0, dimm4) */ 1362 memdev = nfit_buf + offset + 1363 sizeof(struct acpi_nfit_memory_map); 1364 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1365 memdev->header.length = sizeof(*memdev); 1366 memdev->device_handle = handle[4]; 1367 memdev->physical_id = 4; 1368 memdev->region_id = 0; 1369 memdev->range_index = 11+1; 1370 memdev->region_index = 9+1; 1371 memdev->region_size = SPA0_SIZE; 1372 memdev->region_offset = t->spa_set_dma[2]; 1373 memdev->address = 0; 1374 memdev->interleave_index = 0; 1375 memdev->interleave_ways = 1; 1376 1377 /* mem-region16 (spa/bdw4, dimm4) */ 1378 memdev = nfit_buf + offset + 1379 sizeof(struct acpi_nfit_memory_map) * 2; 1380 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1381 memdev->header.length = sizeof(*memdev); 1382 memdev->device_handle = handle[4]; 1383 memdev->physical_id = 4; 1384 memdev->region_id = 0; 1385 memdev->range_index = 12+1; 1386 memdev->region_index = 8+1; 1387 memdev->region_size = 0; 1388 memdev->region_offset = 0; 1389 memdev->address = 0; 1390 memdev->interleave_index = 0; 1391 memdev->interleave_ways = 1; 1392 1393 offset = offset + sizeof(struct acpi_nfit_memory_map) * 3; 1394 /* flush3 (dimm4) */ 1395 flush = nfit_buf + offset; 1396 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS; 1397 flush->header.length = flush_hint_size; 1398 flush->device_handle = handle[4]; 1399 flush->hint_count = NUM_HINTS; 1400 for (i = 0; i < NUM_HINTS; i++) 1401 flush->hint_address[i] = t->flush_dma[4] 1402 + i * sizeof(u64); 1403 } 1404 1405 post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA0_SIZE); 1406 1407 acpi_desc = &t->acpi_desc; 1408 set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en); 1409 set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en); 1410 set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en); 1411 set_bit(ND_CMD_SMART, &acpi_desc->dimm_cmd_force_en); 1412 set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en); 1413 set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en); 1414 set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en); 1415 set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en); 1416 set_bit(ND_CMD_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en); 1417 } 1418 1419 static void nfit_test1_setup(struct nfit_test *t) 1420 { 1421 size_t offset; 1422 void *nfit_buf = t->nfit_buf; 1423 struct acpi_nfit_memory_map *memdev; 1424 struct acpi_nfit_control_region *dcr; 1425 struct acpi_nfit_system_address *spa; 1426 struct acpi_nfit_desc *acpi_desc; 1427 1428 offset = 0; 1429 /* spa0 (flat range with no 
bdw aliasing) */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA2_SIZE;

	/* virtual cd region */
	spa = nfit_buf + sizeof(*spa);
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
	spa->range_index = 0;
	spa->address = t->spa_set_dma[1];
	spa->length = SPA_VCD_SIZE;

	offset += sizeof(*spa) * 2;
	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = 0;
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
		| ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
		| ACPI_NFIT_MEM_NOT_ARMED;

	offset += sizeof(*memdev);
	/* dcr-descriptor0 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 0+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~0;
	dcr->code = NFIT_FIC_BYTE;
	dcr->windows = 0;

	post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA2_SIZE);

	acpi_desc = &t->acpi_desc;
	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
}

static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
		void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = &ndbr->nd_region;
	unsigned int lane;

	lane = nd_region_acquire_lane(nd_region);
	if (rw)
		memcpy(mmio->addr.base + dpa, iobuf, len);
	else {
		memcpy(iobuf, mmio->addr.base + dpa, len);

		/* give us some coverage of the mmio_flush_range() API */
		mmio_flush_range(mmio->addr.base + dpa, len);
	}
	nd_region_release_lane(nd_region, lane);

	return 0;
}

static int nfit_test_probe(struct platform_device *pdev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &pdev->dev;
	struct nfit_test *nfit_test;
	struct nfit_mem *nfit_mem;
	union acpi_object *obj;
	int rc;

	nfit_test = to_nfit_test(&pdev->dev);

	/* common alloc */
	if (nfit_test->num_dcr) {
		int num = nfit_test->num_dcr;

		nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
GFP_KERNEL); 1533 nfit_test->label = devm_kcalloc(dev, num, sizeof(void *), 1534 GFP_KERNEL); 1535 nfit_test->label_dma = devm_kcalloc(dev, num, 1536 sizeof(dma_addr_t), GFP_KERNEL); 1537 nfit_test->dcr = devm_kcalloc(dev, num, 1538 sizeof(struct nfit_test_dcr *), GFP_KERNEL); 1539 nfit_test->dcr_dma = devm_kcalloc(dev, num, 1540 sizeof(dma_addr_t), GFP_KERNEL); 1541 if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label 1542 && nfit_test->label_dma && nfit_test->dcr 1543 && nfit_test->dcr_dma && nfit_test->flush 1544 && nfit_test->flush_dma) 1545 /* pass */; 1546 else 1547 return -ENOMEM; 1548 } 1549 1550 if (nfit_test->num_pm) { 1551 int num = nfit_test->num_pm; 1552 1553 nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *), 1554 GFP_KERNEL); 1555 nfit_test->spa_set_dma = devm_kcalloc(dev, num, 1556 sizeof(dma_addr_t), GFP_KERNEL); 1557 if (nfit_test->spa_set && nfit_test->spa_set_dma) 1558 /* pass */; 1559 else 1560 return -ENOMEM; 1561 } 1562 1563 /* per-nfit specific alloc */ 1564 if (nfit_test->alloc(nfit_test)) 1565 return -ENOMEM; 1566 1567 nfit_test->setup(nfit_test); 1568 acpi_desc = &nfit_test->acpi_desc; 1569 acpi_nfit_desc_init(acpi_desc, &pdev->dev); 1570 acpi_desc->blk_do_io = nfit_test_blk_do_io; 1571 nd_desc = &acpi_desc->nd_desc; 1572 nd_desc->provider_name = NULL; 1573 nd_desc->module = THIS_MODULE; 1574 nd_desc->ndctl = nfit_test_ctl; 1575 1576 rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf, 1577 nfit_test->nfit_size); 1578 if (rc) 1579 return rc; 1580 1581 if (nfit_test->setup != nfit_test0_setup) 1582 return 0; 1583 1584 nfit_test->setup_hotplug = 1; 1585 nfit_test->setup(nfit_test); 1586 1587 obj = kzalloc(sizeof(*obj), GFP_KERNEL); 1588 if (!obj) 1589 return -ENOMEM; 1590 obj->type = ACPI_TYPE_BUFFER; 1591 obj->buffer.length = nfit_test->nfit_size; 1592 obj->buffer.pointer = nfit_test->nfit_buf; 1593 *(nfit_test->_fit) = obj; 1594 __acpi_nfit_notify(&pdev->dev, nfit_test, 0x80); 1595 1596 /* associate dimm devices with nfit_mem data for notification testing */ 1597 mutex_lock(&acpi_desc->init_mutex); 1598 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 1599 u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle; 1600 int i; 1601 1602 for (i = 0; i < NUM_DCR; i++) 1603 if (nfit_handle == handle[i]) 1604 dev_set_drvdata(nfit_test->dimm_dev[i], 1605 nfit_mem); 1606 } 1607 mutex_unlock(&acpi_desc->init_mutex); 1608 1609 return 0; 1610 } 1611 1612 static int nfit_test_remove(struct platform_device *pdev) 1613 { 1614 return 0; 1615 } 1616 1617 static void nfit_test_release(struct device *dev) 1618 { 1619 struct nfit_test *nfit_test = to_nfit_test(dev); 1620 1621 kfree(nfit_test); 1622 } 1623 1624 static const struct platform_device_id nfit_test_id[] = { 1625 { KBUILD_MODNAME }, 1626 { }, 1627 }; 1628 1629 static struct platform_driver nfit_test_driver = { 1630 .probe = nfit_test_probe, 1631 .remove = nfit_test_remove, 1632 .driver = { 1633 .name = KBUILD_MODNAME, 1634 }, 1635 .id_table = nfit_test_id, 1636 }; 1637 1638 static __init int nfit_test_init(void) 1639 { 1640 int rc, i; 1641 1642 nfit_test_dimm = class_create(THIS_MODULE, "nfit_test_dimm"); 1643 if (IS_ERR(nfit_test_dimm)) 1644 return PTR_ERR(nfit_test_dimm); 1645 1646 nfit_test_setup(nfit_test_lookup); 1647 1648 for (i = 0; i < NUM_NFITS; i++) { 1649 struct nfit_test *nfit_test; 1650 struct platform_device *pdev; 1651 1652 nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL); 1653 if (!nfit_test) { 1654 rc = -ENOMEM; 1655 goto err_register; 1656 } 1657 
INIT_LIST_HEAD(&nfit_test->resources); 1658 switch (i) { 1659 case 0: 1660 nfit_test->num_pm = NUM_PM; 1661 nfit_test->num_dcr = NUM_DCR; 1662 nfit_test->alloc = nfit_test0_alloc; 1663 nfit_test->setup = nfit_test0_setup; 1664 break; 1665 case 1: 1666 nfit_test->num_pm = 1; 1667 nfit_test->alloc = nfit_test1_alloc; 1668 nfit_test->setup = nfit_test1_setup; 1669 break; 1670 default: 1671 rc = -EINVAL; 1672 goto err_register; 1673 } 1674 pdev = &nfit_test->pdev; 1675 pdev->name = KBUILD_MODNAME; 1676 pdev->id = i; 1677 pdev->dev.release = nfit_test_release; 1678 rc = platform_device_register(pdev); 1679 if (rc) { 1680 put_device(&pdev->dev); 1681 goto err_register; 1682 } 1683 1684 rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 1685 if (rc) 1686 goto err_register; 1687 1688 instances[i] = nfit_test; 1689 } 1690 1691 rc = platform_driver_register(&nfit_test_driver); 1692 if (rc) 1693 goto err_register; 1694 return 0; 1695 1696 err_register: 1697 for (i = 0; i < NUM_NFITS; i++) 1698 if (instances[i]) 1699 platform_device_unregister(&instances[i]->pdev); 1700 nfit_test_teardown(); 1701 return rc; 1702 } 1703 1704 static __exit void nfit_test_exit(void) 1705 { 1706 int i; 1707 1708 platform_driver_unregister(&nfit_test_driver); 1709 for (i = 0; i < NUM_NFITS; i++) 1710 platform_device_unregister(&instances[i]->pdev); 1711 nfit_test_teardown(); 1712 class_destroy(nfit_test_dimm); 1713 } 1714 1715 module_init(nfit_test_init); 1716 module_exit(nfit_test_exit); 1717 MODULE_LICENSE("GPL v2"); 1718 MODULE_AUTHOR("Intel Corporation"); 1719
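/*
 * Usage sketch (illustrative, assuming this module is built from the
 * tools/testing/nvdimm/ mock environment alongside the instrumented
 * nfit/libnvdimm modules): loading nfit_test registers the platform
 * devices created above, acpi_nfit_init() parses the generated NFIT
 * content, and the resulting test buses, DIMMs, and regions can be
 * inspected from userspace, e.g. with "ndctl list -BDR".  Exact provider
 * names and region counts depend on the kernel and ndctl versions.
 */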