1 /* 2 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of version 2 of the GNU General Public License as 6 * published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, but 9 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 */ 13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 #include <linux/platform_device.h> 15 #include <linux/dma-mapping.h> 16 #include <linux/workqueue.h> 17 #include <linux/libnvdimm.h> 18 #include <linux/vmalloc.h> 19 #include <linux/device.h> 20 #include <linux/module.h> 21 #include <linux/mutex.h> 22 #include <linux/ndctl.h> 23 #include <linux/sizes.h> 24 #include <linux/list.h> 25 #include <linux/slab.h> 26 #include <nd-core.h> 27 #include <nfit.h> 28 #include <nd.h> 29 #include "nfit_test.h" 30 #include "../watermark.h" 31 32 /* 33 * Generate an NFIT table to describe the following topology: 34 * 35 * BUS0: Interleaved PMEM regions, and aliasing with BLK regions 36 * 37 * (a) (b) DIMM BLK-REGION 38 * +----------+--------------+----------+---------+ 39 * +------+ | blk2.0 | pm0.0 | blk2.1 | pm1.0 | 0 region2 40 * | imc0 +--+- - - - - region0 - - - -+----------+ + 41 * +--+---+ | blk3.0 | pm0.0 | blk3.1 | pm1.0 | 1 region3 42 * | +----------+--------------v----------v v 43 * +--+---+ | | 44 * | cpu0 | region1 45 * +--+---+ | | 46 * | +-------------------------^----------^ ^ 47 * +--+---+ | blk4.0 | pm1.0 | 2 region4 48 * | imc1 +--+-------------------------+----------+ + 49 * +------+ | blk5.0 | pm1.0 | 3 region5 50 * +-------------------------+----------+-+-------+ 51 * 52 * +--+---+ 53 * | cpu1 | 54 * +--+---+ (Hotplug DIMM) 55 * | +----------------------------------------------+ 56 * +--+---+ | blk6.0/pm7.0 | 4 region6/7 57 * | imc0 
+--+----------------------------------------------+
 * +------+
 *
 *
 * *) In this layout we have four dimms and two memory controllers in one
 * socket.  Each unique interface (BLK or PMEM) to DPA space
 * is identified by a region device with a dynamically assigned id.
 *
 * *) The first portion of dimm0 and dimm1 are interleaved as REGION0.
 * A single PMEM namespace "pm0.0" is created using half of the
 * REGION0 SPA-range.  REGION0 spans dimm0 and dimm1.  PMEM namespaces
 * allocate from the bottom of a region.  The unallocated
 * portion of REGION0 aliases with REGION2 and REGION3.  That
 * unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
 * "blk3.0") starting at the base of each DIMM to offset (a) in those
 * DIMMs.  "pm0.0", "blk2.0" and "blk3.0" are free-form readable
 * names that can be assigned to a namespace.
 *
 * *) In the last portion of dimm0 and dimm1 we have an interleaved
 * SPA range, REGION1, that spans those two dimms as well as dimm2
 * and dimm3.  Some of REGION1 is allocated to a PMEM namespace named
 * "pm1.0"; the rest is reclaimed in 4 BLK namespaces (one for each
 * dimm in the interleave set): "blk2.1", "blk3.1", "blk4.0", and
 * "blk5.0".
 *
 * *) The portion of dimm2 and dimm3 that do not participate in the
 * REGION1 interleaved SPA range (i.e. the DPA address below offset
 * (b)) are also included in the "blk4.0" and "blk5.0" namespaces.
 * Note that BLK namespaces need not be contiguous in DPA-space, and
 * can consume aliased capacity from multiple interleave sets.
 *
 * BUS1: Legacy NVDIMM (single contiguous range)
 *
 *  region2
 * +---------------------+
 * |---------------------|
 * ||       pm2.0       ||
 * |---------------------|
 * +---------------------+
 *
 * *) A NFIT-table may describe a simple system-physical-address range
 * with no BLK aliasing.  This type of region may optionally
 * reference an NVDIMM.
 */
/* Fixed sizing parameters for the simulated NFIT topology above. */
enum {
	NUM_PM = 3,		/* PMEM-only SPA ranges */
	NUM_DCR = 5,		/* DIMM control regions (== simulated DIMM slots) */
	NUM_HINTS = 8,		/* flush hint addresses per DIMM */
	NUM_BDW = NUM_DCR,	/* one block data window per control region */
	NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
	NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */
		+ 4 /* spa1 iset */ + 1 /* spa11 iset */,
	DIMM_SIZE = SZ_32M,
	LABEL_SIZE = SZ_128K,
	SPA_VCD_SIZE = SZ_4M,
	SPA0_SIZE = DIMM_SIZE,
	SPA1_SIZE = DIMM_SIZE*2,
	SPA2_SIZE = DIMM_SIZE,
	BDW_SIZE = 64 << 8,
	DCR_SIZE = 12,
	NUM_NFITS = 2, /* permit testing multiple NFITs per system */
};

/*
 * Simulated DIMM control region: block-window address/status registers
 * followed by the data aperture.
 */
struct nfit_test_dcr {
	__le64 bdw_addr;
	__le32 bdw_status;
	__u8 aperature[BDW_SIZE];	/* sic: "aperture"; name kept as-is */
};

/* Pack node/socket/imc/channel/dimm into an NFIT device handle. */
#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
	(((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
	 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))

/* Device handles for the simulated DIMMs (see topology diagram above). */
static u32 handle[] = {
	[0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
	[1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
	[2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
	[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
	[4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
	[5] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
	[6] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 1),
};

/* Per-DIMM bitmask of command numbers forced to fail (set via sysfs). */
static unsigned long dimm_fail_cmd_flags[NUM_DCR];

/* State for the simulated Intel firmware-update DSM sequence. */
struct nfit_test_fw {
	enum intel_fw_update_state state;
	u32 context;		/* token handed out by "start update" */
	u64 version;
	u32 size_received;	/* bytes accepted so far via "send data" */
	u64 end_time;		/* jiffies when the fake "verify" phase ends */
};

/* One simulated NFIT bus instance and all of its backing allocations. */
struct nfit_test {
	struct acpi_nfit_desc acpi_desc;
	struct platform_device pdev;
	struct list_head resources;	/* nfit_test_resource list */
	void *nfit_buf;			/* the generated NFIT table */
	dma_addr_t nfit_dma;
	size_t nfit_size;
	size_t nfit_filled;
	int dcr_idx;			/* first DIMM index owned by this bus */
	int num_dcr;
	int num_pm;
	void **dimm;
	dma_addr_t *dimm_dma;
	void **flush;
	dma_addr_t *flush_dma;
	void **label;
	dma_addr_t *label_dma;
	void **spa_set;
	dma_addr_t *spa_set_dma;
	struct nfit_test_dcr **dcr;
	dma_addr_t *dcr_dma;
	int (*alloc)(struct nfit_test *t);
	void (*setup)(struct nfit_test *t);
	int setup_hotplug;
	union acpi_object **_fit;
	dma_addr_t _fit_dma;
	struct ars_state {
		struct nd_cmd_ars_status *ars_status;
		unsigned long deadline;	/* jiffies when simulated scrub ends */
		spinlock_t lock;
	} ars_state;
	struct device *dimm_dev[NUM_DCR];
	struct nd_intel_smart *smart;
	struct nd_intel_smart_threshold *smart_threshold;
	struct badrange badrange;
	struct work_struct work;	/* deferred uncorrectable-error notify */
	struct nfit_test_fw *fw;
};

static struct workqueue_struct *nfit_wq;

/* Map a platform device back to its containing nfit_test instance. */
static struct nfit_test *to_nfit_test(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return container_of(pdev, struct nfit_test, pdev);
}

/* ND_INTEL_FW_GET_INFO: report static firmware-update capabilities. */
static int nd_intel_test_get_fw_info(struct nfit_test *t,
		struct nd_intel_fw_info *nd_cmd, unsigned int buf_len,
		int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p, buf_len: %u, idx: %d\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	nd_cmd->status = 0;
	nd_cmd->storage_size = INTEL_FW_STORAGE_SIZE;
	nd_cmd->max_send_len = INTEL_FW_MAX_SEND_LEN;
	nd_cmd->query_interval = INTEL_FW_QUERY_INTERVAL;
	nd_cmd->max_query_time = INTEL_FW_QUERY_MAX_TIME;
	nd_cmd->update_cap = 0;
	nd_cmd->fis_version = INTEL_FW_FIS_VERSION;
	nd_cmd->run_version = 0;
	nd_cmd->updated_version = fw->version;

	return 0;
}

/* ND_INTEL_FW_START_UPDATE: begin an update and hand out a fresh context. */
static int nd_intel_test_start_update(struct nfit_test *t,
		struct nd_intel_fw_start *nd_cmd, unsigned int buf_len,
		int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	if (fw->state != FW_STATE_NEW) {
		/* extended status, FW update in progress */
		nd_cmd->status = 0x10007;
		return 0;
	}

	fw->state =
FW_STATE_IN_PROGRESS;
	fw->context++;		/* new token invalidates any stale callers */
	fw->size_received = 0;
	nd_cmd->status = 0;
	nd_cmd->context = fw->context;

	dev_dbg(dev, "%s: context issued: %#x\n", __func__, nd_cmd->context);

	return 0;
}

/* ND_INTEL_FW_SEND_DATA: accept one chunk of the firmware image. */
static int nd_intel_test_send_data(struct nfit_test *t,
		struct nd_intel_fw_send_data *nd_cmd, unsigned int buf_len,
		int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];
	/* the status dword trails the variable-length payload */
	u32 *status = (u32 *)&nd_cmd->data[nd_cmd->length];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;


	dev_dbg(dev, "%s: cmd->status: %#x\n", __func__, *status);
	dev_dbg(dev, "%s: cmd->data[0]: %#x\n", __func__, nd_cmd->data[0]);
	dev_dbg(dev, "%s: cmd->data[%u]: %#x\n", __func__, nd_cmd->length-1,
			nd_cmd->data[nd_cmd->length-1]);

	if (fw->state != FW_STATE_IN_PROGRESS) {
		dev_dbg(dev, "%s: not in IN_PROGRESS state\n", __func__);
		*status = 0x5;
		return 0;
	}

	if (nd_cmd->context != fw->context) {
		dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
				__func__, nd_cmd->context, fw->context);
		*status = 0x10007;
		return 0;
	}

	/*
	 * check offset + len > size of fw storage
	 * check length is > max send length
	 */
	if (nd_cmd->offset + nd_cmd->length > INTEL_FW_STORAGE_SIZE ||
			nd_cmd->length > INTEL_FW_MAX_SEND_LEN) {
		*status = 0x3;
		dev_dbg(dev, "%s: buffer boundary violation\n", __func__);
		return 0;
	}

	/* simulation only counts bytes; the payload itself is discarded */
	fw->size_received += nd_cmd->length;
	dev_dbg(dev, "%s: copying %u bytes, %u bytes so far\n",
			__func__, nd_cmd->length, fw->size_received);
	*status = 0;
	return 0;
}

/* ND_INTEL_FW_FINISH_UPDATE: finish or abort an in-flight update. */
static int nd_intel_test_finish_fw(struct nfit_test *t,
		struct nd_intel_fw_finish_update *nd_cmd,
		unsigned int buf_len, int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
			__func__, t, nd_cmd, buf_len, idx);

	/* NOTE(review): no buf_len < sizeof(*nd_cmd) check here, unlike the
	 * sibling handlers — confirm whether that is intentional. */
	if (fw->state == FW_STATE_UPDATED) {
		/* update already done, need cold boot */
		nd_cmd->status = 0x20007;
		return 0;
	}

	dev_dbg(dev, "%s: context: %#x ctrl_flags: %#x\n",
			__func__, nd_cmd->context, nd_cmd->ctrl_flags);

	switch (nd_cmd->ctrl_flags) {
	case 0: /* finish */
		if (nd_cmd->context != fw->context) {
			dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
					__func__, nd_cmd->context,
					fw->context);
			nd_cmd->status = 0x10007;
			return 0;
		}
		nd_cmd->status = 0;
		fw->state = FW_STATE_VERIFY;
		/* set 1 second of time for firmware "update" */
		fw->end_time = jiffies + HZ;
		break;

	case 1: /* abort */
		fw->size_received = 0;
		/* successfully aborted status */
		nd_cmd->status = 0x40007;
		fw->state = FW_STATE_NEW;
		dev_dbg(dev, "%s: abort successful\n", __func__);
		break;

	default: /* bad control flag */
		dev_warn(dev, "%s: unknown control flag: %#x\n",
				__func__, nd_cmd->ctrl_flags);
		return -EINVAL;
	}

	return 0;
}

/* ND_INTEL_FW_FINISH_QUERY: poll the result of a finished update. */
static int nd_intel_test_finish_query(struct nfit_test *t,
		struct nd_intel_fw_finish_query *nd_cmd,
		unsigned int buf_len, int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	if (nd_cmd->context != fw->context) {
		dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
				__func__, nd_cmd->context, fw->context);
		nd_cmd->status = 0x10007;
		return 0;
	}

	dev_dbg(dev, "%s context: %#x\n", __func__, nd_cmd->context);

	switch (fw->state) {
	case FW_STATE_NEW:
		nd_cmd->updated_fw_rev = 0;
		nd_cmd->status = 0;
		dev_dbg(dev, "%s: new state\n", __func__);
		break;

	case FW_STATE_IN_PROGRESS:
		/* sequencing error */
		nd_cmd->status = 0x40007;
		nd_cmd->updated_fw_rev = 0;
		dev_dbg(dev, "%s: sequence error\n", __func__);
		break;

	case FW_STATE_VERIFY:
		/* NOTE(review): end_time was set from 'jiffies' but is
		 * compared with time_is_after_jiffies64() — confirm the
		 * jiffies vs jiffies_64 mix is intended. */
		if (time_is_after_jiffies64(fw->end_time)) {
			nd_cmd->updated_fw_rev = 0;
			nd_cmd->status = 0x20007;
			dev_dbg(dev, "%s: still verifying\n", __func__);
			break;
		}

		dev_dbg(dev, "%s: transition out verify\n", __func__);
		fw->state = FW_STATE_UPDATED;
		/* we are going to fall through if it's "done" */
	case FW_STATE_UPDATED:
		nd_cmd->status = 0;
		/* bogus test version */
		fw->version = nd_cmd->updated_fw_rev =
			INTEL_FW_FAKE_VERSION;
		dev_dbg(dev, "%s: updated\n", __func__);
		break;

	default: /* we should never get here */
		return -EINVAL;
	}

	return 0;
}

/* ND_CMD_GET_CONFIG_SIZE: report the label-area size and transfer limit. */
static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
		unsigned int buf_len)
{
	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	nd_cmd->status = 0;
	nd_cmd->config_size = LABEL_SIZE;
	nd_cmd->max_xfer = SZ_4K;

	return 0;
}

/*
 * ND_CMD_GET_CONFIG_DATA: copy a window of the simulated label area into
 * the caller's buffer.  Returns the number of unused trailing bytes.
 */
static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
		*nd_cmd, unsigned int buf_len, void *label)
{
	unsigned int len, offset = nd_cmd->in_offset;
	int rc;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	if (offset >= LABEL_SIZE)
		return -EINVAL;
	if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
		return -EINVAL;

	nd_cmd->status = 0;
	/* clamp the read so it cannot run past the end of the label area */
	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
	memcpy(nd_cmd->out_buf, label + offset, len);
	rc = buf_len - sizeof(*nd_cmd) - len;

	return rc;
}

/* ND_CMD_SET_CONFIG_DATA: write a window of the simulated label area. */
static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
		unsigned int buf_len, void *label)
{
	unsigned int len, offset = nd_cmd->in_offset;
	u32
*status;
	int rc;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	if (offset >= LABEL_SIZE)
		return -EINVAL;
	/* +4 accounts for the status dword that trails the input payload */
	if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
		return -EINVAL;

	status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
	*status = 0;
	/* clamp the write so it cannot run past the end of the label area */
	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
	memcpy(label + offset, nd_cmd->in_buf, len);
	rc = buf_len - sizeof(*nd_cmd) - (len + 4);

	return rc;
}

#define NFIT_TEST_CLEAR_ERR_UNIT 256

/* ND_CMD_ARS_CAP: advertise the maximum status payload and clear unit. */
static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
		unsigned int buf_len)
{
	int ars_recs;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	/* for testing, only store up to n records that fit within 4k */
	ars_recs = SZ_4K / sizeof(struct nd_ars_record);

	nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
		+ ars_recs * sizeof(struct nd_ars_record);
	nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
	nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT;

	return 0;
}

/*
 * Populate the cached ars_status payload with every badrange entry that
 * intersects [addr, addr + len), and arm a one-second simulated scrub
 * deadline during which ARS reports busy.
 */
static void post_ars_status(struct ars_state *ars_state,
		struct badrange *badrange, u64 addr, u64 len)
{
	struct nd_cmd_ars_status *ars_status;
	struct nd_ars_record *ars_record;
	struct badrange_entry *be;
	u64 end = addr + len - 1;
	int i = 0;

	ars_state->deadline = jiffies + 1*HZ;
	ars_status = ars_state->ars_status;
	ars_status->status = 0;
	ars_status->address = addr;
	ars_status->length = len;
	ars_status->type = ND_ARS_PERSISTENT;

	spin_lock(&badrange->lock);
	list_for_each_entry(be, &badrange->list, list) {
		u64 be_end = be->start + be->length - 1;
		u64 rstart, rend;

		/* skip entries outside the range */
		if (be_end < addr || be->start > end)
			continue;

		/* clip the entry to the scrubbed range */
		rstart = (be->start < addr) ? addr : be->start;
		rend = (be_end < end) ?
be_end : end; 523 ars_record = &ars_status->records[i]; 524 ars_record->handle = 0; 525 ars_record->err_address = rstart; 526 ars_record->length = rend - rstart + 1; 527 i++; 528 } 529 spin_unlock(&badrange->lock); 530 ars_status->num_records = i; 531 ars_status->out_length = sizeof(struct nd_cmd_ars_status) 532 + i * sizeof(struct nd_ars_record); 533 } 534 535 static int nfit_test_cmd_ars_start(struct nfit_test *t, 536 struct ars_state *ars_state, 537 struct nd_cmd_ars_start *ars_start, unsigned int buf_len, 538 int *cmd_rc) 539 { 540 if (buf_len < sizeof(*ars_start)) 541 return -EINVAL; 542 543 spin_lock(&ars_state->lock); 544 if (time_before(jiffies, ars_state->deadline)) { 545 ars_start->status = NFIT_ARS_START_BUSY; 546 *cmd_rc = -EBUSY; 547 } else { 548 ars_start->status = 0; 549 ars_start->scrub_time = 1; 550 post_ars_status(ars_state, &t->badrange, ars_start->address, 551 ars_start->length); 552 *cmd_rc = 0; 553 } 554 spin_unlock(&ars_state->lock); 555 556 return 0; 557 } 558 559 static int nfit_test_cmd_ars_status(struct ars_state *ars_state, 560 struct nd_cmd_ars_status *ars_status, unsigned int buf_len, 561 int *cmd_rc) 562 { 563 if (buf_len < ars_state->ars_status->out_length) 564 return -EINVAL; 565 566 spin_lock(&ars_state->lock); 567 if (time_before(jiffies, ars_state->deadline)) { 568 memset(ars_status, 0, buf_len); 569 ars_status->status = NFIT_ARS_STATUS_BUSY; 570 ars_status->out_length = sizeof(*ars_status); 571 *cmd_rc = -EBUSY; 572 } else { 573 memcpy(ars_status, ars_state->ars_status, 574 ars_state->ars_status->out_length); 575 *cmd_rc = 0; 576 } 577 spin_unlock(&ars_state->lock); 578 return 0; 579 } 580 581 static int nfit_test_cmd_clear_error(struct nfit_test *t, 582 struct nd_cmd_clear_error *clear_err, 583 unsigned int buf_len, int *cmd_rc) 584 { 585 const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1; 586 if (buf_len < sizeof(*clear_err)) 587 return -EINVAL; 588 589 if ((clear_err->address & mask) || (clear_err->length & mask)) 590 return 
-EINVAL;

	badrange_forget(&t->badrange, clear_err->address, clear_err->length);
	clear_err->status = 0;
	clear_err->cleared = clear_err->length;
	*cmd_rc = 0;
	return 0;
}

/* Context for walking the bus children looking for a region by SPA. */
struct region_search_spa {
	u64 addr;
	struct nd_region *region;
};

/* Region devices are recognized purely by their "regionN" kobject name. */
static int is_region_device(struct device *dev)
{
	return !strncmp(dev->kobj.name, "region", 6);
}

/*
 * device_for_each_child() callback: record the region whose span contains
 * ctx->addr and stop the iteration (non-zero return).
 */
static int nfit_test_search_region_spa(struct device *dev, void *data)
{
	struct region_search_spa *ctx = data;
	struct nd_region *nd_region;
	resource_size_t ndr_end;

	if (!is_region_device(dev))
		return 0;

	nd_region = to_nd_region(dev);
	ndr_end = nd_region->ndr_start + nd_region->ndr_size;

	if (ctx->addr >= nd_region->ndr_start && ctx->addr < ndr_end) {
		ctx->region = nd_region;
		return 1;
	}

	return 0;
}

/*
 * Resolve a system-physical-address to a (DIMM handle, DPA) pair for the
 * translate-SPA command.  For the test, the last mapping of the matching
 * region is always reported.
 */
static int nfit_test_search_spa(struct nvdimm_bus *bus,
		struct nd_cmd_translate_spa *spa)
{
	int ret;
	struct nd_region *nd_region = NULL;
	struct nvdimm *nvdimm = NULL;
	struct nd_mapping *nd_mapping = NULL;
	struct region_search_spa ctx = {
		.addr = spa->spa,
		.region = NULL,
	};
	u64 dpa;

	ret = device_for_each_child(&bus->dev, &ctx,
			nfit_test_search_region_spa);

	if (!ret)
		return -ENODEV;

	nd_region = ctx.region;

	dpa = ctx.addr - nd_region->ndr_start;

	/*
	 * last dimm is selected for test
	 */
	nd_mapping = &nd_region->mapping[nd_region->ndr_mappings - 1];
	nvdimm = nd_mapping->nvdimm;

	spa->devices[0].nfit_device_handle = handle[nvdimm->id];
	spa->num_nvdimms = 1;
	spa->devices[0].dpa = dpa;

	return 0;
}

/* NFIT_CMD_TRANSLATE_SPA: status 2 flags an untranslatable address. */
static int nfit_test_cmd_translate_spa(struct nvdimm_bus *bus,
		struct nd_cmd_translate_spa *spa, unsigned int buf_len)
{
	if (buf_len < spa->translate_length)
		return -EINVAL;

	if (nfit_test_search_spa(bus, spa) < 0 || !spa->num_nvdimms)
		spa->status = 2;

	return 0;
}

/* ND_INTEL_SMART: return the simulated health payload for the DIMM. */
static int nfit_test_cmd_smart(struct nd_intel_smart *smart, unsigned int buf_len,
		struct nd_intel_smart *smart_data)
{
	if (buf_len < sizeof(*smart))
		return -EINVAL;
	memcpy(smart, smart_data, sizeof(*smart));
	return 0;
}

/* ND_INTEL_SMART_THRESHOLD: return the simulated threshold payload. */
static int nfit_test_cmd_smart_threshold(
		struct nd_intel_smart_threshold *out,
		unsigned int buf_len,
		struct nd_intel_smart_threshold *smart_t)
{
	if (buf_len < sizeof(*smart_t))
		return -EINVAL;
	memcpy(out, smart_t, sizeof(*smart_t));
	return 0;
}

/*
 * Raise an 0x81 health notification on the DIMM when any armed threshold
 * is tripped, or unconditionally on critical health / unsafe shutdown.
 */
static void smart_notify(struct device *bus_dev,
		struct device *dimm_dev, struct nd_intel_smart *smart,
		struct nd_intel_smart_threshold *thresh)
{
	dev_dbg(dimm_dev, "%s: alarm: %#x spares: %d (%d) mtemp: %d (%d) ctemp: %d (%d)\n",
			__func__, thresh->alarm_control, thresh->spares,
			smart->spares, thresh->media_temperature,
			smart->media_temperature, thresh->ctrl_temperature,
			smart->ctrl_temperature);
	if (((thresh->alarm_control & ND_INTEL_SMART_SPARE_TRIP)
				&& smart->spares
				<= thresh->spares)
			|| ((thresh->alarm_control & ND_INTEL_SMART_TEMP_TRIP)
				&& smart->media_temperature
				>= thresh->media_temperature)
			|| ((thresh->alarm_control & ND_INTEL_SMART_CTEMP_TRIP)
				&& smart->ctrl_temperature
				>= thresh->ctrl_temperature)
			|| (smart->health != ND_INTEL_SMART_NON_CRITICAL_HEALTH)
			|| (smart->shutdown_state != 0)) {
		device_lock(bus_dev);
		__acpi_nvdimm_notify(dimm_dev, 0x81);
		device_unlock(bus_dev);
	}
}

/*
 * ND_INTEL_SMART_SET_THRESHOLD: install new thresholds, then re-evaluate
 * whether a health notification should fire.
 */
static int nfit_test_cmd_smart_set_threshold(
		struct nd_intel_smart_set_threshold *in,
		unsigned int buf_len,
		struct nd_intel_smart_threshold *thresh,
		struct nd_intel_smart *smart,
		struct device *bus_dev, struct device *dimm_dev)
{
	unsigned int size;

	/* the trailing status dword of the input is not part of the data */
	size = sizeof(*in) - 4;
	if (buf_len < size)
		return -EINVAL;
	memcpy(thresh->data, in, size);
	in->status = 0;
	smart_notify(bus_dev, dimm_dev, smart, thresh);

	return 0;
}

/*
 * ND_INTEL_SMART_INJECT: overwrite selected fields of the simulated
 * health payload, then re-evaluate the notification condition.
 */
static int nfit_test_cmd_smart_inject(
		struct nd_intel_smart_inject *inj,
		unsigned int buf_len,
		struct nd_intel_smart_threshold *thresh,
		struct nd_intel_smart *smart,
		struct device *bus_dev, struct device *dimm_dev)
{
	if (buf_len != sizeof(*inj))
		return -EINVAL;

	if (inj->mtemp_enable)
		smart->media_temperature = inj->media_temperature;
	if (inj->spare_enable)
		smart->spares = inj->spares;
	if (inj->fatal_enable)
		smart->health = ND_INTEL_SMART_FATAL_HEALTH;
	if (inj->unsafe_shutdown_enable) {
		smart->shutdown_state = 1;
		smart->shutdown_count++;
	}
	inj->status = 0;
	smart_notify(bus_dev, dimm_dev, smart, thresh);

	return 0;
}

/* Deferred work: raise an uncorrectable-memory-error NFIT notification. */
static void uc_error_notify(struct work_struct *work)
{
	struct nfit_test *t = container_of(work, typeof(*t), work);

	__acpi_nfit_notify(&t->pdev.dev, t, NFIT_NOTIFY_UC_MEMORY_ERROR);
}

/*
 * NFIT_CMD_ARS_INJECT_SET: add a poison range to the badrange list and
 * optionally schedule an async notification.
 */
static int nfit_test_cmd_ars_error_inject(struct nfit_test *t,
		struct nd_cmd_ars_err_inj *err_inj, unsigned int buf_len)
{
	int rc;

	if (buf_len != sizeof(*err_inj)) {
		rc = -EINVAL;
		goto err;
	}

	if (err_inj->err_inj_spa_range_length <= 0) {
		rc = -EINVAL;
		goto err;
	}

	rc = badrange_add(&t->badrange, err_inj->err_inj_spa_range_base,
			err_inj->err_inj_spa_range_length);
	if (rc < 0)
		goto err;

	if (err_inj->err_inj_options & (1 << ND_ARS_ERR_INJ_OPT_NOTIFY))
		queue_work(nfit_wq, &t->work);

	err_inj->status = 0;
	return 0;

err:
	err_inj->status = NFIT_ARS_INJECT_INVALID;
	return rc;
}

/* NFIT_CMD_ARS_INJECT_CLEAR: remove a previously injected poison range. */
static int nfit_test_cmd_ars_inject_clear(struct nfit_test *t,
		struct nd_cmd_ars_err_inj_clr *err_clr, unsigned int buf_len)
{
	int rc;

	if (buf_len != sizeof(*err_clr)) {
		rc = -EINVAL;
		goto err;
	}

	if
(err_clr->err_inj_clr_spa_range_length <= 0) { 817 rc = -EINVAL; 818 goto err; 819 } 820 821 badrange_forget(&t->badrange, err_clr->err_inj_clr_spa_range_base, 822 err_clr->err_inj_clr_spa_range_length); 823 824 err_clr->status = 0; 825 return 0; 826 827 err: 828 err_clr->status = NFIT_ARS_INJECT_INVALID; 829 return rc; 830 } 831 832 static int nfit_test_cmd_ars_inject_status(struct nfit_test *t, 833 struct nd_cmd_ars_err_inj_stat *err_stat, 834 unsigned int buf_len) 835 { 836 struct badrange_entry *be; 837 int max = SZ_4K / sizeof(struct nd_error_stat_query_record); 838 int i = 0; 839 840 err_stat->status = 0; 841 spin_lock(&t->badrange.lock); 842 list_for_each_entry(be, &t->badrange.list, list) { 843 err_stat->record[i].err_inj_stat_spa_range_base = be->start; 844 err_stat->record[i].err_inj_stat_spa_range_length = be->length; 845 i++; 846 if (i > max) 847 break; 848 } 849 spin_unlock(&t->badrange.lock); 850 err_stat->inj_err_rec_count = i; 851 852 return 0; 853 } 854 855 static int nd_intel_test_cmd_set_lss_status(struct nfit_test *t, 856 struct nd_intel_lss *nd_cmd, unsigned int buf_len) 857 { 858 struct device *dev = &t->pdev.dev; 859 860 if (buf_len < sizeof(*nd_cmd)) 861 return -EINVAL; 862 863 switch (nd_cmd->enable) { 864 case 0: 865 nd_cmd->status = 0; 866 dev_dbg(dev, "%s: Latch System Shutdown Status disabled\n", 867 __func__); 868 break; 869 case 1: 870 nd_cmd->status = 0; 871 dev_dbg(dev, "%s: Latch System Shutdown Status enabled\n", 872 __func__); 873 break; 874 default: 875 dev_warn(dev, "Unknown enable value: %#x\n", nd_cmd->enable); 876 nd_cmd->status = 0x3; 877 break; 878 } 879 880 881 return 0; 882 } 883 884 static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func) 885 { 886 int i; 887 888 /* lookup per-dimm data */ 889 for (i = 0; i < ARRAY_SIZE(handle); i++) 890 if (__to_nfit_memdev(nfit_mem)->device_handle == handle[i]) 891 break; 892 if (i >= ARRAY_SIZE(handle)) 893 return -ENXIO; 894 895 if ((1 << func) & dimm_fail_cmd_flags[i]) 896 
		return -EIO;

	return i;
}

/*
 * Bus-descriptor ->ndctl() entry point for the simulated bus.  Routes
 * DIMM-scoped commands (nvdimm != NULL) and bus-scoped commands
 * (nvdimm == NULL) to the handlers above, unwrapping ND_CMD_CALL
 * packages into (func, payload) first.
 */
static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
	unsigned int func = cmd;
	int i, rc = 0, __cmd_rc;

	/* callers may pass a NULL cmd_rc; point it at a local in that case */
	if (!cmd_rc)
		cmd_rc = &__cmd_rc;
	*cmd_rc = 0;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);

		if (!nfit_mem)
			return -ENOTTY;

		if (cmd == ND_CMD_CALL) {
			struct nd_cmd_pkg *call_pkg = buf;

			/* unwrap the package: payload becomes the buffer */
			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
			buf = (void *) call_pkg->nd_payload;
			func = call_pkg->nd_command;
			if (call_pkg->nd_family != nfit_mem->family)
				return -ENOTTY;

			i = get_dimm(nfit_mem, func);
			if (i < 0)
				return i;

			/* i - t->dcr_idx converts to this bus's local index */
			switch (func) {
			case ND_INTEL_ENABLE_LSS_STATUS:
				return nd_intel_test_cmd_set_lss_status(t,
						buf, buf_len);
			case ND_INTEL_FW_GET_INFO:
				return nd_intel_test_get_fw_info(t, buf,
						buf_len, i - t->dcr_idx);
			case ND_INTEL_FW_START_UPDATE:
				return nd_intel_test_start_update(t, buf,
						buf_len, i - t->dcr_idx);
			case ND_INTEL_FW_SEND_DATA:
				return nd_intel_test_send_data(t, buf,
						buf_len, i - t->dcr_idx);
			case ND_INTEL_FW_FINISH_UPDATE:
				return nd_intel_test_finish_fw(t, buf,
						buf_len, i - t->dcr_idx);
			case ND_INTEL_FW_FINISH_QUERY:
				return nd_intel_test_finish_query(t, buf,
						buf_len, i - t->dcr_idx);
			case ND_INTEL_SMART:
				return nfit_test_cmd_smart(buf, buf_len,
						&t->smart[i - t->dcr_idx]);
			case ND_INTEL_SMART_THRESHOLD:
				return nfit_test_cmd_smart_threshold(buf,
						buf_len,
						&t->smart_threshold[i -
							t->dcr_idx]);
			case ND_INTEL_SMART_SET_THRESHOLD:
				return nfit_test_cmd_smart_set_threshold(buf,
						buf_len,
						&t->smart_threshold[i -
							t->dcr_idx],
						&t->smart[i - t->dcr_idx],
						&t->pdev.dev, t->dimm_dev[i]);
			case ND_INTEL_SMART_INJECT:
				return nfit_test_cmd_smart_inject(buf,
						buf_len,
						&t->smart_threshold[i -
							t->dcr_idx],
						&t->smart[i - t->dcr_idx],
						&t->pdev.dev, t->dimm_dev[i]);
			default:
				return -ENOTTY;
			}
		}

		if (!test_bit(cmd, &cmd_mask)
				|| !test_bit(func, &nfit_mem->dsm_mask))
			return -ENOTTY;

		i = get_dimm(nfit_mem, func);
		if (i < 0)
			return i;

		switch (func) {
		case ND_CMD_GET_CONFIG_SIZE:
			rc = nfit_test_cmd_get_config_size(buf, buf_len);
			break;
		case ND_CMD_GET_CONFIG_DATA:
			rc = nfit_test_cmd_get_config_data(buf, buf_len,
				t->label[i - t->dcr_idx]);
			break;
		case ND_CMD_SET_CONFIG_DATA:
			rc = nfit_test_cmd_set_config_data(buf, buf_len,
				t->label[i - t->dcr_idx]);
			break;
		default:
			return -ENOTTY;
		}
	} else {
		struct ars_state *ars_state = &t->ars_state;
		struct nd_cmd_pkg *call_pkg = buf;

		if (!nd_desc)
			return -ENOTTY;

		if (cmd == ND_CMD_CALL) {
			func = call_pkg->nd_command;

			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
			buf = (void *) call_pkg->nd_payload;

			switch (func) {
			case NFIT_CMD_TRANSLATE_SPA:
				rc = nfit_test_cmd_translate_spa(
					acpi_desc->nvdimm_bus, buf, buf_len);
				return rc;
			case NFIT_CMD_ARS_INJECT_SET:
				rc = nfit_test_cmd_ars_error_inject(t, buf,
					buf_len);
				return rc;
			case NFIT_CMD_ARS_INJECT_CLEAR:
				rc = nfit_test_cmd_ars_inject_clear(t, buf,
					buf_len);
				return rc;
			case NFIT_CMD_ARS_INJECT_GET:
				rc = nfit_test_cmd_ars_inject_status(t, buf,
					buf_len);
				return rc;
			default:
				return -ENOTTY;
			}
		}

		if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask))
			return -ENOTTY;

		switch (func) {
		case ND_CMD_ARS_CAP:
			rc = nfit_test_cmd_ars_cap(buf, buf_len);
			break;
		case ND_CMD_ARS_START:
			rc = nfit_test_cmd_ars_start(t, ars_state, buf,
					buf_len, cmd_rc);
			break;
		case ND_CMD_ARS_STATUS:
			rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len,
					cmd_rc);
			break;
		case ND_CMD_CLEAR_ERROR:
			rc = nfit_test_cmd_clear_error(t, buf, buf_len, cmd_rc);
			break;
		default:
			return -ENOTTY;
		}
	}

	return rc;
}

static DEFINE_SPINLOCK(nfit_test_lock);
static struct nfit_test *instances[NUM_NFITS];

/* devm action: unlink and free one tracked test allocation. */
static void release_nfit_res(void *data)
{
	struct nfit_test_resource *nfit_res = data;

	spin_lock(&nfit_test_lock);
	list_del(&nfit_res->list);
	spin_unlock(&nfit_test_lock);

	vfree(nfit_res->buf);
	kfree(nfit_res);
}

/*
 * Wrap a vmalloc'd buffer in an nfit_test_resource so nfit_test_lookup()
 * can resolve it by fake "physical" address, and tie its lifetime to the
 * platform device via devm.  On failure both buf and the tracking struct
 * are released.
 */
static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
		void *buf)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
			GFP_KERNEL);
	int rc;

	if (!buf || !nfit_res)
		goto err;
	rc = devm_add_action(dev, release_nfit_res, nfit_res);
	if (rc)
		goto err;
	INIT_LIST_HEAD(&nfit_res->list);
	memset(buf, 0, size);
	nfit_res->dev = dev;
	nfit_res->buf = buf;
	nfit_res->res.start = *dma;
	nfit_res->res.end = *dma + size - 1;
	nfit_res->res.name = "NFIT";
	spin_lock_init(&nfit_res->lock);
	INIT_LIST_HEAD(&nfit_res->requests);
	spin_lock(&nfit_test_lock);
	list_add(&nfit_res->list, &t->resources);
	spin_unlock(&nfit_test_lock);

	return nfit_res->buf;
 err:
	if (buf)
		vfree(buf);
	kfree(nfit_res);
	return NULL;
}

/* Allocate a tracked buffer; its kernel address doubles as the fake DMA
 * address handed back through *dma. */
static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
{
	void *buf = vmalloc(size);

	*dma = (unsigned long) buf;
	return __test_alloc(t, size, dma, buf);
}

static
struct nfit_test_resource *nfit_test_lookup(resource_size_t addr) 1122 { 1123 int i; 1124 1125 for (i = 0; i < ARRAY_SIZE(instances); i++) { 1126 struct nfit_test_resource *n, *nfit_res = NULL; 1127 struct nfit_test *t = instances[i]; 1128 1129 if (!t) 1130 continue; 1131 spin_lock(&nfit_test_lock); 1132 list_for_each_entry(n, &t->resources, list) { 1133 if (addr >= n->res.start && (addr < n->res.start 1134 + resource_size(&n->res))) { 1135 nfit_res = n; 1136 break; 1137 } else if (addr >= (unsigned long) n->buf 1138 && (addr < (unsigned long) n->buf 1139 + resource_size(&n->res))) { 1140 nfit_res = n; 1141 break; 1142 } 1143 } 1144 spin_unlock(&nfit_test_lock); 1145 if (nfit_res) 1146 return nfit_res; 1147 } 1148 1149 return NULL; 1150 } 1151 1152 static int ars_state_init(struct device *dev, struct ars_state *ars_state) 1153 { 1154 /* for testing, only store up to n records that fit within 4k */ 1155 ars_state->ars_status = devm_kzalloc(dev, 1156 sizeof(struct nd_cmd_ars_status) + SZ_4K, GFP_KERNEL); 1157 if (!ars_state->ars_status) 1158 return -ENOMEM; 1159 spin_lock_init(&ars_state->lock); 1160 return 0; 1161 } 1162 1163 static void put_dimms(void *data) 1164 { 1165 struct device **dimm_dev = data; 1166 int i; 1167 1168 for (i = 0; i < NUM_DCR; i++) 1169 if (dimm_dev[i]) 1170 device_unregister(dimm_dev[i]); 1171 } 1172 1173 static struct class *nfit_test_dimm; 1174 1175 static int dimm_name_to_id(struct device *dev) 1176 { 1177 int dimm; 1178 1179 if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1 1180 || dimm >= NUM_DCR || dimm < 0) 1181 return -ENXIO; 1182 return dimm; 1183 } 1184 1185 1186 static ssize_t handle_show(struct device *dev, struct device_attribute *attr, 1187 char *buf) 1188 { 1189 int dimm = dimm_name_to_id(dev); 1190 1191 if (dimm < 0) 1192 return dimm; 1193 1194 return sprintf(buf, "%#x", handle[dimm]); 1195 } 1196 DEVICE_ATTR_RO(handle); 1197 1198 static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr, 1199 char 
*buf)
{
	int dimm = dimm_name_to_id(dev);

	if (dimm < 0)
		return dimm;

	return sprintf(buf, "%#lx\n", dimm_fail_cmd_flags[dimm]);
}

/* Set the per-dimm mask of commands that should be forced to fail. */
static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t size)
{
	int dimm = dimm_name_to_id(dev);
	unsigned long val;
	ssize_t rc;

	if (dimm < 0)
		return dimm;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	dimm_fail_cmd_flags[dimm] = val;
	return size;
}
static DEVICE_ATTR_RW(fail_cmd);

/* sysfs attributes exported by each test_dimm%d device */
static struct attribute *nfit_test_dimm_attributes[] = {
	&dev_attr_fail_cmd.attr,
	&dev_attr_handle.attr,
	NULL,
};

static struct attribute_group nfit_test_dimm_attribute_group = {
	.attrs = nfit_test_dimm_attributes,
};

static const struct attribute_group *nfit_test_dimm_attribute_groups[] = {
	&nfit_test_dimm_attribute_group,
	NULL,
};

/*
 * Seed every dimm's SMART payload and SMART thresholds with fixed
 * baseline values so the smart command handlers have sane data to
 * report and mutate.  Temperatures are in 1/16ths of a degree.
 */
static void smart_init(struct nfit_test *t)
{
	int i;
	const struct nd_intel_smart_threshold smart_t_data = {
		.alarm_control = ND_INTEL_SMART_SPARE_TRIP
			| ND_INTEL_SMART_TEMP_TRIP,
		.media_temperature = 40 * 16,
		.ctrl_temperature = 30 * 16,
		.spares = 5,
	};
	const struct nd_intel_smart smart_data = {
		.flags = ND_INTEL_SMART_HEALTH_VALID
			| ND_INTEL_SMART_SPARES_VALID
			| ND_INTEL_SMART_ALARM_VALID
			| ND_INTEL_SMART_USED_VALID
			| ND_INTEL_SMART_SHUTDOWN_VALID
			| ND_INTEL_SMART_MTEMP_VALID,
		.health = ND_INTEL_SMART_NON_CRITICAL_HEALTH,
		.media_temperature = 23 * 16,
		.ctrl_temperature = 25 * 16,
		.pmic_temperature = 40 * 16,
		.spares = 75,
		.alarm_flags = ND_INTEL_SMART_SPARE_TRIP
			| ND_INTEL_SMART_TEMP_TRIP,
		.ait_status = 1,
		.life_used = 5,
		.shutdown_state = 0,
		.vendor_size = 0,
		.shutdown_count = 100,
	};

	for (i = 0; i < t->num_dcr;
i++) {
		memcpy(&t->smart[i], &smart_data, sizeof(smart_data));
		memcpy(&t->smart_threshold[i], &smart_t_data,
				sizeof(smart_t_data));
	}
}

/*
 * Allocate every buffer backing bus 0: the NFIT itself (sized to the
 * exact set of tables nfit_test0_setup() will emit), the interleaved
 * SPA ranges, per-dimm label/flush/dcr space, and the test_dimm%d
 * sysfs devices.  All allocations are device-managed.
 */
static int nfit_test0_alloc(struct nfit_test *t)
{
	/* must stay in sync with the tables built in nfit_test0_setup() */
	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
			+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
			+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
			+ offsetof(struct acpi_nfit_control_region,
					window_size) * NUM_DCR
			+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
			+ (sizeof(struct acpi_nfit_flush_address)
					+ sizeof(u64) * NUM_HINTS) * NUM_DCR
			+ sizeof(struct acpi_nfit_capabilities);
	int i;

	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
	if (!t->nfit_buf)
		return -ENOMEM;
	t->nfit_size = nfit_size;

	t->spa_set[0] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[0]);
	if (!t->spa_set[0])
		return -ENOMEM;

	t->spa_set[1] = test_alloc(t, SPA1_SIZE, &t->spa_set_dma[1]);
	if (!t->spa_set[1])
		return -ENOMEM;

	/* spa_set[2] backs the hotplug dimm's pmem range (spa11) */
	t->spa_set[2] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[2]);
	if (!t->spa_set[2])
		return -ENOMEM;

	for (i = 0; i < t->num_dcr; i++) {
		t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
		if (!t->dimm[i])
			return -ENOMEM;

		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
		if (!t->label[i])
			return -ENOMEM;
		sprintf(t->label[i], "label%d", i);

		t->flush[i] = test_alloc(t, max(PAGE_SIZE,
					sizeof(u64) * NUM_HINTS),
				&t->flush_dma[i]);
		if (!t->flush[i])
			return -ENOMEM;
	}

	for (i = 0; i < t->num_dcr; i++) {
		t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
		if (!t->dcr[i])
			return -ENOMEM;
	}

	t->_fit = test_alloc(t, sizeof(union acpi_object **), &t->_fit_dma);
	if (!t->_fit)
		return -ENOMEM;

	/* register cleanup before creating any test_dimm devices */
	if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t->dimm_dev))
return -ENOMEM;
	for (i = 0; i < NUM_DCR; i++) {
		t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm,
				&t->pdev.dev, 0, NULL,
				nfit_test_dimm_attribute_groups,
				"test_dimm%d", i);
		if (!t->dimm_dev[i])
			return -ENOMEM;
	}

	smart_init(t);
	return ars_state_init(&t->pdev.dev, &t->ars_state);
}

/*
 * Allocate the buffers backing bus 1 (the legacy, non-aliased NVDIMM
 * topology): a smaller NFIT, one flat SPA range, per-dimm labels, and
 * a virtual-CD region.
 */
static int nfit_test1_alloc(struct nfit_test *t)
{
	/* must stay in sync with the tables built in nfit_test1_setup() */
	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * 2
		+ sizeof(struct acpi_nfit_memory_map) * 2
		+ offsetof(struct acpi_nfit_control_region, window_size) * 2;
	int i;

	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
	if (!t->nfit_buf)
		return -ENOMEM;
	t->nfit_size = nfit_size;

	t->spa_set[0] = test_alloc(t, SPA2_SIZE, &t->spa_set_dma[0]);
	if (!t->spa_set[0])
		return -ENOMEM;

	for (i = 0; i < t->num_dcr; i++) {
		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
		if (!t->label[i])
			return -ENOMEM;
		sprintf(t->label[i], "label%d", i);
	}

	t->spa_set[1] = test_alloc(t, SPA_VCD_SIZE, &t->spa_set_dma[1]);
	if (!t->spa_set[1])
		return -ENOMEM;

	smart_init(t);
	return ars_state_init(&t->pdev.dev, &t->ars_state);
}

/* Fill the control-region fields that are common to every descriptor. */
static void dcr_common_init(struct acpi_nfit_control_region *dcr)
{
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->valid_fields = 1;
	dcr->manufacturing_location = 0xa;
	dcr->manufacturing_date = cpu_to_be16(2016);
}

/*
 * Emit the full NFIT for bus 0, table by table, into t->nfit_buf.
 * The layout (SPAs, memdevs, DCRs, BDWs, flush hints, capabilities,
 * plus the optional hotplug dimm) matches the topology diagram at the
 * top of this file; the running 'offset' must land exactly on the
 * size computed in nfit_test0_alloc().
 */
static void nfit_test0_setup(struct nfit_test *t)
{
	const int flush_hint_size = sizeof(struct acpi_nfit_flush_address)
		+ (sizeof(u64) * NUM_HINTS);
	struct acpi_nfit_desc *acpi_desc;
	struct acpi_nfit_memory_map *memdev;
	void *nfit_buf = t->nfit_buf;
	struct acpi_nfit_system_address *spa;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_data_region *bdw;
	struct
acpi_nfit_flush_address *flush;
	struct acpi_nfit_capabilities *pcap;
	unsigned int offset = 0, i;

	/*
	 * spa0 (interleave first half of dimm0 and dimm1, note storage
	 * does not actually alias the related block-data-window
	 * regions)
	 */
	spa = nfit_buf;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA0_SIZE;
	offset += spa->header.length;

	/*
	 * spa1 (interleave last half of the 4 DIMMS, note storage
	 * does not actually alias the related block-data-window
	 * regions)
	 */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 1+1;
	spa->address = t->spa_set_dma[1];
	spa->length = SPA1_SIZE;
	offset += spa->header.length;

	/* spa2 (dcr0) dimm0 */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 2+1;
	spa->address = t->dcr_dma[0];
	spa->length = DCR_SIZE;
	offset += spa->header.length;

	/* spa3 (dcr1) dimm1 */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 3+1;
	spa->address = t->dcr_dma[1];
	spa->length = DCR_SIZE;
	offset += spa->header.length;

	/* spa4 (dcr2) dimm2 */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 4+1;
	spa->address = t->dcr_dma[2];
	spa->length = DCR_SIZE;
	offset += spa->header.length;

	/* spa5 (dcr3) dimm3 */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 5+1;
	spa->address = t->dcr_dma[3];
	spa->length = DCR_SIZE;
	offset += spa->header.length;

	/* spa6 (bdw for dcr0) dimm0 */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 6+1;
	spa->address = t->dimm_dma[0];
	spa->length = DIMM_SIZE;
	offset += spa->header.length;

	/* spa7 (bdw for dcr1) dimm1 */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 7+1;
	spa->address = t->dimm_dma[1];
	spa->length = DIMM_SIZE;
	offset += spa->header.length;

	/* spa8 (bdw for dcr2) dimm2 */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 8+1;
	spa->address = t->dimm_dma[2];
	spa->length = DIMM_SIZE;
	offset += spa->header.length;

	/* spa9 (bdw for dcr3) dimm3 */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 9+1;
	spa->address = t->dimm_dma[3];
	spa->length = DIMM_SIZE;
	offset += spa->header.length;

	/*
	 * memdevs: map each dimm into its SPA ranges.  The distinct
	 * region_offset values (1, 1<<8, 1<<16, ...) keep interleave-set
	 * cookies unique — presumably chosen for predictability; see the
	 * interleave-set cookie calculation in the nfit driver.
	 */
	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 4+1;
	memdev->region_size = SPA0_SIZE/2;
	memdev->region_offset = 1;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 2;
	offset += memdev->header.length;

	/* mem-region1 (spa0, dimm1) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 5+1;
	memdev->region_size = SPA0_SIZE/2;
	memdev->region_offset = (1 << 8);
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 2;
	memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
	offset += memdev->header.length;

	/* mem-region2 (spa1, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 1;
	memdev->range_index = 1+1;
	memdev->region_index = 4+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = (1 << 16);
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;
	memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
	offset += memdev->header.length;

	/* mem-region3 (spa1, dimm1) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 1;
	memdev->range_index = 1+1;
	memdev->region_index = 5+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = (1 << 24);
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;
	offset += memdev->header.length;

	/* mem-region4 (spa1, dimm2) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 1+1;
	memdev->region_index = 6+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = (1ULL << 32);
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;
	memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
	offset += memdev->header.length;

	/* mem-region5 (spa1, dimm3) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 1+1;
	memdev->region_index = 7+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = (1ULL << 40);
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;
	offset += memdev->header.length;

	/* mem-region6 (spa/dcr0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 2+1;
	memdev->region_index = 0+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region7 (spa/dcr1, dimm1) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 3+1;
	memdev->region_index = 1+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region8 (spa/dcr2, dimm2) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 4+1;
	memdev->region_index = 2+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region9 (spa/dcr3, dimm3) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 5+1;
	memdev->region_index = 3+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region10 (spa/bdw0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 6+1;
	memdev->region_index = 0+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region11 (spa/bdw1, dimm1) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 7+1;
	memdev->region_index = 1+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region12 (spa/bdw2, dimm2) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 8+1;
	memdev->region_index = 2+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region13 (spa/bdw3, dimm3) — range_index 9+1 is spa9, the bdw range */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 9+1;
	memdev->region_index = 3+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
	offset += memdev->header.length;

	/* dcr-descriptor0: blk */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(*dcr);
	dcr->region_index = 0+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[0];
	dcr->code = NFIT_FIC_BLK;
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;
	offset += dcr->header.length;

	/* dcr-descriptor1: blk */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(*dcr);
	dcr->region_index = 1+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[1];
	dcr->code = NFIT_FIC_BLK;
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;
	offset += dcr->header.length;

	/* dcr-descriptor2: blk */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(*dcr);
	dcr->region_index = 2+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[2];
	dcr->code = NFIT_FIC_BLK;
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;
	offset += dcr->header.length;

	/* dcr-descriptor3: blk */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(*dcr);
	dcr->region_index = 3+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[3];
	dcr->code = NFIT_FIC_BLK;
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;
	offset += dcr->header.length;

	/*
	 * pmem dcr descriptors are truncated at window_size: they carry
	 * no block-window fields, hence the offsetof() length.
	 */
	/* dcr-descriptor0: pmem */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 4+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[0];
	dcr->code = NFIT_FIC_BYTEN;
	dcr->windows = 0;
	offset += dcr->header.length;

	/* dcr-descriptor1: pmem */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 5+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[1];
	dcr->code = NFIT_FIC_BYTEN;
	dcr->windows = 0;
	offset += dcr->header.length;

	/* dcr-descriptor2: pmem */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 6+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[2];
	dcr->code = NFIT_FIC_BYTEN;
	dcr->windows = 0;
	offset += dcr->header.length;

	/* dcr-descriptor3: pmem */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 7+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[3];
	dcr->code = NFIT_FIC_BYTEN;
	dcr->windows = 0;
	offset += dcr->header.length;

	/* bdw0 (spa/dcr0, dimm0) */
	bdw = nfit_buf + offset;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(*bdw);
	bdw->region_index = 0+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;
	offset += bdw->header.length;

	/* bdw1 (spa/dcr1, dimm1) */
	bdw = nfit_buf + offset;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(*bdw);
	bdw->region_index = 1+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;
	offset += bdw->header.length;

	/* bdw2 (spa/dcr2, dimm2) */
	bdw = nfit_buf + offset;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(*bdw);
	bdw->region_index = 2+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;
	offset += bdw->header.length;

	/* bdw3 (spa/dcr3, dimm3) */
	bdw = nfit_buf + offset;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(*bdw);
	bdw->region_index = 3+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;
	offset += bdw->header.length;

	/* flush0 (dimm0) */
	flush = nfit_buf + offset;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[0];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[0] + i * sizeof(u64);
	offset += flush->header.length;

	/* flush1 (dimm1) */
	flush = nfit_buf + offset;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[1];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[1] + i * sizeof(u64);
	offset += flush->header.length;

	/* flush2 (dimm2) */
	flush = nfit_buf + offset;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[2];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[2] + i * sizeof(u64);
	offset += flush->header.length;

	/* flush3 (dimm3) */
	flush = nfit_buf + offset;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[3];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);
	offset += flush->header.length;

	/* platform capabilities */
	pcap = nfit_buf + offset;
	pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES;
	pcap->header.length = sizeof(*pcap);
	pcap->highest_capability = 1;
	pcap->capabilities = ACPI_NFIT_CAPABILITY_CACHE_FLUSH |
		ACPI_NFIT_CAPABILITY_MEM_FLUSH;
	offset += pcap->header.length;

	/* tables for the hot-plugged fifth dimm (dimm4 / handle[4]) */
	if (t->setup_hotplug) {
		/* dcr-descriptor4: blk */
		dcr = nfit_buf + offset;
		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
		dcr->header.length = sizeof(*dcr);
		dcr->region_index = 8+1;
		dcr_common_init(dcr);
		dcr->serial_number = ~handle[4];
		dcr->code = NFIT_FIC_BLK;
		dcr->windows = 1;
		dcr->window_size = DCR_SIZE;
		dcr->command_offset = 0;
		dcr->command_size = 8;
		dcr->status_offset = 8;
		dcr->status_size = 4;
		offset += dcr->header.length;

		/* dcr-descriptor4: pmem */
		dcr = nfit_buf + offset;
		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
		dcr->header.length = offsetof(struct acpi_nfit_control_region,
				window_size);
		dcr->region_index = 9+1;
		dcr_common_init(dcr);
		dcr->serial_number = ~handle[4];
		dcr->code = NFIT_FIC_BYTEN;
		dcr->windows = 0;
		offset += dcr->header.length;

		/* bdw4 (spa/dcr4, dimm4) */
		bdw = nfit_buf + offset;
		bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
		bdw->header.length = sizeof(*bdw);
		bdw->region_index = 8+1;
		bdw->windows = 1;
		bdw->offset = 0;
		bdw->size = BDW_SIZE;
		bdw->capacity = DIMM_SIZE;
		bdw->start_address = 0;
		offset += bdw->header.length;

		/* spa10 (dcr4) dimm4 */
		spa = nfit_buf + offset;
		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
		spa->header.length = sizeof(*spa);
		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
		spa->range_index = 10+1;
		spa->address = t->dcr_dma[4];
		spa->length = DCR_SIZE;
		offset += spa->header.length;

		/*
		 * spa11 (single-dimm interleave for hotplug, note storage
		 * does not actually alias the related block-data-window
		 * regions)
		 */
		spa = nfit_buf + offset;
		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
		spa->header.length = sizeof(*spa);
		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
		spa->range_index = 11+1;
		spa->address = t->spa_set_dma[2];
		spa->length = SPA0_SIZE;
		offset += spa->header.length;

		/* spa12 (bdw for dcr4) dimm4 */
		spa = nfit_buf + offset;
		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
		spa->header.length = sizeof(*spa);
		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
		spa->range_index = 12+1;
		spa->address = t->dimm_dma[4];
		spa->length = DIMM_SIZE;
		offset += spa->header.length;

		/* mem-region14 (spa/dcr4, dimm4) */
		memdev = nfit_buf + offset;
		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
		memdev->header.length = sizeof(*memdev);
		memdev->device_handle = handle[4];
		memdev->physical_id = 4;
		memdev->region_id = 0;
		memdev->range_index = 10+1;
		memdev->region_index = 8+1;
		memdev->region_size = 0;
		memdev->region_offset = 0;
		memdev->address = 0;
		memdev->interleave_index = 0;
		memdev->interleave_ways = 1;
		offset += memdev->header.length;

		/* mem-region15 (spa11, dimm4) */
		memdev = nfit_buf + offset;
		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
		memdev->header.length = sizeof(*memdev);
		memdev->device_handle = handle[4];
		memdev->physical_id = 4;
		memdev->region_id = 0;
		memdev->range_index = 11+1;
		memdev->region_index = 9+1;
		memdev->region_size = SPA0_SIZE;
		memdev->region_offset = (1ULL << 48);
		memdev->address = 0;
		memdev->interleave_index = 0;
		memdev->interleave_ways = 1;
		memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
		offset += memdev->header.length;

		/* mem-region16 (spa/bdw4, dimm4) */
		memdev = nfit_buf + offset;
		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
		memdev->header.length = sizeof(*memdev);
		memdev->device_handle = handle[4];
		memdev->physical_id = 4;
		memdev->region_id = 0;
		memdev->range_index = 12+1;
		memdev->region_index = 8+1;
		memdev->region_size = 0;
		memdev->region_offset = 0;
		memdev->address = 0;
		memdev->interleave_index = 0;
		memdev->interleave_ways = 1;
		offset += memdev->header.length;

		/* flush4 (dimm4) */
		flush = nfit_buf + offset;
		flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
		flush->header.length = flush_hint_size;
		flush->device_handle = handle[4];
		flush->hint_count = NUM_HINTS;
		for (i = 0; i < NUM_HINTS; i++)
			flush->hint_address[i] = t->flush_dma[4]
				+ i * sizeof(u64);
		offset += flush->header.length;

		/* sanity check to make sure we've filled the buffer */
		WARN_ON(offset != t->nfit_size);
	}

	t->nfit_filled = offset;

	post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
			SPA0_SIZE);

	/* force-enable every command the test bus/dimm handlers implement */
	acpi_desc = &t->acpi_desc;
	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_SMART, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_SMART_SET_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_SMART_INJECT, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_ARS_CAP,
&acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CALL, &acpi_desc->bus_cmd_force_en);
	set_bit(NFIT_CMD_TRANSLATE_SPA, &acpi_desc->bus_nfit_cmd_force_en);
	set_bit(NFIT_CMD_ARS_INJECT_SET, &acpi_desc->bus_nfit_cmd_force_en);
	set_bit(NFIT_CMD_ARS_INJECT_CLEAR, &acpi_desc->bus_nfit_cmd_force_en);
	set_bit(NFIT_CMD_ARS_INJECT_GET, &acpi_desc->bus_nfit_cmd_force_en);
	set_bit(ND_INTEL_FW_GET_INFO, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_FW_START_UPDATE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_FW_SEND_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_FW_FINISH_UPDATE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_FW_FINISH_QUERY, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en);
}

/*
 * Emit the NFIT for bus 1: a single flat PMEM range with no BLK
 * aliasing (plus a virtual-cd range), one healthy-but-flagged dimm
 * mapping and one deliberately map-failed mapping, each with a pmem
 * control-region descriptor.
 */
static void nfit_test1_setup(struct nfit_test *t)
{
	size_t offset;
	void *nfit_buf = t->nfit_buf;
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_system_address *spa;
	struct acpi_nfit_desc *acpi_desc;

	offset = 0;
	/* spa0 (flat range with no bdw aliasing) */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA2_SIZE;
	offset += spa->header.length;

	/* virtual cd region */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
	spa->range_index = 0;
	spa->address = t->spa_set_dma[1];
	spa->length = SPA_VCD_SIZE;
	offset += spa->header.length;

	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[5];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	/* exercise the save/restore/flush-failed and not-armed paths */
	memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
		| ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
		| ACPI_NFIT_MEM_NOT_ARMED;
	offset += memdev->header.length;

	/* dcr-descriptor0 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 0+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[5];
	dcr->code = NFIT_FIC_BYTE;
	dcr->windows = 0;
	offset += dcr->header.length;

	/* second mapping: deliberately marked map-failed (handle[6]) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[6];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0;
	memdev->region_index = 0+2;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	memdev->flags = ACPI_NFIT_MEM_MAP_FAILED;
	offset += memdev->header.length;

	/* dcr-descriptor1 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 0+2;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[6];
	dcr->code = NFIT_FIC_BYTE;
	dcr->windows = 0;
	offset += dcr->header.length;

	/* sanity check to make sure we've filled the buffer */
	WARN_ON(offset != t->nfit_size);

	t->nfit_filled = offset;

	post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
			SPA2_SIZE);

	acpi_desc = &t->acpi_desc;
	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en);
}

/*
 * BLK-region I/O backend: the "block window" is just a memcpy into
 * the dimm's backing buffer at the given device-physical address.
 */
static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
		void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = &ndbr->nd_region;
	unsigned int lane;

	lane = nd_region_acquire_lane(nd_region);
	if (rw)
		memcpy(mmio->addr.base + dpa, iobuf, len);
	else {
		memcpy(iobuf, mmio->addr.base + dpa, len);

		/* give us some coverage of the arch_invalidate_pmem() API */
		arch_invalidate_pmem(mmio->addr.base + dpa, len);
	}
	nd_region_release_lane(nd_region, lane);

	return 0;
}

/* fake ACPI handle used to validate _DSM evaluation in nfit_ctl tests */
static unsigned long nfit_ctl_handle;

/* canned _DSM result; NOTE(review): file scope, not static — consider static */
union acpi_object *result;

/* Stub _DSM evaluator: only accepts the fake handle, returns 'result'. */
static union acpi_object *nfit_test_evaluate_dsm(acpi_handle handle,
		const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4)
{
	if (handle != &nfit_ctl_handle)
		return ERR_PTR(-ENXIO);

	return result;
}

static int setup_result(void *buf, size_t size)
{
	result = kmalloc(sizeof(union acpi_object) + size, GFP_KERNEL);
	if (!result)
		return -ENOMEM;
	result->package.type = ACPI_TYPE_BUFFER,
	result->buffer.pointer = (void
*) (result + 1);	/* payload lives in the trailing allocation */
	result->buffer.length = size;
	memcpy(result->buffer.pointer, buf, size);
	memset(buf, 0, size);
	return 0;
}

/*
 * Unit test of acpi_nfit_ctl() against the mocked DSM transport:
 * setup_result() stages a canned reply, nfit_test_evaluate_dsm()
 * returns it, and each check below verifies rc/cmd_rc and (where
 * applicable) that the staged payload was copied back into cmds.buf.
 * Returns 0 on success, -EIO on the first failed check, -ENOMEM on
 * allocation failure.
 */
static int nfit_ctl_test(struct device *dev)
{
	int rc, cmd_rc;
	struct nvdimm *nvdimm;
	struct acpi_device *adev;
	struct nfit_mem *nfit_mem;
	struct nd_ars_record *record;
	struct acpi_nfit_desc *acpi_desc;
	const u64 test_val = 0x0123456789abcdefULL;
	unsigned long mask, cmd_size, offset;
	/* one backing buffer reinterpreted as each command payload in turn */
	union {
		struct nd_cmd_get_config_size cfg_size;
		struct nd_cmd_clear_error clear_err;
		struct nd_cmd_ars_status ars_stat;
		struct nd_cmd_ars_cap ars_cap;
		char buf[sizeof(struct nd_cmd_ars_status)
			+ sizeof(struct nd_ars_record)];
	} cmds;

	/* fake acpi_device wired to the fake DSM handle */
	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	*adev = (struct acpi_device) {
		.handle = &nfit_ctl_handle,
		.dev = {
			.init_name = "test-adev",
		},
	};

	/* minimal nfit descriptor: just enough for acpi_nfit_ctl() */
	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	*acpi_desc = (struct acpi_nfit_desc) {
		.nd_desc = {
			.cmd_mask = 1UL << ND_CMD_ARS_CAP
				| 1UL << ND_CMD_ARS_START
				| 1UL << ND_CMD_ARS_STATUS
				| 1UL << ND_CMD_CLEAR_ERROR
				| 1UL << ND_CMD_CALL,
			.module = THIS_MODULE,
			.provider_name = "ACPI.NFIT",
			.ndctl = acpi_nfit_ctl,
			.bus_dsm_mask = 1UL << NFIT_CMD_TRANSLATE_SPA
				| 1UL << NFIT_CMD_ARS_INJECT_SET
				| 1UL << NFIT_CMD_ARS_INJECT_CLEAR
				| 1UL << NFIT_CMD_ARS_INJECT_GET,
		},
		.dev = &adev->dev,
	};

	nfit_mem = devm_kzalloc(dev, sizeof(*nfit_mem), GFP_KERNEL);
	if (!nfit_mem)
		return -ENOMEM;

	mask = 1UL << ND_CMD_SMART | 1UL << ND_CMD_SMART_THRESHOLD
		| 1UL << ND_CMD_DIMM_FLAGS | 1UL << ND_CMD_GET_CONFIG_SIZE
		| 1UL << ND_CMD_GET_CONFIG_DATA | 1UL << ND_CMD_SET_CONFIG_DATA
		| 1UL << ND_CMD_VENDOR;
	*nfit_mem = (struct nfit_mem) {
		.adev = adev,
		.family = NVDIMM_FAMILY_INTEL,
		.dsm_mask = mask,
	};

	/* fake nvdimm carrying the nfit_mem as provider data */
	nvdimm = devm_kzalloc(dev, sizeof(*nvdimm), GFP_KERNEL);
	if (!nvdimm)
		return -ENOMEM;
	*nvdimm = (struct nvdimm) {
		.provider_data = nfit_mem,
		.cmd_mask = mask,
		.dev = {
			.init_name = "test-dimm",
		},
	};


	/* basic checkout of a typical 'get config size' command */
	cmd_size = sizeof(cmds.cfg_size);
	cmds.cfg_size = (struct nd_cmd_get_config_size) {
		.status = 0,
		.config_size = SZ_128K,
		.max_xfer = SZ_4K,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc || cmds.cfg_size.status != 0
			|| cmds.cfg_size.config_size != SZ_128K
			|| cmds.cfg_size.max_xfer != SZ_4K) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}


	/* test ars_status with zero output */
	cmd_size = offsetof(struct nd_cmd_ars_status, address);
	cmds.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = 0,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}


	/* test ars_cap with benign extended status */
	cmd_size = sizeof(cmds.ars_cap);
	cmds.ars_cap = (struct nd_cmd_ars_cap) {
		.status = ND_ARS_PERSISTENT << 16,
	};
	/* only the output portion (from 'status') is staged as the reply */
	offset = offsetof(struct nd_cmd_ars_cap, status);
	rc = setup_result(cmds.buf + offset, cmd_size - offset);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_CAP,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}


	/* test ars_status with 'status' trimmed from 'out_length' */
	cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
	cmds.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = cmd_size - 4,
	};
	record = &cmds.ars_stat.records[0];
	*record = (struct nd_ars_record) {
		.length = test_val,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmds.buf, cmd_size, &cmd_rc);

	/* record->length must round-trip through the staged reply */
	if (rc < 0 || cmd_rc || record->length != test_val) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}


	/* test ars_status with 'Output (Size)' including 'status' */
	cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
	cmds.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = cmd_size,
	};
	record = &cmds.ars_stat.records[0];
	*record = (struct nd_ars_record) {
		.length = test_val,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc || record->length != test_val) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}


	/* test extended status for get_config_size results in failure */
	cmd_size = sizeof(cmds.cfg_size);
	cmds.cfg_size = (struct nd_cmd_get_config_size) {
		.status = 1 << 16,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
			cmds.buf, cmd_size, &cmd_rc);

	/* here a negative cmd_rc is the expected outcome */
	if (rc < 0 || cmd_rc >= 0) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test clear error */
	cmd_size = sizeof(cmds.clear_err);
	cmds.clear_err = (struct nd_cmd_clear_error) {
		.length = 512,
		.cleared = 512,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_CLEAR_ERROR,
			cmds.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	return 0;
}

/*
 * Probe one nfit_test platform device: run the ctl self-test on
 * instance 0, allocate the per-dimm/per-region backing arrays, build
 * the NFIT via the instance's setup() callback, and register it with
 * the nfit driver core.
 */
static int nfit_test_probe(struct platform_device *pdev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &pdev->dev;
	struct nfit_test *nfit_test;
	struct nfit_mem *nfit_mem;
	union acpi_object *obj;
	int rc;

	/* the acpi_nfit_ctl() unit test only runs against instance 0 */
	if (strcmp(dev_name(&pdev->dev), "nfit_test.0") == 0) {
		rc = nfit_ctl_test(&pdev->dev);
		if (rc)
			return rc;
	}

	nfit_test = to_nfit_test(&pdev->dev);

	/* common alloc */
	if (nfit_test->num_dcr) {
		int num = nfit_test->num_dcr;

		nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->label_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		nfit_test->dcr = devm_kcalloc(dev, num,
				sizeof(struct nfit_test_dcr *), GFP_KERNEL);
		nfit_test->dcr_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
nfit_test->smart = devm_kcalloc(dev, num, 2530 sizeof(struct nd_intel_smart), GFP_KERNEL); 2531 nfit_test->smart_threshold = devm_kcalloc(dev, num, 2532 sizeof(struct nd_intel_smart_threshold), 2533 GFP_KERNEL); 2534 nfit_test->fw = devm_kcalloc(dev, num, 2535 sizeof(struct nfit_test_fw), GFP_KERNEL); 2536 if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label 2537 && nfit_test->label_dma && nfit_test->dcr 2538 && nfit_test->dcr_dma && nfit_test->flush 2539 && nfit_test->flush_dma 2540 && nfit_test->fw) 2541 /* pass */; 2542 else 2543 return -ENOMEM; 2544 } 2545 2546 if (nfit_test->num_pm) { 2547 int num = nfit_test->num_pm; 2548 2549 nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *), 2550 GFP_KERNEL); 2551 nfit_test->spa_set_dma = devm_kcalloc(dev, num, 2552 sizeof(dma_addr_t), GFP_KERNEL); 2553 if (nfit_test->spa_set && nfit_test->spa_set_dma) 2554 /* pass */; 2555 else 2556 return -ENOMEM; 2557 } 2558 2559 /* per-nfit specific alloc */ 2560 if (nfit_test->alloc(nfit_test)) 2561 return -ENOMEM; 2562 2563 nfit_test->setup(nfit_test); 2564 acpi_desc = &nfit_test->acpi_desc; 2565 acpi_nfit_desc_init(acpi_desc, &pdev->dev); 2566 acpi_desc->blk_do_io = nfit_test_blk_do_io; 2567 nd_desc = &acpi_desc->nd_desc; 2568 nd_desc->provider_name = NULL; 2569 nd_desc->module = THIS_MODULE; 2570 nd_desc->ndctl = nfit_test_ctl; 2571 2572 rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf, 2573 nfit_test->nfit_filled); 2574 if (rc) 2575 return rc; 2576 2577 rc = devm_add_action_or_reset(&pdev->dev, acpi_nfit_shutdown, acpi_desc); 2578 if (rc) 2579 return rc; 2580 2581 if (nfit_test->setup != nfit_test0_setup) 2582 return 0; 2583 2584 nfit_test->setup_hotplug = 1; 2585 nfit_test->setup(nfit_test); 2586 2587 obj = kzalloc(sizeof(*obj), GFP_KERNEL); 2588 if (!obj) 2589 return -ENOMEM; 2590 obj->type = ACPI_TYPE_BUFFER; 2591 obj->buffer.length = nfit_test->nfit_size; 2592 obj->buffer.pointer = nfit_test->nfit_buf; 2593 *(nfit_test->_fit) = obj; 2594 
__acpi_nfit_notify(&pdev->dev, nfit_test, 0x80); 2595 2596 /* associate dimm devices with nfit_mem data for notification testing */ 2597 mutex_lock(&acpi_desc->init_mutex); 2598 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 2599 u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle; 2600 int i; 2601 2602 for (i = 0; i < NUM_DCR; i++) 2603 if (nfit_handle == handle[i]) 2604 dev_set_drvdata(nfit_test->dimm_dev[i], 2605 nfit_mem); 2606 } 2607 mutex_unlock(&acpi_desc->init_mutex); 2608 2609 return 0; 2610 } 2611 2612 static int nfit_test_remove(struct platform_device *pdev) 2613 { 2614 return 0; 2615 } 2616 2617 static void nfit_test_release(struct device *dev) 2618 { 2619 struct nfit_test *nfit_test = to_nfit_test(dev); 2620 2621 kfree(nfit_test); 2622 } 2623 2624 static const struct platform_device_id nfit_test_id[] = { 2625 { KBUILD_MODNAME }, 2626 { }, 2627 }; 2628 2629 static struct platform_driver nfit_test_driver = { 2630 .probe = nfit_test_probe, 2631 .remove = nfit_test_remove, 2632 .driver = { 2633 .name = KBUILD_MODNAME, 2634 }, 2635 .id_table = nfit_test_id, 2636 }; 2637 2638 static __init int nfit_test_init(void) 2639 { 2640 int rc, i; 2641 2642 pmem_test(); 2643 libnvdimm_test(); 2644 acpi_nfit_test(); 2645 device_dax_test(); 2646 2647 nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm); 2648 2649 nfit_wq = create_singlethread_workqueue("nfit"); 2650 if (!nfit_wq) 2651 return -ENOMEM; 2652 2653 nfit_test_dimm = class_create(THIS_MODULE, "nfit_test_dimm"); 2654 if (IS_ERR(nfit_test_dimm)) { 2655 rc = PTR_ERR(nfit_test_dimm); 2656 goto err_register; 2657 } 2658 2659 for (i = 0; i < NUM_NFITS; i++) { 2660 struct nfit_test *nfit_test; 2661 struct platform_device *pdev; 2662 2663 nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL); 2664 if (!nfit_test) { 2665 rc = -ENOMEM; 2666 goto err_register; 2667 } 2668 INIT_LIST_HEAD(&nfit_test->resources); 2669 badrange_init(&nfit_test->badrange); 2670 switch (i) { 2671 case 0: 2672 
nfit_test->num_pm = NUM_PM; 2673 nfit_test->dcr_idx = 0; 2674 nfit_test->num_dcr = NUM_DCR; 2675 nfit_test->alloc = nfit_test0_alloc; 2676 nfit_test->setup = nfit_test0_setup; 2677 break; 2678 case 1: 2679 nfit_test->num_pm = 2; 2680 nfit_test->dcr_idx = NUM_DCR; 2681 nfit_test->num_dcr = 2; 2682 nfit_test->alloc = nfit_test1_alloc; 2683 nfit_test->setup = nfit_test1_setup; 2684 break; 2685 default: 2686 rc = -EINVAL; 2687 goto err_register; 2688 } 2689 pdev = &nfit_test->pdev; 2690 pdev->name = KBUILD_MODNAME; 2691 pdev->id = i; 2692 pdev->dev.release = nfit_test_release; 2693 rc = platform_device_register(pdev); 2694 if (rc) { 2695 put_device(&pdev->dev); 2696 goto err_register; 2697 } 2698 get_device(&pdev->dev); 2699 2700 rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2701 if (rc) 2702 goto err_register; 2703 2704 instances[i] = nfit_test; 2705 INIT_WORK(&nfit_test->work, uc_error_notify); 2706 } 2707 2708 rc = platform_driver_register(&nfit_test_driver); 2709 if (rc) 2710 goto err_register; 2711 return 0; 2712 2713 err_register: 2714 destroy_workqueue(nfit_wq); 2715 for (i = 0; i < NUM_NFITS; i++) 2716 if (instances[i]) 2717 platform_device_unregister(&instances[i]->pdev); 2718 nfit_test_teardown(); 2719 for (i = 0; i < NUM_NFITS; i++) 2720 if (instances[i]) 2721 put_device(&instances[i]->pdev.dev); 2722 2723 return rc; 2724 } 2725 2726 static __exit void nfit_test_exit(void) 2727 { 2728 int i; 2729 2730 flush_workqueue(nfit_wq); 2731 destroy_workqueue(nfit_wq); 2732 for (i = 0; i < NUM_NFITS; i++) 2733 platform_device_unregister(&instances[i]->pdev); 2734 platform_driver_unregister(&nfit_test_driver); 2735 nfit_test_teardown(); 2736 2737 for (i = 0; i < NUM_NFITS; i++) 2738 put_device(&instances[i]->pdev.dev); 2739 class_destroy(nfit_test_dimm); 2740 } 2741 2742 module_init(nfit_test_init); 2743 module_exit(nfit_test_exit); 2744 MODULE_LICENSE("GPL v2"); 2745 MODULE_AUTHOR("Intel Corporation"); 2746