// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/ndctl.h>
#include <nd-core.h>
#include <linux/printk.h>
#include <linux/seq_buf.h>

#include "../watermark.h"
#include "nfit_test.h"
#include "ndtest.h"

enum {
	DIMM_SIZE = SZ_32M,
	LABEL_SIZE = SZ_128K,
	NUM_INSTANCES = 2,
	NUM_DCR = 4,
	NDTEST_MAX_MAPPING = 6,
};

#define NDTEST_SCM_DIMM_CMD_MASK	   \
	((1ul << ND_CMD_GET_CONFIG_SIZE) | \
	 (1ul << ND_CMD_GET_CONFIG_DATA) | \
	 (1ul << ND_CMD_SET_CONFIG_DATA) | \
	 (1ul << ND_CMD_CALL))

#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm)		\
	((((node) & 0xfff) << 16) | (((socket) & 0xf) << 12)	\
	 | (((imc) & 0xf) << 8) | (((chan) & 0xf) << 4) | ((dimm) & 0xf))

static DEFINE_SPINLOCK(ndtest_lock);
static struct ndtest_priv *instances[NUM_INSTANCES];
static struct class *ndtest_dimm_class;
static struct gen_pool *ndtest_pool;

static struct ndtest_dimm dimm_group1[] = {
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
		.uuid_str = "1e5c75d2-b618-11ea-9aa3-507b9ddc0f72",
		.physical_id = 0,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
		.uuid_str = "1c4d43ac-b618-11ea-be80-507b9ddc0f72",
		.physical_id = 1,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
		.uuid_str = "a9f17ffc-b618-11ea-b36d-507b9ddc0f72",
		.physical_id = 2,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
		.uuid_str = "b6b83b22-b618-11ea-8aae-507b9ddc0f72",
		.physical_id = 3,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
		.uuid_str = "bf9baaee-b618-11ea-b181-507b9ddc0f72",
		.physical_id = 4,
		.num_formats = 2,
	},
};

static struct ndtest_dimm dimm_group2[] = {
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
		.uuid_str = "ca0817e2-b618-11ea-9db3-507b9ddc0f72",
		.physical_id = 0,
		.num_formats = 1,
		.flags = PAPR_PMEM_UNARMED | PAPR_PMEM_EMPTY |
			 PAPR_PMEM_SAVE_FAILED | PAPR_PMEM_SHUTDOWN_DIRTY |
			 PAPR_PMEM_HEALTH_FATAL,
	},
};

static struct ndtest_mapping region0_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = 0,
		.size = SZ_16M,
	},
	{
		.dimm = 1,
		.position = 1,
		.start = 0,
		.size = SZ_16M,
	}
};

static struct ndtest_mapping region1_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 1,
		.position = 1,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 2,
		.position = 2,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 3,
		.position = 3,
		.start = SZ_16M,
		.size = SZ_16M,
	},
};

static struct ndtest_mapping region2_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = 0,
		.size = DIMM_SIZE,
	},
};

static struct ndtest_mapping region3_mapping[] = {
	{
		.dimm = 1,
		.start = 0,
		.size = DIMM_SIZE,
	}
};

static struct ndtest_mapping region4_mapping[] = {
	{
		.dimm = 2,
		.start = 0,
		.size = DIMM_SIZE,
	}
};

static struct ndtest_mapping region5_mapping[] = {
	{
		.dimm = 3,
		.start = 0,
		.size = DIMM_SIZE,
	}
};
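
/*
 * Bus 0 region layout: one 32M PMEM region interleaved across DIMMs 0-1,
 * one 64M PMEM region interleaved across DIMMs 0-3, and a full-DIMM BLK
 * aperture on each of DIMMs 0-3.
 */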
static struct ndtest_region bus0_regions[] = {
	{
		.type = ND_DEVICE_NAMESPACE_PMEM,
		.num_mappings = ARRAY_SIZE(region0_mapping),
		.mapping = region0_mapping,
		.size = DIMM_SIZE,
		.range_index = 1,
	},
	{
		.type = ND_DEVICE_NAMESPACE_PMEM,
		.num_mappings = ARRAY_SIZE(region1_mapping),
		.mapping = region1_mapping,
		.size = DIMM_SIZE * 2,
		.range_index = 2,
	},
	{
		.type = ND_DEVICE_NAMESPACE_BLK,
		.num_mappings = ARRAY_SIZE(region2_mapping),
		.mapping = region2_mapping,
		.size = DIMM_SIZE,
		.range_index = 3,
	},
	{
		.type = ND_DEVICE_NAMESPACE_BLK,
		.num_mappings = ARRAY_SIZE(region3_mapping),
		.mapping = region3_mapping,
		.size = DIMM_SIZE,
		.range_index = 4,
	},
	{
		.type = ND_DEVICE_NAMESPACE_BLK,
		.num_mappings = ARRAY_SIZE(region4_mapping),
		.mapping = region4_mapping,
		.size = DIMM_SIZE,
		.range_index = 5,
	},
	{
		.type = ND_DEVICE_NAMESPACE_BLK,
		.num_mappings = ARRAY_SIZE(region5_mapping),
		.mapping = region5_mapping,
		.size = DIMM_SIZE,
		.range_index = 6,
	},
};

static struct ndtest_mapping region6_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = 0,
		.size = DIMM_SIZE,
	},
};

static struct ndtest_region bus1_regions[] = {
	{
		.type = ND_DEVICE_NAMESPACE_IO,
		.num_mappings = ARRAY_SIZE(region6_mapping),
		.mapping = region6_mapping,
		.size = DIMM_SIZE,
		.range_index = 1,
	},
};

static struct ndtest_config bus_configs[NUM_INSTANCES] = {
	/* bus 0 */
	{
		.dimm_start = 0,
		.dimm_count = ARRAY_SIZE(dimm_group1),
		.dimms = dimm_group1,
		.regions = bus0_regions,
		.num_regions = ARRAY_SIZE(bus0_regions),
	},
	/* bus 1 */
	{
		.dimm_start = ARRAY_SIZE(dimm_group1),
		.dimm_count = ARRAY_SIZE(dimm_group2),
		.dimms = dimm_group2,
		.regions = bus1_regions,
		.num_regions = ARRAY_SIZE(bus1_regions),
	},
};

static inline struct ndtest_priv *to_ndtest_priv(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return container_of(pdev, struct ndtest_priv, pdev);
}

static int ndtest_config_get(struct ndtest_dimm *p, unsigned int buf_len,
			     struct nd_cmd_get_config_data_hdr *hdr)
{
	unsigned int len;

	if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
		return -EINVAL;

	hdr->status = 0;
	len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
	memcpy(hdr->out_buf, p->label_area + hdr->in_offset, len);

	return buf_len - len;
}

static int ndtest_config_set(struct ndtest_dimm *p, unsigned int buf_len,
			     struct nd_cmd_set_config_hdr *hdr)
{
	unsigned int len;

	if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
		return -EINVAL;

	len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
	memcpy(p->label_area + hdr->in_offset, hdr->in_buf, len);

	return buf_len - len;
}

static int ndtest_get_config_size(struct ndtest_dimm *dimm, unsigned int buf_len,
				  struct nd_cmd_get_config_size *size)
{
	size->status = 0;
	size->max_xfer = 8;
	size->config_size = dimm->config_size;

	return 0;
}
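
/*
 * Bus-level 'ndctl' handler: dispatches the label-space commands
 * advertised in NDTEST_SCM_DIMM_CMD_MASK, then applies any failure
 * injected through the fail_cmd/fail_cmd_code device attributes below.
 */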
static int ndtest_ctl(struct nvdimm_bus_descriptor *nd_desc,
		      struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		      unsigned int buf_len, int *cmd_rc)
{
	struct ndtest_dimm *dimm;
	int _cmd_rc;

	if (!cmd_rc)
		cmd_rc = &_cmd_rc;

	*cmd_rc = 0;

	if (!nvdimm)
		return -EINVAL;

	dimm = nvdimm_provider_data(nvdimm);
	if (!dimm)
		return -EINVAL;

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		*cmd_rc = ndtest_get_config_size(dimm, buf_len, buf);
		break;
	case ND_CMD_GET_CONFIG_DATA:
		*cmd_rc = ndtest_config_get(dimm, buf_len, buf);
		break;
	case ND_CMD_SET_CONFIG_DATA:
		*cmd_rc = ndtest_config_set(dimm, buf_len, buf);
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Failures for a DIMM can be injected using fail_cmd and
	 * fail_cmd_code, see the device attributes below
	 */
	if ((1 << cmd) & dimm->fail_cmd)
		return dimm->fail_cmd_code ? dimm->fail_cmd_code : -EIO;

	return 0;
}

static int ndtest_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
			    void *iobuf, u64 len, int rw)
{
	struct ndtest_dimm *dimm = ndbr->blk_provider_data;
	struct ndtest_blk_mmio *mmio = dimm->mmio;
	struct nd_region *nd_region = &ndbr->nd_region;
	unsigned int lane;

	if (!mmio)
		return -ENOMEM;

	lane = nd_region_acquire_lane(nd_region);
	if (rw)
		memcpy(mmio->base + dpa, iobuf, len);
	else {
		memcpy(iobuf, mmio->base + dpa, len);
		arch_invalidate_pmem(mmio->base + dpa, len);
	}

	nd_region_release_lane(nd_region, lane);

	return 0;
}

static int ndtest_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
				    struct device *dev)
{
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nvdimm *nvdimm;
	struct ndtest_dimm *dimm;
	struct ndtest_blk_mmio *mmio;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	dimm = nvdimm_provider_data(nvdimm);

	nd_blk_region_set_provider_data(ndbr, dimm);
	dimm->blk_region = to_nd_region(dev);

	mmio = devm_kzalloc(dev, sizeof(struct ndtest_blk_mmio), GFP_KERNEL);
	if (!mmio)
		return -ENOMEM;

	mmio->base = (void __iomem *) devm_nvdimm_memremap(
		dev, dimm->address, 12, nd_blk_memremap_flags(ndbr));
	if (!mmio->base) {
		dev_err(dev, "%s failed to map blk dimm\n", nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = dimm->size;
	mmio->base_offset = 0;

	dimm->mmio = mmio;

	return 0;
}

static struct nfit_test_resource *ndtest_resource_lookup(resource_size_t addr)
{
	int i;

	for (i = 0; i < NUM_INSTANCES; i++) {
		struct nfit_test_resource *n, *nfit_res = NULL;
		struct ndtest_priv *t = instances[i];

		if (!t)
			continue;
		spin_lock(&ndtest_lock);
		list_for_each_entry(n, &t->resources, list) {
			if (addr >= n->res.start && (addr < n->res.start
						+ resource_size(&n->res))) {
				nfit_res = n;
				break;
			} else if (addr >= (unsigned long) n->buf
					&& (addr < (unsigned long) n->buf
						+ resource_size(&n->res))) {
				nfit_res = n;
				break;
			}
		}
		spin_unlock(&ndtest_lock);
		if (nfit_res)
			return nfit_res;
	}

	pr_warn("Failed to get resource\n");

	return NULL;
}

static void ndtest_release_resource(void *data)
{
	struct nfit_test_resource *res = data;

	spin_lock(&ndtest_lock);
	list_del(&res->list);
	spin_unlock(&ndtest_lock);

	if (resource_size(&res->res) >= DIMM_SIZE)
		gen_pool_free(ndtest_pool, res->res.start,
				resource_size(&res->res));
	vfree(res->buf);
	kfree(res);
}
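
/*
 * Every allocation is backed by vmalloc() memory. Allocations of
 * DIMM_SIZE or larger additionally reserve a 128M-aligned range of fake
 * "physical" address space from ndtest_pool (the 4G-8G window set up in
 * ndtest_init()), so that ndtest_resource_lookup() can resolve either
 * the fake physical address or the kernel virtual address of the
 * backing buffer.
 */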
static void *ndtest_alloc_resource(struct ndtest_priv *p, size_t size,
				   dma_addr_t *dma)
{
	dma_addr_t __dma = 0;
	void *buf;
	struct nfit_test_resource *res;
	struct genpool_data_align data = {
		.align = SZ_128M,
	};

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	buf = vmalloc(size);
	if (!buf)
		goto buf_err;

	if (size >= DIMM_SIZE)
		__dma = gen_pool_alloc_algo(ndtest_pool, size,
					    gen_pool_first_fit_align, &data);
	else
		__dma = (unsigned long) buf;

	if (!__dma)
		goto buf_err;

	INIT_LIST_HEAD(&res->list);
	res->dev = &p->pdev.dev;
	res->buf = buf;
	res->res.start = __dma;
	res->res.end = __dma + size - 1;
	res->res.name = "NFIT";
	spin_lock_init(&res->lock);
	INIT_LIST_HEAD(&res->requests);
	spin_lock(&ndtest_lock);
	list_add(&res->list, &p->resources);
	spin_unlock(&ndtest_lock);

	if (dma)
		*dma = __dma;

	/* on failure this runs ndtest_release_resource() to undo the above */
	if (devm_add_action_or_reset(&p->pdev.dev, ndtest_release_resource, res))
		return NULL;

	return res->buf;

buf_err:
	if (__dma && size >= DIMM_SIZE)
		gen_pool_free(ndtest_pool, __dma, size);
	vfree(buf);
	kfree(res);

	return NULL;
}

static ssize_t range_index_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct ndtest_region *region = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", region->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *ndtest_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static const struct attribute_group ndtest_region_attribute_group = {
	.name = "papr",
	.attrs = ndtest_region_attributes,
};

static const struct attribute_group *ndtest_region_attribute_groups[] = {
	&ndtest_region_attribute_group,
	NULL,
};
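
/*
 * Build a libnvdimm region from one ndtest_region entry. BLK regions map
 * a single DIMM and use the ndtest_blk_do_io() transfer path; PMEM
 * regions translate each ndtest_mapping into an nd_mapping_desc. The
 * interleave-set cookies are derived from the UUID of the first mapped
 * DIMM.
 */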
static int ndtest_create_region(struct ndtest_priv *p,
				struct ndtest_region *region)
{
	struct nd_mapping_desc mappings[NDTEST_MAX_MAPPING];
	struct nd_blk_region_desc ndbr_desc;
	struct nd_interleave_set *nd_set;
	struct nd_region_desc *ndr_desc;
	struct resource res;
	int i, ndimm = region->mapping[0].dimm;
	u64 uuid[2];

	memset(&res, 0, sizeof(res));
	memset(&mappings, 0, sizeof(mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	ndr_desc = &ndbr_desc.ndr_desc;

	if (!ndtest_alloc_resource(p, region->size, &res.start))
		return -ENOMEM;

	res.end = res.start + region->size - 1;
	ndr_desc->mapping = mappings;
	ndr_desc->res = &res;
	ndr_desc->provider_data = region;
	ndr_desc->attr_groups = ndtest_region_attribute_groups;

	if (uuid_parse(p->config->dimms[ndimm].uuid_str, (uuid_t *)uuid)) {
		pr_err("failed to parse UUID\n");
		return -ENXIO;
	}

	nd_set = devm_kzalloc(&p->pdev.dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	nd_set->cookie1 = cpu_to_le64(uuid[0]);
	nd_set->cookie2 = cpu_to_le64(uuid[1]);
	nd_set->altcookie = nd_set->cookie1;
	ndr_desc->nd_set = nd_set;

	if (region->type == ND_DEVICE_NAMESPACE_BLK) {
		mappings[0].start = 0;
		mappings[0].size = DIMM_SIZE;
		mappings[0].nvdimm = p->config->dimms[ndimm].nvdimm;

		ndr_desc->mapping = &mappings[0];
		ndr_desc->num_mappings = 1;
		ndr_desc->num_lanes = 1;
		ndbr_desc.enable = ndtest_blk_region_enable;
		ndbr_desc.do_io = ndtest_blk_do_io;
		region->region = nvdimm_blk_region_create(p->bus, ndr_desc);

		goto done;
	}

	for (i = 0; i < region->num_mappings; i++) {
		ndimm = region->mapping[i].dimm;
		mappings[i].start = region->mapping[i].start;
		mappings[i].size = region->mapping[i].size;
		mappings[i].position = region->mapping[i].position;
		mappings[i].nvdimm = p->config->dimms[ndimm].nvdimm;
	}

	ndr_desc->num_mappings = region->num_mappings;
	region->region = nvdimm_pmem_region_create(p->bus, ndr_desc);

done:
	if (!region->region) {
		dev_err(&p->pdev.dev, "Error registering region %pR\n",
			ndr_desc->res);
		return -ENXIO;
	}

	return 0;
}

static int ndtest_init_regions(struct ndtest_priv *p)
{
	int i, ret = 0;

	for (i = 0; i < p->config->num_regions; i++) {
		ret = ndtest_create_region(p, &p->config->regions[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static void put_dimms(void *data)
{
	struct ndtest_priv *p = data;
	int i;

	for (i = 0; i < p->config->dimm_count; i++)
		if (p->config->dimms[i].dev) {
			device_unregister(p->config->dimms[i].dev);
			p->config->dimms[i].dev = NULL;
		}
}

static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);

	return sprintf(buf, "%#x\n", dimm->handle);
}
static DEVICE_ATTR_RO(handle);
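
/*
 * fail_cmd is a bitmask of command numbers to fail; fail_cmd_code is the
 * value to return for them (-EIO if unset). For example, assuming the
 * first test DIMM, this makes ND_CMD_GET_CONFIG_DATA (command number 5
 * in ndctl.h, so bit 0x20) return failure:
 *
 *   echo 0x20 > /sys/class/nfit_test_dimm/test_dimm0/fail_cmd
 */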
static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);

	return sprintf(buf, "%#x\n", dimm->fail_cmd);
}

static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t size)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
	long val;
	ssize_t rc;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	dimm->fail_cmd = val;

	return size;
}
static DEVICE_ATTR_RW(fail_cmd);

static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr,
				  char *buf)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dimm->fail_cmd_code);
}

static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
	long val;
	ssize_t rc;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	dimm->fail_cmd_code = val;
	return size;
}
static DEVICE_ATTR_RW(fail_cmd_code);

static struct attribute *dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_fail_cmd.attr,
	&dev_attr_fail_cmd_code.attr,
	NULL,
};

static struct attribute_group dimm_attribute_group = {
	.attrs = dimm_attributes,
};

static const struct attribute_group *dimm_attribute_groups[] = {
	&dimm_attribute_group,
	NULL,
};

static ssize_t phys_id_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%#x\n", dimm->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "0x1234567\n");
}
static DEVICE_ATTR_RO(vendor);

static ssize_t id_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%04x-%02x-%04x-%08x\n", 0xabcd,
		       0xa, 2016, ~(dimm->handle));
}
static DEVICE_ATTR_RO(id);

static ssize_t nvdimm_handle_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%#x\n", dimm->handle);
}

static struct device_attribute dev_attr_nvdimm_show_handle = {
	.attr = { .name = "handle", .mode = 0444 },
	.show = nvdimm_handle_show,
};

static ssize_t subsystem_vendor_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%04x\n", 0);
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t dirty_shutdown_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(dirty_shutdown);

static ssize_t formats_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%d\n", dimm->num_formats);
}
static DEVICE_ATTR_RO(formats);

static ssize_t format_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	if (dimm->num_formats > 1)
		return sprintf(buf, "0x201\n");

	return sprintf(buf, "0x101\n");
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "0x301\n");
}
static DEVICE_ATTR_RO(format1);

static umode_t ndtest_nvdimm_attr_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	/* format1 only makes sense for DIMMs with more than one format */
	if (a == &dev_attr_format1.attr && dimm->num_formats <= 1)
		return 0;

	return a->mode;
}

static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
	struct seq_buf s;
	u64 flags;

	flags = dimm->flags;

	seq_buf_init(&s, buf, PAGE_SIZE);
	if (flags & PAPR_PMEM_UNARMED_MASK)
		seq_buf_printf(&s, "not_armed ");

	if (flags & PAPR_PMEM_BAD_SHUTDOWN_MASK)
		seq_buf_printf(&s, "flush_fail ");

	if (flags & PAPR_PMEM_BAD_RESTORE_MASK)
		seq_buf_printf(&s, "restore_fail ");

	if (flags & PAPR_PMEM_SAVE_MASK)
		seq_buf_printf(&s, "save_fail ");

	if (flags & PAPR_PMEM_SMART_EVENT_MASK)
		seq_buf_printf(&s, "smart_notify ");

	if (seq_buf_used(&s))
		seq_buf_printf(&s, "\n");

	return seq_buf_used(&s);
}
static DEVICE_ATTR_RO(flags);

static struct attribute *ndtest_nvdimm_attributes[] = {
	&dev_attr_nvdimm_show_handle.attr,
	&dev_attr_vendor.attr,
	&dev_attr_id.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_dirty_shutdown.attr,
	&dev_attr_formats.attr,
	&dev_attr_format.attr,
	&dev_attr_format1.attr,
	&dev_attr_flags.attr,
	NULL,
};

static const struct attribute_group ndtest_nvdimm_attribute_group = {
	.name = "papr",
	.attrs = ndtest_nvdimm_attributes,
	.is_visible = ndtest_nvdimm_attr_visible,
};

static const struct attribute_group *ndtest_nvdimm_attribute_groups[] = {
	&ndtest_nvdimm_attribute_group,
	NULL,
};
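
/*
 * Register one test DIMM: an nvdimm object on the test bus, plus a
 * "test_dimm%d" class device that exposes the handle and the
 * fail_cmd/fail_cmd_code failure-injection knobs.
 */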
static int ndtest_dimm_register(struct ndtest_priv *priv,
				struct ndtest_dimm *dimm, int id)
{
	struct device *dev = &priv->pdev.dev;
	unsigned long dimm_flags = dimm->flags;

	if (dimm->num_formats > 1) {
		set_bit(NDD_ALIASING, &dimm_flags);
		set_bit(NDD_LABELING, &dimm_flags);
	}

	if (dimm->flags & PAPR_PMEM_UNARMED_MASK)
		set_bit(NDD_UNARMED, &dimm_flags);

	dimm->nvdimm = nvdimm_create(priv->bus, dimm,
				    ndtest_nvdimm_attribute_groups, dimm_flags,
				    NDTEST_SCM_DIMM_CMD_MASK, 0, NULL);
	if (!dimm->nvdimm) {
		dev_err(dev, "Error creating DIMM object for %pOF\n", priv->dn);
		return -ENXIO;
	}

	dimm->dev = device_create_with_groups(ndtest_dimm_class,
					     &priv->pdev.dev,
					     0, dimm, dimm_attribute_groups,
					     "test_dimm%d", id);
	if (IS_ERR(dimm->dev)) {
		pr_err("Could not create dimm device attributes\n");
		return PTR_ERR(dimm->dev);
	}

	return 0;
}

static int ndtest_nvdimm_init(struct ndtest_priv *p)
{
	struct ndtest_dimm *d;
	void *res;
	int i, id, rc;

	for (i = 0; i < p->config->dimm_count; i++) {
		d = &p->config->dimms[i];
		d->id = id = p->config->dimm_start + i;
		res = ndtest_alloc_resource(p, LABEL_SIZE, NULL);
		if (!res)
			return -ENOMEM;

		d->label_area = res;
		sprintf(d->label_area, "label%d", id);
		d->config_size = LABEL_SIZE;

		if (!ndtest_alloc_resource(p, d->size,
					   &p->dimm_dma[id]))
			return -ENOMEM;

		if (!ndtest_alloc_resource(p, LABEL_SIZE,
					   &p->label_dma[id]))
			return -ENOMEM;

		if (!ndtest_alloc_resource(p, LABEL_SIZE,
					   &p->dcr_dma[id]))
			return -ENOMEM;

		d->address = p->dimm_dma[id];

		rc = ndtest_dimm_register(p, d, id);
		if (rc)
			return rc;
	}

	return 0;
}

static ssize_t compatible_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "nvdimm_test");
}
static DEVICE_ATTR_RO(compatible);

static struct attribute *of_node_attributes[] = {
	&dev_attr_compatible.attr,
	NULL
};

static const struct attribute_group of_node_attribute_group = {
	.name = "of_node",
	.attrs = of_node_attributes,
};

static const struct attribute_group *ndtest_attribute_groups[] = {
	&of_node_attribute_group,
	NULL,
};

static int ndtest_bus_register(struct ndtest_priv *p)
{
	p->config = &bus_configs[p->pdev.id];

	p->bus_desc.ndctl = ndtest_ctl;
	p->bus_desc.module = THIS_MODULE;
	p->bus_desc.provider_name = NULL;
	p->bus_desc.attr_groups = ndtest_attribute_groups;

	p->bus = nvdimm_bus_register(&p->pdev.dev, &p->bus_desc);
	if (!p->bus) {
		dev_err(&p->pdev.dev, "Error creating nvdimm bus %pOF\n", p->dn);
		return -ENOMEM;
	}

	return 0;
}

static int ndtest_remove(struct platform_device *pdev)
{
	struct ndtest_priv *p = to_ndtest_priv(&pdev->dev);

	nvdimm_bus_unregister(p->bus);
	return 0;
}
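
/*
 * Probe one platform-device instance: register the nvdimm bus, allocate
 * the per-DIMM address tables, create the DIMMs, and then the regions
 * on top of them.
 */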
static int ndtest_probe(struct platform_device *pdev)
{
	struct ndtest_priv *p;
	unsigned int ndimms;
	int rc;

	p = to_ndtest_priv(&pdev->dev);
	if (ndtest_bus_register(p))
		return -ENOMEM;

	/* the DMA address tables are indexed by global DIMM id */
	ndimms = p->config->dimm_start + p->config->dimm_count;
	p->dcr_dma = devm_kcalloc(&p->pdev.dev, ndimms,
				 sizeof(dma_addr_t), GFP_KERNEL);
	p->label_dma = devm_kcalloc(&p->pdev.dev, ndimms,
				   sizeof(dma_addr_t), GFP_KERNEL);
	p->dimm_dma = devm_kcalloc(&p->pdev.dev, ndimms,
				  sizeof(dma_addr_t), GFP_KERNEL);
	if (!p->dcr_dma || !p->label_dma || !p->dimm_dma) {
		rc = -ENOMEM;
		goto err;
	}

	rc = ndtest_nvdimm_init(p);
	if (rc)
		goto err;

	rc = ndtest_init_regions(p);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(&pdev->dev, put_dimms, p);
	if (rc)
		goto err;

	platform_set_drvdata(pdev, p);

	return 0;

err:
	pr_err("%s:%d Failed nvdimm init\n", __func__, __LINE__);
	return rc;
}

static const struct platform_device_id ndtest_id[] = {
	{ KBUILD_MODNAME },
	{ },
};

static struct platform_driver ndtest_driver = {
	.probe = ndtest_probe,
	.remove = ndtest_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = ndtest_id,
};

static void ndtest_release(struct device *dev)
{
	struct ndtest_priv *p = to_ndtest_priv(dev);

	kfree(p);
}

static void cleanup_devices(void)
{
	int i;

	for (i = 0; i < NUM_INSTANCES; i++)
		if (instances[i])
			platform_device_unregister(&instances[i]->pdev);

	nfit_test_teardown();

	if (ndtest_pool)
		gen_pool_destroy(ndtest_pool);

	if (ndtest_dimm_class)
		class_destroy(ndtest_dimm_class);
}

static __init int ndtest_init(void)
{
	int rc, i;

	pmem_test();
	libnvdimm_test();
	device_dax_test();
	dax_pmem_test();
	dax_pmem_core_test();
#ifdef CONFIG_DEV_DAX_PMEM_COMPAT
	dax_pmem_compat_test();
#endif

	nfit_test_setup(ndtest_resource_lookup, NULL);

	ndtest_dimm_class = class_create(THIS_MODULE, "nfit_test_dimm");
	if (IS_ERR(ndtest_dimm_class)) {
		rc = PTR_ERR(ndtest_dimm_class);
		ndtest_dimm_class = NULL;
		goto err_register;
	}

	ndtest_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
	if (!ndtest_pool) {
		rc = -ENOMEM;
		goto err_register;
	}

	if (gen_pool_add(ndtest_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
		rc = -ENOMEM;
		goto err_register;
	}

	/* Each instance can be taken as a bus, which can have multiple dimms */
	for (i = 0; i < NUM_INSTANCES; i++) {
		struct ndtest_priv *priv;
		struct platform_device *pdev;

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv) {
			rc = -ENOMEM;
			goto err_register;
		}

		INIT_LIST_HEAD(&priv->resources);
		pdev = &priv->pdev;
		pdev->name = KBUILD_MODNAME;
		pdev->id = i;
		pdev->dev.release = ndtest_release;
		rc = platform_device_register(pdev);
		if (rc) {
			put_device(&pdev->dev);
			goto err_register;
		}
		get_device(&pdev->dev);

		instances[i] = priv;
	}

	rc = platform_driver_register(&ndtest_driver);
	if (rc)
		goto err_register;

	return 0;

err_register:
	pr_err("Error registering platform device\n");
	cleanup_devices();

	return rc;
}

static __exit void ndtest_exit(void)
{
	cleanup_devices();
	platform_driver_unregister(&ndtest_driver);
}

module_init(ndtest_init);
module_exit(ndtest_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");