/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/libnvdimm.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/ndctl.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <nfit.h>
#include <nd.h>
#include "nfit_test.h"

/*
 * Generate an NFIT table to describe the following topology:
 *
 * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
 *
 *                     (a)                       (b)           DIMM   BLK-REGION
 *           +----------+--------------+----------+---------+
 * +------+  |  blk2.0  |     pm0.0    |  blk2.1  |  pm1.0  |    0      region2
 * | imc0 +--+- - - - - region0 - - - -+----------+         +
 * +--+---+  |  blk3.0  |     pm0.0    |  blk3.1  |  pm1.0  |    1      region3
 *    |      +----------+--------------v----------v         v
 * +--+---+                            |                     |
 * | cpu0 |                                    region1
 * +--+---+                            |                     |
 *    |      +-------------------------^----------^         ^
 * +--+---+  |                 blk4.0             |  pm1.0  |    2      region4
 * | imc1 +--+-------------------------+----------+         +
 * +------+  |                 blk5.0             |  pm1.0  |    3      region5
 *           +-------------------------+----------+-+-------+
 *
 * *) In this layout we have four dimms and two memory controllers in one
 *    socket.  Each unique interface (BLK or PMEM) to DPA space
 *    is identified by a region device with a dynamically assigned id.
 *
 * *) The first portion of dimm0 and dimm1 is interleaved as REGION0.
 *    A single PMEM namespace "pm0.0" is created using half of the
 *    REGION0 SPA-range.  REGION0 spans dimm0 and dimm1.  PMEM namespaces
 *    allocate from the bottom of a region.  The unallocated portion of
 *    REGION0 aliases with REGION2 and REGION3.  That unallocated
 *    capacity is reclaimed as BLK namespaces ("blk2.0" and "blk3.0")
 *    starting at the base of each DIMM up to offset (a) in those DIMMs.
 *    "pm0.0", "blk2.0" and "blk3.0" are free-form readable names that
 *    can be assigned to a namespace.
 *
 * *) In the last portion of dimm0 and dimm1 we have an interleaved
 *    SPA range, REGION1, that spans those two dimms as well as dimm2
 *    and dimm3.  Some of REGION1 is allocated to a PMEM namespace named
 *    "pm1.0"; the rest is reclaimed as 4 BLK namespaces (one for each
 *    dimm in the interleave set): "blk2.1", "blk3.1", "blk4.0", and
 *    "blk5.0".
 *
 * *) The portions of dimm2 and dimm3 that do not participate in the
 *    REGION1 interleaved SPA range (i.e. the DPA addresses below offset
 *    (b)) are also included in the "blk4.0" and "blk5.0" namespaces.
 *    Note that BLK namespaces need not be contiguous in DPA-space, and
 *    can consume aliased capacity from multiple interleave sets.
 *
 * BUS1: Legacy NVDIMM (single contiguous range)
 *
 *  region2
 * +---------------------+
 * |---------------------|
 * ||       pm2.0       ||
 * |---------------------|
 * +---------------------+
 *
 * *) An NFIT table may describe a simple system-physical-address range
 *    with no BLK aliasing.  This type of region may optionally
 *    reference an NVDIMM.
 */
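
/*
 * Rough capacity math implied by the constants below (illustrative,
 * derived from the enum rather than dictated by it): SPA0 is DIMM_SIZE
 * interleaved 2 ways, so it consumes DIMM_SIZE/2 from each of dimm0 and
 * dimm1; SPA1 is DIMM_SIZE*2 interleaved 4 ways, so it consumes another
 * DIMM_SIZE/2 from each of the four dimms.  Whatever DPA capacity is
 * left on a dimm is what its BLK region can reclaim.
 */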
enum {
        NUM_PM = 2,
        NUM_DCR = 4,
        NUM_BDW = NUM_DCR,
        NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
        NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */,
        DIMM_SIZE = SZ_32M,
        LABEL_SIZE = SZ_128K,
        SPA0_SIZE = DIMM_SIZE,
        SPA1_SIZE = DIMM_SIZE*2,
        SPA2_SIZE = DIMM_SIZE,
        BDW_SIZE = 64 << 8,
        DCR_SIZE = 12,
        NUM_NFITS = 2, /* permit testing multiple NFITs per system */
};

struct nfit_test_dcr {
        __le64 bdw_addr;
        __le32 bdw_status;
        __u8 aperature[BDW_SIZE];
};

#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm)         \
        (((node & 0xfff) << 16) | ((socket & 0xf) << 12)        \
         | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))

static u32 handle[NUM_DCR] = {
        [0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
        [1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
        [2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
        [3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
};

struct nfit_test {
        struct acpi_nfit_desc acpi_desc;
        struct platform_device pdev;
        struct list_head resources;
        void *nfit_buf;
        dma_addr_t nfit_dma;
        size_t nfit_size;
        int num_dcr;
        int num_pm;
        void **dimm;
        dma_addr_t *dimm_dma;
        void **flush;
        dma_addr_t *flush_dma;
        void **label;
        dma_addr_t *label_dma;
        void **spa_set;
        dma_addr_t *spa_set_dma;
        struct nfit_test_dcr **dcr;
        dma_addr_t *dcr_dma;
        int (*alloc)(struct nfit_test *t);
        void (*setup)(struct nfit_test *t);
};

static struct nfit_test *to_nfit_test(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);

        return container_of(pdev, struct nfit_test, pdev);
}

static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
                struct nvdimm *nvdimm, unsigned int cmd, void *buf,
                unsigned int buf_len)
{
        struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
        struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
        int i, rc;

        if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask))
                return -ENOTTY;

        /* lookup label space for the given dimm */
        for (i = 0; i < ARRAY_SIZE(handle); i++)
                if (__to_nfit_memdev(nfit_mem)->device_handle == handle[i])
                        break;
        if (i >= ARRAY_SIZE(handle))
                return -ENXIO;

        switch (cmd) {
        case ND_CMD_GET_CONFIG_SIZE: {
                struct nd_cmd_get_config_size *nd_cmd = buf;

                if (buf_len < sizeof(*nd_cmd))
                        return -EINVAL;
                nd_cmd->status = 0;
                nd_cmd->config_size = LABEL_SIZE;
                nd_cmd->max_xfer = SZ_4K;
                rc = 0;
                break;
        }
        case ND_CMD_GET_CONFIG_DATA: {
                struct nd_cmd_get_config_data_hdr *nd_cmd = buf;
                unsigned int len, offset = nd_cmd->in_offset;

                if (buf_len < sizeof(*nd_cmd))
                        return -EINVAL;
                if (offset >= LABEL_SIZE)
                        return -EINVAL;
                if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
                        return -EINVAL;

                nd_cmd->status = 0;
                len = min(nd_cmd->in_length, LABEL_SIZE - offset);
                memcpy(nd_cmd->out_buf, t->label[i] + offset, len);
                rc = buf_len - sizeof(*nd_cmd) - len;
                break;
        }
        case ND_CMD_SET_CONFIG_DATA: {
                struct nd_cmd_set_config_hdr *nd_cmd = buf;
                unsigned int len, offset = nd_cmd->in_offset;
                u32 *status;

                if (buf_len < sizeof(*nd_cmd))
                        return -EINVAL;
                if (offset >= LABEL_SIZE)
                        return -EINVAL;
                if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
                        return -EINVAL;

                status = buf + nd_cmd->in_length + sizeof(*nd_cmd);
                *status = 0;
                len = min(nd_cmd->in_length, LABEL_SIZE - offset);
                memcpy(t->label[i] + offset, nd_cmd->in_buf, len);
                rc = buf_len - sizeof(*nd_cmd) - (len + 4);
                break;
        }
        default:
                return -ENOTTY;
        }

        return rc;
}
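
/*
 * Illustrative sketch (not part of the test): with the bus descriptor in
 * hand, the emulated label commands above could be exercised roughly as
 * follows; in practice the libnvdimm core invokes nd_desc->ndctl() on
 * behalf of the userspace ioctl path.
 *
 *      struct nd_cmd_get_config_size cmd = { 0 };
 *      int rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
 *                      &cmd, sizeof(cmd));
 *      (expect rc == 0, cmd.config_size == LABEL_SIZE, cmd.max_xfer == SZ_4K)
 */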

static DEFINE_SPINLOCK(nfit_test_lock);
static struct nfit_test *instances[NUM_NFITS];

static void release_nfit_res(void *data)
{
        struct nfit_test_resource *nfit_res = data;
        struct resource *res = nfit_res->res;

        spin_lock(&nfit_test_lock);
        list_del(&nfit_res->list);
        spin_unlock(&nfit_test_lock);

        if (is_vmalloc_addr(nfit_res->buf))
                vfree(nfit_res->buf);
        else
                dma_free_coherent(nfit_res->dev, resource_size(res),
                                nfit_res->buf, res->start);
        kfree(res);
        kfree(nfit_res);
}

static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
                void *buf)
{
        struct device *dev = &t->pdev.dev;
        struct resource *res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
        struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
                        GFP_KERNEL);
        int rc;

        if (!res || !buf || !nfit_res)
                goto err;
        rc = devm_add_action(dev, release_nfit_res, nfit_res);
        if (rc)
                goto err;
        INIT_LIST_HEAD(&nfit_res->list);
        memset(buf, 0, size);
        nfit_res->dev = dev;
        nfit_res->buf = buf;
        nfit_res->res = res;
        res->start = *dma;
        res->end = *dma + size - 1;
        res->name = "NFIT";
        spin_lock(&nfit_test_lock);
        list_add(&nfit_res->list, &t->resources);
        spin_unlock(&nfit_test_lock);

        return nfit_res->buf;
 err:
        if (buf && !is_vmalloc_addr(buf))
                dma_free_coherent(dev, size, buf, *dma);
        else if (buf)
                vfree(buf);
        kfree(res);
        kfree(nfit_res);
        return NULL;
}

static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
{
        void *buf = vmalloc(size);

        *dma = (unsigned long) buf;
        return __test_alloc(t, size, dma, buf);
}

static void *test_alloc_coherent(struct nfit_test *t, size_t size,
                dma_addr_t *dma)
{
        struct device *dev = &t->pdev.dev;
        void *buf = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

        return __test_alloc(t, size, dma, buf);
}

static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(instances); i++) {
                struct nfit_test_resource *n, *nfit_res = NULL;
                struct nfit_test *t = instances[i];

                if (!t)
                        continue;
                spin_lock(&nfit_test_lock);
                list_for_each_entry(n, &t->resources, list) {
                        if (addr >= n->res->start && (addr < n->res->start
                                                + resource_size(n->res))) {
                                nfit_res = n;
                                break;
                        } else if (addr >= (unsigned long) n->buf
                                        && (addr < (unsigned long) n->buf
                                                + resource_size(n->res))) {
                                nfit_res = n;
                                break;
                        }
                }
                spin_unlock(&nfit_test_lock);
                if (nfit_res)
                        return nfit_res;
        }

        return NULL;
}
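
/*
 * nfit_test_lookup() is registered via nfit_test_setup() in
 * nfit_test_init() below.  The intent (see nfit_test.h) is that the
 * test's mocked resource/ioremap helpers consult this lookup so that the
 * fake SPA ranges built here resolve to the vmalloc()/dma_alloc_coherent()
 * backing buffers rather than real MMIO.
 */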

static int nfit_test0_alloc(struct nfit_test *t)
{
        size_t nfit_size = sizeof(struct acpi_table_nfit)
                        + sizeof(struct acpi_nfit_system_address) * NUM_SPA
                        + sizeof(struct acpi_nfit_memory_map) * NUM_MEM
                        + sizeof(struct acpi_nfit_control_region) * NUM_DCR
                        + sizeof(struct acpi_nfit_data_region) * NUM_BDW
                        + sizeof(struct acpi_nfit_flush_address) * NUM_DCR;
        int i;

        t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
        if (!t->nfit_buf)
                return -ENOMEM;
        t->nfit_size = nfit_size;

        t->spa_set[0] = test_alloc_coherent(t, SPA0_SIZE, &t->spa_set_dma[0]);
        if (!t->spa_set[0])
                return -ENOMEM;

        t->spa_set[1] = test_alloc_coherent(t, SPA1_SIZE, &t->spa_set_dma[1]);
        if (!t->spa_set[1])
                return -ENOMEM;

        for (i = 0; i < NUM_DCR; i++) {
                t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
                if (!t->dimm[i])
                        return -ENOMEM;

                t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
                if (!t->label[i])
                        return -ENOMEM;
                sprintf(t->label[i], "label%d", i);

                t->flush[i] = test_alloc(t, 8, &t->flush_dma[i]);
                if (!t->flush[i])
                        return -ENOMEM;
        }

        for (i = 0; i < NUM_DCR; i++) {
                t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
                if (!t->dcr[i])
                        return -ENOMEM;
        }

        return 0;
}

static int nfit_test1_alloc(struct nfit_test *t)
{
        size_t nfit_size = sizeof(struct acpi_table_nfit)
                + sizeof(struct acpi_nfit_system_address)
                + sizeof(struct acpi_nfit_memory_map)
                + sizeof(struct acpi_nfit_control_region);

        t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
        if (!t->nfit_buf)
                return -ENOMEM;
        t->nfit_size = nfit_size;

        t->spa_set[0] = test_alloc_coherent(t, SPA2_SIZE, &t->spa_set_dma[0]);
        if (!t->spa_set[0])
                return -ENOMEM;

        return 0;
}

static void nfit_test_init_header(struct acpi_table_nfit *nfit, size_t size)
{
        memcpy(nfit->header.signature, ACPI_SIG_NFIT, 4);
        nfit->header.length = size;
        nfit->header.revision = 1;
        memcpy(nfit->header.oem_id, "LIBND", 6);
        memcpy(nfit->header.oem_table_id, "TEST", 5);
        nfit->header.oem_revision = 1;
        memcpy(nfit->header.asl_compiler_id, "TST", 4);
        nfit->header.asl_compiler_revision = 1;
}

static void nfit_test0_setup(struct nfit_test *t)
{
        struct nvdimm_bus_descriptor *nd_desc;
        struct acpi_nfit_desc *acpi_desc;
        struct acpi_nfit_memory_map *memdev;
        void *nfit_buf = t->nfit_buf;
        size_t size = t->nfit_size;
        struct acpi_nfit_system_address *spa;
        struct acpi_nfit_control_region *dcr;
        struct acpi_nfit_data_region *bdw;
        struct acpi_nfit_flush_address *flush;
        unsigned int offset;

        nfit_test_init_header(nfit_buf, size);

        /*
         * spa0 (interleave first half of dimm0 and dimm1, note storage
         * does not actually alias the related block-data-window
         * regions)
         */
        spa = nfit_buf + sizeof(struct acpi_table_nfit);
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
        spa->range_index = 0+1;
        spa->address = t->spa_set_dma[0];
        spa->length = SPA0_SIZE;

        /*
         * spa1 (interleave last half of the 4 DIMMS, note storage
         * does not actually alias the related block-data-window
         * regions)
         */
        spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa);
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
        spa->range_index = 1+1;
        spa->address = t->spa_set_dma[1];
        spa->length = SPA1_SIZE;

        /* spa2 (dcr0) dimm0 */
        spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 2;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
        spa->range_index = 2+1;
        spa->address = t->dcr_dma[0];
        spa->length = DCR_SIZE;

        /* spa3 (dcr1) dimm1 */
        spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 3;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
        spa->range_index = 3+1;
        spa->address = t->dcr_dma[1];
        spa->length = DCR_SIZE;

        /* spa4 (dcr2) dimm2 */
        spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 4;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
        spa->range_index = 4+1;
        spa->address = t->dcr_dma[2];
        spa->length = DCR_SIZE;

        /* spa5 (dcr3) dimm3 */
        spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 5;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
        spa->range_index = 5+1;
        spa->address = t->dcr_dma[3];
        spa->length = DCR_SIZE;

        /* spa6 (bdw for dcr0) dimm0 */
        spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 6;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
        spa->range_index = 6+1;
        spa->address = t->dimm_dma[0];
        spa->length = DIMM_SIZE;

        /* spa7 (bdw for dcr1) dimm1 */
        spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 7;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
        spa->range_index = 7+1;
        spa->address = t->dimm_dma[1];
        spa->length = DIMM_SIZE;

        /* spa8 (bdw for dcr2) dimm2 */
        spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 8;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
        spa->range_index = 8+1;
        spa->address = t->dimm_dma[2];
        spa->length = DIMM_SIZE;

        /* spa9 (bdw for dcr3) dimm3 */
        spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 9;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
        spa->range_index = 9+1;
        spa->address = t->dimm_dma[3];
        spa->length = DIMM_SIZE;

        offset = sizeof(struct acpi_table_nfit) + sizeof(*spa) * 10;
        /* mem-region0 (spa0, dimm0) */
        memdev = nfit_buf + offset;
        memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
        memdev->header.length = sizeof(*memdev);
        memdev->device_handle = handle[0];
        memdev->physical_id = 0;
        memdev->region_id = 0;
        memdev->range_index = 0+1;
        memdev->region_index = 0+1;
        memdev->region_size = SPA0_SIZE/2;
        memdev->region_offset = t->spa_set_dma[0];
        memdev->address = 0;
        memdev->interleave_index = 0;
        memdev->interleave_ways = 2;
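
        /*
         * As with mem-region0 above, each memdev entry ties a dimm
         * (device_handle) to an SPA range via range_index and to a
         * control region via region_index; the values must match the
         * indices written into the spa and dcr structures elsewhere in
         * this table.
         */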

        /* mem-region1 (spa0, dimm1) */
        memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map);
        memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
        memdev->header.length = sizeof(*memdev);
        memdev->device_handle = handle[1];
        memdev->physical_id = 1;
        memdev->region_id = 0;
        memdev->range_index = 0+1;
        memdev->region_index = 1+1;
        memdev->region_size = SPA0_SIZE/2;
        memdev->region_offset = t->spa_set_dma[0] + SPA0_SIZE/2;
        memdev->address = 0;
        memdev->interleave_index = 0;
        memdev->interleave_ways = 2;

        /* mem-region2 (spa1, dimm0) */
        memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
        memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
        memdev->header.length = sizeof(*memdev);
        memdev->device_handle = handle[0];
        memdev->physical_id = 0;
        memdev->region_id = 1;
        memdev->range_index = 1+1;
        memdev->region_index = 0+1;
        memdev->region_size = SPA1_SIZE/4;
        memdev->region_offset = t->spa_set_dma[1];
        memdev->address = SPA0_SIZE/2;
        memdev->interleave_index = 0;
        memdev->interleave_ways = 4;

        /* mem-region3 (spa1, dimm1) */
        memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
        memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
        memdev->header.length = sizeof(*memdev);
        memdev->device_handle = handle[1];
        memdev->physical_id = 1;
        memdev->region_id = 1;
        memdev->range_index = 1+1;
        memdev->region_index = 1+1;
        memdev->region_size = SPA1_SIZE/4;
        memdev->region_offset = t->spa_set_dma[1] + SPA1_SIZE/4;
        memdev->address = SPA0_SIZE/2;
        memdev->interleave_index = 0;
        memdev->interleave_ways = 4;

        /* mem-region4 (spa1, dimm2) */
        memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4;
        memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
        memdev->header.length = sizeof(*memdev);
        memdev->device_handle = handle[2];
        memdev->physical_id = 2;
        memdev->region_id = 0;
        memdev->range_index = 1+1;
        memdev->region_index = 2+1;
        memdev->region_size = SPA1_SIZE/4;
        memdev->region_offset = t->spa_set_dma[1] + 2*SPA1_SIZE/4;
        memdev->address = SPA0_SIZE/2;
        memdev->interleave_index = 0;
        memdev->interleave_ways = 4;

        /* mem-region5 (spa1, dimm3) */
        memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
        memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
        memdev->header.length = sizeof(*memdev);
        memdev->device_handle = handle[3];
        memdev->physical_id = 3;
        memdev->region_id = 0;
        memdev->range_index = 1+1;
        memdev->region_index = 3+1;
        memdev->region_size = SPA1_SIZE/4;
        memdev->region_offset = t->spa_set_dma[1] + 3*SPA1_SIZE/4;
        memdev->address = SPA0_SIZE/2;
        memdev->interleave_index = 0;
        memdev->interleave_ways = 4;

        /* mem-region6 (spa/dcr0, dimm0) */
        memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6;
        memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
        memdev->header.length = sizeof(*memdev);
        memdev->device_handle = handle[0];
        memdev->physical_id = 0;
        memdev->region_id = 0;
        memdev->range_index = 2+1;
        memdev->region_index = 0+1;
        memdev->region_size = 0;
        memdev->region_offset = 0;
        memdev->address = 0;
        memdev->interleave_index = 0;
        memdev->interleave_ways = 1;

        /* mem-region7 (spa/dcr1, dimm1) */
        memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7;
        memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
        memdev->header.length = sizeof(*memdev);
        memdev->device_handle = handle[1];
        memdev->physical_id = 1;
        memdev->region_id = 0;
        memdev->range_index = 3+1;
        memdev->region_index = 1+1;
        memdev->region_size = 0;
        memdev->region_offset = 0;
        memdev->address = 0;
        memdev->interleave_index = 0;
        memdev->interleave_ways = 1;

        /* mem-region8 (spa/dcr2, dimm2) */
        memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8;
        memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
        memdev->header.length = sizeof(*memdev);
        memdev->device_handle = handle[2];
        memdev->physical_id = 2;
        memdev->region_id = 0;
        memdev->range_index = 4+1;
        memdev->region_index = 2+1;
        memdev->region_size = 0;
        memdev->region_offset = 0;
        memdev->address = 0;
        memdev->interleave_index = 0;
        memdev->interleave_ways = 1;

        /* mem-region9 (spa/dcr3, dimm3) */
        memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9;
        memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
        memdev->header.length = sizeof(*memdev);
        memdev->device_handle = handle[3];
        memdev->physical_id = 3;
        memdev->region_id = 0;
        memdev->range_index = 5+1;
        memdev->region_index = 3+1;
        memdev->region_size = 0;
        memdev->region_offset = 0;
        memdev->address = 0;
        memdev->interleave_index = 0;
        memdev->interleave_ways = 1;

        /* mem-region10 (spa/bdw0, dimm0) */
        memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10;
        memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
        memdev->header.length = sizeof(*memdev);
        memdev->device_handle = handle[0];
        memdev->physical_id = 0;
        memdev->region_id = 0;
        memdev->range_index = 6+1;
        memdev->region_index = 0+1;
        memdev->region_size = 0;
        memdev->region_offset = 0;
        memdev->address = 0;
        memdev->interleave_index = 0;
        memdev->interleave_ways = 1;

        /* mem-region11 (spa/bdw1, dimm1) */
        memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11;
        memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
        memdev->header.length = sizeof(*memdev);
        memdev->device_handle = handle[1];
        memdev->physical_id = 1;
        memdev->region_id = 0;
        memdev->range_index = 7+1;
        memdev->region_index = 1+1;
        memdev->region_size = 0;
        memdev->region_offset = 0;
        memdev->address = 0;
        memdev->interleave_index = 0;
        memdev->interleave_ways = 1;

        /* mem-region12 (spa/bdw2, dimm2) */
        memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12;
        memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
        memdev->header.length = sizeof(*memdev);
        memdev->device_handle = handle[2];
        memdev->physical_id = 2;
        memdev->region_id = 0;
        memdev->range_index = 8+1;
        memdev->region_index = 2+1;
        memdev->region_size = 0;
        memdev->region_offset = 0;
        memdev->address = 0;
        memdev->interleave_index = 0;
        memdev->interleave_ways = 1;

        /* mem-region13 (spa/bdw3, dimm3) */
        memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13;
        memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
        memdev->header.length = sizeof(*memdev);
        memdev->device_handle = handle[3];
        memdev->physical_id = 3;
        memdev->region_id = 0;
        memdev->range_index = 9+1;
        memdev->region_index = 3+1;
        memdev->region_size = 0;
        memdev->region_offset = 0;
        memdev->address = 0;
        memdev->interleave_index = 0;
        memdev->interleave_ways = 1;
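
        /*
         * Offset check: at this point the buffer holds the ACPI header,
         * NUM_SPA (10) SPA entries and NUM_MEM (14) memdev entries.  The
         * control regions, block data windows and flush hints appended
         * below account for the remainder of the nfit_size computed in
         * nfit_test0_alloc().
         */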

        offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
        /* dcr-descriptor0 */
        dcr = nfit_buf + offset;
        dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
        dcr->header.length = sizeof(struct acpi_nfit_control_region);
        dcr->region_index = 0+1;
        dcr->vendor_id = 0xabcd;
        dcr->device_id = 0;
        dcr->revision_id = 1;
        dcr->serial_number = ~handle[0];
        dcr->windows = 1;
        dcr->window_size = DCR_SIZE;
        dcr->command_offset = 0;
        dcr->command_size = 8;
        dcr->status_offset = 8;
        dcr->status_size = 4;

        /* dcr-descriptor1 */
        dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region);
        dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
        dcr->header.length = sizeof(struct acpi_nfit_control_region);
        dcr->region_index = 1+1;
        dcr->vendor_id = 0xabcd;
        dcr->device_id = 0;
        dcr->revision_id = 1;
        dcr->serial_number = ~handle[1];
        dcr->windows = 1;
        dcr->window_size = DCR_SIZE;
        dcr->command_offset = 0;
        dcr->command_size = 8;
        dcr->status_offset = 8;
        dcr->status_size = 4;

        /* dcr-descriptor2 */
        dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2;
        dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
        dcr->header.length = sizeof(struct acpi_nfit_control_region);
        dcr->region_index = 2+1;
        dcr->vendor_id = 0xabcd;
        dcr->device_id = 0;
        dcr->revision_id = 1;
        dcr->serial_number = ~handle[2];
        dcr->windows = 1;
        dcr->window_size = DCR_SIZE;
        dcr->command_offset = 0;
        dcr->command_size = 8;
        dcr->status_offset = 8;
        dcr->status_size = 4;

        /* dcr-descriptor3 */
        dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3;
        dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
        dcr->header.length = sizeof(struct acpi_nfit_control_region);
        dcr->region_index = 3+1;
        dcr->vendor_id = 0xabcd;
        dcr->device_id = 0;
        dcr->revision_id = 1;
        dcr->serial_number = ~handle[3];
        dcr->windows = 1;
        dcr->window_size = DCR_SIZE;
        dcr->command_offset = 0;
        dcr->command_size = 8;
        dcr->status_offset = 8;
        dcr->status_size = 4;

        offset = offset + sizeof(struct acpi_nfit_control_region) * 4;
        /* bdw0 (spa/dcr0, dimm0) */
        bdw = nfit_buf + offset;
        bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
        bdw->header.length = sizeof(struct acpi_nfit_data_region);
        bdw->region_index = 0+1;
        bdw->windows = 1;
        bdw->offset = 0;
        bdw->size = BDW_SIZE;
        bdw->capacity = DIMM_SIZE;
        bdw->start_address = 0;

        /* bdw1 (spa/dcr1, dimm1) */
        bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region);
        bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
        bdw->header.length = sizeof(struct acpi_nfit_data_region);
        bdw->region_index = 1+1;
        bdw->windows = 1;
        bdw->offset = 0;
        bdw->size = BDW_SIZE;
        bdw->capacity = DIMM_SIZE;
        bdw->start_address = 0;

        /* bdw2 (spa/dcr2, dimm2) */
        bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2;
        bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
        bdw->header.length = sizeof(struct acpi_nfit_data_region);
        bdw->region_index = 2+1;
        bdw->windows = 1;
        bdw->offset = 0;
        bdw->size = BDW_SIZE;
        bdw->capacity = DIMM_SIZE;
        bdw->start_address = 0;

        /* bdw3 (spa/dcr3, dimm3) */
        bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3;
        bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
        bdw->header.length = sizeof(struct acpi_nfit_data_region);
        bdw->region_index = 3+1;
        bdw->windows = 1;
        bdw->offset = 0;
        bdw->size = BDW_SIZE;
        bdw->capacity = DIMM_SIZE;
        bdw->start_address = 0;

        offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
        /* flush0 (dimm0) */
        flush = nfit_buf + offset;
        flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
        flush->header.length = sizeof(struct acpi_nfit_flush_address);
        flush->device_handle = handle[0];
        flush->hint_count = 1;
        flush->hint_address[0] = t->flush_dma[0];

        /* flush1 (dimm1) */
        flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 1;
        flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
        flush->header.length = sizeof(struct acpi_nfit_flush_address);
        flush->device_handle = handle[1];
        flush->hint_count = 1;
        flush->hint_address[0] = t->flush_dma[1];

        /* flush2 (dimm2) */
        flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 2;
        flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
        flush->header.length = sizeof(struct acpi_nfit_flush_address);
        flush->device_handle = handle[2];
        flush->hint_count = 1;
        flush->hint_address[0] = t->flush_dma[2];

        /* flush3 (dimm3) */
        flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 3;
        flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
        flush->header.length = sizeof(struct acpi_nfit_flush_address);
        flush->device_handle = handle[3];
        flush->hint_count = 1;
        flush->hint_address[0] = t->flush_dma[3];

        acpi_desc = &t->acpi_desc;
        set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_dsm_force_en);
        set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
        set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
        nd_desc = &acpi_desc->nd_desc;
        nd_desc->ndctl = nfit_test_ctl;
}

static void nfit_test1_setup(struct nfit_test *t)
{
        size_t size = t->nfit_size, offset;
        void *nfit_buf = t->nfit_buf;
        struct acpi_nfit_memory_map *memdev;
        struct acpi_nfit_control_region *dcr;
        struct acpi_nfit_system_address *spa;

        nfit_test_init_header(nfit_buf, size);

        offset = sizeof(struct acpi_table_nfit);
        /* spa0 (flat range with no bdw aliasing) */
        spa = nfit_buf + offset;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
        spa->range_index = 0+1;
        spa->address = t->spa_set_dma[0];
        spa->length = SPA2_SIZE;

        offset += sizeof(*spa);
        /* mem-region0 (spa0, dimm0) */
        memdev = nfit_buf + offset;
        memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
        memdev->header.length = sizeof(*memdev);
        memdev->device_handle = 0;
        memdev->physical_id = 0;
        memdev->region_id = 0;
        memdev->range_index = 0+1;
        memdev->region_index = 0+1;
        memdev->region_size = SPA2_SIZE;
        memdev->region_offset = 0;
        memdev->address = 0;
        memdev->interleave_index = 0;
        memdev->interleave_ways = 1;
        memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
                | ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
                | ACPI_NFIT_MEM_ARMED;

        offset += sizeof(*memdev);
        /* dcr-descriptor0 */
        dcr = nfit_buf + offset;
        dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
        dcr->header.length = sizeof(struct acpi_nfit_control_region);
        dcr->region_index = 0+1;
        dcr->vendor_id = 0xabcd;
        dcr->device_id = 0;
        dcr->revision_id = 1;
        dcr->serial_number = ~0;
        dcr->code = 0x201;
        dcr->windows = 0;
        dcr->window_size = 0;
        dcr->command_offset = 0;
        dcr->command_size = 0;
        dcr->status_offset = 0;
        dcr->status_size = 0;
}
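
/*
 * Emulate a block-data-window aperture with a plain memcpy() against the
 * test-allocated backing store.  The lane acquire/release mirrors the
 * serialization a real BLK path would need around its aperture
 * programming, even though nothing here actually contends for hardware.
 */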

static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
                void *iobuf, u64 len, int rw)
{
        struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
        struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
        struct nd_region *nd_region = &ndbr->nd_region;
        unsigned int lane;

        lane = nd_region_acquire_lane(nd_region);
        if (rw)
                memcpy(mmio->base + dpa, iobuf, len);
        else
                memcpy(iobuf, mmio->base + dpa, len);
        nd_region_release_lane(nd_region, lane);

        return 0;
}

static int nfit_test_probe(struct platform_device *pdev)
{
        struct nvdimm_bus_descriptor *nd_desc;
        struct acpi_nfit_desc *acpi_desc;
        struct device *dev = &pdev->dev;
        struct nfit_test *nfit_test;
        int rc;

        nfit_test = to_nfit_test(&pdev->dev);

        /* common alloc */
        if (nfit_test->num_dcr) {
                int num = nfit_test->num_dcr;

                nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
                                GFP_KERNEL);
                nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
                                GFP_KERNEL);
                nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
                                GFP_KERNEL);
                nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
                                GFP_KERNEL);
                nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
                                GFP_KERNEL);
                nfit_test->label_dma = devm_kcalloc(dev, num,
                                sizeof(dma_addr_t), GFP_KERNEL);
                nfit_test->dcr = devm_kcalloc(dev, num,
                                sizeof(struct nfit_test_dcr *), GFP_KERNEL);
                nfit_test->dcr_dma = devm_kcalloc(dev, num,
                                sizeof(dma_addr_t), GFP_KERNEL);
                if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
                                && nfit_test->label_dma && nfit_test->dcr
                                && nfit_test->dcr_dma && nfit_test->flush
                                && nfit_test->flush_dma)
                        /* pass */;
                else
                        return -ENOMEM;
        }

        if (nfit_test->num_pm) {
                int num = nfit_test->num_pm;

                nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
                                GFP_KERNEL);
                nfit_test->spa_set_dma = devm_kcalloc(dev, num,
                                sizeof(dma_addr_t), GFP_KERNEL);
                if (nfit_test->spa_set && nfit_test->spa_set_dma)
                        /* pass */;
                else
                        return -ENOMEM;
        }

        /* per-nfit specific alloc */
        if (nfit_test->alloc(nfit_test))
                return -ENOMEM;

        nfit_test->setup(nfit_test);
        acpi_desc = &nfit_test->acpi_desc;
        acpi_desc->dev = &pdev->dev;
        acpi_desc->nfit = nfit_test->nfit_buf;
        acpi_desc->blk_do_io = nfit_test_blk_do_io;
        nd_desc = &acpi_desc->nd_desc;
        nd_desc->attr_groups = acpi_nfit_attribute_groups;
        acpi_desc->nvdimm_bus = nvdimm_bus_register(&pdev->dev, nd_desc);
        if (!acpi_desc->nvdimm_bus)
                return -ENXIO;

        rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_size);
        if (rc) {
                nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
                return rc;
        }

        return 0;
}

static int nfit_test_remove(struct platform_device *pdev)
{
        struct nfit_test *nfit_test = to_nfit_test(&pdev->dev);
        struct acpi_nfit_desc *acpi_desc = &nfit_test->acpi_desc;

        nvdimm_bus_unregister(acpi_desc->nvdimm_bus);

        return 0;
}

static void nfit_test_release(struct device *dev)
{
        struct nfit_test *nfit_test = to_nfit_test(dev);

        kfree(nfit_test);
}

static const struct platform_device_id nfit_test_id[] = {
        { KBUILD_MODNAME },
        { },
};

static struct platform_driver nfit_test_driver = {
        .probe = nfit_test_probe,
        .remove = nfit_test_remove,
        .driver = {
                .name = KBUILD_MODNAME,
        },
        .id_table = nfit_test_id,
};

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif
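
/*
 * The init routine below makes a one-time dma_alloc_coherent() of SZ_128M
 * purely as a sanity check that enough contiguous (CMA) memory is
 * available for the coherent test allocations; the buffer is freed again
 * immediately.
 */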

static __init int nfit_test_init(void)
{
        int rc, i;

        nfit_test_setup(nfit_test_lookup);

        for (i = 0; i < NUM_NFITS; i++) {
                struct nfit_test *nfit_test;
                struct platform_device *pdev;
                static int once;

                nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
                if (!nfit_test) {
                        rc = -ENOMEM;
                        goto err_register;
                }
                INIT_LIST_HEAD(&nfit_test->resources);
                switch (i) {
                case 0:
                        nfit_test->num_pm = NUM_PM;
                        nfit_test->num_dcr = NUM_DCR;
                        nfit_test->alloc = nfit_test0_alloc;
                        nfit_test->setup = nfit_test0_setup;
                        break;
                case 1:
                        nfit_test->num_pm = 1;
                        nfit_test->alloc = nfit_test1_alloc;
                        nfit_test->setup = nfit_test1_setup;
                        break;
                default:
                        rc = -EINVAL;
                        goto err_register;
                }
                pdev = &nfit_test->pdev;
                pdev->name = KBUILD_MODNAME;
                pdev->id = i;
                pdev->dev.release = nfit_test_release;
                rc = platform_device_register(pdev);
                if (rc) {
                        put_device(&pdev->dev);
                        goto err_register;
                }

                rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
                if (rc)
                        goto err_register;

                instances[i] = nfit_test;

                if (!once++) {
                        dma_addr_t dma;
                        void *buf;

                        buf = dma_alloc_coherent(&pdev->dev, SZ_128M, &dma,
                                        GFP_KERNEL);
                        if (!buf) {
                                rc = -ENOMEM;
                                dev_warn(&pdev->dev, "need 128M of free cma\n");
                                goto err_register;
                        }
                        dma_free_coherent(&pdev->dev, SZ_128M, buf, dma);
                }
        }

        rc = platform_driver_register(&nfit_test_driver);
        if (rc)
                goto err_register;
        return 0;

 err_register:
        for (i = 0; i < NUM_NFITS; i++)
                if (instances[i])
                        platform_device_unregister(&instances[i]->pdev);
        nfit_test_teardown();
        return rc;
}

static __exit void nfit_test_exit(void)
{
        int i;

        platform_driver_unregister(&nfit_test_driver);
        for (i = 0; i < NUM_NFITS; i++)
                platform_device_unregister(&instances[i]->pdev);
        nfit_test_teardown();
}

module_init(nfit_test_init);
module_exit(nfit_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");