/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Check whether this dimm supports the get_config_data command; label
 * access is mandatory for aliased dimms, so for those the failure is
 * fatal (-ENXIO) rather than merely unsupported (-ENOTTY).
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_ALIASING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%pf: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver-data to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nd_cmd_get_config_data_hdr *cmd;
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	u32 max_cmd_size, config_size;
	size_t offset;

	if (rc)
		return rc;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
			|| ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
		dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n",
				ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	ndd->data = kvmalloc(ndd->nsarea.config_size, GFP_KERNEL);
	if (!ndd->data)
		return -ENOMEM;

	max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer);
	cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	nd_desc = nvdimm_bus->nd_desc;
	for (config_size = ndd->nsarea.config_size, offset = 0;
			config_size; config_size -= cmd->in_length,
			offset += cmd->in_length) {
		cmd->in_length = min(config_size, max_cmd_size);
		cmd->in_offset = offset;
		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd,
				cmd->in_length + sizeof(*cmd),
				NULL);
		if (rc || cmd->status) {
			rc = -ENXIO;
			break;
		}
		memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
	}
	dev_dbg(ndd->dev, "%s: len: %zu rc: %d\n", __func__, offset, rc);
	kfree(cmd);

	return rc;
}

int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	int rc = validate_dimm(ndd);
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (!ndd->data)
		return -ENXIO;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, PAGE_SIZE, len);
	max_cmd_size = min_t(u32, max_cmd_size, ndd->nsarea.max_xfer);
	cmd = kzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;
		u32 *status;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4-bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
		status = ((void *) cmd) + cmd_size - sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL);
		if (rc || *status) {
			rc = rc ? rc : -ENXIO;
			break;
		}
	}
	kfree(cmd);

	return rc;
}

void nvdimm_set_aliasing(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_ALIASING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
	struct nd_region *nd_region = &ndbr->nd_region;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];

	return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "%s\n", __func__);

	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
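	/*
	 * All dpa resources and label data are gone; finally drop the
	 * device reference (taken in the probe path) that kept @dev
	 * alive for the lifetime of this driver-data.
	 */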
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing, userspace should
	 * quiesce probing if it wants a static answer
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(available_slots);

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	NULL,
};

struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);

struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
		const struct attribute_group **groups, unsigned long flags,
		unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}
	nvdimm->provider_data = provider_data;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
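	/*
	 * Registration is asynchronous; callers that need to confirm
	 * the outcome flush it via nd_synchronize(), see
	 * nvdimm_bus_check_dimm_count() below.
	 */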
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(nvdimm_create);

int alias_dpa_busy(struct device *dev, void *data)
{
	resource_size_t map_end, blk_start, new;
	struct blk_alloc_info *info = data;
	struct nd_mapping *nd_mapping;
	struct nd_region *nd_region;
	struct nvdimm_drvdata *ndd;
	struct resource *res;
	int i;

	if (!is_nd_pmem(dev))
		return 0;

	nd_region = to_nd_region(dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
			break;
	}

	if (i >= nd_region->ndr_mappings)
		return 0;

	ndd = to_ndd(nd_mapping);
	map_end = nd_mapping->start + nd_mapping->size - 1;
	blk_start = nd_mapping->start;

	/*
	 * In the allocation case ->res is set to free space that we are
	 * looking to validate against PMEM aliasing collision rules
	 * (i.e. BLK is allocated after all aliased PMEM).
	 */
	if (info->res) {
		if (info->res->start >= nd_mapping->start
				&& info->res->start < map_end)
			/* pass */;
		else
			return 0;
	}

 retry:
	/*
	 * Find the free dpa from the end of the last pmem allocation to
	 * the end of the interleave-set mapping.
	 */
	for_each_dpa_resource(ndd, res) {
		if (strncmp(res->name, "pmem", 4) != 0)
			continue;
		if ((res->start >= blk_start && res->start < map_end)
				|| (res->end >= blk_start
					&& res->end <= map_end)) {
			new = max(blk_start, min(map_end + 1, res->end + 1));
			if (new != blk_start) {
				blk_start = new;
				goto retry;
			}
		}
	}

	/* update the free space range with the probed blk_start */
	if (info->res && blk_start > info->res->start) {
		info->res->start = max(info->res->start, blk_start);
		if (info->res->start > info->res->end)
			info->res->end = info->res->start - 1;
		return 1;
	}

	info->available -= blk_start - nd_mapping->start;

	return 0;
}

/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: BLK region whose first mapping carries the
 *	       dpa-resource-root + labels
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
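 *
 * alias_dpa_busy() above implements that rule: it walks each aliasing
 * PMEM region and advances the candidate BLK start past any PMEM
 * allocation before the remainder is counted as available.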
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct blk_alloc_info info = {
		.nd_mapping = nd_mapping,
		.available = nd_mapping->size,
		.res = NULL,
	};
	struct resource *res;

	if (!ndd)
		return 0;

	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

	/* now account for busy blk allocations in unaliased dpa */
	for_each_dpa_resource(ndd, res) {
		if (strncmp(res->name, "blk", 3) != 0)
			continue;
		info.available -= resource_size(res);
	}

	return info.available;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
	resource_size_t map_start, map_end, busy = 0, available, blk_start;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	const char *reason;

	if (!ndd)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	blk_start = max(map_start, map_end + 1 - *overlap);
	for_each_dpa_resource(ndd, res) {
		if (res->start >= map_start && res->start < map_end) {
			if (strncmp(res->name, "blk", 3) == 0)
				blk_start = min(blk_start,
						max(map_start, res->start));
			else if (res->end > map_end) {
				reason = "misaligned to iset";
				goto err;
			} else
				busy += resource_size(res);
		} else if (res->end >= map_start && res->end <= map_end) {
			if (strncmp(res->name, "blk", 3) == 0) {
				/*
				 * If a BLK allocation overlaps the start of
				 * PMEM the entire interleave set may now only
				 * be used for BLK.
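				 * A PMEM label must align with the start of
				 * the interleave set, so a BLK claim at
				 * map_start leaves no room for PMEM on this
				 * dimm.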
556 */ 557 blk_start = map_start; 558 } else 559 busy += resource_size(res); 560 } else if (map_start > res->start && map_start < res->end) { 561 /* total eclipse of the mapping */ 562 busy += nd_mapping->size; 563 blk_start = map_start; 564 } 565 } 566 567 *overlap = map_end + 1 - blk_start; 568 available = blk_start - map_start; 569 if (busy < available) 570 return available - busy; 571 return 0; 572 573 err: 574 nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason); 575 return 0; 576 } 577 578 void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res) 579 { 580 WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev)); 581 kfree(res->name); 582 __release_region(&ndd->dpa, res->start, resource_size(res)); 583 } 584 585 struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd, 586 struct nd_label_id *label_id, resource_size_t start, 587 resource_size_t n) 588 { 589 char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL); 590 struct resource *res; 591 592 if (!name) 593 return NULL; 594 595 WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev)); 596 res = __request_region(&ndd->dpa, start, n, name, 0); 597 if (!res) 598 kfree(name); 599 return res; 600 } 601 602 /** 603 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id 604 * @nvdimm: container of dpa-resource-root + labels 605 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid> 606 */ 607 resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd, 608 struct nd_label_id *label_id) 609 { 610 resource_size_t allocated = 0; 611 struct resource *res; 612 613 for_each_dpa_resource(ndd, res) 614 if (strcmp(res->name, label_id->id) == 0) 615 allocated += resource_size(res); 616 617 return allocated; 618 } 619 620 static int count_dimms(struct device *dev, void *c) 621 { 622 int *count = c; 623 624 if (is_nvdimm(dev)) 625 (*count)++; 626 return 0; 627 } 628 629 int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count) 630 { 631 int count = 0; 632 /* Flush any possible dimm registration failures */ 633 nd_synchronize(); 634 635 device_for_each_child(&nvdimm_bus->dev, &count, count_dimms); 636 dev_dbg(&nvdimm_bus->dev, "%s: count: %d\n", __func__, count); 637 if (count != dimm_count) 638 return -ENXIO; 639 return 0; 640 } 641 EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count); 642 643 void __exit nvdimm_devs_exit(void) 644 { 645 ida_destroy(&dimm_ida); 646 } 647