/*
 * Copyright(c) 2016 - 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include "dax-private.h"
#include "dax.h"

static struct class *dax_class;

/*
 * Rely on the fact that drvdata is set before the attributes are
 * registered, and that the attributes are unregistered before drvdata
 * is cleared to assume that drvdata is always valid.
 */
static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dax_region->id);
}
static DEVICE_ATTR_RO(id);

static ssize_t region_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%llu\n", (unsigned long long)
			resource_size(&dax_region->res));
}
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
		region_size_show, NULL);

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", dax_region->align);
}
static DEVICE_ATTR_RO(align);

static struct attribute *dax_region_attributes[] = {
	&dev_attr_region_size.attr,
	&dev_attr_align.attr,
	&dev_attr_id.attr,
	NULL,
};

static const struct attribute_group dax_region_attribute_group = {
	.name = "dax_region",
	.attrs = dax_region_attributes,
};

static const struct attribute_group *dax_region_attribute_groups[] = {
	&dax_region_attribute_group,
	NULL,
};

static void dax_region_free(struct kref *kref)
{
	struct dax_region *dax_region;

	dax_region = container_of(kref, struct dax_region, kref);
	kfree(dax_region);
}

void dax_region_put(struct dax_region *dax_region)
{
	kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);

static void dax_region_unregister(void *region)
{
	struct dax_region *dax_region = region;

	sysfs_remove_groups(&dax_region->dev->kobj,
			dax_region_attribute_groups);
	dax_region_put(dax_region);
}

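/*
 * alloc_dax_region() - create a dax_region covering @res for @parent.
 * The region records the physical range, the required mapping alignment,
 * the kernel mapping base (@addr) and the pfn flags shared by all
 * device-dax instances carved out of it.  It is reference counted and
 * unregistered via a devm action when @parent is unbound.
 */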
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
		struct resource *res, unsigned int align, void *addr,
		unsigned long pfn_flags)
{
	struct dax_region *dax_region;

	/*
	 * The DAX core assumes that it can store its private data in
	 * parent->driver_data. This WARN is a reminder / safeguard for
	 * developers of device-dax drivers.
	 */
	if (dev_get_drvdata(parent)) {
		dev_WARN(parent, "dax core failed to setup private data\n");
		return NULL;
	}

	if (!IS_ALIGNED(res->start, align)
			|| !IS_ALIGNED(resource_size(res), align))
		return NULL;

	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
	if (!dax_region)
		return NULL;

	dev_set_drvdata(parent, dax_region);
	memcpy(&dax_region->res, res, sizeof(*res));
	dax_region->pfn_flags = pfn_flags;
	kref_init(&dax_region->kref);
	dax_region->id = region_id;
	ida_init(&dax_region->ida);
	dax_region->align = align;
	dax_region->dev = parent;
	dax_region->base = addr;
	if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
		kfree(dax_region);
		return NULL;
	}

	kref_get(&dax_region->kref);
	if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
		return NULL;
	return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);

static struct dev_dax *to_dev_dax(struct device *dev)
{
	return container_of(dev, struct dev_dax, dev);
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	unsigned long long size = 0;
	int i;

	for (i = 0; i < dev_dax->num_resources; i++)
		size += resource_size(&dev_dax->res[i]);

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static struct attribute *dev_dax_attributes[] = {
	&dev_attr_size.attr,
	NULL,
};

static const struct attribute_group dev_dax_attribute_group = {
	.attrs = dev_dax_attributes,
};

static const struct attribute_group *dax_attribute_groups[] = {
	&dev_dax_attribute_group,
	NULL,
};

static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
		const char *func)
{
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	unsigned long mask;

	if (!dax_alive(dev_dax->dax_dev))
		return -ENXIO;

	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, attempted private mapping\n",
				current->comm, func);
		return -EINVAL;
	}

	mask = dax_region->align - 1;
	if (vma->vm_start & mask || vma->vm_end & mask) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
				current->comm, func, vma->vm_start, vma->vm_end,
				mask);
		return -EINVAL;
	}

	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
			&& (vma->vm_flags & VM_DONTCOPY) == 0) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, dax range requires MADV_DONTFORK\n",
				current->comm, func);
		return -EINVAL;
	}

	if (!vma_is_dax(vma)) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, vma is not DAX capable\n",
				current->comm, func);
		return -EINVAL;
	}

	return 0;
}

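/*
 * Translate a page offset into the device into a physical address by
 * walking the instance's resource ranges.  Returns -1 when @size bytes
 * starting at @pgoff are not fully contained in a single resource.
 */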
/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
		unsigned long size)
{
	struct resource *res;
	/* gcc-4.6.3-nolibc for i386 complains that this is uninitialized */
	phys_addr_t uninitialized_var(phys);
	int i;

	for (i = 0; i < dev_dax->num_resources; i++) {
		res = &dev_dax->res[i];
		phys = pgoff * PAGE_SIZE + res->start;
		if (phys >= res->start && phys <= res->end)
			break;
		pgoff -= PHYS_PFN(resource_size(res));
	}

	if (i < dev_dax->num_resources) {
		res = &dev_dax->res[i];
		if (phys + size - 1 <= res->end)
			return phys;
	}

	return -1;
}

static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	unsigned int fault_size = PAGE_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PAGE_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
				dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size != dax_region->align)
		return VM_FAULT_SIGBUS;

	phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
}

static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PMD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PMD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
				dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pmd mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "region lacks devmap flags\n");
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pmd_addr < vmf->vma->vm_start ||
			(pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pmd_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn,
			vmf->flags & FAULT_FLAG_WRITE);
}

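/*
 * PUD-sized faults are only serviced when the architecture supports
 * transparent huge PUD mappings; otherwise the stub below forces a
 * fallback to smaller fault granularities.
 */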
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pud_addr = vmf->address & PUD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PUD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PUD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
				dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pud mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "region lacks devmap flags\n");
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pud_addr < vmf->vma->vm_start ||
			(pud_addr + PUD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pud_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn,
			vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	return VM_FAULT_FALLBACK;
}
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

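/*
 * Fault dispatcher: validate the device under dax_read_lock(), route the
 * fault to the PTE/PMD/PUD handler matching @pe_size, and on
 * VM_FAULT_NOPAGE associate the backing pages with the file's mapping so
 * that each page has a valid ->mapping and ->index.
 */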
static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	struct file *filp = vmf->vma->vm_file;
	unsigned long fault_size;
	vm_fault_t rc = VM_FAULT_SIGBUS;
	int id;
	pfn_t pfn;
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
			(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
			vmf->vma->vm_start, vmf->vma->vm_end, pe_size);

	id = dax_read_lock();
	switch (pe_size) {
	case PE_SIZE_PTE:
		fault_size = PAGE_SIZE;
		rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PMD:
		fault_size = PMD_SIZE;
		rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PUD:
		fault_size = PUD_SIZE;
		rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
		break;
	default:
		rc = VM_FAULT_SIGBUS;
	}

	if (rc == VM_FAULT_NOPAGE) {
		unsigned long i;
		pgoff_t pgoff;

		/*
		 * In the device-dax case the only possibility for a
		 * VM_FAULT_NOPAGE result is when device-dax capacity is
		 * mapped. No need to consider the zero page, or racing
		 * conflicting mappings.
		 */
		pgoff = linear_page_index(vmf->vma, vmf->address
				& ~(fault_size - 1));
		for (i = 0; i < fault_size / PAGE_SIZE; i++) {
			struct page *page;

			page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
			if (page->mapping)
				continue;
			page->mapping = filp->f_mapping;
			page->index = pgoff + i;
		}
	}
	dax_read_unlock(id);

	return rc;
}

static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
{
	return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;
	struct dax_region *dax_region = dev_dax->region;

	if (!IS_ALIGNED(addr, dax_region->align))
		return -EINVAL;
	return 0;
}

static unsigned long dev_dax_pagesize(struct vm_area_struct *vma)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;
	struct dax_region *dax_region = dev_dax->region;

	return dax_region->align;
}

static const struct vm_operations_struct dax_vm_ops = {
	.fault = dev_dax_fault,
	.huge_fault = dev_dax_huge_fault,
	.split = dev_dax_split,
	.pagesize = dev_dax_pagesize,
};

static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dev_dax *dev_dax = filp->private_data;
	int rc, id;

	dev_dbg(&dev_dax->dev, "trace\n");

	/*
	 * We lock to check dax_dev liveness and will re-check at
	 * fault time.
	 */
	id = dax_read_lock();
	rc = check_vma(dev_dax, vma, __func__);
	dax_read_unlock(id);
	if (rc)
		return rc;

	vma->vm_ops = &dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

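/*
 * Illustrative userspace usage (a sketch, not part of the driver; the
 * device path and mapping length below are assumptions):
 *
 *	int fd = open("/dev/dax0.0", O_RDWR);
 *	void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
 *			MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 *
 * Only shared mappings are accepted (see check_vma()), and MAP_SYNC is
 * advertised via ->mmap_supported_flags below.
 */
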
/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dev_dax *dev_dax = filp ? filp->private_data : NULL;
	struct dax_region *dax_region;

	if (!dev_dax || addr)
		goto out;

	dax_region = dev_dax->region;
	align = dax_region->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}

static int dax_open(struct inode *inode, struct file *filp)
{
	struct dax_device *dax_dev = inode_dax(inode);
	struct inode *__dax_inode = dax_inode(dax_dev);
	struct dev_dax *dev_dax = dax_get_private(dax_dev);

	dev_dbg(&dev_dax->dev, "trace\n");
	inode->i_mapping = __dax_inode->i_mapping;
	inode->i_mapping->host = __dax_inode;
	filp->f_mapping = inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	filp->private_data = dev_dax;
	inode->i_flags = S_DAX;

	return 0;
}

static int dax_release(struct inode *inode, struct file *filp)
{
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "trace\n");
	return 0;
}

static const struct file_operations dax_fops = {
	.llseek = noop_llseek,
	.owner = THIS_MODULE,
	.open = dax_open,
	.release = dax_release,
	.get_unmapped_area = dax_get_unmapped_area,
	.mmap = dax_mmap,
	.mmap_supported_flags = MAP_SYNC,
};

static void dev_dax_release(struct device *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	struct dax_device *dax_dev = dev_dax->dax_dev;

	if (dev_dax->id >= 0)
		ida_simple_remove(&dax_region->ida, dev_dax->id);
	dax_region_put(dax_region);
	put_dax(dax_dev);
	kfree(dev_dax);
}

static void kill_dev_dax(struct dev_dax *dev_dax)
{
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct inode *inode = dax_inode(dax_dev);

	kill_dax(dax_dev);
	unmap_mapping_range(inode->i_mapping, 0, 0, 1);
}

static void unregister_dev_dax(void *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct inode *inode = dax_inode(dax_dev);
	struct cdev *cdev = inode->i_cdev;

	dev_dbg(dev, "trace\n");

	kill_dev_dax(dev_dax);
	cdev_device_del(cdev, dev);
	put_device(dev);
}

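/*
 * devm_create_dev_dax() - register a device-dax instance on @dax_region.
 * Validates that each of the @count resources honors the region
 * alignment, allocates an instance id when @id is negative, creates the
 * backing dax_device and exposes it as a /dev/daxX.Y character device.
 * Teardown is driven by a devm action (unregister_dev_dax()).
 */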
struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
		int id, struct resource *res, int count)
{
	struct device *parent = dax_region->dev;
	struct dax_device *dax_dev;
	struct dev_dax *dev_dax;
	struct inode *inode;
	struct device *dev;
	struct cdev *cdev;
	int rc, i;

	if (!count)
		return ERR_PTR(-EINVAL);

	dev_dax = kzalloc(struct_size(dev_dax, res, count), GFP_KERNEL);
	if (!dev_dax)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < count; i++) {
		if (!IS_ALIGNED(res[i].start, dax_region->align)
				|| !IS_ALIGNED(resource_size(&res[i]),
					dax_region->align)) {
			rc = -EINVAL;
			break;
		}
		dev_dax->res[i].start = res[i].start;
		dev_dax->res[i].end = res[i].end;
	}

	if (i < count)
		goto err_id;

	if (id < 0) {
		id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
		dev_dax->id = id;
		if (id < 0) {
			rc = id;
			goto err_id;
		}
	} else {
		/* region provider owns @id lifetime */
		dev_dax->id = -1;
	}

	/*
	 * No 'host' or dax_operations since there is no access to this
	 * device outside of mmap of the resulting character device.
	 */
	dax_dev = alloc_dax(dev_dax, NULL, NULL);
	if (!dax_dev) {
		rc = -ENOMEM;
		goto err_dax;
	}

	/* from here on we're committed to teardown via dax_dev_release() */
	dev = &dev_dax->dev;
	device_initialize(dev);

	inode = dax_inode(dax_dev);
	cdev = inode->i_cdev;
	cdev_init(cdev, &dax_fops);
	cdev->owner = parent->driver->owner;

	dev_dax->num_resources = count;
	dev_dax->dax_dev = dax_dev;
	dev_dax->region = dax_region;
	kref_get(&dax_region->kref);

	dev->devt = inode->i_rdev;
	dev->class = dax_class;
	dev->parent = parent;
	dev->groups = dax_attribute_groups;
	dev->release = dev_dax_release;
	dev_set_name(dev, "dax%d.%d", dax_region->id, id);

	rc = cdev_device_add(cdev, dev);
	if (rc) {
		kill_dev_dax(dev_dax);
		put_device(dev);
		return ERR_PTR(rc);
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
	if (rc)
		return ERR_PTR(rc);

	return dev_dax;

err_dax:
	if (dev_dax->id >= 0)
		ida_simple_remove(&dax_region->ida, dev_dax->id);
err_id:
	kfree(dev_dax);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dev_dax);

static int __init dax_init(void)
{
	dax_class = class_create(THIS_MODULE, "dax");
	return PTR_ERR_OR_ZERO(dax_class);
}

static void __exit dax_exit(void)
{
	class_destroy(dax_class);
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_init);
module_exit(dax_exit);