1 /* 2 * Copyright(c) 2016 - 2017 Intel Corporation. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of version 2 of the GNU General Public License as 6 * published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, but 9 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 */ 13 #include <linux/pagemap.h> 14 #include <linux/module.h> 15 #include <linux/device.h> 16 #include <linux/pfn_t.h> 17 #include <linux/cdev.h> 18 #include <linux/slab.h> 19 #include <linux/dax.h> 20 #include <linux/fs.h> 21 #include <linux/mm.h> 22 #include <linux/mman.h> 23 #include "dax-private.h" 24 #include "dax.h" 25 26 static struct class *dax_class; 27 28 /* 29 * Rely on the fact that drvdata is set before the attributes are 30 * registered, and that the attributes are unregistered before drvdata 31 * is cleared to assume that drvdata is always valid. 32 */ 33 static ssize_t id_show(struct device *dev, 34 struct device_attribute *attr, char *buf) 35 { 36 struct dax_region *dax_region = dev_get_drvdata(dev); 37 38 return sprintf(buf, "%d\n", dax_region->id); 39 } 40 static DEVICE_ATTR_RO(id); 41 42 static ssize_t region_size_show(struct device *dev, 43 struct device_attribute *attr, char *buf) 44 { 45 struct dax_region *dax_region = dev_get_drvdata(dev); 46 47 return sprintf(buf, "%llu\n", (unsigned long long) 48 resource_size(&dax_region->res)); 49 } 50 static struct device_attribute dev_attr_region_size = __ATTR(size, 0444, 51 region_size_show, NULL); 52 53 static ssize_t align_show(struct device *dev, 54 struct device_attribute *attr, char *buf) 55 { 56 struct dax_region *dax_region = dev_get_drvdata(dev); 57 58 return sprintf(buf, "%u\n", dax_region->align); 59 } 60 static DEVICE_ATTR_RO(align); 61 62 static struct attribute *dax_region_attributes[] = { 63 &dev_attr_region_size.attr, 64 &dev_attr_align.attr, 65 &dev_attr_id.attr, 66 NULL, 67 }; 68 69 static const struct attribute_group dax_region_attribute_group = { 70 .name = "dax_region", 71 .attrs = dax_region_attributes, 72 }; 73 74 static const struct attribute_group *dax_region_attribute_groups[] = { 75 &dax_region_attribute_group, 76 NULL, 77 }; 78 79 static void dax_region_free(struct kref *kref) 80 { 81 struct dax_region *dax_region; 82 83 dax_region = container_of(kref, struct dax_region, kref); 84 kfree(dax_region); 85 } 86 87 void dax_region_put(struct dax_region *dax_region) 88 { 89 kref_put(&dax_region->kref, dax_region_free); 90 } 91 EXPORT_SYMBOL_GPL(dax_region_put); 92 93 static void dax_region_unregister(void *region) 94 { 95 struct dax_region *dax_region = region; 96 97 sysfs_remove_groups(&dax_region->dev->kobj, 98 dax_region_attribute_groups); 99 dax_region_put(dax_region); 100 } 101 102 struct dax_region *alloc_dax_region(struct device *parent, int region_id, 103 struct resource *res, unsigned int align, void *addr, 104 unsigned long pfn_flags) 105 { 106 struct dax_region *dax_region; 107 108 /* 109 * The DAX core assumes that it can store its private data in 110 * parent->driver_data. This WARN is a reminder / safeguard for 111 * developers of device-dax drivers. 112 */ 113 if (dev_get_drvdata(parent)) { 114 dev_WARN(parent, "dax core failed to setup private data\n"); 115 return NULL; 116 } 117 118 if (!IS_ALIGNED(res->start, align) 119 || !IS_ALIGNED(resource_size(res), align)) 120 return NULL; 121 122 dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL); 123 if (!dax_region) 124 return NULL; 125 126 dev_set_drvdata(parent, dax_region); 127 memcpy(&dax_region->res, res, sizeof(*res)); 128 dax_region->pfn_flags = pfn_flags; 129 kref_init(&dax_region->kref); 130 dax_region->id = region_id; 131 ida_init(&dax_region->ida); 132 dax_region->align = align; 133 dax_region->dev = parent; 134 dax_region->base = addr; 135 if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) { 136 kfree(dax_region); 137 return NULL; 138 } 139 140 kref_get(&dax_region->kref); 141 if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region)) 142 return NULL; 143 return dax_region; 144 } 145 EXPORT_SYMBOL_GPL(alloc_dax_region); 146 147 static struct dev_dax *to_dev_dax(struct device *dev) 148 { 149 return container_of(dev, struct dev_dax, dev); 150 } 151 152 static ssize_t size_show(struct device *dev, 153 struct device_attribute *attr, char *buf) 154 { 155 struct dev_dax *dev_dax = to_dev_dax(dev); 156 unsigned long long size = 0; 157 int i; 158 159 for (i = 0; i < dev_dax->num_resources; i++) 160 size += resource_size(&dev_dax->res[i]); 161 162 return sprintf(buf, "%llu\n", size); 163 } 164 static DEVICE_ATTR_RO(size); 165 166 static struct attribute *dev_dax_attributes[] = { 167 &dev_attr_size.attr, 168 NULL, 169 }; 170 171 static const struct attribute_group dev_dax_attribute_group = { 172 .attrs = dev_dax_attributes, 173 }; 174 175 static const struct attribute_group *dax_attribute_groups[] = { 176 &dev_dax_attribute_group, 177 NULL, 178 }; 179 180 static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma, 181 const char *func) 182 { 183 struct dax_region *dax_region = dev_dax->region; 184 struct device *dev = &dev_dax->dev; 185 unsigned long mask; 186 187 if (!dax_alive(dev_dax->dax_dev)) 188 return -ENXIO; 189 190 /* prevent private mappings from being established */ 191 if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) { 192 dev_info_ratelimited(dev, 193 "%s: %s: fail, attempted private mapping\n", 194 current->comm, func); 195 return -EINVAL; 196 } 197 198 mask = dax_region->align - 1; 199 if (vma->vm_start & mask || vma->vm_end & mask) { 200 dev_info_ratelimited(dev, 201 "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n", 202 current->comm, func, vma->vm_start, vma->vm_end, 203 mask); 204 return -EINVAL; 205 } 206 207 if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV 208 && (vma->vm_flags & VM_DONTCOPY) == 0) { 209 dev_info_ratelimited(dev, 210 "%s: %s: fail, dax range requires MADV_DONTFORK\n", 211 current->comm, func); 212 return -EINVAL; 213 } 214 215 if (!vma_is_dax(vma)) { 216 dev_info_ratelimited(dev, 217 "%s: %s: fail, vma is not DAX capable\n", 218 current->comm, func); 219 return -EINVAL; 220 } 221 222 return 0; 223 } 224 225 /* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */ 226 __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff, 227 unsigned long size) 228 { 229 struct resource *res; 230 /* gcc-4.6.3-nolibc for i386 complains that this is uninitialized */ 231 phys_addr_t uninitialized_var(phys); 232 int i; 233 234 for (i = 0; i < dev_dax->num_resources; i++) { 235 res = &dev_dax->res[i]; 236 phys = pgoff * PAGE_SIZE + res->start; 237 if (phys >= res->start && phys <= res->end) 238 break; 239 pgoff -= PHYS_PFN(resource_size(res)); 240 } 241 242 if (i < dev_dax->num_resources) { 243 res = &dev_dax->res[i]; 244 if (phys + size - 1 <= res->end) 245 return phys; 246 } 247 248 return -1; 249 } 250 251 static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax, 252 struct vm_fault *vmf, pfn_t *pfn) 253 { 254 struct device *dev = &dev_dax->dev; 255 struct dax_region *dax_region; 256 phys_addr_t phys; 257 unsigned int fault_size = PAGE_SIZE; 258 259 if (check_vma(dev_dax, vmf->vma, __func__)) 260 return VM_FAULT_SIGBUS; 261 262 dax_region = dev_dax->region; 263 if (dax_region->align > PAGE_SIZE) { 264 dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n", 265 dax_region->align, fault_size); 266 return VM_FAULT_SIGBUS; 267 } 268 269 if (fault_size != dax_region->align) 270 return VM_FAULT_SIGBUS; 271 272 phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE); 273 if (phys == -1) { 274 dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff); 275 return VM_FAULT_SIGBUS; 276 } 277 278 *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags); 279 280 return vmf_insert_mixed(vmf->vma, vmf->address, *pfn); 281 } 282 283 static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax, 284 struct vm_fault *vmf, pfn_t *pfn) 285 { 286 unsigned long pmd_addr = vmf->address & PMD_MASK; 287 struct device *dev = &dev_dax->dev; 288 struct dax_region *dax_region; 289 phys_addr_t phys; 290 pgoff_t pgoff; 291 unsigned int fault_size = PMD_SIZE; 292 293 if (check_vma(dev_dax, vmf->vma, __func__)) 294 return VM_FAULT_SIGBUS; 295 296 dax_region = dev_dax->region; 297 if (dax_region->align > PMD_SIZE) { 298 dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n", 299 dax_region->align, fault_size); 300 return VM_FAULT_SIGBUS; 301 } 302 303 /* dax pmd mappings require pfn_t_devmap() */ 304 if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) { 305 dev_dbg(dev, "region lacks devmap flags\n"); 306 return VM_FAULT_SIGBUS; 307 } 308 309 if (fault_size < dax_region->align) 310 return VM_FAULT_SIGBUS; 311 else if (fault_size > dax_region->align) 312 return VM_FAULT_FALLBACK; 313 314 /* if we are outside of the VMA */ 315 if (pmd_addr < vmf->vma->vm_start || 316 (pmd_addr + PMD_SIZE) > vmf->vma->vm_end) 317 return VM_FAULT_SIGBUS; 318 319 pgoff = linear_page_index(vmf->vma, pmd_addr); 320 phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE); 321 if (phys == -1) { 322 dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff); 323 return VM_FAULT_SIGBUS; 324 } 325 326 *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags); 327 328 return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn, 329 vmf->flags & FAULT_FLAG_WRITE); 330 } 331 332 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 333 static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax, 334 struct vm_fault *vmf, pfn_t *pfn) 335 { 336 unsigned long pud_addr = vmf->address & PUD_MASK; 337 struct device *dev = &dev_dax->dev; 338 struct dax_region *dax_region; 339 phys_addr_t phys; 340 pgoff_t pgoff; 341 unsigned int fault_size = PUD_SIZE; 342 343 344 if (check_vma(dev_dax, vmf->vma, __func__)) 345 return VM_FAULT_SIGBUS; 346 347 dax_region = dev_dax->region; 348 if (dax_region->align > PUD_SIZE) { 349 dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n", 350 dax_region->align, fault_size); 351 return VM_FAULT_SIGBUS; 352 } 353 354 /* dax pud mappings require pfn_t_devmap() */ 355 if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) { 356 dev_dbg(dev, "region lacks devmap flags\n"); 357 return VM_FAULT_SIGBUS; 358 } 359 360 if (fault_size < dax_region->align) 361 return VM_FAULT_SIGBUS; 362 else if (fault_size > dax_region->align) 363 return VM_FAULT_FALLBACK; 364 365 /* if we are outside of the VMA */ 366 if (pud_addr < vmf->vma->vm_start || 367 (pud_addr + PUD_SIZE) > vmf->vma->vm_end) 368 return VM_FAULT_SIGBUS; 369 370 pgoff = linear_page_index(vmf->vma, pud_addr); 371 phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE); 372 if (phys == -1) { 373 dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff); 374 return VM_FAULT_SIGBUS; 375 } 376 377 *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags); 378 379 return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn, 380 vmf->flags & FAULT_FLAG_WRITE); 381 } 382 #else 383 static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax, 384 struct vm_fault *vmf, pfn_t *pfn) 385 { 386 return VM_FAULT_FALLBACK; 387 } 388 #endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 389 390 static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf, 391 enum page_entry_size pe_size) 392 { 393 struct file *filp = vmf->vma->vm_file; 394 unsigned long fault_size; 395 vm_fault_t rc = VM_FAULT_SIGBUS; 396 int id; 397 pfn_t pfn; 398 struct dev_dax *dev_dax = filp->private_data; 399 400 dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm, 401 (vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read", 402 vmf->vma->vm_start, vmf->vma->vm_end, pe_size); 403 404 id = dax_read_lock(); 405 switch (pe_size) { 406 case PE_SIZE_PTE: 407 fault_size = PAGE_SIZE; 408 rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn); 409 break; 410 case PE_SIZE_PMD: 411 fault_size = PMD_SIZE; 412 rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn); 413 break; 414 case PE_SIZE_PUD: 415 fault_size = PUD_SIZE; 416 rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn); 417 break; 418 default: 419 rc = VM_FAULT_SIGBUS; 420 } 421 422 if (rc == VM_FAULT_NOPAGE) { 423 unsigned long i; 424 pgoff_t pgoff; 425 426 /* 427 * In the device-dax case the only possibility for a 428 * VM_FAULT_NOPAGE result is when device-dax capacity is 429 * mapped. No need to consider the zero page, or racing 430 * conflicting mappings. 431 */ 432 pgoff = linear_page_index(vmf->vma, vmf->address 433 & ~(fault_size - 1)); 434 for (i = 0; i < fault_size / PAGE_SIZE; i++) { 435 struct page *page; 436 437 page = pfn_to_page(pfn_t_to_pfn(pfn) + i); 438 if (page->mapping) 439 continue; 440 page->mapping = filp->f_mapping; 441 page->index = pgoff + i; 442 } 443 } 444 dax_read_unlock(id); 445 446 return rc; 447 } 448 449 static vm_fault_t dev_dax_fault(struct vm_fault *vmf) 450 { 451 return dev_dax_huge_fault(vmf, PE_SIZE_PTE); 452 } 453 454 static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr) 455 { 456 struct file *filp = vma->vm_file; 457 struct dev_dax *dev_dax = filp->private_data; 458 struct dax_region *dax_region = dev_dax->region; 459 460 if (!IS_ALIGNED(addr, dax_region->align)) 461 return -EINVAL; 462 return 0; 463 } 464 465 static unsigned long dev_dax_pagesize(struct vm_area_struct *vma) 466 { 467 struct file *filp = vma->vm_file; 468 struct dev_dax *dev_dax = filp->private_data; 469 struct dax_region *dax_region = dev_dax->region; 470 471 return dax_region->align; 472 } 473 474 static const struct vm_operations_struct dax_vm_ops = { 475 .fault = dev_dax_fault, 476 .huge_fault = dev_dax_huge_fault, 477 .split = dev_dax_split, 478 .pagesize = dev_dax_pagesize, 479 }; 480 481 static int dax_mmap(struct file *filp, struct vm_area_struct *vma) 482 { 483 struct dev_dax *dev_dax = filp->private_data; 484 int rc, id; 485 486 dev_dbg(&dev_dax->dev, "trace\n"); 487 488 /* 489 * We lock to check dax_dev liveness and will re-check at 490 * fault time. 491 */ 492 id = dax_read_lock(); 493 rc = check_vma(dev_dax, vma, __func__); 494 dax_read_unlock(id); 495 if (rc) 496 return rc; 497 498 vma->vm_ops = &dax_vm_ops; 499 vma->vm_flags |= VM_HUGEPAGE; 500 return 0; 501 } 502 503 /* return an unmapped area aligned to the dax region specified alignment */ 504 static unsigned long dax_get_unmapped_area(struct file *filp, 505 unsigned long addr, unsigned long len, unsigned long pgoff, 506 unsigned long flags) 507 { 508 unsigned long off, off_end, off_align, len_align, addr_align, align; 509 struct dev_dax *dev_dax = filp ? filp->private_data : NULL; 510 struct dax_region *dax_region; 511 512 if (!dev_dax || addr) 513 goto out; 514 515 dax_region = dev_dax->region; 516 align = dax_region->align; 517 off = pgoff << PAGE_SHIFT; 518 off_end = off + len; 519 off_align = round_up(off, align); 520 521 if ((off_end <= off_align) || ((off_end - off_align) < align)) 522 goto out; 523 524 len_align = len + align; 525 if ((off + len_align) < off) 526 goto out; 527 528 addr_align = current->mm->get_unmapped_area(filp, addr, len_align, 529 pgoff, flags); 530 if (!IS_ERR_VALUE(addr_align)) { 531 addr_align += (off - addr_align) & (align - 1); 532 return addr_align; 533 } 534 out: 535 return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); 536 } 537 538 static int dax_open(struct inode *inode, struct file *filp) 539 { 540 struct dax_device *dax_dev = inode_dax(inode); 541 struct inode *__dax_inode = dax_inode(dax_dev); 542 struct dev_dax *dev_dax = dax_get_private(dax_dev); 543 544 dev_dbg(&dev_dax->dev, "trace\n"); 545 inode->i_mapping = __dax_inode->i_mapping; 546 inode->i_mapping->host = __dax_inode; 547 filp->f_mapping = inode->i_mapping; 548 filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping); 549 filp->private_data = dev_dax; 550 inode->i_flags = S_DAX; 551 552 return 0; 553 } 554 555 static int dax_release(struct inode *inode, struct file *filp) 556 { 557 struct dev_dax *dev_dax = filp->private_data; 558 559 dev_dbg(&dev_dax->dev, "trace\n"); 560 return 0; 561 } 562 563 static const struct file_operations dax_fops = { 564 .llseek = noop_llseek, 565 .owner = THIS_MODULE, 566 .open = dax_open, 567 .release = dax_release, 568 .get_unmapped_area = dax_get_unmapped_area, 569 .mmap = dax_mmap, 570 .mmap_supported_flags = MAP_SYNC, 571 }; 572 573 static void dev_dax_release(struct device *dev) 574 { 575 struct dev_dax *dev_dax = to_dev_dax(dev); 576 struct dax_region *dax_region = dev_dax->region; 577 struct dax_device *dax_dev = dev_dax->dax_dev; 578 579 if (dev_dax->id >= 0) 580 ida_simple_remove(&dax_region->ida, dev_dax->id); 581 dax_region_put(dax_region); 582 put_dax(dax_dev); 583 kfree(dev_dax); 584 } 585 586 static void kill_dev_dax(struct dev_dax *dev_dax) 587 { 588 struct dax_device *dax_dev = dev_dax->dax_dev; 589 struct inode *inode = dax_inode(dax_dev); 590 591 kill_dax(dax_dev); 592 unmap_mapping_range(inode->i_mapping, 0, 0, 1); 593 } 594 595 static void unregister_dev_dax(void *dev) 596 { 597 struct dev_dax *dev_dax = to_dev_dax(dev); 598 struct dax_device *dax_dev = dev_dax->dax_dev; 599 struct inode *inode = dax_inode(dax_dev); 600 struct cdev *cdev = inode->i_cdev; 601 602 dev_dbg(dev, "trace\n"); 603 604 kill_dev_dax(dev_dax); 605 cdev_device_del(cdev, dev); 606 put_device(dev); 607 } 608 609 struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, 610 int id, struct resource *res, int count) 611 { 612 struct device *parent = dax_region->dev; 613 struct dax_device *dax_dev; 614 struct dev_dax *dev_dax; 615 struct inode *inode; 616 struct device *dev; 617 struct cdev *cdev; 618 int rc, i; 619 620 if (!count) 621 return ERR_PTR(-EINVAL); 622 623 dev_dax = kzalloc(struct_size(dev_dax, res, count), GFP_KERNEL); 624 if (!dev_dax) 625 return ERR_PTR(-ENOMEM); 626 627 for (i = 0; i < count; i++) { 628 if (!IS_ALIGNED(res[i].start, dax_region->align) 629 || !IS_ALIGNED(resource_size(&res[i]), 630 dax_region->align)) { 631 rc = -EINVAL; 632 break; 633 } 634 dev_dax->res[i].start = res[i].start; 635 dev_dax->res[i].end = res[i].end; 636 } 637 638 if (i < count) 639 goto err_id; 640 641 if (id < 0) { 642 id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL); 643 dev_dax->id = id; 644 if (id < 0) { 645 rc = id; 646 goto err_id; 647 } 648 } else { 649 /* region provider owns @id lifetime */ 650 dev_dax->id = -1; 651 } 652 653 /* 654 * No 'host' or dax_operations since there is no access to this 655 * device outside of mmap of the resulting character device. 656 */ 657 dax_dev = alloc_dax(dev_dax, NULL, NULL); 658 if (!dax_dev) { 659 rc = -ENOMEM; 660 goto err_dax; 661 } 662 663 /* from here on we're committed to teardown via dax_dev_release() */ 664 dev = &dev_dax->dev; 665 device_initialize(dev); 666 667 inode = dax_inode(dax_dev); 668 cdev = inode->i_cdev; 669 cdev_init(cdev, &dax_fops); 670 cdev->owner = parent->driver->owner; 671 672 dev_dax->num_resources = count; 673 dev_dax->dax_dev = dax_dev; 674 dev_dax->region = dax_region; 675 kref_get(&dax_region->kref); 676 677 dev->devt = inode->i_rdev; 678 dev->class = dax_class; 679 dev->parent = parent; 680 dev->groups = dax_attribute_groups; 681 dev->release = dev_dax_release; 682 dev_set_name(dev, "dax%d.%d", dax_region->id, id); 683 684 rc = cdev_device_add(cdev, dev); 685 if (rc) { 686 kill_dev_dax(dev_dax); 687 put_device(dev); 688 return ERR_PTR(rc); 689 } 690 691 rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev); 692 if (rc) 693 return ERR_PTR(rc); 694 695 return dev_dax; 696 697 err_dax: 698 if (dev_dax->id >= 0) 699 ida_simple_remove(&dax_region->ida, dev_dax->id); 700 err_id: 701 kfree(dev_dax); 702 703 return ERR_PTR(rc); 704 } 705 EXPORT_SYMBOL_GPL(devm_create_dev_dax); 706 707 static int __init dax_init(void) 708 { 709 dax_class = class_create(THIS_MODULE, "dax"); 710 return PTR_ERR_OR_ZERO(dax_class); 711 } 712 713 static void __exit dax_exit(void) 714 { 715 class_destroy(dax_class); 716 } 717 718 MODULE_AUTHOR("Intel Corporation"); 719 MODULE_LICENSE("GPL v2"); 720 subsys_initcall(dax_init); 721 module_exit(dax_exit); 722