/*
 * Copyright(c) 2016 - 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include "dax-private.h"
#include "dax.h"

static struct class *dax_class;

/*
 * Rely on the fact that drvdata is set before the attributes are
 * registered, and that the attributes are unregistered before drvdata
 * is cleared to assume that drvdata is always valid.
 */
static ssize_t id_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_region *dax_region = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", dax_region->id);
}
static DEVICE_ATTR_RO(id);

static ssize_t region_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_region *dax_region = dev_get_drvdata(dev);

        return sprintf(buf, "%llu\n", (unsigned long long)
                        resource_size(&dax_region->res));
}
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
                region_size_show, NULL);

static ssize_t align_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_region *dax_region = dev_get_drvdata(dev);

        return sprintf(buf, "%u\n", dax_region->align);
}
static DEVICE_ATTR_RO(align);

static struct attribute *dax_region_attributes[] = {
        &dev_attr_region_size.attr,
        &dev_attr_align.attr,
        &dev_attr_id.attr,
        NULL,
};

static const struct attribute_group dax_region_attribute_group = {
        .name = "dax_region",
        .attrs = dax_region_attributes,
};

static const struct attribute_group *dax_region_attribute_groups[] = {
        &dax_region_attribute_group,
        NULL,
};

static void dax_region_free(struct kref *kref)
{
        struct dax_region *dax_region;

        dax_region = container_of(kref, struct dax_region, kref);
        kfree(dax_region);
}

void dax_region_put(struct dax_region *dax_region)
{
        kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);

static void dax_region_unregister(void *region)
{
        struct dax_region *dax_region = region;

        sysfs_remove_groups(&dax_region->dev->kobj,
                        dax_region_attribute_groups);
        dax_region_put(dax_region);
}

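/**
 * alloc_dax_region() - allocate and register a dax_region for @parent
 * @parent: parent device; its drvdata must be unset, the dax core claims it
 * @region_id: id used to name child devices ("dax%d.%d")
 * @res: physical address range of the region, aligned to @align
 * @align: required alignment for device resources and mappings in the region
 * @addr: base address of the region, cached in dax_region->base
 * @pfn_flags: PFN_DEV / PFN_MAP flags enforced by the fault handlers
 *
 * Returns the new region, or NULL on failure. An extra reference is held
 * by the devm-managed dax_region_unregister() action registered on @parent.
 */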
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
                struct resource *res, unsigned int align, void *addr,
                unsigned long pfn_flags)
{
        struct dax_region *dax_region;

        /*
         * The DAX core assumes that it can store its private data in
         * parent->driver_data. This WARN is a reminder / safeguard for
         * developers of device-dax drivers.
         */
        if (dev_get_drvdata(parent)) {
                dev_WARN(parent, "dax core failed to setup private data\n");
                return NULL;
        }

        if (!IS_ALIGNED(res->start, align)
                        || !IS_ALIGNED(resource_size(res), align))
                return NULL;

        dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
        if (!dax_region)
                return NULL;

        dev_set_drvdata(parent, dax_region);
        memcpy(&dax_region->res, res, sizeof(*res));
        dax_region->pfn_flags = pfn_flags;
        kref_init(&dax_region->kref);
        dax_region->id = region_id;
        ida_init(&dax_region->ida);
        dax_region->align = align;
        dax_region->dev = parent;
        dax_region->base = addr;
        if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
                kfree(dax_region);
                return NULL;
        }

        kref_get(&dax_region->kref);
        if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
                return NULL;
        return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);

static struct dev_dax *to_dev_dax(struct device *dev)
{
        return container_of(dev, struct dev_dax, dev);
}

static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dev_dax *dev_dax = to_dev_dax(dev);
        unsigned long long size = 0;
        int i;

        for (i = 0; i < dev_dax->num_resources; i++)
                size += resource_size(&dev_dax->res[i]);

        return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static struct attribute *dev_dax_attributes[] = {
        &dev_attr_size.attr,
        NULL,
};

static const struct attribute_group dev_dax_attribute_group = {
        .attrs = dev_dax_attributes,
};

static const struct attribute_group *dax_attribute_groups[] = {
        &dev_dax_attribute_group,
        NULL,
};

static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
                const char *func)
{
        struct dax_region *dax_region = dev_dax->region;
        struct device *dev = &dev_dax->dev;
        unsigned long mask;

        if (!dax_alive(dev_dax->dax_dev))
                return -ENXIO;

        /* prevent private mappings from being established */
        if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
                dev_info_ratelimited(dev,
                                "%s: %s: fail, attempted private mapping\n",
                                current->comm, func);
                return -EINVAL;
        }

        mask = dax_region->align - 1;
        if (vma->vm_start & mask || vma->vm_end & mask) {
                dev_info_ratelimited(dev,
                                "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
                                current->comm, func, vma->vm_start, vma->vm_end,
                                mask);
                return -EINVAL;
        }

        if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
                        && (vma->vm_flags & VM_DONTCOPY) == 0) {
                dev_info_ratelimited(dev,
                                "%s: %s: fail, dax range requires MADV_DONTFORK\n",
                                current->comm, func);
                return -EINVAL;
        }

        if (!vma_is_dax(vma)) {
                dev_info_ratelimited(dev,
                                "%s: %s: fail, vma is not DAX capable\n",
                                current->comm, func);
                return -EINVAL;
        }

        return 0;
}

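/*
 * Translate a page offset within the device into a physical address by
 * walking the device's resource ranges. @size is the span the caller
 * intends to map (PAGE/PMD/PUD size) so the result can be checked to fit
 * entirely within a single range; returns -1 when no range matches.
 */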
/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
                unsigned long size)
{
        struct resource *res;
        /* gcc-4.6.3-nolibc for i386 complains that this is uninitialized */
        phys_addr_t uninitialized_var(phys);
        int i;

        for (i = 0; i < dev_dax->num_resources; i++) {
                res = &dev_dax->res[i];
                phys = pgoff * PAGE_SIZE + res->start;
                if (phys >= res->start && phys <= res->end)
                        break;
                pgoff -= PHYS_PFN(resource_size(res));
        }

        if (i < dev_dax->num_resources) {
                res = &dev_dax->res[i];
                if (phys + size - 1 <= res->end)
                        return phys;
        }

        return -1;
}

static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
{
        struct device *dev = &dev_dax->dev;
        struct dax_region *dax_region;
        int rc = VM_FAULT_SIGBUS;
        phys_addr_t phys;
        pfn_t pfn;
        unsigned int fault_size = PAGE_SIZE;

        if (check_vma(dev_dax, vmf->vma, __func__))
                return VM_FAULT_SIGBUS;

        dax_region = dev_dax->region;
        if (dax_region->align > PAGE_SIZE) {
                dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
                        dax_region->align, fault_size);
                return VM_FAULT_SIGBUS;
        }

        if (fault_size != dax_region->align)
                return VM_FAULT_SIGBUS;

        phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
        if (phys == -1) {
                dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
                return VM_FAULT_SIGBUS;
        }

        pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

        rc = vm_insert_mixed(vmf->vma, vmf->address, pfn);

        if (rc == -ENOMEM)
                return VM_FAULT_OOM;
        if (rc < 0 && rc != -EBUSY)
                return VM_FAULT_SIGBUS;

        return VM_FAULT_NOPAGE;
}

static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
{
        unsigned long pmd_addr = vmf->address & PMD_MASK;
        struct device *dev = &dev_dax->dev;
        struct dax_region *dax_region;
        phys_addr_t phys;
        pgoff_t pgoff;
        pfn_t pfn;
        unsigned int fault_size = PMD_SIZE;

        if (check_vma(dev_dax, vmf->vma, __func__))
                return VM_FAULT_SIGBUS;

        dax_region = dev_dax->region;
        if (dax_region->align > PMD_SIZE) {
                dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
                        dax_region->align, fault_size);
                return VM_FAULT_SIGBUS;
        }

        /* dax pmd mappings require pfn_t_devmap() */
        if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
                dev_dbg(dev, "region lacks devmap flags\n");
                return VM_FAULT_SIGBUS;
        }

        if (fault_size < dax_region->align)
                return VM_FAULT_SIGBUS;
        else if (fault_size > dax_region->align)
                return VM_FAULT_FALLBACK;

        /* if we are outside of the VMA */
        if (pmd_addr < vmf->vma->vm_start ||
                        (pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
                return VM_FAULT_SIGBUS;

        pgoff = linear_page_index(vmf->vma, pmd_addr);
        phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
        if (phys == -1) {
                dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
                return VM_FAULT_SIGBUS;
        }

        pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

        return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, pfn,
                        vmf->flags & FAULT_FLAG_WRITE);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
{
        unsigned long pud_addr = vmf->address & PUD_MASK;
        struct device *dev = &dev_dax->dev;
        struct dax_region *dax_region;
        phys_addr_t phys;
        pgoff_t pgoff;
        pfn_t pfn;
        unsigned int fault_size = PUD_SIZE;

        if (check_vma(dev_dax, vmf->vma, __func__))
                return VM_FAULT_SIGBUS;

        dax_region = dev_dax->region;
        if (dax_region->align > PUD_SIZE) {
                dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
                        dax_region->align, fault_size);
                return VM_FAULT_SIGBUS;
        }

        /* dax pud mappings require pfn_t_devmap() */
        if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
                dev_dbg(dev, "region lacks devmap flags\n");
                return VM_FAULT_SIGBUS;
        }

        if (fault_size < dax_region->align)
                return VM_FAULT_SIGBUS;
        else if (fault_size > dax_region->align)
                return VM_FAULT_FALLBACK;

        /* if we are outside of the VMA */
        if (pud_addr < vmf->vma->vm_start ||
                        (pud_addr + PUD_SIZE) > vmf->vma->vm_end)
                return VM_FAULT_SIGBUS;

        pgoff = linear_page_index(vmf->vma, pud_addr);
        phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
        if (phys == -1) {
                dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
                return VM_FAULT_SIGBUS;
        }

        pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

        return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, pfn,
                        vmf->flags & FAULT_FLAG_WRITE);
}
#else
static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
{
        return VM_FAULT_FALLBACK;
}
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

static int dev_dax_huge_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size)
{
        int rc, id;
        struct file *filp = vmf->vma->vm_file;
        struct dev_dax *dev_dax = filp->private_data;

        dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
                        (vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
                        vmf->vma->vm_start, vmf->vma->vm_end, pe_size);

        id = dax_read_lock();
        switch (pe_size) {
        case PE_SIZE_PTE:
                rc = __dev_dax_pte_fault(dev_dax, vmf);
                break;
        case PE_SIZE_PMD:
                rc = __dev_dax_pmd_fault(dev_dax, vmf);
                break;
        case PE_SIZE_PUD:
                rc = __dev_dax_pud_fault(dev_dax, vmf);
                break;
        default:
                rc = VM_FAULT_SIGBUS;
        }
        dax_read_unlock(id);

        return rc;
}

static int dev_dax_fault(struct vm_fault *vmf)
{
        return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
{
        struct file *filp = vma->vm_file;
        struct dev_dax *dev_dax = filp->private_data;
        struct dax_region *dax_region = dev_dax->region;

        if (!IS_ALIGNED(addr, dax_region->align))
                return -EINVAL;
        return 0;
}

static unsigned long dev_dax_pagesize(struct vm_area_struct *vma)
{
        struct file *filp = vma->vm_file;
        struct dev_dax *dev_dax = filp->private_data;
        struct dax_region *dax_region = dev_dax->region;

        return dax_region->align;
}

static const struct vm_operations_struct dax_vm_ops = {
        .fault = dev_dax_fault,
        .huge_fault = dev_dax_huge_fault,
        .split = dev_dax_split,
        .pagesize = dev_dax_pagesize,
};

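/*
 * Userspace accesses a device-dax instance by mmap()ing its character
 * device node; check_vma() enforces a shared mapping aligned to the
 * region alignment, and MAP_SYNC is additionally accepted via
 * dax_fops.mmap_supported_flags. A minimal usage sketch, with the device
 * path and length purely illustrative:
 *
 *	int fd = open("/dev/dax0.0", O_RDWR);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * where len is a multiple of the region alignment.
 */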
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct dev_dax *dev_dax = filp->private_data;
        int rc, id;

        dev_dbg(&dev_dax->dev, "trace\n");

        /*
         * We lock to check dax_dev liveness and will re-check at
         * fault time.
         */
        id = dax_read_lock();
        rc = check_vma(dev_dax, vma, __func__);
        dax_read_unlock(id);
        if (rc)
                return rc;

        vma->vm_ops = &dax_vm_ops;
        vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
        return 0;
}

/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_get_unmapped_area(struct file *filp,
                unsigned long addr, unsigned long len, unsigned long pgoff,
                unsigned long flags)
{
        unsigned long off, off_end, off_align, len_align, addr_align, align;
        struct dev_dax *dev_dax = filp ? filp->private_data : NULL;
        struct dax_region *dax_region;

        if (!dev_dax || addr)
                goto out;

        dax_region = dev_dax->region;
        align = dax_region->align;
        off = pgoff << PAGE_SHIFT;
        off_end = off + len;
        off_align = round_up(off, align);

        if ((off_end <= off_align) || ((off_end - off_align) < align))
                goto out;

        len_align = len + align;
        if ((off + len_align) < off)
                goto out;

        addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
                        pgoff, flags);
        if (!IS_ERR_VALUE(addr_align)) {
                addr_align += (off - addr_align) & (align - 1);
                return addr_align;
        }
 out:
        return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}

static int dax_open(struct inode *inode, struct file *filp)
{
        struct dax_device *dax_dev = inode_dax(inode);
        struct inode *__dax_inode = dax_inode(dax_dev);
        struct dev_dax *dev_dax = dax_get_private(dax_dev);

        dev_dbg(&dev_dax->dev, "trace\n");
        inode->i_mapping = __dax_inode->i_mapping;
        inode->i_mapping->host = __dax_inode;
        filp->f_mapping = inode->i_mapping;
        filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
        filp->private_data = dev_dax;
        inode->i_flags = S_DAX;

        return 0;
}

static int dax_release(struct inode *inode, struct file *filp)
{
        struct dev_dax *dev_dax = filp->private_data;

        dev_dbg(&dev_dax->dev, "trace\n");
        return 0;
}

static const struct file_operations dax_fops = {
        .llseek = noop_llseek,
        .owner = THIS_MODULE,
        .open = dax_open,
        .release = dax_release,
        .get_unmapped_area = dax_get_unmapped_area,
        .mmap = dax_mmap,
        .mmap_supported_flags = MAP_SYNC,
};

static void dev_dax_release(struct device *dev)
{
        struct dev_dax *dev_dax = to_dev_dax(dev);
        struct dax_region *dax_region = dev_dax->region;
        struct dax_device *dax_dev = dev_dax->dax_dev;

        if (dev_dax->id >= 0)
                ida_simple_remove(&dax_region->ida, dev_dax->id);
        dax_region_put(dax_region);
        put_dax(dax_dev);
        kfree(dev_dax);
}

static void kill_dev_dax(struct dev_dax *dev_dax)
{
        struct dax_device *dax_dev = dev_dax->dax_dev;
        struct inode *inode = dax_inode(dax_dev);

        kill_dax(dax_dev);
        unmap_mapping_range(inode->i_mapping, 0, 0, 1);
}

static void unregister_dev_dax(void *dev)
{
        struct dev_dax *dev_dax = to_dev_dax(dev);
        struct dax_device *dax_dev = dev_dax->dax_dev;
        struct inode *inode = dax_inode(dax_dev);
        struct cdev *cdev = inode->i_cdev;

        dev_dbg(dev, "trace\n");

        kill_dev_dax(dev_dax);
        cdev_device_del(cdev, dev);
        put_device(dev);
}

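/**
 * devm_create_dev_dax() - create and register a dev_dax character device
 * @dax_region: parent region for the new device
 * @id: device id, or a negative value to allocate one from the region ida
 * @res: array of physical address ranges backing the device
 * @count: number of entries in @res; each must honor the region alignment
 *
 * Returns the new dev_dax or an ERR_PTR(). The device is torn down by the
 * devm-managed unregister_dev_dax() action registered on the region's
 * parent device.
 */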
struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
                int id, struct resource *res, int count)
{
        struct device *parent = dax_region->dev;
        struct dax_device *dax_dev;
        struct dev_dax *dev_dax;
        struct inode *inode;
        struct device *dev;
        struct cdev *cdev;
        int rc, i;

        if (!count)
                return ERR_PTR(-EINVAL);

        dev_dax = kzalloc(struct_size(dev_dax, res, count), GFP_KERNEL);
        if (!dev_dax)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < count; i++) {
                if (!IS_ALIGNED(res[i].start, dax_region->align)
                                || !IS_ALIGNED(resource_size(&res[i]),
                                        dax_region->align)) {
                        rc = -EINVAL;
                        break;
                }
                dev_dax->res[i].start = res[i].start;
                dev_dax->res[i].end = res[i].end;
        }

        if (i < count)
                goto err_id;

        if (id < 0) {
                id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
                dev_dax->id = id;
                if (id < 0) {
                        rc = id;
                        goto err_id;
                }
        } else {
                /* region provider owns @id lifetime */
                dev_dax->id = -1;
        }

        /*
         * No 'host' or dax_operations since there is no access to this
         * device outside of mmap of the resulting character device.
         */
        dax_dev = alloc_dax(dev_dax, NULL, NULL);
        if (!dax_dev) {
                rc = -ENOMEM;
                goto err_dax;
        }

        /* from here on we're committed to teardown via dax_dev_release() */
        dev = &dev_dax->dev;
        device_initialize(dev);

        inode = dax_inode(dax_dev);
        cdev = inode->i_cdev;
        cdev_init(cdev, &dax_fops);
        cdev->owner = parent->driver->owner;

        dev_dax->num_resources = count;
        dev_dax->dax_dev = dax_dev;
        dev_dax->region = dax_region;
        kref_get(&dax_region->kref);

        dev->devt = inode->i_rdev;
        dev->class = dax_class;
        dev->parent = parent;
        dev->groups = dax_attribute_groups;
        dev->release = dev_dax_release;
        dev_set_name(dev, "dax%d.%d", dax_region->id, id);

        rc = cdev_device_add(cdev, dev);
        if (rc) {
                kill_dev_dax(dev_dax);
                put_device(dev);
                return ERR_PTR(rc);
        }

        rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
        if (rc)
                return ERR_PTR(rc);

        return dev_dax;

 err_dax:
        if (dev_dax->id >= 0)
                ida_simple_remove(&dax_region->ida, dev_dax->id);
 err_id:
        kfree(dev_dax);

        return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dev_dax);

static int __init dax_init(void)
{
        dax_class = class_create(THIS_MODULE, "dax");
        return PTR_ERR_OR_ZERO(dax_class);
}

static void __exit dax_exit(void)
{
        class_destroy(dax_class);
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_init);
module_exit(dax_exit);