/*
 * Copyright(c) 2016 - 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "dax-private.h"
#include "dax.h"

static struct class *dax_class;

/*
 * Rely on the fact that drvdata is set before the attributes are
 * registered, and that the attributes are unregistered before drvdata
 * is cleared to assume that drvdata is always valid.
 */
static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dax_region->id);
}
static DEVICE_ATTR_RO(id);

static ssize_t region_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%llu\n", (unsigned long long)
			resource_size(&dax_region->res));
}
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
		region_size_show, NULL);

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", dax_region->align);
}
static DEVICE_ATTR_RO(align);

static struct attribute *dax_region_attributes[] = {
	&dev_attr_region_size.attr,
	&dev_attr_align.attr,
	&dev_attr_id.attr,
	NULL,
};

static const struct attribute_group dax_region_attribute_group = {
	.name = "dax_region",
	.attrs = dax_region_attributes,
};

static const struct attribute_group *dax_region_attribute_groups[] = {
	&dax_region_attribute_group,
	NULL,
};

static void dax_region_free(struct kref *kref)
{
	struct dax_region *dax_region;

	dax_region = container_of(kref, struct dax_region, kref);
	kfree(dax_region);
}

void dax_region_put(struct dax_region *dax_region)
{
	kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);

static void dax_region_unregister(void *region)
{
	struct dax_region *dax_region = region;

	sysfs_remove_groups(&dax_region->dev->kobj,
			dax_region_attribute_groups);
	dax_region_put(dax_region);
}

struct dax_region *alloc_dax_region(struct device *parent, int region_id,
		struct resource *res, unsigned int align, void *addr,
		unsigned long pfn_flags)
{
	struct dax_region *dax_region;

	/*
	 * The DAX core assumes that it can store its private data in
	 * parent->driver_data. This WARN is a reminder / safeguard for
	 * developers of device-dax drivers.
	 */
	if (dev_get_drvdata(parent)) {
		dev_WARN(parent, "dax core failed to setup private data\n");
		return NULL;
	}

	if (!IS_ALIGNED(res->start, align)
			|| !IS_ALIGNED(resource_size(res), align))
		return NULL;

	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
	if (!dax_region)
		return NULL;

	dev_set_drvdata(parent, dax_region);
	memcpy(&dax_region->res, res, sizeof(*res));
	dax_region->pfn_flags = pfn_flags;
	kref_init(&dax_region->kref);
	dax_region->id = region_id;
	ida_init(&dax_region->ida);
	dax_region->align = align;
	dax_region->dev = parent;
	dax_region->base = addr;
	if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
		kfree(dax_region);
		return NULL;
	}

	/* hold an extra reference for the devm-managed unregister action */
	kref_get(&dax_region->kref);
	if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
		return NULL;
	return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);

static struct dev_dax *to_dev_dax(struct device *dev)
{
	return container_of(dev, struct dev_dax, dev);
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	unsigned long long size = 0;
	int i;

	for (i = 0; i < dev_dax->num_resources; i++)
		size += resource_size(&dev_dax->res[i]);

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static struct attribute *dev_dax_attributes[] = {
	&dev_attr_size.attr,
	NULL,
};

static const struct attribute_group dev_dax_attribute_group = {
	.attrs = dev_dax_attributes,
};

static const struct attribute_group *dax_attribute_groups[] = {
	&dev_dax_attribute_group,
	NULL,
};

static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
		const char *func)
{
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	unsigned long mask;

	if (!dax_alive(dev_dax->dax_dev))
		return -ENXIO;

	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		dev_info(dev, "%s: %s: fail, attempted private mapping\n",
				current->comm, func);
		return -EINVAL;
	}

	mask = dax_region->align - 1;
	if (vma->vm_start & mask || vma->vm_end & mask) {
		dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
				current->comm, func, vma->vm_start, vma->vm_end,
				mask);
		return -EINVAL;
	}

	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
			&& (vma->vm_flags & VM_DONTCOPY) == 0) {
		dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
				current->comm, func);
		return -EINVAL;
	}

	if (!vma_is_dax(vma)) {
		dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
				current->comm, func);
		return -EINVAL;
	}

	return 0;
}

/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
		unsigned long size)
{
	struct resource *res;
	/* gcc-4.6.3-nolibc for i386 complains that this is uninitialized */
	phys_addr_t uninitialized_var(phys);
	int i;

	for (i = 0; i < dev_dax->num_resources; i++) {
		res = &dev_dax->res[i];
		phys = pgoff * PAGE_SIZE + res->start;
		if (phys >= res->start && phys <= res->end)
			break;
		pgoff -= PHYS_PFN(resource_size(res));
	}

	if (i < dev_dax->num_resources) {
		res = &dev_dax->res[i];
		if (phys + size - 1 <= res->end)
			return phys;
	}

	return -1;
}

static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
{
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	int rc = VM_FAULT_SIGBUS;
	phys_addr_t phys;
	pfn_t pfn;
	unsigned int fault_size = PAGE_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PAGE_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
				dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size != dax_region->align)
		return VM_FAULT_SIGBUS;

	phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	rc = vm_insert_mixed(vmf->vma, vmf->address, pfn);

	if (rc == -ENOMEM)
		return VM_FAULT_OOM;
	if (rc < 0 && rc != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	pfn_t pfn;
	unsigned int fault_size = PMD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PMD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
				dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pmd mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "region lacks devmap flags\n");
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pmd_addr < vmf->vma->vm_start ||
			(pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pmd_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, pfn,
			vmf->flags & FAULT_FLAG_WRITE);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
{
	unsigned long pud_addr = vmf->address & PUD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	pfn_t pfn;
	unsigned int fault_size = PUD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PUD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
				dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pud mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "region lacks devmap flags\n");
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pud_addr < vmf->vma->vm_start ||
			(pud_addr + PUD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pud_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, pfn,
			vmf->flags & FAULT_FLAG_WRITE);
}
#else
static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
{
	return VM_FAULT_FALLBACK;
}
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

static int dev_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	int rc, id;
	struct file *filp = vmf->vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
			(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
			vmf->vma->vm_start, vmf->vma->vm_end, pe_size);

	id = dax_read_lock();
	switch (pe_size) {
	case PE_SIZE_PTE:
		rc = __dev_dax_pte_fault(dev_dax, vmf);
		break;
	case PE_SIZE_PMD:
		rc = __dev_dax_pmd_fault(dev_dax, vmf);
		break;
	case PE_SIZE_PUD:
		rc = __dev_dax_pud_fault(dev_dax, vmf);
		break;
	default:
		rc = VM_FAULT_SIGBUS;
	}
	dax_read_unlock(id);

	return rc;
}

static int dev_dax_fault(struct vm_fault *vmf)
{
	return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;
	struct dax_region *dax_region = dev_dax->region;

	if (!IS_ALIGNED(addr, dax_region->align))
		return -EINVAL;
	return 0;
}

static unsigned long dev_dax_pagesize(struct vm_area_struct *vma)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;
	struct dax_region *dax_region = dev_dax->region;

	return dax_region->align;
}

static const struct vm_operations_struct dax_vm_ops = {
	.fault = dev_dax_fault,
	.huge_fault = dev_dax_huge_fault,
	.split = dev_dax_split,
	.pagesize = dev_dax_pagesize,
};

static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dev_dax *dev_dax = filp->private_data;
	int rc, id;

	dev_dbg(&dev_dax->dev, "trace\n");

	/*
	 * We lock to check dax_dev liveness and will re-check at
	 * fault time.
	 */
	id = dax_read_lock();
	rc = check_vma(dev_dax, vma, __func__);
	dax_read_unlock(id);
	if (rc)
		return rc;

	vma->vm_ops = &dax_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}

/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dev_dax *dev_dax = filp ? filp->private_data : NULL;
	struct dax_region *dax_region;

	if (!dev_dax || addr)
		goto out;

	dax_region = dev_dax->region;
	align = dax_region->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		/* shift so addr and file offset share the same offset within 'align' */
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}

static int dax_open(struct inode *inode, struct file *filp)
{
	struct dax_device *dax_dev = inode_dax(inode);
	struct inode *__dax_inode = dax_inode(dax_dev);
	struct dev_dax *dev_dax = dax_get_private(dax_dev);

	dev_dbg(&dev_dax->dev, "trace\n");
	inode->i_mapping = __dax_inode->i_mapping;
	inode->i_mapping->host = __dax_inode;
	filp->f_mapping = inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	filp->private_data = dev_dax;
	inode->i_flags = S_DAX;

	return 0;
}

static int dax_release(struct inode *inode, struct file *filp)
{
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "trace\n");
	return 0;
}

static const struct file_operations dax_fops = {
	.llseek = noop_llseek,
	.owner = THIS_MODULE,
	.open = dax_open,
	.release = dax_release,
	.get_unmapped_area = dax_get_unmapped_area,
	.mmap = dax_mmap,
};

static void dev_dax_release(struct device *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	struct dax_device *dax_dev = dev_dax->dax_dev;

	if (dev_dax->id >= 0)
		ida_simple_remove(&dax_region->ida, dev_dax->id);
	dax_region_put(dax_region);
	put_dax(dax_dev);
	kfree(dev_dax);
}

static void kill_dev_dax(struct dev_dax *dev_dax)
{
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct inode *inode = dax_inode(dax_dev);

	kill_dax(dax_dev);
	unmap_mapping_range(inode->i_mapping, 0, 0, 1);
}

static void unregister_dev_dax(void *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct inode *inode = dax_inode(dax_dev);
	struct cdev *cdev = inode->i_cdev;

	dev_dbg(dev, "trace\n");

	kill_dev_dax(dev_dax);
	cdev_device_del(cdev, dev);
	put_device(dev);
}

struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
		int id, struct resource *res, int count)
{
	struct device *parent = dax_region->dev;
	struct dax_device *dax_dev;
	struct dev_dax *dev_dax;
	struct inode *inode;
	struct device *dev;
	struct cdev *cdev;
	int rc, i;

	if (!count)
		return ERR_PTR(-EINVAL);

	dev_dax = kzalloc(sizeof(*dev_dax) + sizeof(*res) * count, GFP_KERNEL);
	if (!dev_dax)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < count; i++) {
		if (!IS_ALIGNED(res[i].start, dax_region->align)
				|| !IS_ALIGNED(resource_size(&res[i]),
					dax_region->align)) {
			rc = -EINVAL;
			break;
		}
		dev_dax->res[i].start = res[i].start;
		dev_dax->res[i].end = res[i].end;
	}

	if (i < count)
		goto err_id;

	if (id < 0) {
		id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
		dev_dax->id = id;
		if (id < 0) {
			rc = id;
			goto err_id;
		}
	} else {
		/* region provider owns @id lifetime */
		dev_dax->id = -1;
	}

	/*
	 * No 'host' or dax_operations since there is no access to this
	 * device outside of mmap of the resulting character device.
	 */
	dax_dev = alloc_dax(dev_dax, NULL, NULL);
	if (!dax_dev) {
		rc = -ENOMEM;
		goto err_dax;
	}

	/* from here on we're committed to teardown via dev_dax_release() */
	dev = &dev_dax->dev;
	device_initialize(dev);

	inode = dax_inode(dax_dev);
	cdev = inode->i_cdev;
	cdev_init(cdev, &dax_fops);
	cdev->owner = parent->driver->owner;

	dev_dax->num_resources = count;
	dev_dax->dax_dev = dax_dev;
	dev_dax->region = dax_region;
	kref_get(&dax_region->kref);

	dev->devt = inode->i_rdev;
	dev->class = dax_class;
	dev->parent = parent;
	dev->groups = dax_attribute_groups;
	dev->release = dev_dax_release;
	dev_set_name(dev, "dax%d.%d", dax_region->id, id);

	rc = cdev_device_add(cdev, dev);
	if (rc) {
		kill_dev_dax(dev_dax);
		put_device(dev);
		return ERR_PTR(rc);
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
	if (rc)
		return ERR_PTR(rc);

	return dev_dax;

 err_dax:
	if (dev_dax->id >= 0)
		ida_simple_remove(&dax_region->ida, dev_dax->id);
 err_id:
	kfree(dev_dax);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dev_dax);

static int __init dax_init(void)
{
	dax_class = class_create(THIS_MODULE, "dax");
	return PTR_ERR_OR_ZERO(dax_class);
}

static void __exit dax_exit(void)
{
	class_destroy(dax_class);
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_init);
module_exit(dax_exit);