/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top the kernel knows about or through
	 * a file pointer that was marked O_SYNC will be done non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
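	/*
	 * Note: on those architectures the first page cannot be touched
	 * through the kernel direct mapping, so reads below PAGE_SIZE are
	 * satisfied by zero-filling the user buffer instead of copying.
	 */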
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, count))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		if (copy_to_user(buf, ptr, sz)) {
			unxlate_dev_mem_ptr(p, ptr);
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}

static ssize_t write_mem(struct file * file, const char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			unxlate_dev_mem_ptr(p, ptr);
			if (written)
				break;
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
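/*
 * Without an MMU there is no copy-on-write, so only shared (VM_MAYSHARE)
 * mappings of /dev/mem can be honoured in place.
 */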
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

void __attribute__((weak))
map_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
{
	/* nothing. architectures can override. */
}

void __attribute__((weak))
unmap_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
{
	/* nothing. architectures can override. */
}

static void mmap_mem_open(struct vm_area_struct *vma)
{
	map_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
			vma->vm_page_prot);
}

static void mmap_mem_close(struct vm_area_struct *vma)
{
	unmap_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
			vma->vm_page_prot);
}

static struct vm_operations_struct mmap_mem_ops = {
	.open  = mmap_mem_open,
	.close = mmap_mem_close,
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		unmap_devmem(vma->vm_pgoff, size, vma->vm_page_prot);
		return -EAGAIN;
	}
	return 0;
}

#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
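/*
 * This is the backend for /dev/oldmem (minor 12 below), typically used by a
 * kdump capture kernel: *ppos is a physical address in the crashed kernel's
 * memory, and page frames up to saved_max_pfn are copied out one at a time
 * via copy_oldmem_page().
 */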
397 */ 398 static ssize_t read_oldmem(struct file *file, char __user *buf, 399 size_t count, loff_t *ppos) 400 { 401 unsigned long pfn, offset; 402 size_t read = 0, csize; 403 int rc = 0; 404 405 while (count) { 406 pfn = *ppos / PAGE_SIZE; 407 if (pfn > saved_max_pfn) 408 return read; 409 410 offset = (unsigned long)(*ppos % PAGE_SIZE); 411 if (count > PAGE_SIZE - offset) 412 csize = PAGE_SIZE - offset; 413 else 414 csize = count; 415 416 rc = copy_oldmem_page(pfn, buf, csize, offset, 1); 417 if (rc < 0) 418 return rc; 419 buf += csize; 420 *ppos += csize; 421 read += csize; 422 count -= csize; 423 } 424 return read; 425 } 426 #endif 427 428 extern long vread(char *buf, char *addr, unsigned long count); 429 extern long vwrite(char *buf, char *addr, unsigned long count); 430 431 #ifdef CONFIG_DEVKMEM 432 /* 433 * This function reads the *virtual* memory as seen by the kernel. 434 */ 435 static ssize_t read_kmem(struct file *file, char __user *buf, 436 size_t count, loff_t *ppos) 437 { 438 unsigned long p = *ppos; 439 ssize_t low_count, read, sz; 440 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ 441 442 read = 0; 443 if (p < (unsigned long) high_memory) { 444 low_count = count; 445 if (count > (unsigned long) high_memory - p) 446 low_count = (unsigned long) high_memory - p; 447 448 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED 449 /* we don't have page 0 mapped on sparc and m68k.. */ 450 if (p < PAGE_SIZE && low_count > 0) { 451 size_t tmp = PAGE_SIZE - p; 452 if (tmp > low_count) tmp = low_count; 453 if (clear_user(buf, tmp)) 454 return -EFAULT; 455 buf += tmp; 456 p += tmp; 457 read += tmp; 458 low_count -= tmp; 459 count -= tmp; 460 } 461 #endif 462 while (low_count > 0) { 463 /* 464 * Handle first page in case it's not aligned 465 */ 466 if (-p & (PAGE_SIZE - 1)) 467 sz = -p & (PAGE_SIZE - 1); 468 else 469 sz = PAGE_SIZE; 470 471 sz = min_t(unsigned long, sz, low_count); 472 473 /* 474 * On ia64 if a page has been mapped somewhere as 475 * uncached, then it must also be accessed uncached 476 * by the kernel or data corruption may occur 477 */ 478 kbuf = xlate_dev_kmem_ptr((char *)p); 479 480 if (copy_to_user(buf, kbuf, sz)) 481 return -EFAULT; 482 buf += sz; 483 p += sz; 484 read += sz; 485 low_count -= sz; 486 count -= sz; 487 } 488 } 489 490 if (count > 0) { 491 kbuf = (char *)__get_free_page(GFP_KERNEL); 492 if (!kbuf) 493 return -ENOMEM; 494 while (count > 0) { 495 int len = count; 496 497 if (len > PAGE_SIZE) 498 len = PAGE_SIZE; 499 len = vread(kbuf, (char *)p, len); 500 if (!len) 501 break; 502 if (copy_to_user(buf, kbuf, len)) { 503 free_page((unsigned long)kbuf); 504 return -EFAULT; 505 } 506 count -= len; 507 buf += len; 508 read += len; 509 p += len; 510 } 511 free_page((unsigned long)kbuf); 512 } 513 *ppos = p; 514 return read; 515 } 516 517 518 static inline ssize_t 519 do_write_kmem(void *p, unsigned long realp, const char __user * buf, 520 size_t count, loff_t *ppos) 521 { 522 ssize_t written, sz; 523 unsigned long copied; 524 525 written = 0; 526 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED 527 /* we don't have page 0 mapped on sparc and m68k.. */ 528 if (realp < PAGE_SIZE) { 529 unsigned long sz = PAGE_SIZE - realp; 530 if (sz > count) 531 sz = count; 532 /* Hmm. Do something? 
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}


/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {

		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void*)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					if (wrote + virtr)
						break;
					free_page((unsigned long)kbuf);
					return -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}
#endif

#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user * tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif

static ssize_t read_null(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

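/*
 * splice() to /dev/null: pipe_to_null() "consumes" each pipe buffer without
 * copying it anywhere, so spliced data is simply discarded.
 */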
static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_zero(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}

static ssize_t write_full(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (orig) {
	case 0:
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	case 1:
		file->f_pos += offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return ret;
}

static int open_port(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

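/*
 * Usage sketch (userspace, not part of this driver): the file offset of
 * /dev/mem is interpreted by memory_lseek() and read_mem() as a physical
 * address, so a typical read looks like
 *
 *	fd = open("/dev/mem", O_RDONLY | O_SYNC);
 *	lseek(fd, phys_addr, SEEK_SET);
 *	read(fd, buf, len);
 *
 * where O_SYNC asks for uncached access (see uncached_access()) and the
 * range is still subject to range_is_allowed() under CONFIG_STRICT_DEVMEM.
 */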
#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.splice_write	= splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
	.capabilities	= BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read	= read_oldmem,
	.open	= open_oldmem,
};
#endif

static ssize_t kmsg_write(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
		if (ret > count)
			/* printk can add a prefix */
			ret = count;
	}
	kfree(tmp);
	return ret;
}

static const struct file_operations kmsg_fops = {
	.write = kmsg_write,
};

static int memory_open(struct inode * inode, struct file * filp)
{
	int ret = 0;

	lock_kernel();
	switch (iminor(inode)) {
	case 1:
		filp->f_op = &mem_fops;
		filp->f_mapping->backing_dev_info =
			&directly_mappable_cdev_bdi;
		break;
#ifdef CONFIG_DEVKMEM
	case 2:
		filp->f_op = &kmem_fops;
		filp->f_mapping->backing_dev_info =
			&directly_mappable_cdev_bdi;
		break;
#endif
	case 3:
		filp->f_op = &null_fops;
		break;
#ifdef CONFIG_DEVPORT
	case 4:
		filp->f_op = &port_fops;
		break;
#endif
	case 5:
		filp->f_mapping->backing_dev_info = &zero_bdi;
		filp->f_op = &zero_fops;
		break;
	case 7:
		filp->f_op = &full_fops;
		break;
	case 8:
		filp->f_op = &random_fops;
		break;
	case 9:
		filp->f_op = &urandom_fops;
		break;
	case 11:
		filp->f_op = &kmsg_fops;
		break;
#ifdef CONFIG_CRASH_DUMP
	case 12:
		filp->f_op = &oldmem_fops;
		break;
#endif
	default:
		unlock_kernel();
		return -ENXIO;
	}
	if (filp->f_op && filp->f_op->open)
		ret = filp->f_op->open(inode, filp);
	unlock_kernel();
	return ret;
}

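/*
 * memory_open() above is the open() handler registered for the whole
 * MEM_MAJOR character device: it swaps in the per-minor file_operations
 * listed in devlist[] below and then calls that device's own open(), if any.
 */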
static const struct file_operations memory_fops = {
	.open		= memory_open,	/* just a selector for the real open */
};

static const struct {
	unsigned int		minor;
	char			*name;
	umode_t			mode;
	const struct file_operations	*fops;
} devlist[] = { /* list of minor devices */
	{1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
#ifdef CONFIG_DEVKMEM
	{2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
#endif
	{3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#ifdef CONFIG_DEVPORT
	{4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
	{5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
	{7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
	{8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
	{9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
	{11, "kmsg",   S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
	{12, "oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int i;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	for (i = 0; i < ARRAY_SIZE(devlist); i++)
		device_create(mem_class, NULL,
			      MKDEV(MEM_MAJOR, devlist[i].minor), NULL,
			      devlist[i].name);

	return 0;
}

fs_initcall(chr_dev_init);