// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/security.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVMEM_MINOR	1
#define DEVPORT_MINOR	4

/* Number of bytes from @start to the end of its page, capped at @size. */
static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
	return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
	return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

/* Yield the CPU if needed; report whether a pending signal should end the loop. */
static inline bool should_stop_iteration(void)
{
	if (need_resched())
		cond_resched();
	return signal_pending(current);
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	void *ptr;
	char *bounce;
	int err;

	if (p != *ppos)
		return 0;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	while (count > 0) {
		unsigned long remaining;
		int allowed, probe;

		sz = size_inside_page(p, count);

		err = -EPERM;
		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			goto failed;

		err = -EFAULT;
		if (allowed == 2) {
			/* Show zeros for restricted memory. */
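
/*
 * Write to *physical* memory through /dev/mem.  The mirror image of
 * read_mem(): f_pos is the physical address, and user data is copied
 * into the translated kernel mapping one page at a time.
 */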
			remaining = clear_user(buf, sz);
		} else {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr)
				goto failed;

			probe = copy_from_kernel_nofault(bounce, ptr, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (probe)
				goto failed;

			remaining = copy_to_user(buf, bounce, sz);
		}

		if (remaining)
			goto failed;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
		if (should_stop_iteration())
			break;
	}
	kfree(bounce);

	*ppos += read;
	return read;

failed:
	kfree(bounce);
	return err;
}
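
static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (p != *ppos)
		return -EFBIG;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		int allowed;

		sz = size_inside_page(p, count);

		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			return -EPERM;

		/* Skip actual writing when a page is marked as restricted. */
		if (allowed == 1) {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr) {
				if (written)
					break;
				return -EFAULT;
			}

			copied = copy_from_user(ptr, buf, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (copied) {
				written += sz - copied;
				if (written)
					break;
				return -EFAULT;
			}
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
		if (should_stop_iteration())
			break;
	}

	*ppos += written;
	return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_DSYNC, will be done
	 * non-cached.
	 */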
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT |
		NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return is_nommu_shared_mapping(vma->vm_flags);
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

/*
 * Map physical memory straight into a process's address space.  The offset
 * must fit in phys_addr_t without wrapping, and every page in the range
 * must pass range_is_allowed() before being remapped with VM_IO semantics.
 */
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

	/* Does it even fit in phys_addr_t? */
	if (offset >> PAGE_SHIFT != vma->vm_pgoff)
		return -EINVAL;

	/* It's illegal to wrap around the end of the physical address space. */
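	if (offset + (phys_addr_t)size - 1 < offset)
		return -EINVAL;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}

/*
 * Accessors for /dev/port: f_pos selects the I/O port, limited to the
 * 16-bit port space (0-65535), and data moves one byte at a time via
 * inb()/outb().
 */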
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;

		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}

static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
	return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);

	iov_iter_advance(from, count);
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static int uring_cmd_null(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	return 0;
}

static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
	size_t written = 0;

	while (iov_iter_count(iter)) {
		size_t chunk = iov_iter_count(iter), n;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		n = iov_iter_zero(chunk, iter);
		if (!n && iov_iter_count(iter))
			return written ? written : -EFAULT;
		written += n;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		if (!need_resched())
			continue;
		if (iocb->ki_flags & IOCB_NOWAIT)
			return written ? written : -EAGAIN;
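		cond_resched();
	}
	return written;
}

/*
 * Plain read() path for /dev/zero: clear the user buffer a page at a
 * time so that large reads stay preemptible and pending signals are
 * noticed between chunks.
 */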
static ssize_t read_zero(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t cleared = 0;

	while (count) {
		size_t chunk = min_t(size_t, count, PAGE_SIZE);
		size_t left;

		left = clear_user(buf + cleared, chunk);
		if (unlikely(left)) {
			cleared += (chunk - left);
			if (!cleared)
				return -EFAULT;
			break;
		}
		cleared += chunk;
		count -= chunk;

		if (signal_pending(current))
			break;
		cond_resched();
	}

	return cleared;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	vma_set_anonymous(vma);
	return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
	if (flags & MAP_SHARED) {
		/*
		 * mmap_zero() will call shmem_zero_setup() to create a file,
		 * so use shmem's get_unmapped_area in case it can be huge;
		 * and pass NULL for file as in mmap.c's get_unmapped_area(),
		 * so as not to confuse shmem with our handle on "/dev/zero".
		 */
		return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
	}

	/* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
	return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	inode_lock(file_inode(file));
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
		fallthrough;
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if ((unsigned long long)offset >= -MAX_ERRNO) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	inode_unlock(file_inode(file));
	return ret;
}

static int open_port(struct inode *inode, struct file *filp)
{
	int rc;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	rc = security_locked_down(LOCKDOWN_DEV_MEM);
	if (rc)
		return rc;

	if (iminor(inode) != DEVMEM_MINOR)
		return 0;

	/*
	 * Use a unified address space to have a single point to manage
	 * revocations when drivers want to take over a /dev/mem mapped
	 * range.
	 */
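	filp->f_mapping = iomem_get_mapping();

	return 0;
}

/*
 * Several of these devices share an implementation; alias the handlers
 * rather than duplicating them.
 */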
#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define write_iter_zero	write_iter_null
#define open_mem	open_port

static const struct file_operations __maybe_unused mem_fops = {
	.llseek = memory_lseek,
	.read = read_mem,
	.write = write_mem,
	.mmap = mmap_mem,
	.open = open_mem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
	.llseek = null_lseek,
	.read = read_null,
	.write = write_null,
	.read_iter = read_iter_null,
	.write_iter = write_iter_null,
	.splice_write = splice_write_null,
	.uring_cmd = uring_cmd_null,
};

static const struct file_operations __maybe_unused port_fops = {
	.llseek = memory_lseek,
	.read = read_port,
	.write = write_port,
	.open = open_port,
};

static const struct file_operations zero_fops = {
	.llseek = zero_lseek,
	.write = write_zero,
	.read_iter = read_iter_zero,
	.read = read_zero,
	.write_iter = write_iter_zero,
	.mmap = mmap_zero,
	.get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
	.mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
	.llseek = full_lseek,
	.read_iter = read_iter_zero,
	.write = write_full,
};

static const struct memdev {
	const char *name;
	const struct file_operations *fops;
	fmode_t fmode;
	umode_t mode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
	[DEVMEM_MINOR] = { "mem", &mem_fops, FMODE_UNSIGNED_OFFSET, 0 },
#endif
	[3] = { "null", &null_fops, FMODE_NOWAIT, 0666 },
#ifdef CONFIG_DEVPORT
	[4] = { "port", &port_fops, 0, 0 },
#endif
	[5] = { "zero", &zero_fops, FMODE_NOWAIT, 0666 },
	[7] = { "full", &full_fops, 0, 0666 },
	[8] = { "random", &random_fops, FMODE_NOWAIT, 0666 },
	[9] = { "urandom", &urandom_fops, FMODE_NOWAIT, 0666 },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", &kmsg_fops, 0, 0644 },
#endif
};
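
/*
 * All minors of the mem major are opened through memory_open(), which
 * installs the per-minor file_operations from devlist and then forwards
 * the open to the device's own handler, if any.
 */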
static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	filp->f_mode |= dev->fmode;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};

static char *mem_devnode(const struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

#ifdef CONFIG_DEVMEM_BOOTPARAM
static bool devmem;
module_param(devmem, bool, 0444);
MODULE_PARM_DESC(devmem, "kernel parameter to activate /dev/mem");
#endif

static const struct class mem_class = {
	.name = "mem",
	.devnode = mem_devnode,
};

static int __init chr_dev_init(void)
{
	int retval;
	int minor;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	retval = class_register(&mem_class);
	if (retval)
		return retval;

	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

#ifdef CONFIG_DEVMEM_BOOTPARAM
		if (minor == DEVMEM_MINOR && !devmem)
			continue;
#endif
		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(&mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);