xref: /openbmc/linux/drivers/char/mem.c (revision 31b90347)
1 /*
2  *  linux/drivers/char/mem.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  Added devfs support.
7  *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
8  *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
9  */
10 
11 #include <linux/mm.h>
12 #include <linux/miscdevice.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
15 #include <linux/mman.h>
16 #include <linux/random.h>
17 #include <linux/init.h>
18 #include <linux/raw.h>
19 #include <linux/tty.h>
20 #include <linux/capability.h>
21 #include <linux/ptrace.h>
22 #include <linux/device.h>
23 #include <linux/highmem.h>
24 #include <linux/backing-dev.h>
25 #include <linux/bootmem.h>
26 #include <linux/splice.h>
27 #include <linux/pfn.h>
28 #include <linux/export.h>
29 #include <linux/io.h>
30 #include <linux/aio.h>
31 
32 #include <asm/uaccess.h>
33 
34 #ifdef CONFIG_IA64
35 # include <linux/efi.h>
36 #endif
37 
38 #define DEVPORT_MINOR	4
39 
40 static inline unsigned long size_inside_page(unsigned long start,
41 					     unsigned long size)
42 {
43 	unsigned long sz;
44 
45 	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
46 
47 	return min(sz, size);
48 }
49 
50 #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
51 static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
52 {
53 	return addr + count <= __pa(high_memory);
54 }
55 
56 static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
57 {
58 	return 1;
59 }
60 #endif
61 
62 #ifdef CONFIG_STRICT_DEVMEM
63 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
64 {
65 	u64 from = ((u64)pfn) << PAGE_SHIFT;
66 	u64 to = from + size;
67 	u64 cursor = from;
68 
69 	while (cursor < to) {
70 		if (!devmem_is_allowed(pfn)) {
71 			printk(KERN_INFO
72 		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
73 				current->comm, from, to);
74 			return 0;
75 		}
76 		cursor += PAGE_SIZE;
77 		pfn++;
78 	}
79 	return 1;
80 }
81 #else
82 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
83 {
84 	return 1;
85 }
86 #endif
87 
88 void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
89 {
90 }
91 
92 /*
93  * This funcion reads the *physical* memory. The f_pos points directly to the
94  * memory location.
95  */
96 static ssize_t read_mem(struct file *file, char __user *buf,
97 			size_t count, loff_t *ppos)
98 {
99 	phys_addr_t p = *ppos;
100 	ssize_t read, sz;
101 	char *ptr;
102 
103 	if (!valid_phys_addr_range(p, count))
104 		return -EFAULT;
105 	read = 0;
106 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
107 	/* we don't have page 0 mapped on sparc and m68k.. */
108 	if (p < PAGE_SIZE) {
109 		sz = size_inside_page(p, count);
110 		if (sz > 0) {
111 			if (clear_user(buf, sz))
112 				return -EFAULT;
113 			buf += sz;
114 			p += sz;
115 			count -= sz;
116 			read += sz;
117 		}
118 	}
119 #endif
120 
121 	while (count > 0) {
122 		unsigned long remaining;
123 
124 		sz = size_inside_page(p, count);
125 
126 		if (!range_is_allowed(p >> PAGE_SHIFT, count))
127 			return -EPERM;
128 
129 		/*
130 		 * On ia64 if a page has been mapped somewhere as uncached, then
131 		 * it must also be accessed uncached by the kernel or data
132 		 * corruption may occur.
133 		 */
134 		ptr = xlate_dev_mem_ptr(p);
135 		if (!ptr)
136 			return -EFAULT;
137 
138 		remaining = copy_to_user(buf, ptr, sz);
139 		unxlate_dev_mem_ptr(p, ptr);
140 		if (remaining)
141 			return -EFAULT;
142 
143 		buf += sz;
144 		p += sz;
145 		count -= sz;
146 		read += sz;
147 	}
148 
149 	*ppos += read;
150 	return read;
151 }
152 
153 static ssize_t write_mem(struct file *file, const char __user *buf,
154 			 size_t count, loff_t *ppos)
155 {
156 	phys_addr_t p = *ppos;
157 	ssize_t written, sz;
158 	unsigned long copied;
159 	void *ptr;
160 
161 	if (!valid_phys_addr_range(p, count))
162 		return -EFAULT;
163 
164 	written = 0;
165 
166 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
167 	/* we don't have page 0 mapped on sparc and m68k.. */
168 	if (p < PAGE_SIZE) {
169 		sz = size_inside_page(p, count);
170 		/* Hmm. Do something? */
171 		buf += sz;
172 		p += sz;
173 		count -= sz;
174 		written += sz;
175 	}
176 #endif
177 
178 	while (count > 0) {
179 		sz = size_inside_page(p, count);
180 
181 		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
182 			return -EPERM;
183 
184 		/*
185 		 * On ia64 if a page has been mapped somewhere as uncached, then
186 		 * it must also be accessed uncached by the kernel or data
187 		 * corruption may occur.
188 		 */
189 		ptr = xlate_dev_mem_ptr(p);
190 		if (!ptr) {
191 			if (written)
192 				break;
193 			return -EFAULT;
194 		}
195 
196 		copied = copy_from_user(ptr, buf, sz);
197 		unxlate_dev_mem_ptr(p, ptr);
198 		if (copied) {
199 			written += sz - copied;
200 			if (written)
201 				break;
202 			return -EFAULT;
203 		}
204 
205 		buf += sz;
206 		p += sz;
207 		count -= sz;
208 		written += sz;
209 	}
210 
211 	*ppos += written;
212 	return written;
213 }
214 
215 int __weak phys_mem_access_prot_allowed(struct file *file,
216 	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
217 {
218 	return 1;
219 }
220 
221 #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
222 
223 /*
224  * Architectures vary in how they handle caching for addresses
225  * outside of main memory.
226  *
227  */
228 #ifdef pgprot_noncached
229 static int uncached_access(struct file *file, phys_addr_t addr)
230 {
231 #if defined(CONFIG_IA64)
232 	/*
233 	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
234 	 * attribute aliases.
235 	 */
236 	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
237 #elif defined(CONFIG_MIPS)
238 	{
239 		extern int __uncached_access(struct file *file,
240 					     unsigned long addr);
241 
242 		return __uncached_access(file, addr);
243 	}
244 #else
245 	/*
246 	 * Accessing memory above the top the kernel knows about or through a
247 	 * file pointer
248 	 * that was marked O_DSYNC will be done non-cached.
249 	 */
250 	if (file->f_flags & O_DSYNC)
251 		return 1;
252 	return addr >= __pa(high_memory);
253 #endif
254 }
255 #endif
256 
257 static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
258 				     unsigned long size, pgprot_t vma_prot)
259 {
260 #ifdef pgprot_noncached
261 	phys_addr_t offset = pfn << PAGE_SHIFT;
262 
263 	if (uncached_access(file, offset))
264 		return pgprot_noncached(vma_prot);
265 #endif
266 	return vma_prot;
267 }
268 #endif
269 
270 #ifndef CONFIG_MMU
271 static unsigned long get_unmapped_area_mem(struct file *file,
272 					   unsigned long addr,
273 					   unsigned long len,
274 					   unsigned long pgoff,
275 					   unsigned long flags)
276 {
277 	if (!valid_mmap_phys_addr_range(pgoff, len))
278 		return (unsigned long) -EINVAL;
279 	return pgoff << PAGE_SHIFT;
280 }
281 
282 /* can't do an in-place private mapping if there's no MMU */
283 static inline int private_mapping_ok(struct vm_area_struct *vma)
284 {
285 	return vma->vm_flags & VM_MAYSHARE;
286 }
287 #else
288 #define get_unmapped_area_mem	NULL
289 
290 static inline int private_mapping_ok(struct vm_area_struct *vma)
291 {
292 	return 1;
293 }
294 #endif
295 
296 static const struct vm_operations_struct mmap_mem_ops = {
297 #ifdef CONFIG_HAVE_IOREMAP_PROT
298 	.access = generic_access_phys
299 #endif
300 };
301 
302 static int mmap_mem(struct file *file, struct vm_area_struct *vma)
303 {
304 	size_t size = vma->vm_end - vma->vm_start;
305 
306 	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
307 		return -EINVAL;
308 
309 	if (!private_mapping_ok(vma))
310 		return -ENOSYS;
311 
312 	if (!range_is_allowed(vma->vm_pgoff, size))
313 		return -EPERM;
314 
315 	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
316 						&vma->vm_page_prot))
317 		return -EINVAL;
318 
319 	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
320 						 size,
321 						 vma->vm_page_prot);
322 
323 	vma->vm_ops = &mmap_mem_ops;
324 
325 	/* Remap-pfn-range will mark the range VM_IO */
326 	if (remap_pfn_range(vma,
327 			    vma->vm_start,
328 			    vma->vm_pgoff,
329 			    size,
330 			    vma->vm_page_prot)) {
331 		return -EAGAIN;
332 	}
333 	return 0;
334 }
335 
336 #ifdef CONFIG_DEVKMEM
337 static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
338 {
339 	unsigned long pfn;
340 
341 	/* Turn a kernel-virtual address into a physical page frame */
342 	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
343 
344 	/*
345 	 * RED-PEN: on some architectures there is more mapped memory than
346 	 * available in mem_map which pfn_valid checks for. Perhaps should add a
347 	 * new macro here.
348 	 *
349 	 * RED-PEN: vmalloc is not supported right now.
350 	 */
351 	if (!pfn_valid(pfn))
352 		return -EIO;
353 
354 	vma->vm_pgoff = pfn;
355 	return mmap_mem(file, vma);
356 }
357 #endif
358 
359 #ifdef CONFIG_DEVKMEM
360 /*
361  * This function reads the *virtual* memory as seen by the kernel.
362  */
363 static ssize_t read_kmem(struct file *file, char __user *buf,
364 			 size_t count, loff_t *ppos)
365 {
366 	unsigned long p = *ppos;
367 	ssize_t low_count, read, sz;
368 	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
369 	int err = 0;
370 
371 	read = 0;
372 	if (p < (unsigned long) high_memory) {
373 		low_count = count;
374 		if (count > (unsigned long)high_memory - p)
375 			low_count = (unsigned long)high_memory - p;
376 
377 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
378 		/* we don't have page 0 mapped on sparc and m68k.. */
379 		if (p < PAGE_SIZE && low_count > 0) {
380 			sz = size_inside_page(p, low_count);
381 			if (clear_user(buf, sz))
382 				return -EFAULT;
383 			buf += sz;
384 			p += sz;
385 			read += sz;
386 			low_count -= sz;
387 			count -= sz;
388 		}
389 #endif
390 		while (low_count > 0) {
391 			sz = size_inside_page(p, low_count);
392 
393 			/*
394 			 * On ia64 if a page has been mapped somewhere as
395 			 * uncached, then it must also be accessed uncached
396 			 * by the kernel or data corruption may occur
397 			 */
398 			kbuf = xlate_dev_kmem_ptr((char *)p);
399 
400 			if (copy_to_user(buf, kbuf, sz))
401 				return -EFAULT;
402 			buf += sz;
403 			p += sz;
404 			read += sz;
405 			low_count -= sz;
406 			count -= sz;
407 		}
408 	}
409 
410 	if (count > 0) {
411 		kbuf = (char *)__get_free_page(GFP_KERNEL);
412 		if (!kbuf)
413 			return -ENOMEM;
414 		while (count > 0) {
415 			sz = size_inside_page(p, count);
416 			if (!is_vmalloc_or_module_addr((void *)p)) {
417 				err = -ENXIO;
418 				break;
419 			}
420 			sz = vread(kbuf, (char *)p, sz);
421 			if (!sz)
422 				break;
423 			if (copy_to_user(buf, kbuf, sz)) {
424 				err = -EFAULT;
425 				break;
426 			}
427 			count -= sz;
428 			buf += sz;
429 			read += sz;
430 			p += sz;
431 		}
432 		free_page((unsigned long)kbuf);
433 	}
434 	*ppos = p;
435 	return read ? read : err;
436 }
437 
438 
439 static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
440 				size_t count, loff_t *ppos)
441 {
442 	ssize_t written, sz;
443 	unsigned long copied;
444 
445 	written = 0;
446 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
447 	/* we don't have page 0 mapped on sparc and m68k.. */
448 	if (p < PAGE_SIZE) {
449 		sz = size_inside_page(p, count);
450 		/* Hmm. Do something? */
451 		buf += sz;
452 		p += sz;
453 		count -= sz;
454 		written += sz;
455 	}
456 #endif
457 
458 	while (count > 0) {
459 		char *ptr;
460 
461 		sz = size_inside_page(p, count);
462 
463 		/*
464 		 * On ia64 if a page has been mapped somewhere as uncached, then
465 		 * it must also be accessed uncached by the kernel or data
466 		 * corruption may occur.
467 		 */
468 		ptr = xlate_dev_kmem_ptr((char *)p);
469 
470 		copied = copy_from_user(ptr, buf, sz);
471 		if (copied) {
472 			written += sz - copied;
473 			if (written)
474 				break;
475 			return -EFAULT;
476 		}
477 		buf += sz;
478 		p += sz;
479 		count -= sz;
480 		written += sz;
481 	}
482 
483 	*ppos += written;
484 	return written;
485 }
486 
487 /*
488  * This function writes to the *virtual* memory as seen by the kernel.
489  */
490 static ssize_t write_kmem(struct file *file, const char __user *buf,
491 			  size_t count, loff_t *ppos)
492 {
493 	unsigned long p = *ppos;
494 	ssize_t wrote = 0;
495 	ssize_t virtr = 0;
496 	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
497 	int err = 0;
498 
499 	if (p < (unsigned long) high_memory) {
500 		unsigned long to_write = min_t(unsigned long, count,
501 					       (unsigned long)high_memory - p);
502 		wrote = do_write_kmem(p, buf, to_write, ppos);
503 		if (wrote != to_write)
504 			return wrote;
505 		p += wrote;
506 		buf += wrote;
507 		count -= wrote;
508 	}
509 
510 	if (count > 0) {
511 		kbuf = (char *)__get_free_page(GFP_KERNEL);
512 		if (!kbuf)
513 			return wrote ? wrote : -ENOMEM;
514 		while (count > 0) {
515 			unsigned long sz = size_inside_page(p, count);
516 			unsigned long n;
517 
518 			if (!is_vmalloc_or_module_addr((void *)p)) {
519 				err = -ENXIO;
520 				break;
521 			}
522 			n = copy_from_user(kbuf, buf, sz);
523 			if (n) {
524 				err = -EFAULT;
525 				break;
526 			}
527 			vwrite(kbuf, (char *)p, sz);
528 			count -= sz;
529 			buf += sz;
530 			virtr += sz;
531 			p += sz;
532 		}
533 		free_page((unsigned long)kbuf);
534 	}
535 
536 	*ppos = p;
537 	return virtr + wrote ? : err;
538 }
539 #endif
540 
541 #ifdef CONFIG_DEVPORT
542 static ssize_t read_port(struct file *file, char __user *buf,
543 			 size_t count, loff_t *ppos)
544 {
545 	unsigned long i = *ppos;
546 	char __user *tmp = buf;
547 
548 	if (!access_ok(VERIFY_WRITE, buf, count))
549 		return -EFAULT;
550 	while (count-- > 0 && i < 65536) {
551 		if (__put_user(inb(i), tmp) < 0)
552 			return -EFAULT;
553 		i++;
554 		tmp++;
555 	}
556 	*ppos = i;
557 	return tmp-buf;
558 }
559 
560 static ssize_t write_port(struct file *file, const char __user *buf,
561 			  size_t count, loff_t *ppos)
562 {
563 	unsigned long i = *ppos;
564 	const char __user *tmp = buf;
565 
566 	if (!access_ok(VERIFY_READ, buf, count))
567 		return -EFAULT;
568 	while (count-- > 0 && i < 65536) {
569 		char c;
570 		if (__get_user(c, tmp)) {
571 			if (tmp > buf)
572 				break;
573 			return -EFAULT;
574 		}
575 		outb(c, i);
576 		i++;
577 		tmp++;
578 	}
579 	*ppos = i;
580 	return tmp-buf;
581 }
582 #endif
583 
584 static ssize_t read_null(struct file *file, char __user *buf,
585 			 size_t count, loff_t *ppos)
586 {
587 	return 0;
588 }
589 
590 static ssize_t write_null(struct file *file, const char __user *buf,
591 			  size_t count, loff_t *ppos)
592 {
593 	return count;
594 }
595 
596 static ssize_t aio_read_null(struct kiocb *iocb, const struct iovec *iov,
597 			     unsigned long nr_segs, loff_t pos)
598 {
599 	return 0;
600 }
601 
602 static ssize_t aio_write_null(struct kiocb *iocb, const struct iovec *iov,
603 			      unsigned long nr_segs, loff_t pos)
604 {
605 	return iov_length(iov, nr_segs);
606 }
607 
608 static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
609 			struct splice_desc *sd)
610 {
611 	return sd->len;
612 }
613 
614 static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
615 				 loff_t *ppos, size_t len, unsigned int flags)
616 {
617 	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
618 }
619 
620 static ssize_t read_zero(struct file *file, char __user *buf,
621 			 size_t count, loff_t *ppos)
622 {
623 	size_t written;
624 
625 	if (!count)
626 		return 0;
627 
628 	if (!access_ok(VERIFY_WRITE, buf, count))
629 		return -EFAULT;
630 
631 	written = 0;
632 	while (count) {
633 		unsigned long unwritten;
634 		size_t chunk = count;
635 
636 		if (chunk > PAGE_SIZE)
637 			chunk = PAGE_SIZE;	/* Just for latency reasons */
638 		unwritten = __clear_user(buf, chunk);
639 		written += chunk - unwritten;
640 		if (unwritten)
641 			break;
642 		if (signal_pending(current))
643 			return written ? written : -ERESTARTSYS;
644 		buf += chunk;
645 		count -= chunk;
646 		cond_resched();
647 	}
648 	return written ? written : -EFAULT;
649 }
650 
651 static ssize_t aio_read_zero(struct kiocb *iocb, const struct iovec *iov,
652 			     unsigned long nr_segs, loff_t pos)
653 {
654 	size_t written = 0;
655 	unsigned long i;
656 	ssize_t ret;
657 
658 	for (i = 0; i < nr_segs; i++) {
659 		ret = read_zero(iocb->ki_filp, iov[i].iov_base, iov[i].iov_len,
660 				&pos);
661 		if (ret < 0)
662 			break;
663 		written += ret;
664 	}
665 
666 	return written ? written : -EFAULT;
667 }
668 
669 static int mmap_zero(struct file *file, struct vm_area_struct *vma)
670 {
671 #ifndef CONFIG_MMU
672 	return -ENOSYS;
673 #endif
674 	if (vma->vm_flags & VM_SHARED)
675 		return shmem_zero_setup(vma);
676 	return 0;
677 }
678 
679 static ssize_t write_full(struct file *file, const char __user *buf,
680 			  size_t count, loff_t *ppos)
681 {
682 	return -ENOSPC;
683 }
684 
685 /*
686  * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
687  * can fopen() both devices with "a" now.  This was previously impossible.
688  * -- SRB.
689  */
690 static loff_t null_lseek(struct file *file, loff_t offset, int orig)
691 {
692 	return file->f_pos = 0;
693 }
694 
695 /*
696  * The memory devices use the full 32/64 bits of the offset, and so we cannot
697  * check against negative addresses: they are ok. The return value is weird,
698  * though, in that case (0).
699  *
700  * also note that seeking relative to the "end of file" isn't supported:
701  * it has no meaning, so it returns -EINVAL.
702  */
703 static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
704 {
705 	loff_t ret;
706 
707 	mutex_lock(&file_inode(file)->i_mutex);
708 	switch (orig) {
709 	case SEEK_CUR:
710 		offset += file->f_pos;
711 	case SEEK_SET:
712 		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
713 		if (IS_ERR_VALUE((unsigned long long)offset)) {
714 			ret = -EOVERFLOW;
715 			break;
716 		}
717 		file->f_pos = offset;
718 		ret = file->f_pos;
719 		force_successful_syscall_return();
720 		break;
721 	default:
722 		ret = -EINVAL;
723 	}
724 	mutex_unlock(&file_inode(file)->i_mutex);
725 	return ret;
726 }
727 
728 static int open_port(struct inode *inode, struct file *filp)
729 {
730 	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
731 }
732 
733 #define zero_lseek	null_lseek
734 #define full_lseek      null_lseek
735 #define write_zero	write_null
736 #define read_full       read_zero
737 #define aio_write_zero	aio_write_null
738 #define open_mem	open_port
739 #define open_kmem	open_mem
740 
741 static const struct file_operations mem_fops = {
742 	.llseek		= memory_lseek,
743 	.read		= read_mem,
744 	.write		= write_mem,
745 	.mmap		= mmap_mem,
746 	.open		= open_mem,
747 	.get_unmapped_area = get_unmapped_area_mem,
748 };
749 
750 #ifdef CONFIG_DEVKMEM
751 static const struct file_operations kmem_fops = {
752 	.llseek		= memory_lseek,
753 	.read		= read_kmem,
754 	.write		= write_kmem,
755 	.mmap		= mmap_kmem,
756 	.open		= open_kmem,
757 	.get_unmapped_area = get_unmapped_area_mem,
758 };
759 #endif
760 
761 static const struct file_operations null_fops = {
762 	.llseek		= null_lseek,
763 	.read		= read_null,
764 	.write		= write_null,
765 	.aio_read	= aio_read_null,
766 	.aio_write	= aio_write_null,
767 	.splice_write	= splice_write_null,
768 };
769 
770 #ifdef CONFIG_DEVPORT
771 static const struct file_operations port_fops = {
772 	.llseek		= memory_lseek,
773 	.read		= read_port,
774 	.write		= write_port,
775 	.open		= open_port,
776 };
777 #endif
778 
779 static const struct file_operations zero_fops = {
780 	.llseek		= zero_lseek,
781 	.read		= read_zero,
782 	.write		= write_zero,
783 	.aio_read	= aio_read_zero,
784 	.aio_write	= aio_write_zero,
785 	.mmap		= mmap_zero,
786 };
787 
788 /*
789  * capabilities for /dev/zero
790  * - permits private mappings, "copies" are taken of the source of zeros
791  * - no writeback happens
792  */
793 static struct backing_dev_info zero_bdi = {
794 	.name		= "char/mem",
795 	.capabilities	= BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
796 };
797 
798 static const struct file_operations full_fops = {
799 	.llseek		= full_lseek,
800 	.read		= read_full,
801 	.write		= write_full,
802 };
803 
804 static const struct memdev {
805 	const char *name;
806 	umode_t mode;
807 	const struct file_operations *fops;
808 	struct backing_dev_info *dev_info;
809 } devlist[] = {
810 	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
811 #ifdef CONFIG_DEVKMEM
812 	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
813 #endif
814 	 [3] = { "null", 0666, &null_fops, NULL },
815 #ifdef CONFIG_DEVPORT
816 	 [4] = { "port", 0, &port_fops, NULL },
817 #endif
818 	 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
819 	 [7] = { "full", 0666, &full_fops, NULL },
820 	 [8] = { "random", 0666, &random_fops, NULL },
821 	 [9] = { "urandom", 0666, &urandom_fops, NULL },
822 #ifdef CONFIG_PRINTK
823 	[11] = { "kmsg", 0644, &kmsg_fops, NULL },
824 #endif
825 };
826 
827 static int memory_open(struct inode *inode, struct file *filp)
828 {
829 	int minor;
830 	const struct memdev *dev;
831 
832 	minor = iminor(inode);
833 	if (minor >= ARRAY_SIZE(devlist))
834 		return -ENXIO;
835 
836 	dev = &devlist[minor];
837 	if (!dev->fops)
838 		return -ENXIO;
839 
840 	filp->f_op = dev->fops;
841 	if (dev->dev_info)
842 		filp->f_mapping->backing_dev_info = dev->dev_info;
843 
844 	/* Is /dev/mem or /dev/kmem ? */
845 	if (dev->dev_info == &directly_mappable_cdev_bdi)
846 		filp->f_mode |= FMODE_UNSIGNED_OFFSET;
847 
848 	if (dev->fops->open)
849 		return dev->fops->open(inode, filp);
850 
851 	return 0;
852 }
853 
854 static const struct file_operations memory_fops = {
855 	.open = memory_open,
856 	.llseek = noop_llseek,
857 };
858 
859 static char *mem_devnode(struct device *dev, umode_t *mode)
860 {
861 	if (mode && devlist[MINOR(dev->devt)].mode)
862 		*mode = devlist[MINOR(dev->devt)].mode;
863 	return NULL;
864 }
865 
866 static struct class *mem_class;
867 
868 static int __init chr_dev_init(void)
869 {
870 	int minor;
871 	int err;
872 
873 	err = bdi_init(&zero_bdi);
874 	if (err)
875 		return err;
876 
877 	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
878 		printk("unable to get major %d for memory devs\n", MEM_MAJOR);
879 
880 	mem_class = class_create(THIS_MODULE, "mem");
881 	if (IS_ERR(mem_class))
882 		return PTR_ERR(mem_class);
883 
884 	mem_class->devnode = mem_devnode;
885 	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
886 		if (!devlist[minor].name)
887 			continue;
888 
889 		/*
890 		 * Create /dev/port?
891 		 */
892 		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
893 			continue;
894 
895 		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
896 			      NULL, devlist[minor].name);
897 	}
898 
899 	return tty_init();
900 }
901 
902 fs_initcall(chr_dev_init);
903