xref: /openbmc/linux/drivers/char/mem.c (revision b627b4ed)
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top of what the kernel knows about,
	 * or through a file descriptor that was opened O_SYNC, will be
	 * done non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif
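
/*
 * Note: with CONFIG_STRICT_DEVMEM, range_is_allowed() walks the request
 * one page frame at a time and asks the architecture (devmem_is_allowed())
 * whether that pfn may be touched through /dev/mem; typically this permits
 * only non-RAM (device) ranges plus a small amount of low memory.  Without
 * the option every range is allowed, subject only to file permissions and
 * the open-time CAP_SYS_RAWIO check in open_port()/open_mem().
 */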

void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads *physical* memory.  The file offset (f_pos) is the
 * physical address to read from.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
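		/*
		 * -p & (PAGE_SIZE - 1) is the distance from p up to the
		 * next page boundary (0 when p is already aligned).  A
		 * worked example with PAGE_SIZE = 4096: for p = 0x12345678,
		 * -p = 0xedcba988 and -p & 0xfff = 0x988, i.e. 2440 bytes
		 * remain in the current page; subsequent iterations then
		 * run a full page at a time.
		 */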
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		if (copy_to_user(buf, ptr, sz)) {
			unxlate_dev_mem_ptr(p, ptr);
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
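
/*
 * Example (user space, a minimal sketch): reading one 32-bit word of
 * physical memory through /dev/mem.  PHYS_ADDR is a placeholder for a
 * platform-specific address; under CONFIG_STRICT_DEVMEM the read fails
 * with EPERM if the page is not in the architecture's allowed set.
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	uint32_t val;
 *
 *	if (fd >= 0 &&
 *	    lseek(fd, PHYS_ADDR, SEEK_SET) != (off_t)-1 &&
 *	    read(fd, &val, sizeof(val)) == sizeof(val))
 *		printf("0x%08x\n", val);
 */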

static ssize_t write_mem(struct file * file, const char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* page 0 is not mapped: skip the write but report success */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			unxlate_dev_mem_ptr(p, ptr);
			if (written)
				break;
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
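
/*
 * Note the partial-write convention above: once any bytes have been
 * written, a later fault breaks out of the loop and the short count is
 * returned rather than -EFAULT, matching the usual write(2) semantics
 * where an error is only reported if no progress was made at all.
 */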

int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif
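
/*
 * On !CONFIG_MMU kernels the "mapping" of /dev/mem is direct: the file
 * offset is already a physical address, so get_unmapped_area_mem() simply
 * hands back pgoff << PAGE_SHIFT as the address to use.  Private
 * (copy-on-write) mappings cannot be faked without an MMU, hence the
 * VM_MAYSHARE requirement in private_mapping_ok().
 */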

void __attribute__((weak))
map_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
{
	/* nothing. architectures can override. */
}

void __attribute__((weak))
unmap_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
{
	/* nothing. architectures can override. */
}

static void mmap_mem_open(struct vm_area_struct *vma)
{
	map_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
			vma->vm_page_prot);
}

static void mmap_mem_close(struct vm_area_struct *vma)
{
	unmap_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
			vma->vm_page_prot);
}

static struct vm_operations_struct mmap_mem_ops = {
	.open  = mmap_mem_open,
	.close = mmap_mem_close,
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		unmap_devmem(vma->vm_pgoff, size, vma->vm_page_prot);
		return -EAGAIN;
	}
	return 0;
}
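
/*
 * Example (user space, a minimal sketch): mapping 4 KiB of physical
 * memory at PHYS_ADDR (a placeholder, assumed page-aligned) read-only.
 * mmap_mem() above turns the page offset back into a pfn for
 * remap_pfn_range(), and phys_mem_access_prot() may make the mapping
 * uncached, e.g. when the file was opened O_SYNC.
 *
 *	int fd = open("/dev/mem", O_RDONLY | O_SYNC);
 *	volatile uint32_t *regs = mmap(NULL, 4096, PROT_READ,
 *				       MAP_SHARED, fd, PHYS_ADDR);
 *
 *	if (regs != MAP_FAILED)
 *		printf("0x%08x\n", regs[0]);
 */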

#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#endif
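
/*
 * mmap_kmem() treats the mmap offset as a kernel-virtual address rather
 * than a physical one: the offset is converted back to bytes, run through
 * __pa() to obtain the physical address of the directly-mapped page, and
 * reduced to a pfn that mmap_mem() can remap.  Only the linear (lowmem)
 * mapping can be reached this way; vmalloc space has no contiguous
 * physical backing, which is why it is unsupported here.
 */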

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif
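
/*
 * /dev/oldmem is only useful in a kdump ("capture") kernel: after a crash
 * the new kernel boots in a reserved region while the old kernel's RAM is
 * left untouched, and copy_oldmem_page() copies it out one page at a time
 * (the final argument requests a userspace copy).  Reads stop quietly at
 * saved_max_pfn, the highest page frame of the crashed kernel.
 */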

#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count)
				tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;

			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}
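
/*
 * read_kmem() works in two phases: addresses below high_memory belong to
 * the kernel's linear mapping and are copied out directly, while anything
 * above is assumed to be vmalloc space and is pulled through vread() into
 * a bounce page first.  The bounce buffer is needed because vread() must
 * write to a kernel address while holding vmlist_lock; copying straight
 * into a user buffer there could fault and sleep under the lock.
 */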


static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* page 0 is not mapped: skip the write but report success */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}


/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {

		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void*)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					if (wrote + virtr)
						break;
					free_page((unsigned long)kbuf);
					return -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}
#endif

#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}
#endif
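
/*
 * /dev/port exposes the x86-style I/O port space as a 64 KiB file: the
 * file offset selects the port and each byte is transferred with a single
 * inb()/outb().  Example (user space, a minimal sketch, x86 CMOS ports
 * assumed) reading the RTC seconds register via ports 0x70/0x71; this is
 * a root-only operation, since open_port() requires CAP_SYS_RAWIO:
 *
 *	int fd = open("/dev/port", O_RDWR);
 *	unsigned char idx = 0x00, sec;
 *
 *	pwrite(fd, &idx, 1, 0x70);	// select CMOS register 0
 *	pread(fd, &sec, 1, 0x71);	// read the seconds value
 */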

static ssize_t read_null(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_zero(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}
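
/*
 * read_zero() clears the user buffer at most one page per iteration and
 * calls cond_resched() between chunks, so a huge read from /dev/zero
 * cannot monopolize the CPU; the chunking exists purely for scheduling
 * latency, not correctness.
 */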

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}

static ssize_t write_full(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */

static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, so negative
 * offsets are legal addresses here and cannot be rejected, even though
 * the resulting file position can look like an error value to userspace
 * (hence force_successful_syscall_return() below).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
767 {
768 	loff_t ret;
769 
770 	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
771 	switch (orig) {
772 		case 0:
773 			file->f_pos = offset;
774 			ret = file->f_pos;
775 			force_successful_syscall_return();
776 			break;
777 		case 1:
778 			file->f_pos += offset;
779 			ret = file->f_pos;
780 			force_successful_syscall_return();
781 			break;
782 		default:
783 			ret = -EINVAL;
784 	}
785 	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
786 	return ret;
787 }
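
/*
 * force_successful_syscall_return() matters on architectures (e.g. ia64,
 * powerpc) where the syscall exit path would otherwise interpret a large
 * "negative" file position as an errno and fail the lseek; it tells that
 * path the return value is a genuine success even if it looks negative.
 */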

static int open_port(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem
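
/*
 * The aliases above share trivial implementations between devices:
 * /dev/zero and /dev/full seek like /dev/null (position pinned at 0),
 * writing /dev/zero discards data like /dev/null, reading /dev/full
 * yields zeroes, and /dev/mem, /dev/kmem and /dev/oldmem all reuse the
 * CAP_SYS_RAWIO check from open_port().
 */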

static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.splice_write	= splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
	.capabilities	= BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read	= read_oldmem,
	.open	= open_oldmem,
};
#endif

static ssize_t kmsg_write(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
		if (ret > count)
			/* printk can add a prefix */
			ret = count;
	}
	kfree(tmp);
	return ret;
}

static const struct file_operations kmsg_fops = {
	.write =	kmsg_write,
};

static int memory_open(struct inode * inode, struct file * filp)
{
	int ret = 0;

	lock_kernel();
	switch (iminor(inode)) {
		case 1:
			filp->f_op = &mem_fops;
			filp->f_mapping->backing_dev_info =
				&directly_mappable_cdev_bdi;
			break;
#ifdef CONFIG_DEVKMEM
		case 2:
			filp->f_op = &kmem_fops;
			filp->f_mapping->backing_dev_info =
				&directly_mappable_cdev_bdi;
			break;
#endif
		case 3:
			filp->f_op = &null_fops;
			break;
#ifdef CONFIG_DEVPORT
		case 4:
			filp->f_op = &port_fops;
			break;
#endif
		case 5:
			filp->f_mapping->backing_dev_info = &zero_bdi;
			filp->f_op = &zero_fops;
			break;
		case 7:
			filp->f_op = &full_fops;
			break;
		case 8:
			filp->f_op = &random_fops;
			break;
		case 9:
			filp->f_op = &urandom_fops;
			break;
		case 11:
			filp->f_op = &kmsg_fops;
			break;
#ifdef CONFIG_CRASH_DUMP
		case 12:
			filp->f_op = &oldmem_fops;
			break;
#endif
		default:
			unlock_kernel();
			return -ENXIO;
	}
	if (filp->f_op && filp->f_op->open)
		ret = filp->f_op->open(inode, filp);
	unlock_kernel();
	return ret;
}
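
/*
 * memory_open() is only a dispatcher: a single char major (MEM_MAJOR, 1)
 * is registered and this routine swaps in the real file_operations based
 * on the minor number, then chains to that device's own open().  It runs
 * under the big kernel lock (lock_kernel()), as this code predates
 * per-device open locking.
 */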

static const struct file_operations memory_fops = {
	.open		= memory_open,	/* just a selector for the real open */
};

static const struct {
	unsigned int		minor;
	char			*name;
	umode_t			mode;
	const struct file_operations	*fops;
} devlist[] = { /* list of minor devices */
	{1,  "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
#ifdef CONFIG_DEVKMEM
	{2,  "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
#endif
	{3,  "null",    S_IRUGO | S_IWUGO,           &null_fops},
#ifdef CONFIG_DEVPORT
	{4,  "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
	{5,  "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
	{7,  "full",    S_IRUGO | S_IWUGO,           &full_fops},
	{8,  "random",  S_IRUGO | S_IWUSR,           &random_fops},
	{9,  "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
	{11, "kmsg",    S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
	{12, "oldmem",  S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int i;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk(KERN_ERR "unable to get major %d for memory devs\n",
		       MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	for (i = 0; i < ARRAY_SIZE(devlist); i++)
		device_create(mem_class, NULL,
			      MKDEV(MEM_MAJOR, devlist[i].minor), NULL,
			      devlist[i].name);

	return 0;
}

fs_initcall(chr_dev_init);
1000