xref: /openbmc/linux/drivers/char/mem.c (revision 22246614)
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top the kernel knows about, or
	 * through a file pointer that was marked O_SYNC, is done
	 * non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
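
/*
 * Illustrative userspace sketch (not part of this driver): how a program
 * would request the non-cached path above on architectures that honour
 * O_SYNC.  The physical address 0x000e0000 is only a hypothetical
 * location chosen for the example; root privileges are assumed.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		void *p;
 *		int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *
 *		if (fd < 0)
 *			return 1;
 *		// With O_SYNC set, phys_mem_access_prot() below applies
 *		// pgprot_noncached() to the mapping.
 *		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0x000e0000);
 *		if (p != MAP_FAILED)
 *			munmap(p, 4096);
 *		close(fd);
 *		return 0;
 *	}
 */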

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_NONPROMISC_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/* check the current chunk only, matching write_mem() below */
		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		if (copy_to_user(buf, ptr, sz)) {
			unxlate_dev_mem_ptr(p, ptr);
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
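
/*
 * Illustrative userspace sketch (not part of this driver): reading the
 * first bytes of physical memory through read_mem() above.  Assumes root
 * privileges; with CONFIG_NONPROMISC_DEVMEM the range must also pass
 * devmem_is_allowed().
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		unsigned char b[16];
 *		int fd = open("/dev/mem", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (read(fd, b, sizeof(b)) == (ssize_t)sizeof(b))
 *			printf("%02x %02x ...\n", b[0], b[1]);
 *		close(fd);
 *		return 0;
 *	}
 */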

static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			unxlate_dev_mem_ptr(p, ptr);
			if (written)
				break;
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

void __attribute__((weak))
map_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
{
	/* nothing. architectures can override. */
}

void __attribute__((weak))
unmap_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
{
	/* nothing. architectures can override. */
}

static void mmap_mem_open(struct vm_area_struct *vma)
{
	map_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
			vma->vm_page_prot);
}

static void mmap_mem_close(struct vm_area_struct *vma)
{
	unmap_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
			vma->vm_page_prot);
}

static struct vm_operations_struct mmap_mem_ops = {
	.open  = mmap_mem_open,
	.close = mmap_mem_close
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* remap_pfn_range() will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		unmap_devmem(vma->vm_pgoff, size, vma->vm_page_prot);
		return -EAGAIN;
	}
	return 0;
}
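
/*
 * Illustrative userspace sketch (not part of this driver): mapping one
 * page of physical memory via mmap_mem() above.  PHYS_ADDR is a
 * hypothetical page-aligned physical address; the usual caveats about
 * root privileges and CONFIG_NONPROMISC_DEVMEM apply.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	#define PHYS_ADDR 0x000a0000UL
 *
 *	int main(void)
 *	{
 *		volatile unsigned char *p;
 *		int fd = open("/dev/mem", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, PHYS_ADDR);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		printf("first byte: %02x\n", p[0]);
 *		munmap((void *)p, 4096);
 *		close(fd);
 *		return 0;
 *	}
 */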

#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif
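
/*
 * Illustrative userspace sketch (not part of this driver): in a kdump
 * capture kernel, read_oldmem() above lets the crashed kernel's memory
 * be copied out page by page.  The dump file name is arbitrary.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char page[4096];
 *		ssize_t n;
 *		int in = open("/dev/oldmem", O_RDONLY);
 *		int out = open("oldmem.dump", O_WRONLY | O_CREAT, 0600);
 *
 *		if (in < 0 || out < 0)
 *			return 1;
 *		while ((n = read(in, page, sizeof(page))) > 0)
 *			write(out, page, n);
 *		close(in);
 *		close(out);
 *		return 0;
 *	}
 */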

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count)
				tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;

			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}

static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user *buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {

		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void *)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					if (wrote + virtr)
						break;
					free_page((unsigned long)kbuf);
					return -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}
#endif
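
/*
 * Illustrative userspace sketch (not part of this driver): peeking at a
 * kernel virtual address through read_kmem() above.  The address would
 * typically come from /proc/kallsyms; the one used here is purely
 * hypothetical, and root privileges are assumed.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		unsigned long addr = 0xc0100000UL;	// hypothetical
 *		unsigned char b[4];
 *		int fd = open("/dev/kmem", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (lseek(fd, addr, SEEK_SET) == (off_t)-1)
 *			return 1;
 *		if (read(fd, b, sizeof(b)) == (ssize_t)sizeof(b))
 *			printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
 *		close(fd);
 *		return 0;
 *	}
 */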

#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}
#endif
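
/*
 * Illustrative userspace sketch (not part of this driver): reading an
 * x86 I/O port through read_port() above.  Port 0x61 (system control
 * port B) is just an example; open_port() requires CAP_SYS_RAWIO.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		unsigned char v;
 *		int fd = open("/dev/port", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (lseek(fd, 0x61, SEEK_SET) == (off_t)-1)
 *			return 1;
 *		if (read(fd, &v, 1) == 1)
 *			printf("port 0x61: %02x\n", v);
 *		close(fd);
 *		return 0;
 *	}
 */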

static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_zero(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}
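
/*
 * Illustrative userspace sketch (not part of this driver): read_zero()
 * above is what makes the classic "dd if=/dev/zero" idiom work.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		int fd = open("/dev/zero", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		// Every byte read comes back as zero.
 *		read(fd, buf, sizeof(buf));
 *		close(fd);
 *		return buf[0];	// always 0 on success
 *	}
 */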

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */

static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}
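
/*
 * Illustrative userspace sketch (not part of this driver): the fopen()
 * append mode mentioned above works because null_lseek() always succeeds
 * and pins the position at 0.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/dev/null", "a");	// "a" implies a seek
 *
 *		if (!f)
 *			return 1;
 *		fputs("discarded\n", f);
 *		fclose(f);
 *		return 0;
 *	}
 */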

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (orig) {
		case 0:
			file->f_pos = offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		case 1:
			file->f_pos += offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		default:
			ret = -EINVAL;
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return ret;
}
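
/*
 * Illustrative userspace sketch (not part of this driver): positioning
 * /dev/mem with memory_lseek() above before reading a hypothetical
 * physical address.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		unsigned int v;
 *		int fd = open("/dev/mem", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		// SEEK_END would return -EINVAL, as documented above.
 *		if (lseek(fd, 0x1000, SEEK_SET) == (off_t)-1)
 *			return 1;
 *		read(fd, &v, sizeof(v));
 *		close(fd);
 *		return 0;
 *	}
 */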

static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.splice_write	= splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
	.capabilities	= BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read	= read_oldmem,
	.open	= open_oldmem,
};
#endif

static ssize_t kmsg_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
		if (ret > count)
			/* printk can add a prefix */
			ret = count;
	}
	kfree(tmp);
	return ret;
}
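
/*
 * Illustrative userspace sketch (not part of this driver): injecting a
 * line into the kernel log through kmsg_write() above.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *msg = "hello from userspace\n";
 *		int fd = open("/dev/kmsg", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, msg, strlen(msg));	// shows up in dmesg
 *		close(fd);
 *		return 0;
 *	}
 */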

static const struct file_operations kmsg_fops = {
	.write =	kmsg_write,
};

static int memory_open(struct inode *inode, struct file *filp)
{
	switch (iminor(inode)) {
		case 1:
			filp->f_op = &mem_fops;
			filp->f_mapping->backing_dev_info =
				&directly_mappable_cdev_bdi;
			break;
#ifdef CONFIG_DEVKMEM
		case 2:
			filp->f_op = &kmem_fops;
			filp->f_mapping->backing_dev_info =
				&directly_mappable_cdev_bdi;
			break;
#endif
		case 3:
			filp->f_op = &null_fops;
			break;
#ifdef CONFIG_DEVPORT
		case 4:
			filp->f_op = &port_fops;
			break;
#endif
		case 5:
			filp->f_mapping->backing_dev_info = &zero_bdi;
			filp->f_op = &zero_fops;
			break;
		case 7:
			filp->f_op = &full_fops;
			break;
		case 8:
			filp->f_op = &random_fops;
			break;
		case 9:
			filp->f_op = &urandom_fops;
			break;
		case 11:
			filp->f_op = &kmsg_fops;
			break;
#ifdef CONFIG_CRASH_DUMP
		case 12:
			filp->f_op = &oldmem_fops;
			break;
#endif
		default:
			return -ENXIO;
	}
	if (filp->f_op && filp->f_op->open)
		return filp->f_op->open(inode, filp);
	return 0;
}

static const struct file_operations memory_fops = {
	.open		= memory_open,	/* just a selector for the real open */
};

static const struct {
	unsigned int		minor;
	char			*name;
	umode_t			mode;
	const struct file_operations	*fops;
} devlist[] = { /* list of minor devices */
	{1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
#ifdef CONFIG_DEVKMEM
	{2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
#endif
	{3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#ifdef CONFIG_DEVPORT
	{4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
	{5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
	{7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
	{8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
	{9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
	{11, "kmsg",   S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
	{12, "oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};
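
/*
 * Illustrative userspace sketch (not part of this driver): on a system
 * without udev, the nodes in devlist[] above would be created by hand
 * with MEM_MAJOR (1) and the minors listed there.  Requires root.
 *
 *	#include <sys/stat.h>
 *	#include <sys/sysmacros.h>
 *
 *	int main(void)
 *	{
 *		// major 1 is MEM_MAJOR; minors match devlist[] above
 *		mknod("/dev/null", S_IFCHR | 0666, makedev(1, 3));
 *		mknod("/dev/zero", S_IFCHR | 0666, makedev(1, 5));
 *		mknod("/dev/urandom", S_IFCHR | 0644, makedev(1, 9));
 *		return 0;
 *	}
 */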

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int i;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk(KERN_ERR "unable to get major %d for memory devs\n",
		       MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	for (i = 0; i < ARRAY_SIZE(devlist); i++)
		device_create(mem_class, NULL,
			      MKDEV(MEM_MAJOR, devlist[i].minor),
			      devlist[i].name);

	return 0;
}

fs_initcall(chr_dev_init);