1 /*
2  *  linux/drivers/char/mem.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  Added devfs support.
7  *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
8  *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
9  */
10 
11 #include <linux/mm.h>
12 #include <linux/miscdevice.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
15 #include <linux/mman.h>
16 #include <linux/random.h>
17 #include <linux/init.h>
18 #include <linux/raw.h>
19 #include <linux/tty.h>
20 #include <linux/capability.h>
21 #include <linux/ptrace.h>
22 #include <linux/device.h>
23 #include <linux/highmem.h>
24 #include <linux/crash_dump.h>
25 #include <linux/backing-dev.h>
26 #include <linux/bootmem.h>
27 #include <linux/splice.h>
28 #include <linux/pfn.h>
29 #include <linux/export.h>
30 
31 #include <asm/uaccess.h>
32 #include <asm/io.h>
33 
34 #ifdef CONFIG_IA64
35 # include <linux/efi.h>
36 #endif
37 
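/*
 * Return the number of bytes from @start up to the end of the page it
 * lies in, capped at @size, so callers never cross a page boundary in a
 * single copy.
 */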
38 static inline unsigned long size_inside_page(unsigned long start,
39 					     unsigned long size)
40 {
41 	unsigned long sz;
42 
43 	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
44 
45 	return min(sz, size);
46 }
47 
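/*
 * Default checks for architectures that do not define
 * ARCH_HAS_VALID_PHYS_ADDR_RANGE: reads and writes must stay below
 * high_memory, and any pfn range may be mmap()ed.
 */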
48 #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
49 static inline int valid_phys_addr_range(unsigned long addr, size_t count)
50 {
51 	return addr + count <= __pa(high_memory);
52 }
53 
54 static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
55 {
56 	return 1;
57 }
58 #endif
59 
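/*
 * With CONFIG_STRICT_DEVMEM the range is checked one page at a time and
 * the architecture can veto access via devmem_is_allowed(); without it,
 * every range is allowed.
 */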
60 #ifdef CONFIG_STRICT_DEVMEM
61 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
62 {
63 	u64 from = ((u64)pfn) << PAGE_SHIFT;
64 	u64 to = from + size;
65 	u64 cursor = from;
66 
67 	while (cursor < to) {
68 		if (!devmem_is_allowed(pfn)) {
69 			printk(KERN_INFO
70 		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
71 				current->comm, from, to);
72 			return 0;
73 		}
74 		cursor += PAGE_SIZE;
75 		pfn++;
76 	}
77 	return 1;
78 }
79 #else
80 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
81 {
82 	return 1;
83 }
84 #endif
85 
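/*
 * Weak counterpart to xlate_dev_mem_ptr(): architectures that set up a
 * temporary mapping there can override this to tear it down again.  The
 * default is a no-op.
 */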
86 void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
87 {
88 }
89 
90 /*
91  * This function reads the *physical* memory. The f_pos points directly to the
92  * memory location.
93  */
94 static ssize_t read_mem(struct file *file, char __user *buf,
95 			size_t count, loff_t *ppos)
96 {
97 	unsigned long p = *ppos;
98 	ssize_t read, sz;
99 	char *ptr;
100 
101 	if (!valid_phys_addr_range(p, count))
102 		return -EFAULT;
103 	read = 0;
104 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
105 	/* we don't have page 0 mapped on sparc and m68k.. */
106 	if (p < PAGE_SIZE) {
107 		sz = size_inside_page(p, count);
108 		if (sz > 0) {
109 			if (clear_user(buf, sz))
110 				return -EFAULT;
111 			buf += sz;
112 			p += sz;
113 			count -= sz;
114 			read += sz;
115 		}
116 	}
117 #endif
118 
119 	while (count > 0) {
120 		unsigned long remaining;
121 
122 		sz = size_inside_page(p, count);
123 
124 		if (!range_is_allowed(p >> PAGE_SHIFT, count))
125 			return -EPERM;
126 
127 		/*
128 		 * On ia64 if a page has been mapped somewhere as uncached, then
129 		 * it must also be accessed uncached by the kernel or data
130 		 * corruption may occur.
131 		 */
132 		ptr = xlate_dev_mem_ptr(p);
133 		if (!ptr)
134 			return -EFAULT;
135 
136 		remaining = copy_to_user(buf, ptr, sz);
137 		unxlate_dev_mem_ptr(p, ptr);
138 		if (remaining)
139 			return -EFAULT;
140 
141 		buf += sz;
142 		p += sz;
143 		count -= sz;
144 		read += sz;
145 	}
146 
147 	*ppos += read;
148 	return read;
149 }
150 
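/*
 * Write to *physical* memory; f_pos is the physical address, mirroring
 * read_mem() above.
 */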
151 static ssize_t write_mem(struct file *file, const char __user *buf,
152 			 size_t count, loff_t *ppos)
153 {
154 	unsigned long p = *ppos;
155 	ssize_t written, sz;
156 	unsigned long copied;
157 	void *ptr;
158 
159 	if (!valid_phys_addr_range(p, count))
160 		return -EFAULT;
161 
162 	written = 0;
163 
164 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
165 	/* we don't have page 0 mapped on sparc and m68k.. */
166 	if (p < PAGE_SIZE) {
167 		sz = size_inside_page(p, count);
168 		/* Hmm. Do something? */
169 		buf += sz;
170 		p += sz;
171 		count -= sz;
172 		written += sz;
173 	}
174 #endif
175 
176 	while (count > 0) {
177 		sz = size_inside_page(p, count);
178 
179 		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
180 			return -EPERM;
181 
182 		/*
183 		 * On ia64 if a page has been mapped somewhere as uncached, then
184 		 * it must also be accessed uncached by the kernel or data
185 		 * corruption may occur.
186 		 */
187 		ptr = xlate_dev_mem_ptr(p);
188 		if (!ptr) {
189 			if (written)
190 				break;
191 			return -EFAULT;
192 		}
193 
194 		copied = copy_from_user(ptr, buf, sz);
195 		unxlate_dev_mem_ptr(p, ptr);
196 		if (copied) {
197 			written += sz - copied;
198 			if (written)
199 				break;
200 			return -EFAULT;
201 		}
202 
203 		buf += sz;
204 		p += sz;
205 		count -= sz;
206 		written += sz;
207 	}
208 
209 	*ppos += written;
210 	return written;
211 }
212 
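/*
 * Weak hook that lets an architecture veto or adjust the page protection
 * used to mmap() /dev/mem; the default allows everything unchanged.
 */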
213 int __weak phys_mem_access_prot_allowed(struct file *file,
214 	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
215 {
216 	return 1;
217 }
218 
219 #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
220 
221 /*
222  * Architectures vary in how they handle caching for addresses
223  * outside of main memory.
224  *
225  */
226 #ifdef pgprot_noncached
227 static int uncached_access(struct file *file, unsigned long addr)
228 {
229 #if defined(CONFIG_IA64)
230 	/*
231 	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
232 	 * attribute aliases.
233 	 */
234 	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
235 #elif defined(CONFIG_MIPS)
236 	{
237 		extern int __uncached_access(struct file *file,
238 					     unsigned long addr);
239 
240 		return __uncached_access(file, addr);
241 	}
242 #else
243 	/*
244 	 * Accessing memory above the top of memory that the kernel knows
245 	 * about, or through a file opened with O_DSYNC, will be done
246 	 * non-cached.
247 	 */
248 	if (file->f_flags & O_DSYNC)
249 		return 1;
250 	return addr >= __pa(high_memory);
251 #endif
252 }
253 #endif
254 
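/*
 * Fallback pgprot selection: make the mapping non-cached whenever
 * uncached_access() says the target address should not be cached.
 */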
255 static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
256 				     unsigned long size, pgprot_t vma_prot)
257 {
258 #ifdef pgprot_noncached
259 	unsigned long offset = pfn << PAGE_SHIFT;
260 
261 	if (uncached_access(file, offset))
262 		return pgprot_noncached(vma_prot);
263 #endif
264 	return vma_prot;
265 }
266 #endif
267 
268 #ifndef CONFIG_MMU
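/*
 * Without an MMU, /dev/mem can only be mapped directly: the "unmapped
 * area" is simply the physical address, and private (copy-on-write)
 * mappings are refused.
 */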
269 static unsigned long get_unmapped_area_mem(struct file *file,
270 					   unsigned long addr,
271 					   unsigned long len,
272 					   unsigned long pgoff,
273 					   unsigned long flags)
274 {
275 	if (!valid_mmap_phys_addr_range(pgoff, len))
276 		return (unsigned long) -EINVAL;
277 	return pgoff << PAGE_SHIFT;
278 }
279 
280 /* can't do an in-place private mapping if there's no MMU */
281 static inline int private_mapping_ok(struct vm_area_struct *vma)
282 {
283 	return vma->vm_flags & VM_MAYSHARE;
284 }
285 #else
286 #define get_unmapped_area_mem	NULL
287 
288 static inline int private_mapping_ok(struct vm_area_struct *vma)
289 {
290 	return 1;
291 }
292 #endif
293 
294 static const struct vm_operations_struct mmap_mem_ops = {
295 #ifdef CONFIG_HAVE_IOREMAP_PROT
296 	.access = generic_access_phys
297 #endif
298 };
299 
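/*
 * mmap() of /dev/mem: validate the physical range, apply any
 * architecture-specific protection bits, then map the range into
 * userspace with remap_pfn_range().
 */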
300 static int mmap_mem(struct file *file, struct vm_area_struct *vma)
301 {
302 	size_t size = vma->vm_end - vma->vm_start;
303 
304 	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
305 		return -EINVAL;
306 
307 	if (!private_mapping_ok(vma))
308 		return -ENOSYS;
309 
310 	if (!range_is_allowed(vma->vm_pgoff, size))
311 		return -EPERM;
312 
313 	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
314 						&vma->vm_page_prot))
315 		return -EINVAL;
316 
317 	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
318 						 size,
319 						 vma->vm_page_prot);
320 
321 	vma->vm_ops = &mmap_mem_ops;
322 
323 	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
324 	if (remap_pfn_range(vma,
325 			    vma->vm_start,
326 			    vma->vm_pgoff,
327 			    size,
328 			    vma->vm_page_prot)) {
329 		return -EAGAIN;
330 	}
331 	return 0;
332 }
333 
334 #ifdef CONFIG_DEVKMEM
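/*
 * mmap() of /dev/kmem: convert the kernel-virtual offset to a physical
 * page frame and reuse mmap_mem().
 */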
335 static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
336 {
337 	unsigned long pfn;
338 
339 	/* Turn a kernel-virtual address into a physical page frame */
340 	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
341 
342 	/*
343 	 * RED-PEN: on some architectures there is more mapped memory than
344 	 * available in mem_map which pfn_valid checks for. Perhaps should add a
345 	 * new macro here.
346 	 *
347 	 * RED-PEN: vmalloc is not supported right now.
348 	 */
349 	if (!pfn_valid(pfn))
350 		return -EIO;
351 
352 	vma->vm_pgoff = pfn;
353 	return mmap_mem(file, vma);
354 }
355 #endif
356 
357 #ifdef CONFIG_CRASH_DUMP
358 /*
359  * Read memory corresponding to the old kernel.
360  */
361 static ssize_t read_oldmem(struct file *file, char __user *buf,
362 				size_t count, loff_t *ppos)
363 {
364 	unsigned long pfn, offset;
365 	size_t read = 0, csize;
366 	int rc = 0;
367 
368 	while (count) {
369 		pfn = *ppos / PAGE_SIZE;
370 		if (pfn > saved_max_pfn)
371 			return read;
372 
373 		offset = (unsigned long)(*ppos % PAGE_SIZE);
374 		if (count > PAGE_SIZE - offset)
375 			csize = PAGE_SIZE - offset;
376 		else
377 			csize = count;
378 
379 		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
380 		if (rc < 0)
381 			return rc;
382 		buf += csize;
383 		*ppos += csize;
384 		read += csize;
385 		count -= csize;
386 	}
387 	return read;
388 }
389 #endif
390 
391 #ifdef CONFIG_DEVKMEM
392 /*
393  * This function reads the *virtual* memory as seen by the kernel.
394  */
395 static ssize_t read_kmem(struct file *file, char __user *buf,
396 			 size_t count, loff_t *ppos)
397 {
398 	unsigned long p = *ppos;
399 	ssize_t low_count, read, sz;
400 	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
401 	int err = 0;
402 
403 	read = 0;
404 	if (p < (unsigned long) high_memory) {
405 		low_count = count;
406 		if (count > (unsigned long)high_memory - p)
407 			low_count = (unsigned long)high_memory - p;
408 
409 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
410 		/* we don't have page 0 mapped on sparc and m68k.. */
411 		if (p < PAGE_SIZE && low_count > 0) {
412 			sz = size_inside_page(p, low_count);
413 			if (clear_user(buf, sz))
414 				return -EFAULT;
415 			buf += sz;
416 			p += sz;
417 			read += sz;
418 			low_count -= sz;
419 			count -= sz;
420 		}
421 #endif
422 		while (low_count > 0) {
423 			sz = size_inside_page(p, low_count);
424 
425 			/*
426 			 * On ia64 if a page has been mapped somewhere as
427 			 * uncached, then it must also be accessed uncached
428 			 * by the kernel or data corruption may occur
429 			 */
430 			kbuf = xlate_dev_kmem_ptr((char *)p);
431 
432 			if (copy_to_user(buf, kbuf, sz))
433 				return -EFAULT;
434 			buf += sz;
435 			p += sz;
436 			read += sz;
437 			low_count -= sz;
438 			count -= sz;
439 		}
440 	}
441 
442 	if (count > 0) {
443 		kbuf = (char *)__get_free_page(GFP_KERNEL);
444 		if (!kbuf)
445 			return -ENOMEM;
446 		while (count > 0) {
447 			sz = size_inside_page(p, count);
448 			if (!is_vmalloc_or_module_addr((void *)p)) {
449 				err = -ENXIO;
450 				break;
451 			}
452 			sz = vread(kbuf, (char *)p, sz);
453 			if (!sz)
454 				break;
455 			if (copy_to_user(buf, kbuf, sz)) {
456 				err = -EFAULT;
457 				break;
458 			}
459 			count -= sz;
460 			buf += sz;
461 			read += sz;
462 			p += sz;
463 		}
464 		free_page((unsigned long)kbuf);
465 	}
466 	*ppos = p;
467 	return read ? read : err;
468 }
469 
470 
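/*
 * Write to the directly mapped (low) part of kernel memory, one page at
 * a time, on behalf of write_kmem().
 */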
471 static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
472 				size_t count, loff_t *ppos)
473 {
474 	ssize_t written, sz;
475 	unsigned long copied;
476 
477 	written = 0;
478 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
479 	/* we don't have page 0 mapped on sparc and m68k.. */
480 	if (p < PAGE_SIZE) {
481 		sz = size_inside_page(p, count);
482 		/* Hmm. Do something? */
483 		buf += sz;
484 		p += sz;
485 		count -= sz;
486 		written += sz;
487 	}
488 #endif
489 
490 	while (count > 0) {
491 		char *ptr;
492 
493 		sz = size_inside_page(p, count);
494 
495 		/*
496 		 * On ia64 if a page has been mapped somewhere as uncached, then
497 		 * it must also be accessed uncached by the kernel or data
498 		 * corruption may occur.
499 		 */
500 		ptr = xlate_dev_kmem_ptr((char *)p);
501 
502 		copied = copy_from_user(ptr, buf, sz);
503 		if (copied) {
504 			written += sz - copied;
505 			if (written)
506 				break;
507 			return -EFAULT;
508 		}
509 		buf += sz;
510 		p += sz;
511 		count -= sz;
512 		written += sz;
513 	}
514 
515 	*ppos += written;
516 	return written;
517 }
518 
519 /*
520  * This function writes to the *virtual* memory as seen by the kernel.
521  */
522 static ssize_t write_kmem(struct file *file, const char __user *buf,
523 			  size_t count, loff_t *ppos)
524 {
525 	unsigned long p = *ppos;
526 	ssize_t wrote = 0;
527 	ssize_t virtr = 0;
528 	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
529 	int err = 0;
530 
531 	if (p < (unsigned long) high_memory) {
532 		unsigned long to_write = min_t(unsigned long, count,
533 					       (unsigned long)high_memory - p);
534 		wrote = do_write_kmem(p, buf, to_write, ppos);
535 		if (wrote != to_write)
536 			return wrote;
537 		p += wrote;
538 		buf += wrote;
539 		count -= wrote;
540 	}
541 
542 	if (count > 0) {
543 		kbuf = (char *)__get_free_page(GFP_KERNEL);
544 		if (!kbuf)
545 			return wrote ? wrote : -ENOMEM;
546 		while (count > 0) {
547 			unsigned long sz = size_inside_page(p, count);
548 			unsigned long n;
549 
550 			if (!is_vmalloc_or_module_addr((void *)p)) {
551 				err = -ENXIO;
552 				break;
553 			}
554 			n = copy_from_user(kbuf, buf, sz);
555 			if (n) {
556 				err = -EFAULT;
557 				break;
558 			}
559 			vwrite(kbuf, (char *)p, sz);
560 			count -= sz;
561 			buf += sz;
562 			virtr += sz;
563 			p += sz;
564 		}
565 		free_page((unsigned long)kbuf);
566 	}
567 
568 	*ppos = p;
569 	return virtr + wrote ? : err;
570 }
571 #endif
572 
573 #ifdef CONFIG_DEVPORT
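/*
 * /dev/port: read I/O port space byte by byte with inb(), using the file
 * offset as the port number.
 */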
574 static ssize_t read_port(struct file *file, char __user *buf,
575 			 size_t count, loff_t *ppos)
576 {
577 	unsigned long i = *ppos;
578 	char __user *tmp = buf;
579 
580 	if (!access_ok(VERIFY_WRITE, buf, count))
581 		return -EFAULT;
582 	while (count-- > 0 && i < 65536) {
583 		if (__put_user(inb(i), tmp) < 0)
584 			return -EFAULT;
585 		i++;
586 		tmp++;
587 	}
588 	*ppos = i;
589 	return tmp-buf;
590 }
591 
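/*
 * /dev/port: write the user's bytes to consecutive I/O ports with outb().
 */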
592 static ssize_t write_port(struct file *file, const char __user *buf,
593 			  size_t count, loff_t *ppos)
594 {
595 	unsigned long i = *ppos;
596 	const char __user * tmp = buf;
597 
598 	if (!access_ok(VERIFY_READ, buf, count))
599 		return -EFAULT;
600 	while (count-- > 0 && i < 65536) {
601 		char c;
602 		if (__get_user(c, tmp)) {
603 			if (tmp > buf)
604 				break;
605 			return -EFAULT;
606 		}
607 		outb(c, i);
608 		i++;
609 		tmp++;
610 	}
611 	*ppos = i;
612 	return tmp-buf;
613 }
614 #endif
615 
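/*
 * /dev/null: reads return EOF, writes succeed but discard the data, and
 * spliced data is consumed by pipe_to_null() below.
 */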
616 static ssize_t read_null(struct file *file, char __user *buf,
617 			 size_t count, loff_t *ppos)
618 {
619 	return 0;
620 }
621 
622 static ssize_t write_null(struct file *file, const char __user *buf,
623 			  size_t count, loff_t *ppos)
624 {
625 	return count;
626 }
627 
628 static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
629 			struct splice_desc *sd)
630 {
631 	return sd->len;
632 }
633 
634 static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
635 				 loff_t *ppos, size_t len, unsigned int flags)
636 {
637 	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
638 }
639 
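/*
 * /dev/zero: zero-fill the user buffer one page at a time, checking for
 * pending signals between chunks.
 */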
640 static ssize_t read_zero(struct file *file, char __user *buf,
641 			 size_t count, loff_t *ppos)
642 {
643 	size_t written;
644 
645 	if (!count)
646 		return 0;
647 
648 	if (!access_ok(VERIFY_WRITE, buf, count))
649 		return -EFAULT;
650 
651 	written = 0;
652 	while (count) {
653 		unsigned long unwritten;
654 		size_t chunk = count;
655 
656 		if (chunk > PAGE_SIZE)
657 			chunk = PAGE_SIZE;	/* Just for latency reasons */
658 		unwritten = __clear_user(buf, chunk);
659 		written += chunk - unwritten;
660 		if (unwritten)
661 			break;
662 		if (signal_pending(current))
663 			return written ? written : -ERESTARTSYS;
664 		buf += chunk;
665 		count -= chunk;
666 		cond_resched();
667 	}
668 	return written ? written : -EFAULT;
669 }
670 
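/*
 * mmap() of /dev/zero: shared mappings are backed by shmem via
 * shmem_zero_setup(); private mappings become ordinary anonymous memory.
 * Not supported without an MMU.
 */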
671 static int mmap_zero(struct file *file, struct vm_area_struct *vma)
672 {
673 #ifndef CONFIG_MMU
674 	return -ENOSYS;
675 #endif
676 	if (vma->vm_flags & VM_SHARED)
677 		return shmem_zero_setup(vma);
678 	return 0;
679 }
680 
681 static ssize_t write_full(struct file *file, const char __user *buf,
682 			  size_t count, loff_t *ppos)
683 {
684 	return -ENOSPC;
685 }
686 
687 /*
688  * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
689  * can fopen() both devices with "a" now.  This was previously impossible.
690  * -- SRB.
691  */
692 static loff_t null_lseek(struct file *file, loff_t offset, int orig)
693 {
694 	return file->f_pos = 0;
695 }
696 
697 /*
698  * The memory devices use the full 32/64 bits of the offset, and so we cannot
699  * check against negative addresses: they are ok. The return value is weird,
700  * though, in that case (0).
701  *
702  * also note that seeking relative to the "end of file" isn't supported:
703  * it has no meaning, so it returns -EINVAL.
704  */
705 static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
706 {
707 	loff_t ret;
708 
709 	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
710 	switch (orig) {
711 	case SEEK_CUR:
712 		offset += file->f_pos;
713 	case SEEK_SET:
714 		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
715 		if ((unsigned long long)offset >= ~0xFFFULL) {
716 			ret = -EOVERFLOW;
717 			break;
718 		}
719 		file->f_pos = offset;
720 		ret = file->f_pos;
721 		force_successful_syscall_return();
722 		break;
723 	default:
724 		ret = -EINVAL;
725 	}
726 	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
727 	return ret;
728 }
729 
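/*
 * Opening /dev/port (and, through the aliases below, /dev/mem, /dev/kmem
 * and /dev/oldmem) requires CAP_SYS_RAWIO.
 */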
730 static int open_port(struct inode * inode, struct file * filp)
731 {
732 	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
733 }
734 
735 #define zero_lseek	null_lseek
736 #define full_lseek      null_lseek
737 #define write_zero	write_null
738 #define read_full       read_zero
739 #define open_mem	open_port
740 #define open_kmem	open_mem
741 #define open_oldmem	open_mem
742 
743 static const struct file_operations mem_fops = {
744 	.llseek		= memory_lseek,
745 	.read		= read_mem,
746 	.write		= write_mem,
747 	.mmap		= mmap_mem,
748 	.open		= open_mem,
749 	.get_unmapped_area = get_unmapped_area_mem,
750 };
751 
752 #ifdef CONFIG_DEVKMEM
753 static const struct file_operations kmem_fops = {
754 	.llseek		= memory_lseek,
755 	.read		= read_kmem,
756 	.write		= write_kmem,
757 	.mmap		= mmap_kmem,
758 	.open		= open_kmem,
759 	.get_unmapped_area = get_unmapped_area_mem,
760 };
761 #endif
762 
763 static const struct file_operations null_fops = {
764 	.llseek		= null_lseek,
765 	.read		= read_null,
766 	.write		= write_null,
767 	.splice_write	= splice_write_null,
768 };
769 
770 #ifdef CONFIG_DEVPORT
771 static const struct file_operations port_fops = {
772 	.llseek		= memory_lseek,
773 	.read		= read_port,
774 	.write		= write_port,
775 	.open		= open_port,
776 };
777 #endif
778 
779 static const struct file_operations zero_fops = {
780 	.llseek		= zero_lseek,
781 	.read		= read_zero,
782 	.write		= write_zero,
783 	.mmap		= mmap_zero,
784 };
785 
786 /*
787  * capabilities for /dev/zero
788  * - permits private mappings, "copies" are taken of the source of zeros
789  * - no writeback happens
790  */
791 static struct backing_dev_info zero_bdi = {
792 	.name		= "char/mem",
793 	.capabilities	= BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
794 };
795 
796 static const struct file_operations full_fops = {
797 	.llseek		= full_lseek,
798 	.read		= read_full,
799 	.write		= write_full,
800 };
801 
802 #ifdef CONFIG_CRASH_DUMP
803 static const struct file_operations oldmem_fops = {
804 	.read	= read_oldmem,
805 	.open	= open_oldmem,
806 	.llseek = default_llseek,
807 };
808 #endif
809 
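/*
 * Writes to /dev/kmsg are gathered into one buffer and emitted as a
 * single printk() so the line cannot interleave with other kernel
 * messages.
 */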
810 static ssize_t kmsg_writev(struct kiocb *iocb, const struct iovec *iv,
811 			   unsigned long count, loff_t pos)
812 {
813 	char *line, *p;
814 	int i;
815 	ssize_t ret = -EFAULT;
816 	size_t len = iov_length(iv, count);
817 
818 	line = kmalloc(len + 1, GFP_KERNEL);
819 	if (line == NULL)
820 		return -ENOMEM;
821 
822 	/*
823 	 * copy all vectors into a single string, to ensure we do
824 	 * not interleave our log line with other printk calls
825 	 */
826 	p = line;
827 	for (i = 0; i < count; i++) {
828 		if (copy_from_user(p, iv[i].iov_base, iv[i].iov_len))
829 			goto out;
830 		p += iv[i].iov_len;
831 	}
832 	p[0] = '\0';
833 
834 	ret = printk("%s", line);
835 	/* printk can add a prefix */
836 	if (ret > len)
837 		ret = len;
838 out:
839 	kfree(line);
840 	return ret;
841 }
842 
843 static const struct file_operations kmsg_fops = {
844 	.aio_write = kmsg_writev,
845 	.llseek = noop_llseek,
846 };
847 
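/*
 * Table of memory-class character devices, indexed by minor number under
 * MEM_MAJOR.  A mode of 0 leaves the device node's default permissions
 * untouched (see mem_devnode()).
 */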
848 static const struct memdev {
849 	const char *name;
850 	umode_t mode;
851 	const struct file_operations *fops;
852 	struct backing_dev_info *dev_info;
853 } devlist[] = {
854 	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
855 #ifdef CONFIG_DEVKMEM
856 	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
857 #endif
858 	 [3] = { "null", 0666, &null_fops, NULL },
859 #ifdef CONFIG_DEVPORT
860 	 [4] = { "port", 0, &port_fops, NULL },
861 #endif
862 	 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
863 	 [7] = { "full", 0666, &full_fops, NULL },
864 	 [8] = { "random", 0666, &random_fops, NULL },
865 	 [9] = { "urandom", 0666, &urandom_fops, NULL },
866 	[11] = { "kmsg", 0, &kmsg_fops, NULL },
867 #ifdef CONFIG_CRASH_DUMP
868 	[12] = { "oldmem", 0, &oldmem_fops, NULL },
869 #endif
870 };
871 
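/*
 * open() for the mem character major: look up the minor in devlist[],
 * install that device's file_operations and backing_dev_info, then call
 * its own open() method if it has one.
 */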
872 static int memory_open(struct inode *inode, struct file *filp)
873 {
874 	int minor;
875 	const struct memdev *dev;
876 
877 	minor = iminor(inode);
878 	if (minor >= ARRAY_SIZE(devlist))
879 		return -ENXIO;
880 
881 	dev = &devlist[minor];
882 	if (!dev->fops)
883 		return -ENXIO;
884 
885 	filp->f_op = dev->fops;
886 	if (dev->dev_info)
887 		filp->f_mapping->backing_dev_info = dev->dev_info;
888 
889 	/* Is /dev/mem or /dev/kmem ? */
890 	if (dev->dev_info == &directly_mappable_cdev_bdi)
891 		filp->f_mode |= FMODE_UNSIGNED_OFFSET;
892 
893 	if (dev->fops->open)
894 		return dev->fops->open(inode, filp);
895 
896 	return 0;
897 }
898 
899 static const struct file_operations memory_fops = {
900 	.open = memory_open,
901 	.llseek = noop_llseek,
902 };
903 
904 static char *mem_devnode(struct device *dev, umode_t *mode)
905 {
906 	if (mode && devlist[MINOR(dev->devt)].mode)
907 		*mode = devlist[MINOR(dev->devt)].mode;
908 	return NULL;
909 }
910 
911 static struct class *mem_class;
912 
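/*
 * Initialise zero_bdi, register the mem character major, create the
 * "mem" class and its device nodes, then hand off to tty_init().
 */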
913 static int __init chr_dev_init(void)
914 {
915 	int minor;
916 	int err;
917 
918 	err = bdi_init(&zero_bdi);
919 	if (err)
920 		return err;
921 
922 	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
923 		printk("unable to get major %d for memory devs\n", MEM_MAJOR);
924 
925 	mem_class = class_create(THIS_MODULE, "mem");
926 	if (IS_ERR(mem_class))
927 		return PTR_ERR(mem_class);
928 
929 	mem_class->devnode = mem_devnode;
930 	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
931 		if (!devlist[minor].name)
932 			continue;
933 		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
934 			      NULL, devlist[minor].name);
935 	}
936 
937 	return tty_init();
938 }
939 
940 fs_initcall(chr_dev_init);
941