xref: /openbmc/linux/drivers/char/mem.c (revision 4949009e)
1 /*
2  *  linux/drivers/char/mem.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  Added devfs support.
7  *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
8  *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
9  */
10 
11 #include <linux/mm.h>
12 #include <linux/miscdevice.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
15 #include <linux/mman.h>
16 #include <linux/random.h>
17 #include <linux/init.h>
18 #include <linux/raw.h>
19 #include <linux/tty.h>
20 #include <linux/capability.h>
21 #include <linux/ptrace.h>
22 #include <linux/device.h>
23 #include <linux/highmem.h>
24 #include <linux/backing-dev.h>
25 #include <linux/splice.h>
26 #include <linux/pfn.h>
27 #include <linux/export.h>
28 #include <linux/io.h>
29 #include <linux/aio.h>
30 
31 #include <asm/uaccess.h>
32 
33 #ifdef CONFIG_IA64
34 # include <linux/efi.h>
35 #endif
36 
37 #define DEVPORT_MINOR	4
38 
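/*
 * Number of bytes from @start to the end of its page, capped at @size.
 * Callers use this to split transfers on page boundaries; e.g. with 4 KiB
 * pages, size_inside_page(0x1ffe, 16) == 2.
 */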
39 static inline unsigned long size_inside_page(unsigned long start,
40 					     unsigned long size)
41 {
42 	unsigned long sz;
43 
44 	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
45 
46 	return min(sz, size);
47 }
48 
49 #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
50 static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
51 {
52 	return addr + count <= __pa(high_memory);
53 }
54 
55 static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
56 {
57 	return 1;
58 }
59 #endif
60 
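/*
 * With CONFIG_STRICT_DEVMEM, every page of a /dev/mem access is vetted by
 * the architecture's devmem_is_allowed() and refused with -EPERM if the
 * architecture considers it off-limits (typically system RAM in use by the
 * kernel).  Without it, every physical range is allowed.
 */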
61 #ifdef CONFIG_STRICT_DEVMEM
62 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
63 {
64 	u64 from = ((u64)pfn) << PAGE_SHIFT;
65 	u64 to = from + size;
66 	u64 cursor = from;
67 
68 	while (cursor < to) {
69 		if (!devmem_is_allowed(pfn)) {
70 			printk(KERN_INFO
71 		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
72 				current->comm, from, to);
73 			return 0;
74 		}
75 		cursor += PAGE_SIZE;
76 		pfn++;
77 	}
78 	return 1;
79 }
80 #else
81 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
82 {
83 	return 1;
84 }
85 #endif
86 
87 #ifndef unxlate_dev_mem_ptr
88 #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
89 void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
90 {
91 }
92 #endif
93 
94 /*
95  * This function reads the *physical* memory. The f_pos points directly to the
96  * memory location.
97  */
98 static ssize_t read_mem(struct file *file, char __user *buf,
99 			size_t count, loff_t *ppos)
100 {
101 	phys_addr_t p = *ppos;
102 	ssize_t read, sz;
103 	void *ptr;
104 
105 	if (p != *ppos)
106 		return 0;
107 
108 	if (!valid_phys_addr_range(p, count))
109 		return -EFAULT;
110 	read = 0;
111 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
112 	/* we don't have page 0 mapped on sparc and m68k.. */
113 	if (p < PAGE_SIZE) {
114 		sz = size_inside_page(p, count);
115 		if (sz > 0) {
116 			if (clear_user(buf, sz))
117 				return -EFAULT;
118 			buf += sz;
119 			p += sz;
120 			count -= sz;
121 			read += sz;
122 		}
123 	}
124 #endif
125 
126 	while (count > 0) {
127 		unsigned long remaining;
128 
129 		sz = size_inside_page(p, count);
130 
131 		if (!range_is_allowed(p >> PAGE_SHIFT, count))
132 			return -EPERM;
133 
134 		/*
135 		 * On ia64 if a page has been mapped somewhere as uncached, then
136 		 * it must also be accessed uncached by the kernel or data
137 		 * corruption may occur.
138 		 */
139 		ptr = xlate_dev_mem_ptr(p);
140 		if (!ptr)
141 			return -EFAULT;
142 
143 		remaining = copy_to_user(buf, ptr, sz);
144 		unxlate_dev_mem_ptr(p, ptr);
145 		if (remaining)
146 			return -EFAULT;
147 
148 		buf += sz;
149 		p += sz;
150 		count -= sz;
151 		read += sz;
152 	}
153 
154 	*ppos += read;
155 	return read;
156 }
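/*
 * Illustrative userspace sketch (not part of this driver): the file offset
 * of /dev/mem is the physical address, so a read looks like
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	pread(fd, buf, len, phys_addr);
 *
 * where buf/len/phys_addr are placeholders; the open requires
 * capable(CAP_SYS_RAWIO) (see open_port() below) and the access is subject
 * to the valid_phys_addr_range()/range_is_allowed() checks above.
 */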
157 
158 static ssize_t write_mem(struct file *file, const char __user *buf,
159 			 size_t count, loff_t *ppos)
160 {
161 	phys_addr_t p = *ppos;
162 	ssize_t written, sz;
163 	unsigned long copied;
164 	void *ptr;
165 
166 	if (p != *ppos)
167 		return -EFBIG;
168 
169 	if (!valid_phys_addr_range(p, count))
170 		return -EFAULT;
171 
172 	written = 0;
173 
174 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
175 	/* we don't have page 0 mapped on sparc and m68k.. */
176 	if (p < PAGE_SIZE) {
177 		sz = size_inside_page(p, count);
178 		/* Hmm. Do something?  (write to unmapped page 0 is skipped) */
179 		buf += sz;
180 		p += sz;
181 		count -= sz;
182 		written += sz;
183 	}
184 #endif
185 
186 	while (count > 0) {
187 		sz = size_inside_page(p, count);
188 
189 		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
190 			return -EPERM;
191 
192 		/*
193 		 * On ia64 if a page has been mapped somewhere as uncached, then
194 		 * it must also be accessed uncached by the kernel or data
195 		 * corruption may occur.
196 		 */
197 		ptr = xlate_dev_mem_ptr(p);
198 		if (!ptr) {
199 			if (written)
200 				break;
201 			return -EFAULT;
202 		}
203 
204 		copied = copy_from_user(ptr, buf, sz);
205 		unxlate_dev_mem_ptr(p, ptr);
206 		if (copied) {
207 			written += sz - copied;
208 			if (written)
209 				break;
210 			return -EFAULT;
211 		}
212 
213 		buf += sz;
214 		p += sz;
215 		count -= sz;
216 		written += sz;
217 	}
218 
219 	*ppos += written;
220 	return written;
221 }
222 
223 int __weak phys_mem_access_prot_allowed(struct file *file,
224 	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
225 {
226 	return 1;
227 }
228 
229 #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
230 
231 /*
232  * Architectures vary in how they handle caching for addresses
233  * outside of main memory.
234  *
235  */
236 #ifdef pgprot_noncached
237 static int uncached_access(struct file *file, phys_addr_t addr)
238 {
239 #if defined(CONFIG_IA64)
240 	/*
241 	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
242 	 * attribute aliases.
243 	 */
244 	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
245 #elif defined(CONFIG_MIPS)
246 	{
247 		extern int __uncached_access(struct file *file,
248 					     unsigned long addr);
249 
250 		return __uncached_access(file, addr);
251 	}
252 #else
253 	/*
254 	 * Accessing memory above the top the kernel knows about, or
255 	 * through a file pointer that was marked O_DSYNC,
256 	 * will be done non-cached.
257 	 */
258 	if (file->f_flags & O_DSYNC)
259 		return 1;
260 	return addr >= __pa(high_memory);
261 #endif
262 }
263 #endif
264 
265 static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
266 				     unsigned long size, pgprot_t vma_prot)
267 {
268 #ifdef pgprot_noncached
269 	phys_addr_t offset = pfn << PAGE_SHIFT;
270 
271 	if (uncached_access(file, offset))
272 		return pgprot_noncached(vma_prot);
273 #endif
274 	return vma_prot;
275 }
276 #endif
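/*
 * Practical effect of the generic branch above: opening /dev/mem with
 * O_DSYNC (or mapping physical memory above high_memory) yields a
 * pgprot_noncached() mapping.  Userspace sketch, not part of this driver:
 *
 *	int fd = open("/dev/mem", O_RDWR | O_DSYNC);
 *	void *regs = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			  fd, phys_addr);
 */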
277 
278 #ifndef CONFIG_MMU
279 static unsigned long get_unmapped_area_mem(struct file *file,
280 					   unsigned long addr,
281 					   unsigned long len,
282 					   unsigned long pgoff,
283 					   unsigned long flags)
284 {
285 	if (!valid_mmap_phys_addr_range(pgoff, len))
286 		return (unsigned long) -EINVAL;
287 	return pgoff << PAGE_SHIFT;
288 }
289 
290 /* can't do an in-place private mapping if there's no MMU */
291 static inline int private_mapping_ok(struct vm_area_struct *vma)
292 {
293 	return vma->vm_flags & VM_MAYSHARE;
294 }
295 #else
296 #define get_unmapped_area_mem	NULL
297 
298 static inline int private_mapping_ok(struct vm_area_struct *vma)
299 {
300 	return 1;
301 }
302 #endif
303 
304 static const struct vm_operations_struct mmap_mem_ops = {
305 #ifdef CONFIG_HAVE_IOREMAP_PROT
306 	.access = generic_access_phys
307 #endif
308 };
309 
310 static int mmap_mem(struct file *file, struct vm_area_struct *vma)
311 {
312 	size_t size = vma->vm_end - vma->vm_start;
313 
314 	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
315 		return -EINVAL;
316 
317 	if (!private_mapping_ok(vma))
318 		return -ENOSYS;
319 
320 	if (!range_is_allowed(vma->vm_pgoff, size))
321 		return -EPERM;
322 
323 	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
324 						&vma->vm_page_prot))
325 		return -EINVAL;
326 
327 	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
328 						 size,
329 						 vma->vm_page_prot);
330 
331 	vma->vm_ops = &mmap_mem_ops;
332 
333 	/* Remap-pfn-range will mark the range VM_IO */
334 	if (remap_pfn_range(vma,
335 			    vma->vm_start,
336 			    vma->vm_pgoff,
337 			    size,
338 			    vma->vm_page_prot)) {
339 		return -EAGAIN;
340 	}
341 	return 0;
342 }
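/*
 * Note: for /dev/mem the mmap() file offset is a physical address, so
 * vma->vm_pgoff above is already the physical page frame number handed to
 * remap_pfn_range(), and the resulting VMA is marked VM_IO (treated as
 * device memory by the core VM).
 */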
343 
344 #ifdef CONFIG_DEVKMEM
345 static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
346 {
347 	unsigned long pfn;
348 
349 	/* Turn a kernel-virtual address into a physical page frame */
350 	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
351 
352 	/*
353 	 * RED-PEN: on some architectures there is more mapped memory than
354 	 * available in mem_map which pfn_valid checks for. Perhaps should add a
355 	 * new macro here.
356 	 *
357 	 * RED-PEN: vmalloc is not supported right now.
358 	 */
359 	if (!pfn_valid(pfn))
360 		return -EIO;
361 
362 	vma->vm_pgoff = pfn;
363 	return mmap_mem(file, vma);
364 }
365 #endif
366 
367 #ifdef CONFIG_DEVKMEM
368 /*
369  * This function reads the *virtual* memory as seen by the kernel.
370  */
371 static ssize_t read_kmem(struct file *file, char __user *buf,
372 			 size_t count, loff_t *ppos)
373 {
374 	unsigned long p = *ppos;
375 	ssize_t low_count, read, sz;
376 	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
377 	int err = 0;
378 
379 	read = 0;
380 	if (p < (unsigned long) high_memory) {
381 		low_count = count;
382 		if (count > (unsigned long)high_memory - p)
383 			low_count = (unsigned long)high_memory - p;
384 
385 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
386 		/* we don't have page 0 mapped on sparc and m68k.. */
387 		if (p < PAGE_SIZE && low_count > 0) {
388 			sz = size_inside_page(p, low_count);
389 			if (clear_user(buf, sz))
390 				return -EFAULT;
391 			buf += sz;
392 			p += sz;
393 			read += sz;
394 			low_count -= sz;
395 			count -= sz;
396 		}
397 #endif
398 		while (low_count > 0) {
399 			sz = size_inside_page(p, low_count);
400 
401 			/*
402 			 * On ia64 if a page has been mapped somewhere as
403 			 * uncached, then it must also be accessed uncached
404 			 * by the kernel or data corruption may occur
405 			 */
406 			kbuf = xlate_dev_kmem_ptr((void *)p);
407 
408 			if (copy_to_user(buf, kbuf, sz))
409 				return -EFAULT;
410 			buf += sz;
411 			p += sz;
412 			read += sz;
413 			low_count -= sz;
414 			count -= sz;
415 		}
416 	}
417 
418 	if (count > 0) {
419 		kbuf = (char *)__get_free_page(GFP_KERNEL);
420 		if (!kbuf)
421 			return -ENOMEM;
422 		while (count > 0) {
423 			sz = size_inside_page(p, count);
424 			if (!is_vmalloc_or_module_addr((void *)p)) {
425 				err = -ENXIO;
426 				break;
427 			}
428 			sz = vread(kbuf, (char *)p, sz);
429 			if (!sz)
430 				break;
431 			if (copy_to_user(buf, kbuf, sz)) {
432 				err = -EFAULT;
433 				break;
434 			}
435 			count -= sz;
436 			buf += sz;
437 			read += sz;
438 			p += sz;
439 		}
440 		free_page((unsigned long)kbuf);
441 	}
442 	*ppos = p;
443 	return read ? read : err;
444 }
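/*
 * read_kmem() above works in two phases: addresses below high_memory are
 * copied straight out of the kernel's direct mapping via
 * xlate_dev_kmem_ptr(), while higher addresses must be vmalloc/module
 * space and are bounced through a temporary page with vread().
 */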
445 
446 
447 static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
448 				size_t count, loff_t *ppos)
449 {
450 	ssize_t written, sz;
451 	unsigned long copied;
452 
453 	written = 0;
454 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
455 	/* we don't have page 0 mapped on sparc and m68k.. */
456 	if (p < PAGE_SIZE) {
457 		sz = size_inside_page(p, count);
458 		/* Hmm. Do something?  (write to unmapped page 0 is skipped) */
459 		buf += sz;
460 		p += sz;
461 		count -= sz;
462 		written += sz;
463 	}
464 #endif
465 
466 	while (count > 0) {
467 		void *ptr;
468 
469 		sz = size_inside_page(p, count);
470 
471 		/*
472 		 * On ia64 if a page has been mapped somewhere as uncached, then
473 		 * it must also be accessed uncached by the kernel or data
474 		 * corruption may occur.
475 		 */
476 		ptr = xlate_dev_kmem_ptr((void *)p);
477 
478 		copied = copy_from_user(ptr, buf, sz);
479 		if (copied) {
480 			written += sz - copied;
481 			if (written)
482 				break;
483 			return -EFAULT;
484 		}
485 		buf += sz;
486 		p += sz;
487 		count -= sz;
488 		written += sz;
489 	}
490 
491 	*ppos += written;
492 	return written;
493 }
494 
495 /*
496  * This function writes to the *virtual* memory as seen by the kernel.
497  */
498 static ssize_t write_kmem(struct file *file, const char __user *buf,
499 			  size_t count, loff_t *ppos)
500 {
501 	unsigned long p = *ppos;
502 	ssize_t wrote = 0;
503 	ssize_t virtr = 0;
504 	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
505 	int err = 0;
506 
507 	if (p < (unsigned long) high_memory) {
508 		unsigned long to_write = min_t(unsigned long, count,
509 					       (unsigned long)high_memory - p);
510 		wrote = do_write_kmem(p, buf, to_write, ppos);
511 		if (wrote != to_write)
512 			return wrote;
513 		p += wrote;
514 		buf += wrote;
515 		count -= wrote;
516 	}
517 
518 	if (count > 0) {
519 		kbuf = (char *)__get_free_page(GFP_KERNEL);
520 		if (!kbuf)
521 			return wrote ? wrote : -ENOMEM;
522 		while (count > 0) {
523 			unsigned long sz = size_inside_page(p, count);
524 			unsigned long n;
525 
526 			if (!is_vmalloc_or_module_addr((void *)p)) {
527 				err = -ENXIO;
528 				break;
529 			}
530 			n = copy_from_user(kbuf, buf, sz);
531 			if (n) {
532 				err = -EFAULT;
533 				break;
534 			}
535 			vwrite(kbuf, (char *)p, sz);
536 			count -= sz;
537 			buf += sz;
538 			virtr += sz;
539 			p += sz;
540 		}
541 		free_page((unsigned long)kbuf);
542 	}
543 
544 	*ppos = p;
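	/*
	 * GNU "?:" shorthand: return the total byte count if anything was
	 * written in either phase, otherwise propagate err from the
	 * vmalloc loop.
	 */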
545 	return virtr + wrote ? : err;
546 }
547 #endif
548 
549 #ifdef CONFIG_DEVPORT
550 static ssize_t read_port(struct file *file, char __user *buf,
551 			 size_t count, loff_t *ppos)
552 {
553 	unsigned long i = *ppos;
554 	char __user *tmp = buf;
555 
556 	if (!access_ok(VERIFY_WRITE, buf, count))
557 		return -EFAULT;
558 	while (count-- > 0 && i < 65536) {
559 		if (__put_user(inb(i), tmp) < 0)
560 			return -EFAULT;
561 		i++;
562 		tmp++;
563 	}
564 	*ppos = i;
565 	return tmp-buf;
566 }
567 
568 static ssize_t write_port(struct file *file, const char __user *buf,
569 			  size_t count, loff_t *ppos)
570 {
571 	unsigned long i = *ppos;
572 	const char __user *tmp = buf;
573 
574 	if (!access_ok(VERIFY_READ, buf, count))
575 		return -EFAULT;
576 	while (count-- > 0 && i < 65536) {
577 		char c;
578 		if (__get_user(c, tmp)) {
579 			if (tmp > buf)
580 				break;
581 			return -EFAULT;
582 		}
583 		outb(c, i);
584 		i++;
585 		tmp++;
586 	}
587 	*ppos = i;
588 	return tmp-buf;
589 }
590 #endif
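/*
 * Illustrative userspace sketch (not part of this driver): the file offset
 * of /dev/port selects the I/O port, one byte per port, so reading a port
 * looks like
 *
 *	unsigned char v;
 *	int fd = open("/dev/port", O_RDONLY);
 *	pread(fd, &v, 1, port_nr);
 *
 * where port_nr is the port number; CAP_SYS_RAWIO is required at open time.
 */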
591 
592 static ssize_t read_null(struct file *file, char __user *buf,
593 			 size_t count, loff_t *ppos)
594 {
595 	return 0;
596 }
597 
598 static ssize_t write_null(struct file *file, const char __user *buf,
599 			  size_t count, loff_t *ppos)
600 {
601 	return count;
602 }
603 
604 static ssize_t aio_read_null(struct kiocb *iocb, const struct iovec *iov,
605 			     unsigned long nr_segs, loff_t pos)
606 {
607 	return 0;
608 }
609 
610 static ssize_t aio_write_null(struct kiocb *iocb, const struct iovec *iov,
611 			      unsigned long nr_segs, loff_t pos)
612 {
613 	return iov_length(iov, nr_segs);
614 }
615 
616 static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
617 			struct splice_desc *sd)
618 {
619 	return sd->len;
620 }
621 
622 static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
623 				 loff_t *ppos, size_t len, unsigned int flags)
624 {
625 	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
626 }
627 
628 static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
629 {
630 	size_t written = 0;
631 
632 	while (iov_iter_count(iter)) {
633 		size_t chunk = iov_iter_count(iter), n;
634 		if (chunk > PAGE_SIZE)
635 			chunk = PAGE_SIZE;	/* Just for latency reasons */
636 		n = iov_iter_zero(chunk, iter);
637 		if (!n && iov_iter_count(iter))
638 			return written ? written : -EFAULT;
639 		written += n;
640 		if (signal_pending(current))
641 			return written ? written : -ERESTARTSYS;
642 		cond_resched();
643 	}
644 	return written;
645 }
646 
647 static int mmap_zero(struct file *file, struct vm_area_struct *vma)
648 {
649 #ifndef CONFIG_MMU
650 	return -ENOSYS;
651 #endif
652 	if (vma->vm_flags & VM_SHARED)
653 		return shmem_zero_setup(vma);
654 	return 0;
655 }
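/*
 * Private mappings of /dev/zero fall through to the generic anonymous
 * memory path (the plain "return 0" above), while MAP_SHARED mappings get
 * a shmem object via shmem_zero_setup() so the pages can be shared.
 */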
656 
657 static ssize_t write_full(struct file *file, const char __user *buf,
658 			  size_t count, loff_t *ppos)
659 {
660 	return -ENOSPC;
661 }
662 
663 /*
664  * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
665  * can fopen() both devices with "a" now.  This was previously impossible.
666  * -- SRB.
667  */
668 static loff_t null_lseek(struct file *file, loff_t offset, int orig)
669 {
670 	return file->f_pos = 0;
671 }
672 
673 /*
674  * The memory devices use the full 32/64 bits of the offset, and so we cannot
675  * check against negative addresses: they are ok. The return value is weird,
676  * though, in that case (0).
677  *
678  * also note that seeking relative to the "end of file" isn't supported:
679  * it has no meaning, so it returns -EINVAL.
680  */
681 static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
682 {
683 	loff_t ret;
684 
685 	mutex_lock(&file_inode(file)->i_mutex);
686 	switch (orig) {
687 	case SEEK_CUR:
688 		offset += file->f_pos;
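		/* fall through */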
689 	case SEEK_SET:
690 		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
691 		if (IS_ERR_VALUE((unsigned long long)offset)) {
692 			ret = -EOVERFLOW;
693 			break;
694 		}
695 		file->f_pos = offset;
696 		ret = file->f_pos;
697 		force_successful_syscall_return();
698 		break;
699 	default:
700 		ret = -EINVAL;
701 	}
702 	mutex_unlock(&file_inode(file)->i_mutex);
703 	return ret;
704 }
705 
706 static int open_port(struct inode *inode, struct file *filp)
707 {
708 	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
709 }
710 
711 #define zero_lseek	null_lseek
712 #define full_lseek      null_lseek
713 #define write_zero	write_null
714 #define aio_write_zero	aio_write_null
715 #define open_mem	open_port
716 #define open_kmem	open_mem
717 
718 static const struct file_operations mem_fops = {
719 	.llseek		= memory_lseek,
720 	.read		= read_mem,
721 	.write		= write_mem,
722 	.mmap		= mmap_mem,
723 	.open		= open_mem,
724 	.get_unmapped_area = get_unmapped_area_mem,
725 };
726 
727 #ifdef CONFIG_DEVKMEM
728 static const struct file_operations kmem_fops = {
729 	.llseek		= memory_lseek,
730 	.read		= read_kmem,
731 	.write		= write_kmem,
732 	.mmap		= mmap_kmem,
733 	.open		= open_kmem,
734 	.get_unmapped_area = get_unmapped_area_mem,
735 };
736 #endif
737 
738 static const struct file_operations null_fops = {
739 	.llseek		= null_lseek,
740 	.read		= read_null,
741 	.write		= write_null,
742 	.aio_read	= aio_read_null,
743 	.aio_write	= aio_write_null,
744 	.splice_write	= splice_write_null,
745 };
746 
747 #ifdef CONFIG_DEVPORT
748 static const struct file_operations port_fops = {
749 	.llseek		= memory_lseek,
750 	.read		= read_port,
751 	.write		= write_port,
752 	.open		= open_port,
753 };
754 #endif
755 
756 static const struct file_operations zero_fops = {
757 	.llseek		= zero_lseek,
758 	.read		= new_sync_read,
759 	.write		= write_zero,
760 	.read_iter	= read_iter_zero,
761 	.aio_write	= aio_write_zero,
762 	.mmap		= mmap_zero,
763 };
764 
765 /*
766  * capabilities for /dev/zero
767  * - permits private mappings, "copies" are taken of the source of zeros
768  * - no writeback happens
769  */
770 static struct backing_dev_info zero_bdi = {
771 	.name		= "char/mem",
772 	.capabilities	= BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
773 };
774 
775 static const struct file_operations full_fops = {
776 	.llseek		= full_lseek,
777 	.read		= new_sync_read,
778 	.read_iter	= read_iter_zero,
779 	.write		= write_full,
780 };
781 
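/*
 * Memory-class character devices, indexed by minor number under MEM_MAJOR:
 * 1 = /dev/mem, 2 = /dev/kmem, 3 = /dev/null, 4 = /dev/port, 5 = /dev/zero,
 * 7 = /dev/full, 8 = /dev/random, 9 = /dev/urandom, 11 = /dev/kmsg.
 * Unused slots have a NULL .fops, so memory_open() returns -ENXIO for them.
 */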
782 static const struct memdev {
783 	const char *name;
784 	umode_t mode;
785 	const struct file_operations *fops;
786 	struct backing_dev_info *dev_info;
787 } devlist[] = {
788 	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
789 #ifdef CONFIG_DEVKMEM
790 	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
791 #endif
792 	 [3] = { "null", 0666, &null_fops, NULL },
793 #ifdef CONFIG_DEVPORT
794 	 [4] = { "port", 0, &port_fops, NULL },
795 #endif
796 	 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
797 	 [7] = { "full", 0666, &full_fops, NULL },
798 	 [8] = { "random", 0666, &random_fops, NULL },
799 	 [9] = { "urandom", 0666, &urandom_fops, NULL },
800 #ifdef CONFIG_PRINTK
801 	[11] = { "kmsg", 0644, &kmsg_fops, NULL },
802 #endif
803 };
804 
805 static int memory_open(struct inode *inode, struct file *filp)
806 {
807 	int minor;
808 	const struct memdev *dev;
809 
810 	minor = iminor(inode);
811 	if (minor >= ARRAY_SIZE(devlist))
812 		return -ENXIO;
813 
814 	dev = &devlist[minor];
815 	if (!dev->fops)
816 		return -ENXIO;
817 
818 	filp->f_op = dev->fops;
819 	if (dev->dev_info)
820 		filp->f_mapping->backing_dev_info = dev->dev_info;
821 
822 	/* Is /dev/mem or /dev/kmem ? */
823 	if (dev->dev_info == &directly_mappable_cdev_bdi)
824 		filp->f_mode |= FMODE_UNSIGNED_OFFSET;
825 
826 	if (dev->fops->open)
827 		return dev->fops->open(inode, filp);
828 
829 	return 0;
830 }
831 
832 static const struct file_operations memory_fops = {
833 	.open = memory_open,
834 	.llseek = noop_llseek,
835 };
836 
837 static char *mem_devnode(struct device *dev, umode_t *mode)
838 {
839 	if (mode && devlist[MINOR(dev->devt)].mode)
840 		*mode = devlist[MINOR(dev->devt)].mode;
841 	return NULL;
842 }
843 
844 static struct class *mem_class;
845 
846 static int __init chr_dev_init(void)
847 {
848 	int minor;
849 	int err;
850 
851 	err = bdi_init(&zero_bdi);
852 	if (err)
853 		return err;
854 
855 	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
856 		printk("unable to get major %d for memory devs\n", MEM_MAJOR);
857 
858 	mem_class = class_create(THIS_MODULE, "mem");
859 	if (IS_ERR(mem_class))
860 		return PTR_ERR(mem_class);
861 
862 	mem_class->devnode = mem_devnode;
863 	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
864 		if (!devlist[minor].name)
865 			continue;
866 
867 		/*
868 		 * Create /dev/port?
869 		 */
870 		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
871 			continue;
872 
873 		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
874 			      NULL, devlist[minor].name);
875 	}
876 
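	/*
	 * Chain into the TTY layer so its character devices are registered
	 * at the same fs_initcall stage as the memory devices above.
	 */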
877 	return tty_init();
878 }
879 
880 fs_initcall(chr_dev_init);
881