xref: /openbmc/linux/drivers/char/mem.c (revision 609e478b)
1 /*
2  *  linux/drivers/char/mem.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  Added devfs support.
7  *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
8  *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
9  */
10 
11 #include <linux/mm.h>
12 #include <linux/miscdevice.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
15 #include <linux/mman.h>
16 #include <linux/random.h>
17 #include <linux/init.h>
18 #include <linux/raw.h>
19 #include <linux/tty.h>
20 #include <linux/capability.h>
21 #include <linux/ptrace.h>
22 #include <linux/device.h>
23 #include <linux/highmem.h>
24 #include <linux/backing-dev.h>
25 #include <linux/splice.h>
26 #include <linux/pfn.h>
27 #include <linux/export.h>
28 #include <linux/io.h>
29 #include <linux/aio.h>
30 
31 #include <asm/uaccess.h>
32 
33 #ifdef CONFIG_IA64
34 # include <linux/efi.h>
35 #endif
36 
37 #define DEVPORT_MINOR	4
38 
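/*
 * Number of bytes from 'start' to the end of the page containing it,
 * capped at 'size'.  E.g. with 4 KiB pages, size_inside_page(0x1ff8, 64)
 * returns 8 (illustrative values).
 */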
39 static inline unsigned long size_inside_page(unsigned long start,
40 					     unsigned long size)
41 {
42 	unsigned long sz;
43 
44 	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
45 
46 	return min(sz, size);
47 }
48 
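/*
 * Fallback range checks for architectures that do not supply their own
 * valid_phys_addr_range()/valid_mmap_phys_addr_range() implementations.
 */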
49 #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
50 static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
51 {
52 	return addr + count <= __pa(high_memory);
53 }
54 
55 static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
56 {
57 	return 1;
58 }
59 #endif
60 
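/*
 * With CONFIG_STRICT_DEVMEM, every page of a requested range must pass the
 * architecture's devmem_is_allowed() check before /dev/mem will touch it;
 * otherwise the whole range is allowed unconditionally.
 */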
61 #ifdef CONFIG_STRICT_DEVMEM
62 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
63 {
64 	u64 from = ((u64)pfn) << PAGE_SHIFT;
65 	u64 to = from + size;
66 	u64 cursor = from;
67 
68 	while (cursor < to) {
69 		if (!devmem_is_allowed(pfn)) {
70 			printk(KERN_INFO
71 		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
72 				current->comm, from, to);
73 			return 0;
74 		}
75 		cursor += PAGE_SIZE;
76 		pfn++;
77 	}
78 	return 1;
79 }
80 #else
81 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
82 {
83 	return 1;
84 }
85 #endif
86 
87 void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
88 {
89 }
90 
91 /*
92  * This function reads the *physical* memory. The f_pos points directly to the
93  * memory location.
94  */
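/*
 * Illustrative userspace use (hypothetical addresses, and assuming the
 * caller is privileged and the range passes range_is_allowed()):
 *
 *	fd = open("/dev/mem", O_RDONLY);
 *	pread(fd, buf, sizeof(buf), phys_addr);
 */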
95 static ssize_t read_mem(struct file *file, char __user *buf,
96 			size_t count, loff_t *ppos)
97 {
98 	phys_addr_t p = *ppos;
99 	ssize_t read, sz;
100 	char *ptr;
101 
102 	if (p != *ppos)
103 		return 0;
104 
105 	if (!valid_phys_addr_range(p, count))
106 		return -EFAULT;
107 	read = 0;
108 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
109 	/* we don't have page 0 mapped on sparc and m68k.. */
110 	if (p < PAGE_SIZE) {
111 		sz = size_inside_page(p, count);
112 		if (sz > 0) {
113 			if (clear_user(buf, sz))
114 				return -EFAULT;
115 			buf += sz;
116 			p += sz;
117 			count -= sz;
118 			read += sz;
119 		}
120 	}
121 #endif
122 
123 	while (count > 0) {
124 		unsigned long remaining;
125 
126 		sz = size_inside_page(p, count);
127 
128 		if (!range_is_allowed(p >> PAGE_SHIFT, count))
129 			return -EPERM;
130 
131 		/*
132 		 * On ia64 if a page has been mapped somewhere as uncached, then
133 		 * it must also be accessed uncached by the kernel or data
134 		 * corruption may occur.
135 		 */
136 		ptr = xlate_dev_mem_ptr(p);
137 		if (!ptr)
138 			return -EFAULT;
139 
140 		remaining = copy_to_user(buf, ptr, sz);
141 		unxlate_dev_mem_ptr(p, ptr);
142 		if (remaining)
143 			return -EFAULT;
144 
145 		buf += sz;
146 		p += sz;
147 		count -= sz;
148 		read += sz;
149 	}
150 
151 	*ppos += read;
152 	return read;
153 }
154 
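/*
 * Write-side counterpart of read_mem(): copies user data to the physical
 * address given by *ppos, one page-sized chunk at a time.
 */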
155 static ssize_t write_mem(struct file *file, const char __user *buf,
156 			 size_t count, loff_t *ppos)
157 {
158 	phys_addr_t p = *ppos;
159 	ssize_t written, sz;
160 	unsigned long copied;
161 	void *ptr;
162 
163 	if (p != *ppos)
164 		return -EFBIG;
165 
166 	if (!valid_phys_addr_range(p, count))
167 		return -EFAULT;
168 
169 	written = 0;
170 
171 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
172 	/* we don't have page 0 mapped on sparc and m68k.. */
173 	if (p < PAGE_SIZE) {
174 		sz = size_inside_page(p, count);
175 		/* Hmm. Do something? */
176 		buf += sz;
177 		p += sz;
178 		count -= sz;
179 		written += sz;
180 	}
181 #endif
182 
183 	while (count > 0) {
184 		sz = size_inside_page(p, count);
185 
186 		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
187 			return -EPERM;
188 
189 		/*
190 		 * On ia64 if a page has been mapped somewhere as uncached, then
191 		 * it must also be accessed uncached by the kernel or data
192 		 * corruption may occur.
193 		 */
194 		ptr = xlate_dev_mem_ptr(p);
195 		if (!ptr) {
196 			if (written)
197 				break;
198 			return -EFAULT;
199 		}
200 
201 		copied = copy_from_user(ptr, buf, sz);
202 		unxlate_dev_mem_ptr(p, ptr);
203 		if (copied) {
204 			written += sz - copied;
205 			if (written)
206 				break;
207 			return -EFAULT;
208 		}
209 
210 		buf += sz;
211 		p += sz;
212 		count -= sz;
213 		written += sz;
214 	}
215 
216 	*ppos += written;
217 	return written;
218 }
219 
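/*
 * Architectures may override this weak stub to veto a proposed /dev/mem
 * mapping or to adjust its page protection; the default permits everything
 * unchanged.
 */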
220 int __weak phys_mem_access_prot_allowed(struct file *file,
221 	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
222 {
223 	return 1;
224 }
225 
226 #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
227 
228 /*
229  * Architectures vary in how they handle caching for addresses
230  * outside of main memory.
231  *
232  */
233 #ifdef pgprot_noncached
234 static int uncached_access(struct file *file, phys_addr_t addr)
235 {
236 #if defined(CONFIG_IA64)
237 	/*
238 	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
239 	 * attribute aliases.
240 	 */
241 	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
242 #elif defined(CONFIG_MIPS)
243 	{
244 		extern int __uncached_access(struct file *file,
245 					     unsigned long addr);
246 
247 		return __uncached_access(file, addr);
248 	}
249 #else
250 	/*
251 	 * Accessing addresses above the top of memory the kernel knows
252 	 * about, or going through a file that was opened with O_DSYNC,
253 	 * is done non-cached.
254 	 */
255 	if (file->f_flags & O_DSYNC)
256 		return 1;
257 	return addr >= __pa(high_memory);
258 #endif
259 }
260 #endif
261 
262 static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
263 				     unsigned long size, pgprot_t vma_prot)
264 {
265 #ifdef pgprot_noncached
266 	phys_addr_t offset = pfn << PAGE_SHIFT;
267 
268 	if (uncached_access(file, offset))
269 		return pgprot_noncached(vma_prot);
270 #endif
271 	return vma_prot;
272 }
273 #endif
274 
275 #ifndef CONFIG_MMU
276 static unsigned long get_unmapped_area_mem(struct file *file,
277 					   unsigned long addr,
278 					   unsigned long len,
279 					   unsigned long pgoff,
280 					   unsigned long flags)
281 {
282 	if (!valid_mmap_phys_addr_range(pgoff, len))
283 		return (unsigned long) -EINVAL;
284 	return pgoff << PAGE_SHIFT;
285 }
286 
287 /* can't do an in-place private mapping if there's no MMU */
288 static inline int private_mapping_ok(struct vm_area_struct *vma)
289 {
290 	return vma->vm_flags & VM_MAYSHARE;
291 }
292 #else
293 #define get_unmapped_area_mem	NULL
294 
295 static inline int private_mapping_ok(struct vm_area_struct *vma)
296 {
297 	return 1;
298 }
299 #endif
300 
301 static const struct vm_operations_struct mmap_mem_ops = {
302 #ifdef CONFIG_HAVE_IOREMAP_PROT
303 	.access = generic_access_phys
304 #endif
305 };
306 
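/*
 * mmap() of /dev/mem: the (page-aligned) file offset is the physical
 * address to map.  Illustrative userspace call, assuming /dev/mem could
 * be opened:
 *
 *	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, phys_addr);
 */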
307 static int mmap_mem(struct file *file, struct vm_area_struct *vma)
308 {
309 	size_t size = vma->vm_end - vma->vm_start;
310 
311 	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
312 		return -EINVAL;
313 
314 	if (!private_mapping_ok(vma))
315 		return -ENOSYS;
316 
317 	if (!range_is_allowed(vma->vm_pgoff, size))
318 		return -EPERM;
319 
320 	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
321 						&vma->vm_page_prot))
322 		return -EINVAL;
323 
324 	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
325 						 size,
326 						 vma->vm_page_prot);
327 
328 	vma->vm_ops = &mmap_mem_ops;
329 
330 	/* Remap-pfn-range will mark the range VM_IO */
331 	if (remap_pfn_range(vma,
332 			    vma->vm_start,
333 			    vma->vm_pgoff,
334 			    size,
335 			    vma->vm_page_prot)) {
336 		return -EAGAIN;
337 	}
338 	return 0;
339 }
340 
341 #ifdef CONFIG_DEVKMEM
342 static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
343 {
344 	unsigned long pfn;
345 
346 	/* Turn a kernel-virtual address into a physical page frame */
347 	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
348 
349 	/*
350 	 * RED-PEN: on some architectures there is more mapped memory than
351 	 * available in mem_map which pfn_valid checks for. Perhaps should add a
352 	 * new macro here.
353 	 *
354 	 * RED-PEN: vmalloc is not supported right now.
355 	 */
356 	if (!pfn_valid(pfn))
357 		return -EIO;
358 
359 	vma->vm_pgoff = pfn;
360 	return mmap_mem(file, vma);
361 }
362 #endif
363 
364 #ifdef CONFIG_DEVKMEM
365 /*
366  * This function reads the *virtual* memory as seen by the kernel.
367  */
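/*
 * Addresses below high_memory are copied straight out of the kernel's
 * direct mapping; anything above that must be a vmalloc/module address
 * and is read page-by-page through vread() into a bounce page.
 */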
368 static ssize_t read_kmem(struct file *file, char __user *buf,
369 			 size_t count, loff_t *ppos)
370 {
371 	unsigned long p = *ppos;
372 	ssize_t low_count, read, sz;
373 	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
374 	int err = 0;
375 
376 	read = 0;
377 	if (p < (unsigned long) high_memory) {
378 		low_count = count;
379 		if (count > (unsigned long)high_memory - p)
380 			low_count = (unsigned long)high_memory - p;
381 
382 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
383 		/* we don't have page 0 mapped on sparc and m68k.. */
384 		if (p < PAGE_SIZE && low_count > 0) {
385 			sz = size_inside_page(p, low_count);
386 			if (clear_user(buf, sz))
387 				return -EFAULT;
388 			buf += sz;
389 			p += sz;
390 			read += sz;
391 			low_count -= sz;
392 			count -= sz;
393 		}
394 #endif
395 		while (low_count > 0) {
396 			sz = size_inside_page(p, low_count);
397 
398 			/*
399 			 * On ia64 if a page has been mapped somewhere as
400 			 * uncached, then it must also be accessed uncached
401 			 * by the kernel or data corruption may occur
402 			 */
403 			kbuf = xlate_dev_kmem_ptr((char *)p);
404 
405 			if (copy_to_user(buf, kbuf, sz))
406 				return -EFAULT;
407 			buf += sz;
408 			p += sz;
409 			read += sz;
410 			low_count -= sz;
411 			count -= sz;
412 		}
413 	}
414 
415 	if (count > 0) {
416 		kbuf = (char *)__get_free_page(GFP_KERNEL);
417 		if (!kbuf)
418 			return -ENOMEM;
419 		while (count > 0) {
420 			sz = size_inside_page(p, count);
421 			if (!is_vmalloc_or_module_addr((void *)p)) {
422 				err = -ENXIO;
423 				break;
424 			}
425 			sz = vread(kbuf, (char *)p, sz);
426 			if (!sz)
427 				break;
428 			if (copy_to_user(buf, kbuf, sz)) {
429 				err = -EFAULT;
430 				break;
431 			}
432 			count -= sz;
433 			buf += sz;
434 			read += sz;
435 			p += sz;
436 		}
437 		free_page((unsigned long)kbuf);
438 	}
439 	*ppos = p;
440 	return read ? read : err;
441 }
442 
443 
444 static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
445 				size_t count, loff_t *ppos)
446 {
447 	ssize_t written, sz;
448 	unsigned long copied;
449 
450 	written = 0;
451 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
452 	/* we don't have page 0 mapped on sparc and m68k.. */
453 	if (p < PAGE_SIZE) {
454 		sz = size_inside_page(p, count);
455 		/* Hmm. Do something? */
456 		buf += sz;
457 		p += sz;
458 		count -= sz;
459 		written += sz;
460 	}
461 #endif
462 
463 	while (count > 0) {
464 		char *ptr;
465 
466 		sz = size_inside_page(p, count);
467 
468 		/*
469 		 * On ia64 if a page has been mapped somewhere as uncached, then
470 		 * it must also be accessed uncached by the kernel or data
471 		 * corruption may occur.
472 		 */
473 		ptr = xlate_dev_kmem_ptr((char *)p);
474 
475 		copied = copy_from_user(ptr, buf, sz);
476 		if (copied) {
477 			written += sz - copied;
478 			if (written)
479 				break;
480 			return -EFAULT;
481 		}
482 		buf += sz;
483 		p += sz;
484 		count -= sz;
485 		written += sz;
486 	}
487 
488 	*ppos += written;
489 	return written;
490 }
491 
492 /*
493  * This function writes to the *virtual* memory as seen by the kernel.
494  */
495 static ssize_t write_kmem(struct file *file, const char __user *buf,
496 			  size_t count, loff_t *ppos)
497 {
498 	unsigned long p = *ppos;
499 	ssize_t wrote = 0;
500 	ssize_t virtr = 0;
501 	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
502 	int err = 0;
503 
504 	if (p < (unsigned long) high_memory) {
505 		unsigned long to_write = min_t(unsigned long, count,
506 					       (unsigned long)high_memory - p);
507 		wrote = do_write_kmem(p, buf, to_write, ppos);
508 		if (wrote != to_write)
509 			return wrote;
510 		p += wrote;
511 		buf += wrote;
512 		count -= wrote;
513 	}
514 
515 	if (count > 0) {
516 		kbuf = (char *)__get_free_page(GFP_KERNEL);
517 		if (!kbuf)
518 			return wrote ? wrote : -ENOMEM;
519 		while (count > 0) {
520 			unsigned long sz = size_inside_page(p, count);
521 			unsigned long n;
522 
523 			if (!is_vmalloc_or_module_addr((void *)p)) {
524 				err = -ENXIO;
525 				break;
526 			}
527 			n = copy_from_user(kbuf, buf, sz);
528 			if (n) {
529 				err = -EFAULT;
530 				break;
531 			}
532 			vwrite(kbuf, (char *)p, sz);
533 			count -= sz;
534 			buf += sz;
535 			virtr += sz;
536 			p += sz;
537 		}
538 		free_page((unsigned long)kbuf);
539 	}
540 
541 	*ppos = p;
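	/* GCC's "x ? : y" evaluates to x when x is non-zero, else to y */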
542 	return virtr + wrote ? : err;
543 }
544 #endif
545 
546 #ifdef CONFIG_DEVPORT
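/*
 * /dev/port: the file offset selects an I/O port number, and every byte
 * read or written is a single inb()/outb() on that port.
 */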
547 static ssize_t read_port(struct file *file, char __user *buf,
548 			 size_t count, loff_t *ppos)
549 {
550 	unsigned long i = *ppos;
551 	char __user *tmp = buf;
552 
553 	if (!access_ok(VERIFY_WRITE, buf, count))
554 		return -EFAULT;
555 	while (count-- > 0 && i < 65536) {
556 		if (__put_user(inb(i), tmp) < 0)
557 			return -EFAULT;
558 		i++;
559 		tmp++;
560 	}
561 	*ppos = i;
562 	return tmp-buf;
563 }
564 
565 static ssize_t write_port(struct file *file, const char __user *buf,
566 			  size_t count, loff_t *ppos)
567 {
568 	unsigned long i = *ppos;
569 	const char __user *tmp = buf;
570 
571 	if (!access_ok(VERIFY_READ, buf, count))
572 		return -EFAULT;
573 	while (count-- > 0 && i < 65536) {
574 		char c;
575 		if (__get_user(c, tmp)) {
576 			if (tmp > buf)
577 				break;
578 			return -EFAULT;
579 		}
580 		outb(c, i);
581 		i++;
582 		tmp++;
583 	}
584 	*ppos = i;
585 	return tmp-buf;
586 }
587 #endif
588 
589 static ssize_t read_null(struct file *file, char __user *buf,
590 			 size_t count, loff_t *ppos)
591 {
592 	return 0;
593 }
594 
595 static ssize_t write_null(struct file *file, const char __user *buf,
596 			  size_t count, loff_t *ppos)
597 {
598 	return count;
599 }
600 
601 static ssize_t aio_read_null(struct kiocb *iocb, const struct iovec *iov,
602 			     unsigned long nr_segs, loff_t pos)
603 {
604 	return 0;
605 }
606 
607 static ssize_t aio_write_null(struct kiocb *iocb, const struct iovec *iov,
608 			      unsigned long nr_segs, loff_t pos)
609 {
610 	return iov_length(iov, nr_segs);
611 }
612 
613 static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
614 			struct splice_desc *sd)
615 {
616 	return sd->len;
617 }
618 
619 static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
620 				 loff_t *ppos, size_t len, unsigned int flags)
621 {
622 	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
623 }
624 
625 static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
626 {
627 	size_t written = 0;
628 
629 	while (iov_iter_count(iter)) {
630 		size_t chunk = iov_iter_count(iter), n;
631 		if (chunk > PAGE_SIZE)
632 			chunk = PAGE_SIZE;	/* Just for latency reasons */
633 		n = iov_iter_zero(chunk, iter);
634 		if (!n && iov_iter_count(iter))
635 			return written ? written : -EFAULT;
636 		written += n;
637 		if (signal_pending(current))
638 			return written ? written : -ERESTARTSYS;
639 		cond_resched();
640 	}
641 	return written;
642 }
643 
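/*
 * MAP_SHARED mappings of /dev/zero need shmem backing so all sharers see
 * the same pages; MAP_PRIVATE mappings simply fall back to ordinary
 * anonymous zero-fill memory (hence the bare "return 0").
 */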
644 static int mmap_zero(struct file *file, struct vm_area_struct *vma)
645 {
646 #ifndef CONFIG_MMU
647 	return -ENOSYS;
648 #endif
649 	if (vma->vm_flags & VM_SHARED)
650 		return shmem_zero_setup(vma);
651 	return 0;
652 }
653 
654 static ssize_t write_full(struct file *file, const char __user *buf,
655 			  size_t count, loff_t *ppos)
656 {
657 	return -ENOSPC;
658 }
659 
660 /*
661  * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
662  * can fopen() both devices with "a" now.  This was previously impossible.
663  * -- SRB.
664  */
665 static loff_t null_lseek(struct file *file, loff_t offset, int orig)
666 {
667 	return file->f_pos = 0;
668 }
669 
670 /*
671  * The memory devices use the full 32/64 bits of the offset, and so we cannot
672  * check against negative addresses: they are OK. The return value is weird,
673  * though, in that case (0).
674  *
675  * Also note that seeking relative to the "end of file" isn't supported:
676  * it has no meaning, so it returns -EINVAL.
677  */
678 static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
679 {
680 	loff_t ret;
681 
682 	mutex_lock(&file_inode(file)->i_mutex);
683 	switch (orig) {
684 	case SEEK_CUR:
685 		offset += file->f_pos;
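		/* fall through to SEEK_SET with the adjusted offset */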
686 	case SEEK_SET:
687 		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
688 		if (IS_ERR_VALUE((unsigned long long)offset)) {
689 			ret = -EOVERFLOW;
690 			break;
691 		}
692 		file->f_pos = offset;
693 		ret = file->f_pos;
694 		force_successful_syscall_return();
695 		break;
696 	default:
697 		ret = -EINVAL;
698 	}
699 	mutex_unlock(&file_inode(file)->i_mutex);
700 	return ret;
701 }
702 
703 static int open_port(struct inode *inode, struct file *filp)
704 {
705 	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
706 }
707 
708 #define zero_lseek	null_lseek
709 #define full_lseek      null_lseek
710 #define write_zero	write_null
711 #define aio_write_zero	aio_write_null
712 #define open_mem	open_port
713 #define open_kmem	open_mem
714 
715 static const struct file_operations mem_fops = {
716 	.llseek		= memory_lseek,
717 	.read		= read_mem,
718 	.write		= write_mem,
719 	.mmap		= mmap_mem,
720 	.open		= open_mem,
721 	.get_unmapped_area = get_unmapped_area_mem,
722 };
723 
724 #ifdef CONFIG_DEVKMEM
725 static const struct file_operations kmem_fops = {
726 	.llseek		= memory_lseek,
727 	.read		= read_kmem,
728 	.write		= write_kmem,
729 	.mmap		= mmap_kmem,
730 	.open		= open_kmem,
731 	.get_unmapped_area = get_unmapped_area_mem,
732 };
733 #endif
734 
735 static const struct file_operations null_fops = {
736 	.llseek		= null_lseek,
737 	.read		= read_null,
738 	.write		= write_null,
739 	.aio_read	= aio_read_null,
740 	.aio_write	= aio_write_null,
741 	.splice_write	= splice_write_null,
742 };
743 
744 #ifdef CONFIG_DEVPORT
745 static const struct file_operations port_fops = {
746 	.llseek		= memory_lseek,
747 	.read		= read_port,
748 	.write		= write_port,
749 	.open		= open_port,
750 };
751 #endif
752 
753 static const struct file_operations zero_fops = {
754 	.llseek		= zero_lseek,
755 	.read		= new_sync_read,
756 	.write		= write_zero,
757 	.read_iter	= read_iter_zero,
758 	.aio_write	= aio_write_zero,
759 	.mmap		= mmap_zero,
760 };
761 
762 /*
763  * capabilities for /dev/zero
764  * - permits private mappings, "copies" are taken of the source of zeros
765  * - no writeback happens
766  */
767 static struct backing_dev_info zero_bdi = {
768 	.name		= "char/mem",
769 	.capabilities	= BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
770 };
771 
772 static const struct file_operations full_fops = {
773 	.llseek		= full_lseek,
774 	.read		= new_sync_read,
775 	.read_iter	= read_iter_zero,
776 	.write		= write_full,
777 };
778 
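/*
 * The index into devlist[] is the minor number under MEM_MAJOR (1), giving
 * the traditional nodes: /dev/mem=1, /dev/kmem=2, /dev/null=3, /dev/port=4,
 * /dev/zero=5, /dev/full=7, /dev/random=8, /dev/urandom=9, /dev/kmsg=11.
 */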
779 static const struct memdev {
780 	const char *name;
781 	umode_t mode;
782 	const struct file_operations *fops;
783 	struct backing_dev_info *dev_info;
784 } devlist[] = {
785 	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
786 #ifdef CONFIG_DEVKMEM
787 	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
788 #endif
789 	 [3] = { "null", 0666, &null_fops, NULL },
790 #ifdef CONFIG_DEVPORT
791 	 [4] = { "port", 0, &port_fops, NULL },
792 #endif
793 	 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
794 	 [7] = { "full", 0666, &full_fops, NULL },
795 	 [8] = { "random", 0666, &random_fops, NULL },
796 	 [9] = { "urandom", 0666, &urandom_fops, NULL },
797 #ifdef CONFIG_PRINTK
798 	[11] = { "kmsg", 0644, &kmsg_fops, NULL },
799 #endif
800 };
801 
802 static int memory_open(struct inode *inode, struct file *filp)
803 {
804 	int minor;
805 	const struct memdev *dev;
806 
807 	minor = iminor(inode);
808 	if (minor >= ARRAY_SIZE(devlist))
809 		return -ENXIO;
810 
811 	dev = &devlist[minor];
812 	if (!dev->fops)
813 		return -ENXIO;
814 
815 	filp->f_op = dev->fops;
816 	if (dev->dev_info)
817 		filp->f_mapping->backing_dev_info = dev->dev_info;
818 
819 	/* Is /dev/mem or /dev/kmem ? */
820 	if (dev->dev_info == &directly_mappable_cdev_bdi)
821 		filp->f_mode |= FMODE_UNSIGNED_OFFSET;
822 
823 	if (dev->fops->open)
824 		return dev->fops->open(inode, filp);
825 
826 	return 0;
827 }
828 
829 static const struct file_operations memory_fops = {
830 	.open = memory_open,
831 	.llseek = noop_llseek,
832 };
833 
834 static char *mem_devnode(struct device *dev, umode_t *mode)
835 {
836 	if (mode && devlist[MINOR(dev->devt)].mode)
837 		*mode = devlist[MINOR(dev->devt)].mode;
838 	return NULL;
839 }
840 
841 static struct class *mem_class;
842 
843 static int __init chr_dev_init(void)
844 {
845 	int minor;
846 	int err;
847 
848 	err = bdi_init(&zero_bdi);
849 	if (err)
850 		return err;
851 
852 	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
853 		printk(KERN_ERR "unable to get major %d for memory devs\n", MEM_MAJOR);
854 
855 	mem_class = class_create(THIS_MODULE, "mem");
856 	if (IS_ERR(mem_class))
857 		return PTR_ERR(mem_class);
858 
859 	mem_class->devnode = mem_devnode;
860 	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
861 		if (!devlist[minor].name)
862 			continue;
863 
864 		/*
865 		 * Create /dev/port?
866 		 */
867 		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
868 			continue;
869 
870 		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
871 			      NULL, devlist[minor].name);
872 	}
873 
874 	return tty_init();
875 }
876 
877 fs_initcall(chr_dev_init);
878