xref: /openbmc/linux/mm/nommu.c (revision 34b4e4aa3c470ce8fa2bd78abb1741b4b58baad7)
/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2005 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 */

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ptrace.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
unsigned long askedalloc, realalloc;
atomic_t vm_committed_space = ATOMIC_INIT(0);
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int heap_stack_gap = 0;

EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(__vm_enough_memory);
EXPORT_SYMBOL(num_physpages);

/* list of shareable VMAs */
struct rb_root nommu_vma_tree = RB_ROOT;
DECLARE_RWSEM(nommu_vma_sem);

struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned long limit;

	if (inode->i_size < offset)
		goto do_expand;
	i_size_write(inode, offset);

	truncate_inode_pages(mapping, offset);
	goto out_truncate;

do_expand:
	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && offset > limit)
		goto out_sig;
	if (offset > inode->i_sb->s_maxbytes)
		goto out;
	i_size_write(inode, offset);

out_truncate:
	if (inode->i_op && inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out:
	return -EFBIG;
}

EXPORT_SYMBOL(vmtruncate);
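
/* Illustrative sketch (not from the original source): a filesystem would
 * typically reach vmtruncate() from its setattr handler when the file size
 * changes.  "myfs_setattr" is a hypothetical example, not a real driver:
 *
 *	static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *
 *		if (attr->ia_valid & ATTR_SIZE)
 *			return vmtruncate(inode, attr->ia_size);
 *		return 0;
 *	}
 */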

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	if (!objp || !(page = virt_to_page(objp)))
		return 0;

	if (PageSlab(page))
		return ksize(objp);

	BUG_ON(page->index < 0);
	BUG_ON(page->index >= MAX_ORDER);

	return (PAGE_SIZE << page->index);
}
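
/* A small usage sketch (illustrative only): kmalloc() rounds requests up to
 * a block size, and kobjsize() reports that rounded figure rather than the
 * requested length, so the value printed here would typically be 128:
 *
 *	char *p = kmalloc(100, GFP_KERNEL);
 *	if (p) {
 *		printk("asked for 100, really got %u\n", kobjsize(p));
 *		kfree(p);
 *	}
 */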

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count
 *   of a slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
	unsigned long start, int len, int write, int force,
	struct page **pages, struct vm_area_struct **vmas)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * - if 'force' is set, we only require the "MAY" flags.
	 */
	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < len; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if (vma->vm_flags & (VM_IO | VM_PFNMAP) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start += PAGE_SIZE;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}
EXPORT_SYMBOL(get_user_pages);
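
/* Hedged caller-side sketch (the surrounding locking is assumed, not taken
 * from this file): pinning a single page of the current process for kernel
 * access, then dropping the reference taken above:
 *
 *	struct page *page;
 *	int n;
 *
 *	down_read(&current->mm->mmap_sem);
 *	n = get_user_pages(current, current->mm, addr, 1, 1, 0, &page, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (n == 1)
 *		page_cache_release(page);
 */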

DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

void vfree(void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 * kmalloc doesn't like __GFP_HIGHMEM for some reason
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

struct page *vmalloc_to_page(void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
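
/* Illustrative note: on !MMU, vmalloc() is just kmalloc() in disguise, so
 * the returned block is physically contiguous and bounded by the largest
 * chunk the slab allocator can supply.  A minimal sketch:
 *
 *	void *buf = vmalloc(8192);
 *	if (buf) {
 *		memset(buf, 0, 8192);
 *		vfree(buf);
 *	}
 */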

void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32_user);

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  In this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
asmlinkage unsigned long sys_brk(unsigned long brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	return mm->brk = brk;
}
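
/* Hedged userspace-side sketch: the brk area is a fixed window
 * (mm->start_brk..mm->context.end_brk, set up elsewhere, typically at exec
 * time), so a request beyond end_brk just returns the old break; the C
 * library typically reports that as brk()/sbrk() failing, and a fallback
 * is needed:
 *
 *	void *old = sbrk(0);
 *	if (brk((char *) old + huge) != 0)
 *		buf = mmap(NULL, huge, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 */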

#ifdef DEBUG
static void show_process_blocks(void)
{
	struct vm_list_struct *vml;

	printk("Process blocks %d:", current->pid);

	for (vml = current->mm->context.vmlist; vml; vml = vml->next) {
		printk(" %p: %p", vml, vml->vma);
		if (vml->vma)
			printk(" (%d @%lx #%d)",
			       kobjsize((void *) vml->vma->vm_start),
			       vml->vma->vm_start,
			       atomic_read(&vml->vma->vm_usage));
		printk(vml->next ? " ->" : ".\n");
	}
}
#endif /* DEBUG */

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_list_struct *vml)
{
	struct vm_list_struct **ppv;

	for (ppv = &mm->context.vmlist; *ppv; ppv = &(*ppv)->next)
		if ((*ppv)->vma->vm_start > vml->vma->vm_start)
			break;

	vml->next = *ppv;
	*ppv = vml;
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_list_struct *loop, *vml;

	/* search the vm_start ordered list */
	vml = NULL;
	for (loop = mm->context.vmlist; loop; loop = loop->next) {
		if (loop->vma->vm_start > addr)
			break;
		vml = loop;
	}

	if (vml && vml->vma->vm_end > addr)
		return vml->vma;

	return NULL;
}
EXPORT_SYMBOL(find_vma);
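
/* Usage sketch (caller assumed): as the comment above says, hold mmap_sem
 * at least for reading across the lookup and any use of the result:
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma)
 *		printk("addr %lx lies in %lx-%lx\n",
 *		       addr, vma->vm_start, vma->vm_end);
 *	up_read(&mm->mmap_sem);
 */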

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static inline struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
						    unsigned long addr)
{
	struct vm_list_struct *vml;

	/* search the vm_start ordered list */
	for (vml = mm->context.vmlist; vml; vml = vml->next) {
		if (vml->vma->vm_start == addr)
			return vml->vma;
		if (vml->vma->vm_start > addr)
			break;
	}

	return NULL;
}

/*
 * find a VMA in the global tree
 */
static inline struct vm_area_struct *find_nommu_vma(unsigned long start)
{
	struct vm_area_struct *vma;
	struct rb_node *n = nommu_vma_tree.rb_node;

	while (n) {
		vma = rb_entry(n, struct vm_area_struct, vm_rb);

		if (start < vma->vm_start)
			n = n->rb_left;
		else if (start > vma->vm_start)
			n = n->rb_right;
		else
			return vma;
	}

	return NULL;
}

/*
 * add a VMA in the global tree
 */
static void add_nommu_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma;
	struct address_space *mapping;
	struct rb_node **p = &nommu_vma_tree.rb_node;
	struct rb_node *parent = NULL;

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* add the VMA to the master list */
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		if (vma->vm_start < pvma->vm_start) {
			p = &(*p)->rb_left;
		} else if (vma->vm_start > pvma->vm_start) {
			p = &(*p)->rb_right;
		} else {
			/* mappings are at the same address - this can only
			 * happen for shared-mem chardevs and shared file
			 * mappings backed by ramfs/tmpfs */
			BUG_ON(!(pvma->vm_flags & VM_SHARED));

			if (vma < pvma)
				p = &(*p)->rb_left;
			else if (vma > pvma)
				p = &(*p)->rb_right;
			else
				BUG();
		}
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &nommu_vma_tree);
}

/*
 * delete a VMA from the global list
 */
static void delete_nommu_vma(struct vm_area_struct *vma)
{
	struct address_space *mapping;

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* remove from the master list */
	rb_erase(&vma->vm_rb, &nommu_vma_tree);
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities;
	unsigned long reqprot = prot;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED || addr) {
		printk(KERN_DEBUG
		       "%d: Can't do fixed-address/overlay mmap of RAM\n",
		       current->pid);
		return -EINVAL;
	}

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	len = PAGE_ALIGN(len);
	if (!len || len > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* validate file mapping requests */
		struct address_space *mapping;

		/* files must support mmap */
		if (!file->f_op || !file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		mapping = file->f_mapping;
		if (!mapping)
			mapping = file->f_path.dentry->d_inode->i_mapping;

		capabilities = 0;
		if (mapping && mapping->backing_dev_info)
			capabilities = mapping->backing_dev_info->capabilities;

		if (!capabilities) {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = BDI_CAP_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					BDI_CAP_MAP_DIRECT |
					BDI_CAP_READ_MAP |
					BDI_CAP_WRITE_MAP;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		if (!file->f_op->read)
			capabilities &= ~BDI_CAP_MAP_COPY;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file->f_path.dentry->d_inode) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file->f_path.dentry->d_inode))
				return -EAGAIN;

			if (!(capabilities & BDI_CAP_MAP_DIRECT))
				return -ENODEV;

			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
			    ) {
				printk("MAP_SHARED not completely supported on !MMU\n");
				return -EINVAL;
			}

			/* we mustn't privatise shared mappings */
			capabilities &= ~BDI_CAP_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & BDI_CAP_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~BDI_CAP_MAP_DIRECT;
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & BDI_CAP_EXEC_MAP)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			   (prot & PROT_EXEC) &&
			   !(capabilities & BDI_CAP_EXEC_MAP)
			   ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = BDI_CAP_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_file_mmap(file, reqprot, prot, flags, addr, 0);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
	vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		if (flags & MAP_SHARED)
			vm_flags |= VM_MAYSHARE | VM_SHARED;
		else if ((((vm_flags & capabilities) ^ vm_flags) & BDI_CAP_VMFLAGS) == 0)
			vm_flags |= VM_MAYSHARE;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && (current->ptrace & PT_PTRACED))
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}

/*
 * set up a shared mapping on a file
 */
static int do_mmap_shared_file(struct vm_area_struct *vma, unsigned long len)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret != -ENOSYS)
		return ret;

	/* getting an ENOSYS error indicates that direct mmap isn't
	 * possible (as opposed to tried but failed) so we'll fall
	 * through to making a private copy of the data and mapping
	 * that if we can */
	return -ENODEV;
}
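
/* Hypothetical driver-side sketch (all names invented): a quasi-memory
 * chardev makes direct sharing work by siting the mapping through its
 * get_unmapped_area op; its mmap op then only has to accept the VMA.
 * Returning -ENOSYS from mmap would instead mean "direct mapping is
 * impossible", triggering the private-copy fallback described above.
 *
 *	static unsigned long mychar_get_unmapped_area(struct file *file,
 *			unsigned long addr, unsigned long len,
 *			unsigned long pgoff, unsigned long flags)
 *	{
 *		return (unsigned long) mychar_buffer;
 *	}
 *
 *	static int mychar_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return 0;
 *	}
 */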

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
{
	void *base;
	int ret;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (vma->vm_file) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret != -ENOSYS) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(ret == 0 && !(vma->vm_flags & VM_MAYSHARE));
			return ret; /* success or a real error */
		}

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	base = kmalloc(len, GFP_KERNEL|__GFP_COMP);
	if (!base)
		goto enomem;

	vma->vm_start = (unsigned long) base;
	vma->vm_end = vma->vm_start + len;
	vma->vm_flags |= VM_MAPPED_COPY;

#ifdef WARN_ON_SLACK
	if (len + WARN_ON_SLACK <= kobjsize(base))
		printk("Allocation of %lu bytes from process %d has %lu bytes of slack\n",
		       len, current->pid, kobjsize(base) - len);
#endif

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		mm_segment_t old_fs;
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
		set_fs(old_fs);

		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);

	} else {
		/* if it's an anonymous mapping, then just clear it */
		memset(base, 0, len);
	}

	return 0;

error_free:
	kfree(base);
	vma->vm_start = 0;
	return ret;

enomem:
	printk("Allocation of length %lu from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;
}
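
/* Worth spelling out with a sketch (illustrative): the backing store comes
 * straight from kmalloc(), so a private mapping of a sub-page object need
 * not start on a page boundary, unlike on an MMU kernel:
 *
 *	p = mmap(NULL, 100, PROT_READ | PROT_WRITE,
 *		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	((unsigned long) p & ~PAGE_MASK) may well be non-zero
 */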

/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap_pgoff(struct file *file,
			    unsigned long addr,
			    unsigned long len,
			    unsigned long prot,
			    unsigned long flags,
			    unsigned long pgoff)
{
	struct vm_list_struct *vml = NULL;
	struct vm_area_struct *vma = NULL;
	struct rb_node *rb;
	unsigned long capabilities, vm_flags;
	void *result;
	int ret;

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0)
		return ret;

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags = determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping if it works */
	vml = kzalloc(sizeof(struct vm_list_struct), GFP_KERNEL);
	if (!vml)
		goto error_getting_vml;

	down_write(&nommu_vma_sem);

	/* if we want to share, we need to check for VMAs created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with an exact match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned, for in
	 *   these cases sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		unsigned long pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long vmpglen;

		/* suppress VMA sharing for shared regions */
		if (vm_flags & VM_SHARED &&
		    capabilities & BDI_CAP_MAP_DIRECT)
			goto dont_share_VMAs;

		for (rb = rb_first(&nommu_vma_tree); rb; rb = rb_next(rb)) {
			vma = rb_entry(rb, struct vm_area_struct, vm_rb);

			if (!(vma->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (vma->vm_file->f_path.dentry->d_inode != file->f_path.dentry->d_inode)
				continue;

			if (vma->vm_pgoff >= pgoff + pglen)
				continue;

			vmpglen = vma->vm_end - vma->vm_start + PAGE_SIZE - 1;
			vmpglen >>= PAGE_SHIFT;
			if (pgoff >= vma->vm_pgoff + vmpglen)
				continue;

			/* handle inexactly overlapping matches between mappings */
			if (vma->vm_pgoff != pgoff || vmpglen != pglen) {
				if (!(capabilities & BDI_CAP_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a VMA we can share */
			atomic_inc(&vma->vm_usage);

			vml->vma = vma;
			result = (void *) vma->vm_start;
			goto shared;
		}

	dont_share_VMAs:
		vma = NULL;

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (file && file->f_op->get_unmapped_area) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR((void *) addr)) {
				ret = addr;
				if (ret != (unsigned long) -ENOSYS)
					goto error;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = (unsigned long) -ENODEV;
				if (!(capabilities & BDI_CAP_MAP_COPY))
					goto error;

				capabilities &= ~BDI_CAP_MAP_DIRECT;
			}
		}
	}

	/* we're going to need a VMA struct as well */
	vma = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!vma)
		goto error_getting_vma;

	INIT_LIST_HEAD(&vma->anon_vma_node);
	atomic_set(&vma->vm_usage, 1);
	if (file)
		get_file(file);
	vma->vm_file	= file;
	vma->vm_flags	= vm_flags;
	vma->vm_start	= addr;
	vma->vm_end	= addr + len;
	vma->vm_pgoff	= pgoff;

	vml->vma = vma;

	/* set up the mapping */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma, len);
	else
		ret = do_mmap_private(vma, len);
	if (ret < 0)
		goto error;

	/* okay... we have a mapping; now we have to register it */
	result = (void *) vma->vm_start;

	if (vma->vm_flags & VM_MAPPED_COPY) {
		realalloc += kobjsize(result);
		askedalloc += len;
	}

	realalloc += kobjsize(vma);
	askedalloc += sizeof(*vma);

	current->mm->total_vm += len >> PAGE_SHIFT;

	add_nommu_vma(vma);

 shared:
	realalloc += kobjsize(vml);
	askedalloc += sizeof(*vml);

	add_vma_to_mm(current->mm, vml);

	up_write(&nommu_vma_sem);

	if (prot & PROT_EXEC)
		flush_icache_range((unsigned long) result,
				   (unsigned long) result + len);

#ifdef DEBUG
	printk("do_mmap:\n");
	show_process_blocks();
#endif

	return (unsigned long) result;

 error:
	up_write(&nommu_vma_sem);
	kfree(vml);
	if (vma) {
		if (vma->vm_file)
			fput(vma->vm_file);
		kfree(vma);
	}
	return ret;

 sharing_violation:
	up_write(&nommu_vma_sem);
	printk("Attempt to share mismatched mappings\n");
	kfree(vml);
	return -EINVAL;

 error_getting_vma:
	up_write(&nommu_vma_sem);
	kfree(vml);
	printk("Allocation of vma for %lu byte allocation from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;

 error_getting_vml:
	printk("Allocation of vml for %lu byte allocation from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;
}
EXPORT_SYMBOL(do_mmap_pgoff);
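
/* Hedged userspace sketch of what the validation above permits: no hint
 * address, no MAP_FIXED, and for most regular files only private mappings:
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);	succeeds
 *	p = mmap(hint, len, PROT_READ | PROT_WRITE,
 *		 MAP_PRIVATE | MAP_FIXED, fd, 0);	fails with EINVAL
 */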

/*
 * handle mapping disposal for uClinux
 */
static void put_vma(struct vm_area_struct *vma)
{
	if (vma) {
		down_write(&nommu_vma_sem);

		if (atomic_dec_and_test(&vma->vm_usage)) {
			delete_nommu_vma(vma);

			if (vma->vm_ops && vma->vm_ops->close)
				vma->vm_ops->close(vma);

			/* IO memory and memory shared directly out of the
			 * pagecache from ramfs/tmpfs mustn't be released
			 * here */
			if (vma->vm_flags & VM_MAPPED_COPY) {
				realalloc -= kobjsize((void *) vma->vm_start);
				askedalloc -= vma->vm_end - vma->vm_start;
				kfree((void *) vma->vm_start);
			}

			realalloc -= kobjsize(vma);
			askedalloc -= sizeof(*vma);

			if (vma->vm_file)
				fput(vma->vm_file);
			kfree(vma);
		}

		up_write(&nommu_vma_sem);
	}
}

/*
 * release a mapping
 * - under NOMMU conditions the parameters must exactly match the mapping
 *   to be removed
 */
int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
{
	struct vm_list_struct *vml, **parent;
	unsigned long end = addr + len;

#ifdef DEBUG
	printk("do_munmap:\n");
#endif

	for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next) {
		if ((*parent)->vma->vm_start > addr)
			break;
		if ((*parent)->vma->vm_start == addr &&
		    ((len == 0) || ((*parent)->vma->vm_end == end)))
			goto found;
	}

	printk("munmap of non-mmapped memory by process %d (%s): %p\n",
	       current->pid, current->comm, (void *) addr);
	return -EINVAL;

 found:
	vml = *parent;

	put_vma(vml->vma);

	*parent = vml->next;
	realalloc -= kobjsize(vml);
	askedalloc -= sizeof(*vml);
	kfree(vml);

	update_hiwater_vm(mm);
	mm->total_vm -= len >> PAGE_SHIFT;

#ifdef DEBUG
	show_process_blocks();
#endif

	return 0;
}
EXPORT_SYMBOL(do_munmap);
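
/* Illustrative consequence of the exact-match rule above: partial unmaps
 * are rejected, so the munmap() must mirror the original mmap():
 *
 *	p = mmap(NULL, 2 * PAGE_SIZE, ...);
 *	munmap(p, PAGE_SIZE);		fails with -EINVAL
 *	munmap(p, 2 * PAGE_SIZE);	succeeds
 */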

asmlinkage long sys_munmap(unsigned long addr, size_t len)
{
	int ret;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}

/*
 * Release all mappings
 */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_list_struct *tmp;

	if (mm) {
#ifdef DEBUG
		printk("Exit_mmap:\n");
#endif

		mm->total_vm = 0;

		while ((tmp = mm->context.vmlist)) {
			mm->context.vmlist = tmp->next;
			put_vma(tmp->vma);

			realalloc -= kobjsize(tmp);
			askedalloc -= sizeof(*tmp);
			kfree(tmp);
		}

#ifdef DEBUG
		show_process_blocks();
#endif
	}
}

unsigned long do_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the hole allocated by the kmalloc() call in
 * do_mmap_pgoff() and the block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	if (new_len == 0)
		return (unsigned long) -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > kobjsize((void *) addr))
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;

	askedalloc -= old_len;
	askedalloc += new_len;

	return vma->vm_start;
}
EXPORT_SYMBOL(do_mremap);
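
/* Worked sketch (sizes illustrative): a 100-byte private mapping really
 * occupies a whole kmalloc block, typically 128 bytes, so it can be grown
 * in place up to kobjsize() but no further - moving is never implemented:
 *
 *	p = mmap(NULL, 100, ...);
 *	mremap(p, 100, 120, 0);			succeeds, same address
 *	mremap(p, 120, 4096, MREMAP_MAYMOVE);	fails with -ENOMEM
 */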

asmlinkage unsigned long sys_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			unsigned int foll_flags)
{
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		unsigned long to, unsigned long size, pgprot_t prot)
{
	vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}
EXPORT_SYMBOL(unmap_mapping_range);

/*
 * ask for an unmapped area at which to create a mapping on a file
 */
unsigned long get_unmapped_area(struct file *file, unsigned long addr,
				unsigned long len, unsigned long pgoff,
				unsigned long flags)
{
	unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
				  unsigned long, unsigned long);

	get_area = current->mm->get_unmapped_area;
	if (file && file->f_op && file->f_op->get_unmapped_area)
		get_area = file->f_op->get_unmapped_area;

	if (!get_area)
		return -ENOSYS;

	return get_area(file, addr, len, pgoff, flags);
}
EXPORT_SYMBOL(get_unmapped_area);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	unsigned long free, allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		unsigned long n;

		free = global_page_state(NR_FILE_PAGES);
		free += nr_swap_pages;

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			free -= free / 32;

		if (free > pages)
			return 0;

		/*
		 * nr_free_pages() is very expensive on large systems,
		 * only call if we're about to fail.
		 */
		n = nr_free_pages();

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (n <= totalreserve_pages)
			goto error;
		else
			n -= totalreserve_pages;

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			n -= n / 32;
		free += n;

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = totalram_pages * sysctl_overcommit_ratio / 100;
	/*
	 * Leave the last 3% for root
	 */
	if (!cap_sys_admin)
		allowed -= allowed / 32;
	allowed += total_swap_pages;

	/* Don't let a single process grow too big:
	   leave 3% of the size of this process for other processes */
	allowed -= current->mm->total_vm / 32;

	/*
	 * cast `allowed' as a signed long because vm_committed_space
	 * sometimes has a negative value
	 */
	if (atomic_read(&vm_committed_space) < (long)allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
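
/* Worked example of the strict-accounting arithmetic above (numbers purely
 * illustrative): with 16384 pages of RAM, no swap, the default 50% ratio
 * and no CAP_SYS_ADMIN:
 *
 *	allowed  = 16384 * 50 / 100		= 8192 pages
 *	allowed -= 8192 / 32  (root's 3%)	-> 7936 pages
 *	allowed -= current->mm->total_vm / 32
 *
 * and the request succeeds while vm_committed_space remains below that.
 */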

int in_gate_area_no_task(unsigned long addr)
{
	return 0;
}

int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			len -= copy_to_user((void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			len -= copy_from_user(buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
	return len;
}
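
/* Usage sketch (assumed caller): this helper underpins ptrace peeks and
 * pokes, e.g. reading one word from a stopped child:
 *
 *	unsigned long word;
 *
 *	if (access_process_vm(child, addr, &word, sizeof(word), 0) !=
 *	    sizeof(word))
 *		return -EIO;
 */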