/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2005 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 */

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ptrace.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
unsigned long askedalloc, realalloc;
atomic_t vm_committed_space = ATOMIC_INIT(0);
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int heap_stack_gap = 0;

EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(sysctl_max_map_count);
EXPORT_SYMBOL(sysctl_overcommit_memory);
EXPORT_SYMBOL(sysctl_overcommit_ratio);
EXPORT_SYMBOL(vm_committed_space);
EXPORT_SYMBOL(__vm_enough_memory);

/* list of shareable VMAs */
struct rb_root nommu_vma_tree = RB_ROOT;
DECLARE_RWSEM(nommu_vma_sem);

struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned long limit;

	if (inode->i_size < offset)
		goto do_expand;
	i_size_write(inode, offset);

	truncate_inode_pages(mapping, offset);
	goto out_truncate;

do_expand:
	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && offset > limit)
		goto out_sig;
	if (offset > inode->i_sb->s_maxbytes)
		goto out;
	i_size_write(inode, offset);

out_truncate:
	if (inode->i_op && inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out:
	return -EFBIG;
}

EXPORT_SYMBOL(vmtruncate);

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	if (!objp || !(page = virt_to_page(objp)))
		return 0;

	if (PageSlab(page))
		return ksize(objp);

	BUG_ON(page->index < 0);
	BUG_ON(page->index >= MAX_ORDER);

	return PAGE_SIZE << page->index;
}
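
/*
 * Illustrative example (exact figures depend on the slab configuration):
 * for an object from kmalloc(100, GFP_KERNEL), kobjsize() returns the
 * size of the backing slab object - e.g. 128 - rather than the 100 bytes
 * asked for.  For page-allocator-backed memory it relies on the
 * convention, assumed here, that page->index holds the allocation order,
 * hence the PAGE_SIZE << page->index above.
 */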

/*
 * The nommu dodgy version :-)
 * On no-MMU, user and kernel share one flat address space, so "getting"
 * user pages amounts to taking a reference on each page and handing back
 * a dummy VMA.
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
	unsigned long start, int len, int write, int force,
	struct page **pages, struct vm_area_struct **vmas)
{
	int i;
	static struct vm_area_struct dummy_vma;

	for (i = 0; i < len; i++) {
		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = &dummy_vma;
		start += PAGE_SIZE;
	}
	return i;
}

DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

void vfree(void *addr)
{
	kfree(addr);
}

void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask,
			pgprot_t prot)
{
	/*
	 * kmalloc() cannot supply highmem: slab objects need a permanent
	 * kernel mapping, so mask __GFP_HIGHMEM off
	 */
	return kmalloc(size, gfp_mask & ~__GFP_HIGHMEM);
}
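
/*
 * On no-MMU there is no separate vmalloc address space, so the
 * vmalloc()/vfree() pair above degenerates into kmalloc()/kfree(): for
 * example, these two sequences are equivalent here,
 *
 *	p = vmalloc(size);               ... vfree(p);
 *	p = kmalloc(size, GFP_KERNEL);   ... kfree(p);
 *
 * with the consequence that "vmalloc" allocations must in fact be
 * physically contiguous.
 */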

struct page *vmalloc_to_page(void *addr)
{
	return virt_to_page(addr);
}

unsigned long vmalloc_to_pfn(void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}

long vread(char *buf, char *addr, unsigned long count)
{
	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}
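
/*
 * Worked example of the overflow clamp above, on a 32-bit machine: with
 * addr == 0xfffff000 and count == 0x2000, addr + count wraps to 0x1000,
 * which is < count, so count is clamped to -(unsigned long) addr ==
 * 0x1000 - exactly the number of bytes from addr to the top of the
 * address space.
 */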

/*
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

/*
 *	vmalloc_32  -  allocate virtually contiguous memory (32-bit addressable)
 *
 *	@size:		allocation size
 *
 *	Allocate enough 32-bit physically addressable pages to cover @size
 *	from the page level allocator and map them into contiguous kernel
 *	virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}

void vunmap(void *addr)
{
	BUG();
}

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  in this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
asmlinkage unsigned long sys_brk(unsigned long brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	return mm->brk = brk;
}
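
/*
 * Example of the no-MMU brk contract (assuming the binary loader set
 * mm->start_brk and mm->context.end_brk around a fixed heap hole):
 *
 *	brk(old_brk + 4096)
 *
 * succeeds by simply advancing mm->brk within the hole - no memory is
 * allocated or freed - while a request beyond end_brk just returns the
 * current break unchanged rather than failing with an errno.
 */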

#ifdef DEBUG
static void show_process_blocks(void)
{
	struct vm_list_struct *vml;

	printk("Process blocks %d:", current->pid);

	for (vml = &current->mm->context.vmlist; vml; vml = vml->next) {
		printk(" %p: %p", vml, vml->vma);
		if (vml->vma)
			printk(" (%d @%lx #%d)",
			       kobjsize((void *) vml->vma->vm_start),
			       vml->vma->vm_start,
			       atomic_read(&vml->vma->vm_usage));
		printk(vml->next ? " ->" : ".\n");
	}
}
#endif /* DEBUG */

static inline struct vm_area_struct *find_nommu_vma(unsigned long start)
{
	struct vm_area_struct *vma;
	struct rb_node *n = nommu_vma_tree.rb_node;

	while (n) {
		vma = rb_entry(n, struct vm_area_struct, vm_rb);

		if (start < vma->vm_start)
			n = n->rb_left;
		else if (start > vma->vm_start)
			n = n->rb_right;
		else
			return vma;
	}

	return NULL;
}

static void add_nommu_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma;
	struct address_space *mapping;
	struct rb_node **p = &nommu_vma_tree.rb_node;
	struct rb_node *parent = NULL;

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* add the VMA to the master list */
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		if (vma->vm_start < pvma->vm_start) {
			p = &(*p)->rb_left;
		}
		else if (vma->vm_start > pvma->vm_start) {
			p = &(*p)->rb_right;
		}
		else {
			/* mappings are at the same address - this can only
			 * happen for shared-mem chardevs and shared file
			 * mappings backed by ramfs/tmpfs */
			BUG_ON(!(pvma->vm_flags & VM_SHARED));

			if (vma < pvma)
				p = &(*p)->rb_left;
			else if (vma > pvma)
				p = &(*p)->rb_right;
			else
				BUG();
		}
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &nommu_vma_tree);
}
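
/*
 * Note on the tree discipline: nommu_vma_tree is keyed on vm_start, with
 * the VMA pointer itself breaking ties between VM_SHARED mappings that
 * sit at the same address.  A consequence is that find_nommu_vma() above
 * only matches a VMA by its exact vm_start, not by an address somewhere
 * inside it.
 */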

static void delete_nommu_vma(struct vm_area_struct *vma)
{
	struct address_space *mapping;

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* remove from the master list */
	rb_erase(&vma->vm_rb, &nommu_vma_tree);
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities;
	unsigned long reqprot = prot;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED || addr) {
		printk(KERN_DEBUG
		       "%d: Can't do fixed-address/overlay mmap of RAM\n",
		       current->pid);
		return -EINVAL;
	}

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (PAGE_ALIGN(len) == 0)
		return addr;

	if (len > TASK_SIZE)
		return -EINVAL;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EINVAL;

	if (file) {
		/* validate file mapping requests */
		struct address_space *mapping;

		/* files must support mmap */
		if (!file->f_op || !file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		mapping = file->f_mapping;
		if (!mapping)
			mapping = file->f_dentry->d_inode->i_mapping;

		capabilities = 0;
		if (mapping && mapping->backing_dev_info)
			capabilities = mapping->backing_dev_info->capabilities;

		if (!capabilities) {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file->f_dentry->d_inode->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = BDI_CAP_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					BDI_CAP_MAP_DIRECT |
					BDI_CAP_READ_MAP |
					BDI_CAP_WRITE_MAP;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		if (!file->f_op->read)
			capabilities &= ~BDI_CAP_MAP_COPY;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file->f_dentry->d_inode) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file->f_dentry->d_inode))
				return -EAGAIN;

			if (!(capabilities & BDI_CAP_MAP_DIRECT))
				return -ENODEV;

			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
			    ) {
				printk("MAP_SHARED not completely supported on !MMU\n");
				return -EINVAL;
			}

			/* we mustn't privatise shared mappings */
			capabilities &= ~BDI_CAP_MAP_COPY;
		}
		else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & BDI_CAP_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~BDI_CAP_MAP_DIRECT;
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (file->f_vfsmnt->mnt_flags & MNT_NOEXEC) {
			if (prot & PROT_EXEC)
				return -EPERM;
		}
		else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & BDI_CAP_EXEC_MAP)
					prot |= PROT_EXEC;
			}
		}
		else if ((prot & PROT_READ) &&
			 (prot & PROT_EXEC) &&
			 !(capabilities & BDI_CAP_EXEC_MAP)
			 ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		}
	}
	else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = BDI_CAP_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_file_mmap(file, reqprot, prot, flags);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}
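
/*
 * Example of the net effect: a MAP_SHARED, PROT_READ|PROT_WRITE request
 * against an ordinary regular file is refused with -ENODEV (the default
 * capabilities give it only BDI_CAP_MAP_COPY, never BDI_CAP_MAP_DIRECT),
 * while the same request against a chardev that provides its own memory
 * can succeed, since chardevs default to BDI_CAP_MAP_DIRECT plus the
 * read/write map capabilities above.  (This assumes the device also
 * supplies get_unmapped_area; otherwise BDI_CAP_MAP_DIRECT is masked off.)
 */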

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
	vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	}
	else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		if (flags & MAP_SHARED)
			vm_flags |= VM_MAYSHARE | VM_SHARED;
		else if ((((vm_flags & capabilities) ^ vm_flags) & BDI_CAP_VMFLAGS) == 0)
			vm_flags |= VM_MAYSHARE;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && (current->ptrace & PT_PTRACED))
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}
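
/*
 * Illustrative translation: a PROT_READ, MAP_PRIVATE mapping of a regular
 * file (capabilities == BDI_CAP_MAP_COPY) comes out as
 * VM_READ | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | VM_MAYSHARE - the
 * read-only private copy may safely be shared between processes, unless
 * this process is being ptraced.
 */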

/*
 * set up a shared mapping on a file
 */
static int do_mmap_shared_file(struct vm_area_struct *vma, unsigned long len)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret != -ENOSYS)
		return ret;

	/* getting an ENOSYS error indicates that direct mmap isn't
	 * possible (as opposed to tried but failed) so we'll fall
	 * through to making a private copy of the data and mapping
	 * that if we can */
	return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
{
	void *base;
	int ret;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (vma->vm_file) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret != -ENOSYS) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(ret == 0 && !(vma->vm_flags & VM_MAYSHARE));
			return ret; /* success or a real error */
		}

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	base = kmalloc(len, GFP_KERNEL);
	if (!base)
		goto enomem;

	vma->vm_start = (unsigned long) base;
	vma->vm_end = vma->vm_start + len;
	vma->vm_flags |= VM_MAPPED_COPY;

#ifdef WARN_ON_SLACK
	if (len + WARN_ON_SLACK <= kobjsize(base))
		printk("Allocation of %lu bytes from process %d has %lu bytes of slack\n",
		       len, current->pid, kobjsize(base) - len);
#endif

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		mm_segment_t old_fs;
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
		set_fs(old_fs);

		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);

	} else {
		/* if it's an anonymous mapping, then just clear it */
		memset(base, 0, len);
	}

	return 0;

error_free:
	kfree(base);
	vma->vm_start = 0;
	return ret;

enomem:
	printk("Allocation of length %lu from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;
}
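
/*
 * The upshot: a PROT_READ, MAP_PRIVATE file mapping that cannot be mapped
 * directly ends up as a kmalloc'd snapshot of the file contents - writes
 * to the region never reach the file, later changes to the file are not
 * seen, and the region is only page-aligned if the allocator happens to
 * align it.
 */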

/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap_pgoff(struct file *file,
			    unsigned long addr,
			    unsigned long len,
			    unsigned long prot,
			    unsigned long flags,
			    unsigned long pgoff)
{
	struct vm_list_struct *vml = NULL;
	struct vm_area_struct *vma = NULL;
	struct rb_node *rb;
	unsigned long capabilities, vm_flags;
	void *result;
	int ret;

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0)
		return ret;

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags = determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping if it works */
	vml = kmalloc(sizeof(struct vm_list_struct), GFP_KERNEL);
	if (!vml)
		goto error_getting_vml;
	memset(vml, 0, sizeof(*vml));

	down_write(&nommu_vma_sem);

	/* if we want to share, we need to check for VMAs created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with an exact match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned, for in
	 *   these cases sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		unsigned long pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long vmpglen;

		for (rb = rb_first(&nommu_vma_tree); rb; rb = rb_next(rb)) {
			vma = rb_entry(rb, struct vm_area_struct, vm_rb);

			if (!(vma->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (vma->vm_file->f_dentry->d_inode != file->f_dentry->d_inode)
				continue;

			if (vma->vm_pgoff >= pgoff + pglen)
				continue;

			vmpglen = vma->vm_end - vma->vm_start + PAGE_SIZE - 1;
			vmpglen >>= PAGE_SHIFT;
			if (pgoff >= vma->vm_pgoff + vmpglen)
				continue;

			/* handle inexactly overlapping matches between mappings */
			if (vma->vm_pgoff != pgoff || vmpglen != pglen) {
				if (!(capabilities & BDI_CAP_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a VMA we can share */
			atomic_inc(&vma->vm_usage);

			vml->vma = vma;
			result = (void *) vma->vm_start;
			goto shared;
		}

		vma = NULL;

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (file && file->f_op->get_unmapped_area) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR((void *) addr)) {
				ret = addr;
				if (ret != (unsigned long) -ENOSYS)
					goto error;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = (unsigned long) -ENODEV;
				if (!(capabilities & BDI_CAP_MAP_COPY))
					goto error;

				capabilities &= ~BDI_CAP_MAP_DIRECT;
			}
		}
	}

	/* we're going to need a VMA struct as well */
	vma = kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!vma)
		goto error_getting_vma;

	memset(vma, 0, sizeof(*vma));
	INIT_LIST_HEAD(&vma->anon_vma_node);
	atomic_set(&vma->vm_usage, 1);
	if (file)
		get_file(file);
	vma->vm_file	= file;
	vma->vm_flags	= vm_flags;
	vma->vm_start	= addr;
	vma->vm_end	= addr + len;
	vma->vm_pgoff	= pgoff;

	vml->vma = vma;

	/* set up the mapping */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma, len);
	else
		ret = do_mmap_private(vma, len);
	if (ret < 0)
		goto error;

	/* okay... we have a mapping; now we have to register it */
	result = (void *) vma->vm_start;

	if (vma->vm_flags & VM_MAPPED_COPY) {
		realalloc += kobjsize(result);
		askedalloc += len;
	}

	realalloc += kobjsize(vma);
	askedalloc += sizeof(*vma);

	current->mm->total_vm += len >> PAGE_SHIFT;

	add_nommu_vma(vma);

 shared:
	realalloc += kobjsize(vml);
	askedalloc += sizeof(*vml);

	vml->next = current->mm->context.vmlist;
	current->mm->context.vmlist = vml;

	up_write(&nommu_vma_sem);

	if (prot & PROT_EXEC)
		flush_icache_range((unsigned long) result,
				   (unsigned long) result + len);

#ifdef DEBUG
	printk("do_mmap:\n");
	show_process_blocks();
#endif

	return (unsigned long) result;

 error:
	up_write(&nommu_vma_sem);
	kfree(vml);
	if (vma) {
		/* vm_file may be NULL if an anonymous mapping failed in
		 * do_mmap_private() - fput(NULL) would oops */
		if (vma->vm_file)
			fput(vma->vm_file);
		kfree(vma);
	}
	return ret;

 sharing_violation:
	up_write(&nommu_vma_sem);
	printk("Attempt to share mismatched mappings\n");
	kfree(vml);
	return -EINVAL;

 error_getting_vma:
	up_write(&nommu_vma_sem);
	kfree(vml);
	printk("Allocation of vma for %lu byte allocation from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;

 error_getting_vml:
	printk("Allocation of vml for %lu byte allocation from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;
}
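
/*
 * Usage sketch (assuming the usual arch glue routes sys_mmap2() here): a
 * userspace call such as
 *
 *	mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0)
 *
 * yields either the address of a directly shared window onto the backing
 * device/inode or that of a private kmalloc'd copy of the file contents;
 * either way the address is chosen entirely by the kernel, since
 * fixed-address requests were rejected in validate_mmap_request().
 */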

/*
 * handle mapping disposal for uClinux
 */
static void put_vma(struct vm_area_struct *vma)
{
	if (vma) {
		down_write(&nommu_vma_sem);

		if (atomic_dec_and_test(&vma->vm_usage)) {
			delete_nommu_vma(vma);

			if (vma->vm_ops && vma->vm_ops->close)
				vma->vm_ops->close(vma);

			/* IO memory and memory shared directly out of the
			 * pagecache from ramfs/tmpfs mustn't be released
			 * here */
			if (vma->vm_flags & VM_MAPPED_COPY) {
				realalloc -= kobjsize((void *) vma->vm_start);
				askedalloc -= vma->vm_end - vma->vm_start;
				kfree((void *) vma->vm_start);
			}

			realalloc -= kobjsize(vma);
			askedalloc -= sizeof(*vma);

			if (vma->vm_file)
				fput(vma->vm_file);
			kfree(vma);
		}

		up_write(&nommu_vma_sem);
	}
}

int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
{
	struct vm_list_struct *vml, **parent;
	unsigned long end = addr + len;

#ifdef DEBUG
	printk("do_munmap:\n");
#endif

	for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next)
		if ((*parent)->vma->vm_start == addr &&
		    (*parent)->vma->vm_end == end)
			goto found;

	printk("munmap of non-mmapped memory by process %d (%s): %p\n",
	       current->pid, current->comm, (void *) addr);
	return -EINVAL;

 found:
	vml = *parent;

	put_vma(vml->vma);

	*parent = vml->next;
	realalloc -= kobjsize(vml);
	askedalloc -= sizeof(*vml);
	kfree(vml);
	mm->total_vm -= len >> PAGE_SHIFT;

#ifdef DEBUG
	show_process_blocks();
#endif

	return 0;
}

/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_list_struct *tmp;

	if (mm) {
#ifdef DEBUG
		printk("exit_mmap:\n");
#endif

		mm->total_vm = 0;

		while ((tmp = mm->context.vmlist)) {
			mm->context.vmlist = tmp->next;
			put_vma(tmp->vma);

			realalloc -= kobjsize(tmp);
			askedalloc -= sizeof(*tmp);
			kfree(tmp);
		}

#ifdef DEBUG
		show_process_blocks();
#endif
	}
}

asmlinkage long sys_munmap(unsigned long addr, size_t len)
{
	int ret;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}

unsigned long do_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 *
 * on uClinux, we only permit changing a mapping's size, and only as long
 * as it stays within the hole allocated by the kmalloc() call in
 * do_mmap_pgoff() and the block is not shareable
 */
unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_list_struct *vml = NULL;

	/* insanity checks first */
	if (new_len == 0)
		return (unsigned long) -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	for (vml = current->mm->context.vmlist; vml; vml = vml->next)
		if (vml->vma->vm_start == addr)
			goto found;

	return (unsigned long) -EINVAL;

 found:
	if (vml->vma->vm_end != vml->vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vml->vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > kobjsize((void *) addr))
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vml->vma->vm_end = vml->vma->vm_start + new_len;

	askedalloc -= old_len;
	askedalloc += new_len;

	return vml->vma->vm_start;
}
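
/*
 * Example of the constraints above (sizes illustrative, they depend on
 * the allocator's bucket sizes): a non-shareable 3000-byte mapping can
 * typically be grown in place to 4096 bytes, because kobjsize() reports
 * the full size of the kmalloc block backing it - but never beyond that
 * block, never moved, and never if the mapping is shareable.
 */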

/*
 * Look up the VMA that contains addr; returns NULL if none does.  (Unlike
 * the MMU version, this does not return the next VMA above an unmapped
 * address.)
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_list_struct *vml;

	for (vml = mm->context.vmlist; vml; vml = vml->next)
		if (addr >= vml->vma->vm_start && addr < vml->vma->vm_end)
			return vml->vma;

	return NULL;
}

EXPORT_SYMBOL(find_vma);

struct page *follow_page(struct mm_struct *mm, unsigned long addr, int write)
{
	return NULL;
}

struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		unsigned long to, unsigned long size, pgprot_t prot)
{
	return -EPERM;
}

void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void arch_unmap_area(struct vm_area_struct *area)
{
}

void update_mem_hiwater(struct task_struct *tsk)
{
	if (likely(tsk->mm)) {
		/* don't read the counter until tsk->mm is known non-NULL */
		unsigned long rss = get_mm_counter(tsk->mm, rss);

		if (tsk->mm->hiwater_rss < rss)
			tsk->mm->hiwater_rss = rss;
		if (tsk->mm->hiwater_vm < tsk->mm->total_vm)
			tsk->mm->hiwater_vm = tsk->mm->total_vm;
	}
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(long pages, int cap_sys_admin)
{
	unsigned long free, allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		unsigned long n;

		free = get_page_cache_size();
		free += nr_swap_pages;

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 * category
		 */
		free += atomic_read(&slab_reclaim_pages);

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			free -= free / 32;

		if (free > pages)
			return 0;

		/*
		 * nr_free_pages() is very expensive on large systems,
		 * only call if we're about to fail.
		 */
		n = nr_free_pages();
		if (!cap_sys_admin)
			n -= n / 32;
		free += n;

		if (free > pages)
			return 0;
		vm_unacct_memory(pages);
		return -ENOMEM;
	}

	allowed = totalram_pages * sysctl_overcommit_ratio / 100;
	/*
	 * Leave the last 3% for root
	 */
	if (!cap_sys_admin)
		allowed -= allowed / 32;
	allowed += total_swap_pages;

	/* Don't let a single process grow too big:
	   leave 3% of the size of this process for other processes */
	allowed -= current->mm->total_vm / 32;

	if (atomic_read(&vm_committed_space) < allowed)
		return 0;

	vm_unacct_memory(pages);

	return -ENOMEM;
}
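
/*
 * Arithmetic note: the "last 3%" reserved above is computed as x / 32,
 * i.e. 3.125%.  Worked example for OVERCOMMIT_NEVER, assuming 32768 pages
 * of RAM, no swap and the default 50% overcommit ratio: allowed =
 * 32768 * 50 / 100 = 16384 pages, less 512 (16384 / 32) for a non-root
 * caller, less 3% of this process's total_vm, before the commitment test.
 */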

int in_gate_area_no_task(unsigned long addr)
{
	return 0;
}