nommu.c: b92efa9abffc4a634cd2e7a0f81f8aa6310d67c9 (old) vs. 33e5d76979cf01e3834814fe0aea569d1d602c1a (new)
Lines prefixed with "-" were removed, lines prefixed with "+" were added; unprefixed lines are unchanged context.

/*
 * linux/mm/nommu.c
 *
 * Replacement code for mm functions to support CPU's that don't
 * have any form of memory management unit (thus no virtual memory).
 *
 * See Documentation/nommu-mmap.txt
 *

--- 55 unchanged lines hidden ---

unsigned long num_physpages;
atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int sysctl_nr_trim_pages = 1; /* page trimming behaviour */
int heap_stack_gap = 0;

-atomic_t mmap_pages_allocated;
+atomic_long_t mmap_pages_allocated;

EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(num_physpages);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

--- 377 unchanged lines hidden ---
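An aside on the mmap_pages_allocated change above (and on the matching atomic_add/atomic_dec/atomic_sub call sites later in this diff): atomic_t only holds an int, while atomic_long_t holds a long, so the page counter can no longer wrap on 64-bit machines with very large amounts of mapped memory. A minimal kernel-style sketch of the accessor family the new code relies on; the example_* names below are hypothetical, only the atomic_long_* helpers themselves are real:

/* hypothetical counter mirroring mmap_pages_allocated, for illustration only */
static atomic_long_t example_pages_allocated;

static void example_account_pages(long nr)      /* hypothetical helper */
{
        if (nr >= 0)
                atomic_long_add(nr, &example_pages_allocated);
        else
                atomic_long_sub(-nr, &example_pages_allocated);
}

static long example_pages_in_use(void)          /* hypothetical helper */
{
        return atomic_long_read(&example_pages_allocated);
}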

        return mm->brk = brk;
}

/*
 * initialise the VMA and region record slabs
 */
void __init mmap_init(void)
{
-        vm_region_jar = kmem_cache_create("vm_region_jar",
-                                          sizeof(struct vm_region), 0,
-                                          SLAB_PANIC, NULL);
-        vm_area_cachep = kmem_cache_create("vm_area_struct",
-                                           sizeof(struct vm_area_struct), 0,
-                                           SLAB_PANIC, NULL);
+        vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
        struct vm_region *region, *last;
        struct rb_node *p, *lastp;

        lastp = rb_first(&nommu_region_tree);
        if (!lastp)
                return;

        last = rb_entry(lastp, struct vm_region, vm_rb);
-        if (unlikely(last->vm_end <= last->vm_start))
-                BUG();
-        if (unlikely(last->vm_top < last->vm_end))
-                BUG();
+        BUG_ON(unlikely(last->vm_end <= last->vm_start));
+        BUG_ON(unlikely(last->vm_top < last->vm_end));

        while ((p = rb_next(lastp))) {
                region = rb_entry(p, struct vm_region, vm_rb);
                last = rb_entry(lastp, struct vm_region, vm_rb);

-                if (unlikely(region->vm_end <= region->vm_start))
-                        BUG();
-                if (unlikely(region->vm_top < region->vm_end))
-                        BUG();
-                if (unlikely(region->vm_start < last->vm_top))
-                        BUG();
+                BUG_ON(unlikely(region->vm_end <= region->vm_start));
+                BUG_ON(unlikely(region->vm_top < region->vm_end));
+                BUG_ON(unlikely(region->vm_start < last->vm_top));

                lastp = p;
        }
}
#else
-#define validate_nommu_regions() do {} while(0)
+static void validate_nommu_regions(void)
+{
+}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
        struct vm_region *pregion;

--- 40 unchanged lines hidden ---
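Two helpers carry the changes in the region shown above. KMEM_CACHE() is the slab convenience macro that derives a cache's name, object size and alignment from the struct type instead of spelling them out by hand, and BUG_ON() folds the open-coded "if (unlikely(...)) BUG();" pattern into a single statement. Their definitions are roughly the following (a sketch from memory of <linux/slab.h> and <asm-generic/bug.h>, not copied from this tree):

/* roughly what KMEM_CACHE() expands to in <linux/slab.h> */
#define KMEM_CACHE(__struct, __flags) \
        kmem_cache_create(#__struct, sizeof(struct __struct), \
                          __alignof__(struct __struct), (__flags), NULL)

/* roughly what BUG_ON() expands to in <asm-generic/bug.h> */
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)

Since BUG_ON() already wraps its argument in unlikely(), the explicit unlikely() retained in the new validate_nommu_regions() checks is redundant but harmless. Replacing the old "#define validate_nommu_regions() do {} while(0)" stub with an empty static function keeps the call sites compiled and checked even when CONFIG_DEBUG_NOMMU_REGIONS is off, while the compiler can still optimise the empty call away.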

 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
        for (; from < to; from += PAGE_SIZE) {
                struct page *page = virt_to_page(from);

                kdebug("- free %lx", from);
-                atomic_dec(&mmap_pages_allocated);
+                atomic_long_dec(&mmap_pages_allocated);
                if (page_count(page) != 1)
-                        kdebug("free page %p [%d]", page, page_count(page));
+                        kdebug("free page %p: refcount not one: %d",
+                               page, page_count(page));
                put_page(page);
        }
}

/*
 * release a reference to a region
- * - the caller must hold the region semaphore, which this releases
+ * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
        __releases(nommu_region_sem)
{
        kenter("%p{%d}", region, atomic_read(&region->vm_usage));

--- 507 unchanged lines hidden ---
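The one-word comment fix above ("for writing") spells out the locking contract: a caller of __put_nommu_region() must already hold nommu_region_sem for writing, and the function drops that semaphore on the caller's behalf. A hypothetical caller, sketched only to show the pairing (the example_put_region() wrapper name is made up; down_write() and the semaphore are real):

static void example_put_region(struct vm_region *region)   /* hypothetical wrapper */
{
        down_write(&nommu_region_sem);  /* take the region semaphore for writing */
        __put_nommu_region(region);     /* drops the reference and releases the semaphore */
        /* no up_write() here: __put_nommu_region() has already released it */
}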

        order = get_order(rlen);
        kdebug("alloc order %d for %lx", order, len);

        pages = alloc_pages(GFP_KERNEL, order);
        if (!pages)
                goto enomem;

        total = 1 << order;
-        atomic_add(total, &mmap_pages_allocated);
+        atomic_long_add(total, &mmap_pages_allocated);

        point = rlen >> PAGE_SHIFT;

        /* we allocated a power-of-2 sized page set, so we may want to trim off
         * the excess */
        if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
                while (total > point) {
                        order = ilog2(total - point);
                        n = 1 << order;
                        kdebug("shave %lu/%lu @%lu", n, total - point, total);
-                        atomic_sub(n, &mmap_pages_allocated);
+                        atomic_long_sub(n, &mmap_pages_allocated);
                        total -= n;
                        set_page_refcounted(pages + total);
                        __free_pages(pages + total, order);
                }
        }

        for (point = 1; point < total; point++)
                set_page_refcounted(&pages[point]);

--- 412 unchanged lines hidden ---
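To make the trimming logic above concrete: do_mmap_private() allocates a power-of-2 block of pages, so a request needing, say, 5 pages receives an order-3 allocation of 8, and the shave loop hands the 3 excess pages back in power-of-2 chunks (2 pages, then 1). A standalone sketch of just that arithmetic, with a hand-rolled ilog2() so it builds outside the kernel (purely illustrative, not part of the patch):

#include <stdio.h>

/* stand-in for the kernel's ilog2(): index of the highest set bit */
static unsigned long ilog2_ul(unsigned long v)
{
        unsigned long r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        unsigned long total = 8, point = 5;   /* 8 pages allocated, 5 actually needed */

        while (total > point) {
                unsigned long order = ilog2_ul(total - point);
                unsigned long n = 1UL << order;

                total -= n;
                printf("free %lu page(s) starting at page %lu\n", n, total);
        }
        /* prints: 2 page(s) starting at page 6, then 1 page(s) starting at page 5 */
        return 0;
}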

        kenter(",%lx,%zx", start, len);

        if (len == 0)
                return -EINVAL;

        /* find the first potentially overlapping VMA */
        vma = find_vma(mm, start);
        if (!vma) {
-                printk(KERN_WARNING
-                       "munmap of memory not mmapped by process %d (%s):"
-                       " 0x%lx-0x%lx\n",
-                       current->pid, current->comm, start, start + len - 1);
+                static int limit = 0;
+                if (limit < 5) {
+                        printk(KERN_WARNING
+                               "munmap of memory not mmapped by process %d"
+                               " (%s): 0x%lx-0x%lx\n",
+                               current->pid, current->comm,
+                               start, start + len - 1);
+                        limit++;
+                }
                return -EINVAL;
        }

        /* we're allowed to split an anonymous VMA but not a file-backed one */
        if (vma->vm_file) {
                do {
                        if (start > vma->vm_start) {
                                kleave(" = -EINVAL [miss]");

--- 365 unchanged lines hidden ---
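Finally, the last hunk caps the "munmap of memory not mmapped" warning at five messages per boot using a function-local static counter, so a buggy application that keeps handing bogus ranges to munmap() cannot flood the log. The kernel also offers a time-based throttle; a sketch of what that variant would look like here, assuming the usual printk_ratelimit() helper (illustrative only, not what this patch does):

        if (!vma) {
                /* suppress bursts rather than capping the lifetime total */
                if (printk_ratelimit())
                        printk(KERN_WARNING
                               "munmap of memory not mmapped by process %d (%s):"
                               " 0x%lx-0x%lx\n",
                               current->pid, current->comm,
                               start, start + len - 1);
                return -EINVAL;
        }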