/*
 *      linux/mm/mincore.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct vm_area_struct * vma,
        unsigned long pgoff)
{
        unsigned char present = 0;
        struct address_space * as = vma->vm_file->f_mapping;
        struct page * page;

        page = find_get_page(as, pgoff);
        if (page) {
                present = PageUptodate(page);
                page_cache_release(page);
        }

        return present;
}

/*
 * Fill "vec" with residency bytes for the file-backed pages of a single
 * VMA, copying the results out to userspace one page-sized chunk at a
 * time via a temporary kernel page.
 */
static long mincore_vma(struct vm_area_struct * vma,
        unsigned long start, unsigned long end, unsigned char __user * vec)
{
        long error, i, remaining;
        unsigned char * tmp;

        error = -ENOMEM;
        if (!vma->vm_file)
                return error;

        start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        if (end > vma->vm_end)
                end = vma->vm_end;
        end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

        error = -EAGAIN;
        tmp = (unsigned char *) __get_free_page(GFP_KERNEL);
        if (!tmp)
                return error;

        /* (end - start) is # of pages, and also # of bytes in "vec" */
        remaining = (end - start);

        error = 0;
        for (i = 0; remaining > 0; remaining -= PAGE_SIZE, i++) {
                int j = 0;
                long thispiece = (remaining < PAGE_SIZE) ?
                                        remaining : PAGE_SIZE;

                while (j < thispiece)
                        tmp[j++] = mincore_page(vma, start++);

                if (copy_to_user(vec + PAGE_SIZE * i, tmp, thispiece)) {
                        error = -EFAULT;
                        break;
                }
        }

        free_page((unsigned long) tmp);
        return error;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *            invalid for the address space of this process, or
 *            specify one or more pages which are not currently
 *            mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
asmlinkage long sys_mincore(unsigned long start, size_t len,
        unsigned char __user * vec)
{
        int index = 0;
        unsigned long end, limit;
        struct vm_area_struct * vma;
        size_t max;
        int unmapped_error = 0;
        long error;

        /* check the arguments */
        if (start & ~PAGE_CACHE_MASK)
                goto einval;

        if (start < FIRST_USER_PGD_NR * PGDIR_SIZE)
                goto enomem;

        limit = TASK_SIZE;
        if (start >= limit)
                goto enomem;

        if (!len)
                return 0;

        max = limit - start;
        len = PAGE_CACHE_ALIGN(len);
        if (len > max || !len)
                goto enomem;

        end = start + len;

        /* check the output buffer whilst holding the lock */
        error = -EFAULT;
        down_read(&current->mm->mmap_sem);

        if (!access_ok(VERIFY_WRITE, vec, len >> PAGE_SHIFT))
                goto out;

        /*
         * If the interval [start,end) covers some unmapped address
         * ranges, just ignore them, but return -ENOMEM at the end.
         */
        error = 0;

        vma = find_vma(current->mm, start);
        while (vma) {
                /* Here start < vma->vm_end. */
                if (start < vma->vm_start) {
                        unmapped_error = -ENOMEM;
                        start = vma->vm_start;
                }

                /* Here vma->vm_start <= start < vma->vm_end. */
                if (end <= vma->vm_end) {
                        if (start < end) {
                                error = mincore_vma(vma, start, end,
                                                        &vec[index]);
                                if (error)
                                        goto out;
                        }
                        error = unmapped_error;
                        goto out;
                }

                /* Here vma->vm_start <= start < vma->vm_end < end. */
                error = mincore_vma(vma, start, vma->vm_end, &vec[index]);
                if (error)
                        goto out;
                index += (vma->vm_end - start) >> PAGE_CACHE_SHIFT;
                start = vma->vm_end;
                vma = vma->vm_next;
        }

        /* we found a hole in the area queried if we arrive here */
        error = -ENOMEM;

out:
        up_read(&current->mm->mmap_sem);
        return error;

einval:
        return -EINVAL;
enomem:
        return -ENOMEM;
}
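
/*
 * Example (not part of the kernel build): a minimal userspace sketch of
 * how the syscall above is typically reached through the C library's
 * mincore() wrapper.  Note that this version of the kernel code only
 * reports on file-backed mappings (mincore_vma() returns -ENOMEM for
 * anonymous VMAs), so the sketch maps a file named on the command line.
 * The file argument and mapping length are illustrative assumptions,
 * not anything defined in this source file.
 *
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include <stdlib.h>
 *      #include <unistd.h>
 *      #include <sys/mman.h>
 *      #include <sys/stat.h>
 *
 *      int main(int argc, char **argv)
 *      {
 *              long page = sysconf(_SC_PAGESIZE);
 *              struct stat st;
 *              int fd;
 *
 *              if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0 ||
 *                  fstat(fd, &st) < 0 || st.st_size == 0)
 *                      return 1;
 *
 *              size_t pages = (st.st_size + page - 1) / page;
 *              unsigned char *vec = malloc(pages);
 *              void *buf = mmap(NULL, st.st_size, PROT_READ,
 *                               MAP_PRIVATE, fd, 0);
 *
 *              if (!vec || buf == MAP_FAILED ||
 *                  mincore(buf, st.st_size, vec) != 0)
 *                      return 1;
 *
 *              // Only the least significant bit of each byte is defined.
 *              for (size_t i = 0; i < pages; i++)
 *                      printf("page %zu: %s\n", i,
 *                             (vec[i] & 1) ? "in core" : "not in core");
 *              return 0;
 *      }
 */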