#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/sections.h>
#include <asm/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
        return addr >= (unsigned long)__start_rodata &&
                addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
        if (!is_kernel_rodata((unsigned long)x))
                kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns the source string if it is in the .rodata section; otherwise it
 * falls back to kstrdup().
 * Strings allocated by kstrdup_const() should be freed by kfree_const().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
        if (is_kernel_rodata((unsigned long)s))
                return s;

        return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
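
/*
 * Illustrative usage sketch (not part of the original file): pairing
 * kstrdup_const() with kfree_const() so that string literals living in
 * .rodata are never copied or freed. The struct and function names below
 * are hypothetical.
 *
 *      struct attr {
 *              const char *name;
 *      };
 *
 *      static int attr_set_name(struct attr *a, const char *name)
 *      {
 *              a->name = kstrdup_const(name, GFP_KERNEL);
 *              return a->name ? 0 : -ENOMEM;
 *      }
 *
 *      static void attr_destroy(struct attr *a)
 *      {
 *              kfree_const(a->name);
 *              a->name = NULL;
 *      }
 */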
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_track_caller(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause a page fault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);

/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
        char *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause a page fault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len + 1, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }
        p[len] = '\0';

        return p;
}
EXPORT_SYMBOL(memdup_user_nul);
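
/*
 * Illustrative usage sketch (not part of the original file): copying a
 * user-supplied buffer in a write() handler with memdup_user_nul() so the
 * result can be parsed as a C string. The handler name is hypothetical.
 *
 *      static ssize_t demo_write(struct file *file, const char __user *buf,
 *                                size_t count, loff_t *ppos)
 *      {
 *              char *kbuf = memdup_user_nul(buf, count);
 *
 *              if (IS_ERR(kbuf))
 *                      return PTR_ERR(kbuf);
 *              pr_info("demo: got \"%s\"\n", kbuf);
 *              kfree(kbuf);
 *              return count;
 *      }
 */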
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
{
        struct vm_area_struct *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
                        next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
                        next = NULL;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
                                struct vm_area_struct *vma)
{
        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If is_group is non-zero, check in the entire thread group or else
 * just check in the current task. Returns the task_struct of the task
 * that the vma is stack for. Must be called under rcu_read_lock().
 */
struct task_struct *task_of_stack(struct task_struct *task,
                                struct vm_area_struct *vma, bool in_group)
{
        if (vm_is_stack_for_task(task, vma))
                return task;

        if (in_group) {
                struct task_struct *t;

                for_each_thread(task, t) {
                        if (vm_is_stack_for_task(t, vma))
                                return t;
                }
        }

        return NULL;
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, it simply returns
 * with no pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
                                 int nr_pages, int write, struct page **pages)
{
        return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However, if the
 * pages have to be faulted in, it may turn out to be slightly slower, so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
                                int nr_pages, int write, struct page **pages)
{
        struct mm_struct *mm = current->mm;
        return get_user_pages_unlocked(current, mm, start, nr_pages,
                                       write, 0, pages);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
{
        unsigned long ret;
        struct mm_struct *mm = current->mm;
        unsigned long populate;

        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                down_write(&mm->mmap_sem);
                ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
                                    &populate);
                up_write(&mm->mmap_sem);
                if (populate)
                        mm_populate(ret, populate);
        }
        return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        if (unlikely(offset + PAGE_ALIGN(len) < offset))
                return -EINVAL;
        if (unlikely(offset_in_page(offset)))
                return -EINVAL;

        return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

void kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
EXPORT_SYMBOL(kvfree);
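
/*
 * Illustrative usage sketch (not part of the original file): kvfree() lets a
 * caller free a buffer without remembering whether it came from kmalloc() or
 * vmalloc(). The helper below is hypothetical; the allocation result would
 * later be released with a single kvfree(p) regardless of which allocator
 * succeeded.
 *
 *      static void *demo_alloc_table(size_t size)
 *      {
 *              void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
 *
 *              if (!p)
 *                      p = vmalloc(size);
 *              return p;
 *      }
 */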
static inline void *__page_rmapping(struct page *page)
{
        unsigned long mapping;

        mapping = (unsigned long)page->mapping;
        mapping &= ~PAGE_MAPPING_FLAGS;

        return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
        page = compound_head(page);
        return __page_rmapping(page);
}

struct anon_vma *page_anon_vma(struct page *page)
{
        unsigned long mapping;

        page = compound_head(page);
        mapping = (unsigned long)page->mapping;
        if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                return NULL;
        return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
        struct address_space *mapping;

        page = compound_head(page);

        /* This happens if someone calls flush_dcache_page on a slab page */
        if (unlikely(PageSlab(page)))
                return NULL;

        if (unlikely(PageSwapCache(page))) {
                swp_entry_t entry;

                entry.val = page_private(page);
                return swap_address_space(entry);
        }

        mapping = page->mapping;
        if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
                return NULL;
        return mapping;
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
        int ret;

        ret = atomic_read(&page->_mapcount) + 1;
        page = compound_head(page);
        ret += atomic_read(compound_mapcount_ptr(page)) + 1;
        if (PageDoubleMap(page))
                ret--;
        return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int overcommit_ratio_handler(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos)
{
        int ret;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_kbytes = 0;
        return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
                              void __user *buffer, size_t *lenp,
                              loff_t *ppos)
{
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_ratio = 0;
        return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
        unsigned long allowed;

        if (sysctl_overcommit_kbytes)
                allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
        else
                allowed = ((totalram_pages - hugetlb_total_pages())
                           * sysctl_overcommit_ratio / 100);
        allowed += total_swap_pages;

        return allowed;
}
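
/*
 * Worked example for vm_commit_limit() (not part of the original file; the
 * numbers are hypothetical): with 4 KiB pages, overcommit_ratio = 50,
 * overcommit_kbytes = 0, 1048576 total RAM pages, no hugetlb pages and
 * 262144 swap pages, the limit is
 *
 *      (1048576 - 0) * 50 / 100 + 262144 = 786432 pages (3 GiB).
 *
 * If overcommit_kbytes were instead set to 2097152 (2 GiB), it would take
 * precedence: 2097152 >> (12 - 10) = 524288 pages, plus swap.
 */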
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
        int res = 0;
        unsigned int len;
        struct mm_struct *mm = get_task_mm(task);
        unsigned long arg_start, arg_end, env_start, env_end;
        if (!mm)
                goto out;
        if (!mm->arg_end)
                goto out_mm;	/* Shh! No looking before we're done */

        down_read(&mm->mmap_sem);
        arg_start = mm->arg_start;
        arg_end = mm->arg_end;
        env_start = mm->env_start;
        env_end = mm->env_end;
        up_read(&mm->mmap_sem);

        len = arg_end - arg_start;

        if (len > buflen)
                len = buflen;

        res = access_process_vm(task, arg_start, buffer, len, 0);

        /*
         * If the nul at the end of args has been overwritten, then
         * assume the application is using setproctitle(3).
         */
        if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
                len = strnlen(buffer, res);
                if (len < res) {
                        res = len;
                } else {
                        len = env_end - env_start;
                        if (len > buflen - res)
                                len = buflen - res;
                        res += access_process_vm(task, env_start,
                                                 buffer+res, len, 0);
                        res = strnlen(buffer, res);
                }
        }
out_mm:
        mmput(mm);
out:
        return res;
}
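
/*
 * Illustrative usage sketch (not part of the original file): reading another
 * task's command line into a fixed buffer and forcing NUL termination, since
 * get_cmdline() does not guarantee one. The function name is hypothetical.
 *
 *      static void demo_print_cmdline(struct task_struct *task)
 *      {
 *              char buf[256];
 *              int n = get_cmdline(task, buf, sizeof(buf) - 1);
 *
 *              buf[n < 0 ? 0 : n] = '\0';
 *              pr_info("cmdline: %s\n", buf);
 *      }
 */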