--- shmem.c (e6be37b2e7bddfe0c76585ee7c7eee5acc8efeab)
+++ shmem.c (3460f6e5c1ed94c2ab7c1ccc032a5bebd88deaa7)
 /*
  * Resizable virtual memory filesystem for Linux.
  *
  * Copyright (C) 2000 Linus Torvalds.
  *		 2000 Transmeta Corp.
  *		 2000-2001 Christoph Rohland
  *		 2000-2001 SAP AG
  *		 2002 Red Hat Inc.
--- 2338 unchanged lines hidden ---
 		}

 		lockdep_annotate_inode_mutex_key(inode);
 	} else
 		shmem_free_inode(sb);
 	return inode;
 }

-static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
-				  pmd_t *dst_pmd,
-				  struct vm_area_struct *dst_vma,
-				  unsigned long dst_addr,
-				  unsigned long src_addr,
-				  bool zeropage,
-				  struct page **pagep)
+#ifdef CONFIG_USERFAULTFD
+int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
+			   pmd_t *dst_pmd,
+			   struct vm_area_struct *dst_vma,
+			   unsigned long dst_addr,
+			   unsigned long src_addr,
+			   bool zeropage,
+			   struct page **pagep)
 {
 	struct inode *inode = file_inode(dst_vma->vm_file);
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct address_space *mapping = inode->i_mapping;
 	gfp_t gfp = mapping_gfp_mask(mapping);
 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
 	spinlock_t *ptl;
 	void *page_kaddr;
 	struct page *page;
 	pte_t _dst_pte, *dst_pte;
 	int ret;
-	pgoff_t offset, max_off;
+	pgoff_t max_off;

 	ret = -ENOMEM;
 	if (!shmem_inode_acct_block(inode, 1)) {
 		/*
 		 * We may have got a page, returned -ENOENT triggering a retry,
 		 * and now we find ourselves with -ENOMEM. Release the page, to
 		 * avoid a BUG_ON in our caller.
 		 */
--- 4 unchanged lines hidden ---
 		goto out;
 	}

 	if (!*pagep) {
 		page = shmem_alloc_page(gfp, info, pgoff);
 		if (!page)
 			goto out_unacct_blocks;

-		if (!zeropage) {	/* mcopy_atomic */
+		if (!zeropage) {	/* COPY */
 			page_kaddr = kmap_atomic(page);
 			ret = copy_from_user(page_kaddr,
 					     (const void __user *)src_addr,
 					     PAGE_SIZE);
 			kunmap_atomic(page_kaddr);

 			/* fallback to copy_from_user outside mmap_lock */
 			if (unlikely(ret)) {
 				*pagep = page;
 				shmem_inode_unacct_blocks(inode, 1);
 				/* don't free the page */
 				return -ENOENT;
 			}
-		} else {		/* mfill_zeropage_atomic */
+		} else {		/* ZEROPAGE */
 			clear_highpage(page);
 		}
 	} else {
 		page = *pagep;
 		*pagep = NULL;
 	}

-	VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
+	VM_BUG_ON(PageLocked(page));
+	VM_BUG_ON(PageSwapBacked(page));
 	__SetPageLocked(page);
 	__SetPageSwapBacked(page);
 	__SetPageUptodate(page);

 	ret = -EFAULT;
-	offset = linear_page_index(dst_vma, dst_addr);
 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
-	if (unlikely(offset >= max_off))
+	if (unlikely(pgoff >= max_off))
 		goto out_release;

 	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
 				      gfp & GFP_RECLAIM_MASK, dst_mm);
 	if (ret)
 		goto out_release;

 	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
--- 9 unchanged lines hidden ---
 		 */
 		set_page_dirty(page);
 	}

 	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);

 	ret = -EFAULT;
 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
-	if (unlikely(offset >= max_off))
+	if (unlikely(pgoff >= max_off))
 		goto out_release_unlock;

 	ret = -EEXIST;
 	if (!pte_none(*dst_pte))
 		goto out_release_unlock;

 	lru_cache_add(page);

--- 20 unchanged lines hidden ---
 		delete_from_page_cache(page);
 out_release:
 	unlock_page(page);
 	put_page(page);
 out_unacct_blocks:
 	shmem_inode_unacct_blocks(inode, 1);
 	goto out;
 }
+#endif /* CONFIG_USERFAULTFD */

-int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
-			   pmd_t *dst_pmd,
-			   struct vm_area_struct *dst_vma,
-			   unsigned long dst_addr,
-			   unsigned long src_addr,
-			   struct page **pagep)
-{
-	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
-				      dst_addr, src_addr, false, pagep);
-}
-
-int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
-			     pmd_t *dst_pmd,
-			     struct vm_area_struct *dst_vma,
-			     unsigned long dst_addr)
-{
-	struct page *page = NULL;
-
-	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
-				      dst_addr, 0, true, &page);
-}
-
 #ifdef CONFIG_TMPFS
 static const struct inode_operations shmem_symlink_inode_operations;
 static const struct inode_operations shmem_short_symlink_operations;

 #ifdef CONFIG_TMPFS_XATTR
 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
 #else
 #define shmem_initxattrs NULL
--- 1768 unchanged lines hidden ---
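
With the shmem_mcopy_atomic_pte() and shmem_mfill_zeropage_pte() wrappers removed above, a caller in the userfaultfd fill path would invoke the combined shmem_mfill_atomic_pte() directly for shmem-backed VMAs and select COPY versus ZEROPAGE through the zeropage argument. A minimal caller-side sketch under that assumption; the dispatch function name and the anonymous-VMA fallback below are hypothetical and not part of this diff:

/*
 * Hypothetical caller sketch (not taken from this diff): route a
 * userfaultfd fill request on a shmem-backed VMA through the single
 * shmem_mfill_atomic_pte() entry point, passing the zeropage flag.
 */
static int mfill_dispatch_sketch(struct mm_struct *dst_mm, pmd_t *dst_pmd,
				 struct vm_area_struct *dst_vma,
				 unsigned long dst_addr, unsigned long src_addr,
				 bool zeropage, struct page **pagep)
{
	if (vma_is_shmem(dst_vma))
		return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
					      dst_addr, src_addr,
					      zeropage, pagep);

	/* Anonymous VMAs would keep their existing handlers (not shown). */
	return -EINVAL;
}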