mm/shmem.c: 14235ab36019d169f5eb5bf0c064c5b12ca1bf46 (old) vs. 3fea5a499d57dec46043fcdb08e38eac1767bb0d (new), rendered below as a unified diff (- old, + new)
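Overview: the new side folds memory-cgroup charging into shmem_add_to_page_cache(), which gains a charge_mm parameter and a single error: exit path. Its three callers, shmem_swapin_page(), shmem_getpage_gfp(), and shmem_mfill_atomic_pte(), drop their open-coded mem_cgroup_try_charge_delay() / mem_cgroup_commit_charge() / mem_cgroup_cancel_charge() sequences along with their struct mem_cgroup *memcg locals; the userfaultfd copy path also loses its now-unneeded *_uncharge error labels.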
 /*
  * Resizable virtual memory filesystem for Linux.
  *
  * Copyright (C) 2000 Linus Torvalds.
  *		 2000 Transmeta Corp.
  *		 2000-2001 Christoph Rohland
  *		 2000-2001 SAP AG
  *		 2002 Red Hat Inc.
--- 591 unchanged lines hidden (view full) ---
 	return false;
 }
 
 /*
  * Like add_to_page_cache_locked, but error if expected item has gone.
  */
 static int shmem_add_to_page_cache(struct page *page,
 				   struct address_space *mapping,
-				   pgoff_t index, void *expected, gfp_t gfp)
+				   pgoff_t index, void *expected, gfp_t gfp,
+				   struct mm_struct *charge_mm)
 {
 	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
 	unsigned long i = 0;
 	unsigned long nr = compound_nr(page);
+	int error;
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 	VM_BUG_ON(expected && PageTransHuge(page));
 
 	page_ref_add(page, nr);
 	page->mapping = mapping;
 	page->index = index;
 
+	error = mem_cgroup_charge(page, charge_mm, gfp, PageSwapCache(page));
+	if (error) {
+		if (!PageSwapCache(page) && PageTransHuge(page)) {
+			count_vm_event(THP_FILE_FALLBACK);
+			count_vm_event(THP_FILE_FALLBACK_CHARGE);
+		}
+		goto error;
+	}
+	cgroup_throttle_swaprate(page, gfp);
+
 	do {
 		void *entry;
 		xas_lock_irq(&xas);
 		entry = xas_find_conflict(&xas);
 		if (entry != expected)
 			xas_set_err(&xas, -EEXIST);
 		xas_create_range(&xas);
 		if (xas_error(&xas))
--- 11 unchanged lines hidden (view full) ---
 		mapping->nrpages += nr;
 		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
 		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
 unlock:
 		xas_unlock_irq(&xas);
 	} while (xas_nomem(&xas, gfp));
 
 	if (xas_error(&xas)) {
-		page->mapping = NULL;
-		page_ref_sub(page, nr);
-		return xas_error(&xas);
+		error = xas_error(&xas);
+		goto error;
 	}
 
 	return 0;
+error:
+	page->mapping = NULL;
+	page_ref_sub(page, nr);
+	return error;
 }
 
 /*
  * Like delete_from_page_cache, but substitutes swap for page.
  */
 static void shmem_delete_from_page_cache(struct page *page, void *radswap)
 {
 	struct address_space *mapping = page->mapping;
--- 949 unchanged lines hidden (view full) ---
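With charging folded into the insertion, a caller's obligation shrinks to a single error check. A minimal sketch of the resulting calling convention (the helper below and its name are hypothetical; only the shmem_add_to_page_cache() signature comes from the hunk above):

/* Hypothetical in-file caller: the memcg charge against @mm, the THP
 * fallback counters, and the failure cleanup of page->mapping and the
 * extra page references all happen inside the callee now. */
static int sketch_insert_page(struct page *page, struct address_space *mapping,
			      pgoff_t index, gfp_t gfp, struct mm_struct *mm)
{
	int error;

	error = shmem_add_to_page_cache(page, mapping, index,
					NULL,	/* no expected entry */
					gfp, mm);
	if (error)
		return error;	/* no cancel-charge step remains */

	lru_cache_add_anon(page);	/* as the real callers do next */
	return 0;
}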
 static int shmem_swapin_page(struct inode *inode, pgoff_t index,
 			     struct page **pagep, enum sgp_type sgp,
 			     gfp_t gfp, struct vm_area_struct *vma,
 			     vm_fault_t *fault_type)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
-	struct mem_cgroup *memcg;
 	struct page *page;
 	swp_entry_t swap;
 	int error;
 
 	VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
 	swap = radix_to_swp_entry(*pagep);
 	*pagep = NULL;
 
--- 28 unchanged lines hidden (view full) ---
 	wait_on_page_writeback(page);
 
 	if (shmem_should_replace_page(page, gfp)) {
 		error = shmem_replace_page(&page, gfp, info, index);
 		if (error)
 			goto failed;
 	}
 
-	error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg);
+	error = shmem_add_to_page_cache(page, mapping, index,
+					swp_to_radix_entry(swap), gfp,
+					charge_mm);
 	if (error)
 		goto failed;
 
-	error = shmem_add_to_page_cache(page, mapping, index,
-					swp_to_radix_entry(swap), gfp);
-	if (error) {
-		mem_cgroup_cancel_charge(page, memcg);
-		goto failed;
-	}
-
-	mem_cgroup_commit_charge(page, memcg, true);
-
 	spin_lock_irq(&info->lock);
 	info->swapped--;
 	shmem_recalc_inode(inode);
 	spin_unlock_irq(&info->lock);
 
 	if (sgp == SGP_WRITE)
 		mark_page_accessed(page);
 
--- 29 unchanged lines hidden (view full) ---
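Two details of this conversion are easy to miss. The old code passed lrucare=true to mem_cgroup_commit_charge() by hand; the new shmem_add_to_page_cache() derives the same information from PageSwapCache(page). And the expected argument, swp_to_radix_entry(swap), still makes the insert fail if the slot no longer holds that swap entry. The single-entry xarray API can express that guard compactly; a sketch with a hypothetical helper name (the real function uses an XA_STATE loop so it can cover huge-page ranges):

#include <linux/xarray.h>

/* Store @page at @index only if the slot still holds @expected;
 * -EEXIST mirrors the "expected item has gone" error above. */
static int sketch_store_if_expected(struct xarray *xa, unsigned long index,
				    void *expected, struct page *page)
{
	void *old = xa_cmpxchg_irq(xa, index, expected, page, GFP_KERNEL);

	if (xa_is_err(old))
		return xa_err(old);
	return old == expected ? 0 : -EEXIST;
}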
 	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
 	struct vm_area_struct *vma, struct vm_fault *vmf,
 			vm_fault_t *fault_type)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct shmem_sb_info *sbinfo;
 	struct mm_struct *charge_mm;
-	struct mem_cgroup *memcg;
 	struct page *page;
 	enum sgp_type sgp_huge = sgp;
 	pgoff_t hindex = index;
 	int error;
 	int once = 0;
 	int alloced = 0;
 
 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
--- 108 unchanged lines hidden (view full) ---
 	if (PageTransHuge(page))
 		hindex = round_down(index, HPAGE_PMD_NR);
 	else
 		hindex = index;
 
 	if (sgp == SGP_WRITE)
 		__SetPageReferenced(page);
 
-	error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg);
-	if (error) {
-		if (PageTransHuge(page)) {
-			count_vm_event(THP_FILE_FALLBACK);
-			count_vm_event(THP_FILE_FALLBACK_CHARGE);
-		}
-		goto unacct;
-	}
 	error = shmem_add_to_page_cache(page, mapping, hindex,
-					NULL, gfp & GFP_RECLAIM_MASK);
-	if (error) {
-		mem_cgroup_cancel_charge(page, memcg);
+					NULL, gfp & GFP_RECLAIM_MASK,
+					charge_mm);
+	if (error)
 		goto unacct;
-	}
-	mem_cgroup_commit_charge(page, memcg, false);
 	lru_cache_add_anon(page);
 
 	spin_lock_irq(&info->lock);
 	info->alloced += compound_nr(page);
 	inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
 	shmem_recalc_inode(inode);
 	spin_unlock_irq(&info->lock);
 	alloced = true;
--- 421 unchanged lines hidden (view full) ---
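Note what left this call site: the THP_FILE_FALLBACK and THP_FILE_FALLBACK_CHARGE bumps now live in shmem_add_to_page_cache() (first hunk), conditioned on !PageSwapCache(page) so swap-in failures are not counted as huge-page fallbacks. The gfp & GFP_RECLAIM_MASK masking is unchanged, but it now also scopes the memcg charge. A sketch of what the masking does, assuming GFP_RECLAIM_MASK as defined in mm/internal.h:

#include "internal.h"	/* mm-private header; defines GFP_RECLAIM_MASK */

/* Hypothetical helper: drop placement hints (zone/movability bits)
 * that only applied to the already-completed huge page allocation,
 * keeping reclaim-behaviour flags such as __GFP_IO and __GFP_FS for
 * the xarray node allocation and the charge. */
static inline gfp_t sketch_cache_gfp(gfp_t alloc_gfp)
{
	return alloc_gfp & GFP_RECLAIM_MASK;
}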
 				  bool zeropage,
 				  struct page **pagep)
 {
 	struct inode *inode = file_inode(dst_vma->vm_file);
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct address_space *mapping = inode->i_mapping;
 	gfp_t gfp = mapping_gfp_mask(mapping);
 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
-	struct mem_cgroup *memcg;
 	spinlock_t *ptl;
 	void *page_kaddr;
 	struct page *page;
 	pte_t _dst_pte, *dst_pte;
 	int ret;
 	pgoff_t offset, max_off;
 
 	ret = -ENOMEM;
--- 33 unchanged lines hidden (view full) ---
 	__SetPageUptodate(page);
 
 	ret = -EFAULT;
 	offset = linear_page_index(dst_vma, dst_addr);
 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 	if (unlikely(offset >= max_off))
 		goto out_release;
 
-	ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg);
+	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
+				      gfp & GFP_RECLAIM_MASK, dst_mm);
 	if (ret)
 		goto out_release;
 
-	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
-				      gfp & GFP_RECLAIM_MASK);
-	if (ret)
-		goto out_release_uncharge;
-
-	mem_cgroup_commit_charge(page, memcg, false);
-
 	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
 	if (dst_vma->vm_flags & VM_WRITE)
 		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
 	else {
 		/*
 		 * We don't set the pte dirty if the vma has no
 		 * VM_WRITE permission, so mark the page dirty or it
 		 * could be freed from under us. We could do it
 		 * unconditionally before unlock_page(), but doing it
 		 * only if VM_WRITE is not set is faster.
 		 */
 		set_page_dirty(page);
 	}
 
 	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
 
 	ret = -EFAULT;
 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 	if (unlikely(offset >= max_off))
-		goto out_release_uncharge_unlock;
+		goto out_release_unlock;
 
 	ret = -EEXIST;
 	if (!pte_none(*dst_pte))
-		goto out_release_uncharge_unlock;
+		goto out_release_unlock;
 
 	lru_cache_add_anon(page);
 
 	spin_lock_irq(&info->lock);
 	info->alloced++;
 	inode->i_blocks += BLOCKS_PER_PAGE;
 	shmem_recalc_inode(inode);
 	spin_unlock_irq(&info->lock);
--- 4 unchanged lines hidden (view full) ---
 
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
 	pte_unmap_unlock(dst_pte, ptl);
 	unlock_page(page);
 	ret = 0;
 out:
 	return ret;
-out_release_uncharge_unlock:
+out_release_unlock:
 	pte_unmap_unlock(dst_pte, ptl);
 	ClearPageDirty(page);
 	delete_from_page_cache(page);
-out_release_uncharge:
-	mem_cgroup_cancel_charge(page, memcg);
 out_release:
 	unlock_page(page);
 	put_page(page);
 out_unacct_blocks:
 	shmem_inode_unacct_blocks(inode, 1);
 	goto out;
 }
 
--- 1792 unchanged lines hidden (view full) ---
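A closing observation, inferred from the diff rather than stated in it: no uncharge call survives in any path, which is consistent with mem_cgroup_charge() committing the charge at insertion time so that it simply travels with the page. A conceptual sketch of the only cleanup a post-insertion failure still owes (hypothetical helper; compare the out_release_unlock ladder above, which additionally unlocks the pte):

/* After a failed pte install: unhash the page and drop references.
 * The memcg charge needs no explicit cancel under the new API; it is
 * assumed to be released when the final put_page() frees the page. */
static void sketch_undo_insert(struct page *page)
{
	ClearPageDirty(page);		/* don't let dirty state pin it */
	delete_from_page_cache(page);	/* also drops the cache reference */
	unlock_page(page);
	put_page(page);
}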