mm/shmem.c — side-by-side diff between commit b11918bdbe79bd002d00a9f1d78958167ccfad99 (old) and commit b1cc94ab2f2ba31fcb2c59df0b9cf03f6d720553 (new)
1/*
2 * Resizable virtual memory filesystem for Linux.
3 *
4 * Copyright (C) 2000 Linus Torvalds.
5 * 2000 Transmeta Corp.
6 * 2000-2001 Christoph Rohland
7 * 2000-2001 SAP AG
8 * 2002 Red Hat Inc.

--- 252 unchanged lines hidden (view full) ---

261bool shmem_charge(struct inode *inode, long pages)
262{
263 struct shmem_inode_info *info = SHMEM_I(inode);
264 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
265 unsigned long flags;
266
267 if (shmem_acct_block(info->flags, pages))
268 return false;
1/*
2 * Resizable virtual memory filesystem for Linux.
3 *
4 * Copyright (C) 2000 Linus Torvalds.
5 * 2000 Transmeta Corp.
6 * 2000-2001 Christoph Rohland
7 * 2000-2001 SAP AG
8 * 2002 Red Hat Inc.

--- 252 unchanged lines hidden (view full) ---

261bool shmem_charge(struct inode *inode, long pages)
262{
263 struct shmem_inode_info *info = SHMEM_I(inode);
264 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
265 unsigned long flags;
266
267 if (shmem_acct_block(info->flags, pages))
268 return false;
269
270 if (sbinfo->max_blocks) {
271 if (percpu_counter_compare(&sbinfo->used_blocks,
272 sbinfo->max_blocks - pages) > 0)
273 goto unacct;
274 percpu_counter_add(&sbinfo->used_blocks, pages);
275 }
276
269 spin_lock_irqsave(&info->lock, flags);
270 info->alloced += pages;
271 inode->i_blocks += pages * BLOCKS_PER_PAGE;
272 shmem_recalc_inode(inode);
273 spin_unlock_irqrestore(&info->lock, flags);
274 inode->i_mapping->nrpages += pages;
275
277 spin_lock_irqsave(&info->lock, flags);
278 info->alloced += pages;
279 inode->i_blocks += pages * BLOCKS_PER_PAGE;
280 shmem_recalc_inode(inode);
281 spin_unlock_irqrestore(&info->lock, flags);
282 inode->i_mapping->nrpages += pages;
283
276 if (!sbinfo->max_blocks)
277 return true;
278 if (percpu_counter_compare(&sbinfo->used_blocks,
279 sbinfo->max_blocks - pages) > 0) {
280 inode->i_mapping->nrpages -= pages;
281 spin_lock_irqsave(&info->lock, flags);
282 info->alloced -= pages;
283 shmem_recalc_inode(inode);
284 spin_unlock_irqrestore(&info->lock, flags);
285 shmem_unacct_blocks(info->flags, pages);
286 return false;
287 }
288 percpu_counter_add(&sbinfo->used_blocks, pages);
289 return true;
284 return true;
285
286unacct:
287 shmem_unacct_blocks(info->flags, pages);
288 return false;
290}
291
292void shmem_uncharge(struct inode *inode, long pages)
293{
294 struct shmem_inode_info *info = SHMEM_I(inode);
295 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
296 unsigned long flags;
297

--- 719 unchanged lines hidden (view full) ---

1017 holebegin, 0, 1);
1018
1019 /*
1020 * Part of the huge page can be beyond i_size: subject
1021 * to shrink under memory pressure.
1022 */
1023 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
1024 spin_lock(&sbinfo->shrinklist_lock);
289}
290
291void shmem_uncharge(struct inode *inode, long pages)
292{
293 struct shmem_inode_info *info = SHMEM_I(inode);
294 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
295 unsigned long flags;
296

--- 719 unchanged lines hidden (view full) ---

1016 holebegin, 0, 1);
1017
1018 /*
1019 * Part of the huge page can be beyond i_size: subject
1020 * to shrink under memory pressure.
1021 */
1022 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
1023 spin_lock(&sbinfo->shrinklist_lock);
1025 if (list_empty(&info->shrinklist)) {
1024 /*
1025 * _careful to defend against unlocked access to
1026 * ->shrink_list in shmem_unused_huge_shrink()
1027 */
1028 if (list_empty_careful(&info->shrinklist)) {
1026 list_add_tail(&info->shrinklist,
1027 &sbinfo->shrinklist);
1028 sbinfo->shrinklist_len++;
1029 }
1030 spin_unlock(&sbinfo->shrinklist_lock);
1031 }
1032 }
1033 }

--- 778 unchanged lines hidden (view full) ---

1812 if (PageTransHuge(page) &&
1813 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1814 hindex + HPAGE_PMD_NR - 1) {
1815 /*
1816 * Part of the huge page is beyond i_size: subject
1817 * to shrink under memory pressure.
1818 */
1819 spin_lock(&sbinfo->shrinklist_lock);
1029 list_add_tail(&info->shrinklist,
1030 &sbinfo->shrinklist);
1031 sbinfo->shrinklist_len++;
1032 }
1033 spin_unlock(&sbinfo->shrinklist_lock);
1034 }
1035 }
1036 }

--- 778 unchanged lines hidden (view full) ---

1815 if (PageTransHuge(page) &&
1816 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1817 hindex + HPAGE_PMD_NR - 1) {
1818 /*
1819 * Part of the huge page is beyond i_size: subject
1820 * to shrink under memory pressure.
1821 */
1822 spin_lock(&sbinfo->shrinklist_lock);
1820 if (list_empty(&info->shrinklist)) {
1823 /*
1824 * _careful to defend against unlocked access to
1825 * ->shrink_list in shmem_unused_huge_shrink()
1826 */
1827 if (list_empty_careful(&info->shrinklist)) {
1821 list_add_tail(&info->shrinklist,
1822 &sbinfo->shrinklist);
1823 sbinfo->shrinklist_len++;
1824 }
1825 spin_unlock(&sbinfo->shrinklist_lock);
1826 }
1827
1828 /*

--- 2125 unchanged lines hidden (view full) ---

3954 shm_mnt = kern_mount(&shmem_fs_type);
3955 if (IS_ERR(shm_mnt)) {
3956 error = PTR_ERR(shm_mnt);
3957 pr_err("Could not kern_mount tmpfs\n");
3958 goto out1;
3959 }
3960
3961#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
1828 list_add_tail(&info->shrinklist,
1829 &sbinfo->shrinklist);
1830 sbinfo->shrinklist_len++;
1831 }
1832 spin_unlock(&sbinfo->shrinklist_lock);
1833 }
1834
1835 /*

--- 2125 unchanged lines hidden (view full) ---

3961 shm_mnt = kern_mount(&shmem_fs_type);
3962 if (IS_ERR(shm_mnt)) {
3963 error = PTR_ERR(shm_mnt);
3964 pr_err("Could not kern_mount tmpfs\n");
3965 goto out1;
3966 }
3967
3968#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3962 if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY)
3969 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
3963 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3964 else
3965 shmem_huge = 0; /* just in case it was patched */
3966#endif
3967 return 0;
3968
3969out1:
3970 unregister_filesystem(&shmem_fs_type);

--- 44 unchanged lines hidden (view full) ---

4015 huge = shmem_parse_huge(tmp);
4016 if (huge == -EINVAL)
4017 return -EINVAL;
4018 if (!has_transparent_hugepage() &&
4019 huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
4020 return -EINVAL;
4021
4022 shmem_huge = huge;
3970 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3971 else
3972 shmem_huge = 0; /* just in case it was patched */
3973#endif
3974 return 0;
3975
3976out1:
3977 unregister_filesystem(&shmem_fs_type);

--- 44 unchanged lines hidden (view full) ---

4022 huge = shmem_parse_huge(tmp);
4023 if (huge == -EINVAL)
4024 return -EINVAL;
4025 if (!has_transparent_hugepage() &&
4026 huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
4027 return -EINVAL;
4028
4029 shmem_huge = huge;
4023 if (shmem_huge < SHMEM_HUGE_DENY)
4030 if (shmem_huge > SHMEM_HUGE_DENY)
4024 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4025 return count;
4026}
4027
/*
 * sysfs attribute "shmem_enabled" (mode 0644): read via
 * shmem_enabled_show(), written via shmem_enabled_store().
 */
struct kobj_attribute shmem_enabled_attr =
	__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
4030#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
4031

--- 259 unchanged lines hidden ---
4031 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4032 return count;
4033}
4034
4035struct kobj_attribute shmem_enabled_attr =
4036 __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
4037#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
4038

--- 259 unchanged lines hidden ---