Lines matching refs:inode (cross-reference hits in mm/shmem.c)

148 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
210 static int shmem_inode_acct_block(struct inode *inode, long pages) in shmem_inode_acct_block() argument
212 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_inode_acct_block()
213 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_inode_acct_block()
225 err = dquot_alloc_block_nodirty(inode, pages); in shmem_inode_acct_block()
231 err = dquot_alloc_block_nodirty(inode, pages); in shmem_inode_acct_block()
243 static void shmem_inode_unacct_blocks(struct inode *inode, long pages) in shmem_inode_unacct_blocks() argument
245 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_inode_unacct_blocks()
246 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_inode_unacct_blocks()
249 dquot_free_block_nodirty(inode, pages); in shmem_inode_unacct_blocks()
314 static struct dquot __rcu **shmem_get_dquots(struct inode *inode) in shmem_get_dquots() argument
316 return SHMEM_I(inode)->i_dquot; in shmem_get_dquots()
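The references above cover the per-inode block accounting pair and the quota hook: shmem_inode_acct_block() charges an inode for a number of blocks (calling dquot_alloc_block_nodirty()) before the space is used, shmem_inode_unacct_blocks() returns the charge through dquot_free_block_nodirty(), and shmem_get_dquots() exposes the inode's dquot array to the quota core. A minimal sketch of the charge/roll-back pattern those signatures imply; example_use_blocks() and example_allocate() are hypothetical stand-ins for a caller, not the mm/shmem.c code itself:

static int example_use_blocks(struct inode *inode, long pages)
{
        int err;

        /* charge quota and the superblock limits up front */
        err = shmem_inode_acct_block(inode, pages);
        if (err)
                return err;

        if (!example_allocate(inode, pages)) {  /* hypothetical allocation step */
                /* allocation failed: give the charge back */
                shmem_inode_unacct_blocks(inode, pages);
                return -ENOMEM;
        }
        return 0;
}

The same acquire-then-undo shape shows up again further down in the shmem_alloc_and_acct_folio() and shmem_mfill_atomic_pte() references.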
418 static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped) in shmem_recalc_inode() argument
420 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_recalc_inode()
427 READ_ONCE(inode->i_mapping->nrpages); in shmem_recalc_inode()
443 shmem_inode_unacct_blocks(inode, freed); in shmem_recalc_inode()
446 bool shmem_charge(struct inode *inode, long pages) in shmem_charge() argument
448 struct address_space *mapping = inode->i_mapping; in shmem_charge()
450 if (shmem_inode_acct_block(inode, pages)) in shmem_charge()
458 shmem_recalc_inode(inode, pages, 0); in shmem_charge()
462 void shmem_uncharge(struct inode *inode, long pages) in shmem_uncharge() argument
467 shmem_recalc_inode(inode, 0, 0); in shmem_uncharge()
538 bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force, in shmem_is_huge() argument
543 if (!S_ISREG(inode->i_mode)) in shmem_is_huge()
552 switch (SHMEM_SB(inode->i_sb)->huge) { in shmem_is_huge()
557 i_size = round_up(i_size_read(inode), PAGE_SIZE); in shmem_is_huge()
617 struct inode *inode; in shmem_unused_huge_shrink() local
631 inode = igrab(&info->vfs_inode); in shmem_unused_huge_shrink()
634 if (!inode) { in shmem_unused_huge_shrink()
640 if (round_up(inode->i_size, PAGE_SIZE) == in shmem_unused_huge_shrink()
641 round_up(inode->i_size, HPAGE_PMD_SIZE)) { in shmem_unused_huge_shrink()
656 inode = &info->vfs_inode; in shmem_unused_huge_shrink()
658 iput(inode); in shmem_unused_huge_shrink()
666 inode = &info->vfs_inode; in shmem_unused_huge_shrink()
671 index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT; in shmem_unused_huge_shrink()
672 folio = filemap_get_folio(inode->i_mapping, index); in shmem_unused_huge_shrink()
718 iput(inode); in shmem_unused_huge_shrink()
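The rounding comparison inside shmem_unused_huge_shrink() above decides whether an inode is worth keeping on the shrink list: when rounding i_size up to PAGE_SIZE and up to HPAGE_PMD_SIZE gives the same value, the file ends exactly on a PMD boundary and the last huge page has no tail to free. A small sketch of that test; example_has_freeable_tail() is hypothetical, and the worked numbers assume 4 KiB pages with a 2 MiB PMD as on x86-64:

/* true when the file's last huge page extends past EOF and could be split */
static bool example_has_freeable_tail(loff_t i_size)
{
        return round_up(i_size, PAGE_SIZE) !=
               round_up(i_size, HPAGE_PMD_SIZE);
}

/*
 * i_size = 2 MiB       -> 2 MiB vs 2 MiB          -> false, nothing to reclaim
 * i_size = 2 MiB + 1   -> 2 MiB + 4 KiB vs 4 MiB  -> true, tail worth splitting
 */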
899 struct inode *inode = file_inode(vma->vm_file); in shmem_swap_usage() local
900 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_swap_usage()
901 struct address_space *mapping = inode->i_mapping; in shmem_swap_usage()
915 if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size) in shmem_swap_usage()
943 static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index) in shmem_get_partial_folio() argument
951 folio = filemap_get_entry(inode->i_mapping, index); in shmem_get_partial_folio()
956 if (folio->mapping == inode->i_mapping) in shmem_get_partial_folio()
967 shmem_get_folio(inode, index, &folio, SGP_READ); in shmem_get_partial_folio()
975 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, in shmem_undo_range() argument
978 struct address_space *mapping = inode->i_mapping; in shmem_undo_range()
979 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_undo_range()
1030 folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT); in shmem_undo_range()
1045 folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT); in shmem_undo_range()
1120 shmem_recalc_inode(inode, 0, -nr_swaps_freed); in shmem_undo_range()
1123 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) in shmem_truncate_range() argument
1125 shmem_undo_range(inode, lstart, lend, false); in shmem_truncate_range()
1126 inode->i_mtime = inode_set_ctime_current(inode); in shmem_truncate_range()
1127 inode_inc_iversion(inode); in shmem_truncate_range()
1135 struct inode *inode = path->dentry->d_inode; in shmem_getattr() local
1136 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_getattr()
1138 if (info->alloced - info->swapped != inode->i_mapping->nrpages) in shmem_getattr()
1139 shmem_recalc_inode(inode, 0, 0); in shmem_getattr()
1150 generic_fillattr(idmap, request_mask, inode, stat); in shmem_getattr()
1152 if (shmem_is_huge(inode, 0, false, NULL, 0)) in shmem_getattr()
1167 struct inode *inode = d_inode(dentry); in shmem_setattr() local
1168 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_setattr()
1178 if ((inode->i_mode ^ attr->ia_mode) & 0111) { in shmem_setattr()
1183 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { in shmem_setattr()
1184 loff_t oldsize = inode->i_size; in shmem_setattr()
1193 error = shmem_reacct_size(SHMEM_I(inode)->flags, in shmem_setattr()
1197 i_size_write(inode, newsize); in shmem_setattr()
1205 unmap_mapping_range(inode->i_mapping, in shmem_setattr()
1208 shmem_truncate_range(inode, in shmem_setattr()
1212 unmap_mapping_range(inode->i_mapping, in shmem_setattr()
1217 if (is_quota_modification(idmap, inode, attr)) { in shmem_setattr()
1218 error = dquot_initialize(inode); in shmem_setattr()
1224 if (i_uid_needs_update(idmap, attr, inode) || in shmem_setattr()
1225 i_gid_needs_update(idmap, attr, inode)) { in shmem_setattr()
1226 error = dquot_transfer(idmap, inode, attr); in shmem_setattr()
1232 setattr_copy(idmap, inode, attr); in shmem_setattr()
1234 error = posix_acl_chmod(idmap, dentry, inode->i_mode); in shmem_setattr()
1236 inode_set_ctime_current(inode); in shmem_setattr()
1238 inode->i_mtime = inode_get_ctime(inode); in shmem_setattr()
1239 inode_inc_iversion(inode); in shmem_setattr()
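For a shrinking truncate, the shmem_setattr() references above follow a fixed order: publish the new size, unmap the now-stale range from every user mapping, free its pagecache and swap, then unmap once more to catch mappings re-established by racing faults. A condensed sketch of that sequence; example_shrink() is hypothetical, and holebegin is assumed to be the new size rounded up to a page boundary (that rounding is not visible in this listing):

static void example_shrink(struct inode *inode, loff_t newsize, loff_t holebegin)
{
        i_size_write(inode, newsize);
        /* first pass: drop user mappings beyond the new EOF */
        unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
        /* free pagecache and swap for the truncated range */
        shmem_truncate_range(inode, newsize, (loff_t)-1);
        /* second pass: remove mappings re-created by faults that raced in */
        unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
}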
1244 static void shmem_evict_inode(struct inode *inode) in shmem_evict_inode() argument
1246 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_evict_inode()
1247 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_evict_inode()
1250 if (shmem_mapping(inode->i_mapping)) { in shmem_evict_inode()
1251 shmem_unacct_size(info->flags, inode->i_size); in shmem_evict_inode()
1252 inode->i_size = 0; in shmem_evict_inode()
1253 mapping_set_exiting(inode->i_mapping); in shmem_evict_inode()
1254 shmem_truncate_range(inode, 0, (loff_t)-1); in shmem_evict_inode()
1276 shmem_free_inode(inode->i_sb, freed); in shmem_evict_inode()
1277 WARN_ON(inode->i_blocks); in shmem_evict_inode()
1278 clear_inode(inode); in shmem_evict_inode()
1280 dquot_free_inode(inode); in shmem_evict_inode()
1281 dquot_drop(inode); in shmem_evict_inode()
1327 static int shmem_unuse_swap_entries(struct inode *inode, in shmem_unuse_swap_entries() argument
1333 struct address_space *mapping = inode->i_mapping; in shmem_unuse_swap_entries()
1340 error = shmem_swapin_folio(inode, indices[i], in shmem_unuse_swap_entries()
1359 static int shmem_unuse_inode(struct inode *inode, unsigned int type) in shmem_unuse_inode() argument
1361 struct address_space *mapping = inode->i_mapping; in shmem_unuse_inode()
1375 ret = shmem_unuse_swap_entries(inode, &fbatch, indices); in shmem_unuse_inode()
1437 struct inode *inode = mapping->host; in shmem_writepage() local
1438 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_writepage()
1439 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_writepage()
1487 if (inode->i_private) { in shmem_writepage()
1489 spin_lock(&inode->i_lock); in shmem_writepage()
1490 shmem_falloc = inode->i_private; in shmem_writepage()
1498 spin_unlock(&inode->i_lock); in shmem_writepage()
1526 shmem_recalc_inode(inode, 0, 1); in shmem_writepage()
1675 static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode, in shmem_alloc_and_acct_folio() argument
1678 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_alloc_and_acct_folio()
1687 err = shmem_inode_acct_block(inode, nr); in shmem_alloc_and_acct_folio()
1702 shmem_inode_unacct_blocks(inode, nr); in shmem_alloc_and_acct_folio()
1793 static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index, in shmem_set_folio_swapin_error() argument
1796 struct address_space *mapping = inode->i_mapping; in shmem_set_folio_swapin_error()
1814 shmem_recalc_inode(inode, -1, -1); in shmem_set_folio_swapin_error()
1824 static int shmem_swapin_folio(struct inode *inode, pgoff_t index, in shmem_swapin_folio() argument
1829 struct address_space *mapping = inode->i_mapping; in shmem_swapin_folio()
1830 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_swapin_folio()
1901 shmem_recalc_inode(inode, 0, -1); in shmem_swapin_folio()
1917 shmem_set_folio_swapin_error(inode, index, folio, swap); in shmem_swapin_folio()
1938 static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, in shmem_get_folio_gfp() argument
1943 struct address_space *mapping = inode->i_mapping; in shmem_get_folio_gfp()
1944 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_get_folio_gfp()
1958 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { in shmem_get_folio_gfp()
1962 sbinfo = SHMEM_SB(inode->i_sb); in shmem_get_folio_gfp()
1974 error = shmem_swapin_folio(inode, index, &folio, in shmem_get_folio_gfp()
2022 if (!shmem_is_huge(inode, index, false, in shmem_get_folio_gfp()
2028 folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true); in shmem_get_folio_gfp()
2031 folio = shmem_alloc_and_acct_folio(gfp, inode, index, false); in shmem_get_folio_gfp()
2068 shmem_recalc_inode(inode, folio_nr_pages(folio), 0); in shmem_get_folio_gfp()
2072 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < in shmem_get_folio_gfp()
2113 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { in shmem_get_folio_gfp()
2117 shmem_recalc_inode(inode, 0, 0); in shmem_get_folio_gfp()
2130 shmem_inode_unacct_blocks(inode, folio_nr_pages(folio)); in shmem_get_folio_gfp()
2143 shmem_recalc_inode(inode, 0, 0); in shmem_get_folio_gfp()
2151 int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop, in shmem_get_folio() argument
2154 return shmem_get_folio_gfp(inode, index, foliop, sgp, in shmem_get_folio()
2155 mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL); in shmem_get_folio()
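shmem_get_folio() above is the thin wrapper that the read, splice, write_begin, fallocate and symlink references elsewhere in this listing go through; it looks up or creates the folio at the given index according to the SGP_* mode. A minimal sketch of the calling convention those sites follow; example_peek() is hypothetical, folio_unlock()/folio_put() are standard folio helpers not shown in this listing, and treating a NULL folio with no error as a hole is an assumption about the SGP_READ case:

static int example_peek(struct inode *inode, pgoff_t index)
{
        struct folio *folio = NULL;
        int err;

        err = shmem_get_folio(inode, index, &folio, SGP_READ);
        if (err)
                return err;
        if (!folio)
                return 0;               /* hole: nothing cached at this index */

        folio_unlock(folio);            /* the folio is handed back locked */
        /* ... read from the folio here ... */
        folio_put(folio);
        return 0;
}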
2173 struct inode *inode = file_inode(vma->vm_file); in shmem_fault() local
2174 gfp_t gfp = mapping_gfp_mask(inode->i_mapping); in shmem_fault()
2196 if (unlikely(inode->i_private)) { in shmem_fault()
2199 spin_lock(&inode->i_lock); in shmem_fault()
2200 shmem_falloc = inode->i_private; in shmem_fault()
2217 spin_unlock(&inode->i_lock); in shmem_fault()
2227 spin_lock(&inode->i_lock); in shmem_fault()
2229 spin_unlock(&inode->i_lock); in shmem_fault()
2235 spin_unlock(&inode->i_lock); in shmem_fault()
2238 err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE, in shmem_fault()
2339 struct inode *inode = file_inode(vma->vm_file); in shmem_set_policy() local
2340 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); in shmem_set_policy()
2346 struct inode *inode = file_inode(vma->vm_file); in shmem_get_policy() local
2350 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); in shmem_get_policy()
2356 struct inode *inode = file_inode(file); in shmem_lock() local
2357 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_lock()
2366 if (!user_shm_lock(inode->i_size, ucounts)) in shmem_lock()
2372 user_shm_unlock(inode->i_size, ucounts); in shmem_lock()
2384 struct inode *inode = file_inode(file); in shmem_mmap() local
2385 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_mmap()
2397 if (inode->i_nlink) in shmem_mmap()
2404 static int shmem_file_open(struct inode *inode, struct file *file) in shmem_file_open() argument
2407 return generic_file_open(inode, file); in shmem_file_open()
2411 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2417 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags) in shmem_set_inode_flags() argument
2430 inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE); in shmem_set_inode_flags()
2433 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags) in shmem_set_inode_flags() argument
2439 static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode) in shmem_get_offset_ctx() argument
2441 return &SHMEM_I(inode)->dir_offsets; in shmem_get_offset_ctx()
2444 static struct inode *__shmem_get_inode(struct mnt_idmap *idmap, in __shmem_get_inode()
2446 struct inode *dir, umode_t mode, in __shmem_get_inode()
2449 struct inode *inode; in __shmem_get_inode() local
2460 inode = new_inode(sb); in __shmem_get_inode()
2461 if (!inode) { in __shmem_get_inode()
2466 inode->i_ino = ino; in __shmem_get_inode()
2467 inode_init_owner(idmap, inode, dir, mode); in __shmem_get_inode()
2468 inode->i_blocks = 0; in __shmem_get_inode()
2469 inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode); in __shmem_get_inode()
2470 inode->i_generation = get_random_u32(); in __shmem_get_inode()
2471 info = SHMEM_I(inode); in __shmem_get_inode()
2472 memset(info, 0, (char *)inode - (char *)info); in __shmem_get_inode()
2477 info->i_crtime = inode->i_mtime; in __shmem_get_inode()
2481 shmem_set_inode_flags(inode, info->fsflags); in __shmem_get_inode()
2486 mapping_set_unevictable(inode->i_mapping); in __shmem_get_inode()
2488 cache_no_acl(inode); in __shmem_get_inode()
2489 mapping_set_large_folios(inode->i_mapping); in __shmem_get_inode()
2493 inode->i_op = &shmem_special_inode_operations; in __shmem_get_inode()
2494 init_special_inode(inode, mode, dev); in __shmem_get_inode()
2497 inode->i_mapping->a_ops = &shmem_aops; in __shmem_get_inode()
2498 inode->i_op = &shmem_inode_operations; in __shmem_get_inode()
2499 inode->i_fop = &shmem_file_operations; in __shmem_get_inode()
2504 inc_nlink(inode); in __shmem_get_inode()
2506 inode->i_size = 2 * BOGO_DIRENT_SIZE; in __shmem_get_inode()
2507 inode->i_op = &shmem_dir_inode_operations; in __shmem_get_inode()
2508 inode->i_fop = &simple_offset_dir_operations; in __shmem_get_inode()
2509 simple_offset_init(shmem_get_offset_ctx(inode)); in __shmem_get_inode()
2520 lockdep_annotate_inode_mutex_key(inode); in __shmem_get_inode()
2521 return inode; in __shmem_get_inode()
2525 static struct inode *shmem_get_inode(struct mnt_idmap *idmap, in shmem_get_inode()
2526 struct super_block *sb, struct inode *dir, in shmem_get_inode()
2530 struct inode *inode; in shmem_get_inode() local
2532 inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags); in shmem_get_inode()
2533 if (IS_ERR(inode)) in shmem_get_inode()
2534 return inode; in shmem_get_inode()
2536 err = dquot_initialize(inode); in shmem_get_inode()
2540 err = dquot_alloc_inode(inode); in shmem_get_inode()
2542 dquot_drop(inode); in shmem_get_inode()
2545 return inode; in shmem_get_inode()
2548 inode->i_flags |= S_NOQUOTA; in shmem_get_inode()
2549 iput(inode); in shmem_get_inode()
2553 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, in shmem_get_inode()
2554 struct super_block *sb, struct inode *dir, in shmem_get_inode()
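Both shmem_get_inode() variants above report failure as an ERR_PTR() rather than NULL, which is why the mknod, tmpfile, symlink and fill_super references further down test IS_ERR()/PTR_ERR() and release the inode with iput() on later errors. A stripped-down sketch of that creation pattern, modelled on the shmem_mknod() lines below; example_mknod() is hypothetical and the security/ACL steps are omitted:

static int example_mknod(struct mnt_idmap *idmap, struct inode *dir,
                         struct dentry *dentry, umode_t mode)
{
        struct inode *inode;

        inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        /* any later failure would drop the fresh inode with iput() */
        d_instantiate(dentry, inode);
        return 0;
}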
2569 struct inode *inode = file_inode(dst_vma->vm_file); in shmem_mfill_atomic_pte() local
2570 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_mfill_atomic_pte()
2571 struct address_space *mapping = inode->i_mapping; in shmem_mfill_atomic_pte()
2579 if (shmem_inode_acct_block(inode, 1)) { in shmem_mfill_atomic_pte()
2647 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); in shmem_mfill_atomic_pte()
2661 shmem_recalc_inode(inode, 1, 0); in shmem_mfill_atomic_pte()
2670 shmem_inode_unacct_blocks(inode, 1); in shmem_mfill_atomic_pte()
2684 struct inode *inode = mapping->host; in shmem_write_begin() local
2685 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_write_begin()
2695 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) in shmem_write_begin()
2699 ret = shmem_get_folio(inode, index, &folio, SGP_WRITE); in shmem_write_begin()
2721 struct inode *inode = mapping->host; in shmem_write_end() local
2723 if (pos + copied > inode->i_size) in shmem_write_end()
2724 i_size_write(inode, pos + copied); in shmem_write_end()
2744 struct inode *inode = file_inode(file); in shmem_file_read_iter() local
2745 struct address_space *mapping = inode->i_mapping; in shmem_file_read_iter()
2760 loff_t i_size = i_size_read(inode); in shmem_file_read_iter()
2771 error = shmem_get_folio(inode, index, &folio, SGP_READ); in shmem_file_read_iter()
2793 i_size = i_size_read(inode); in shmem_file_read_iter()
2863 struct inode *inode = file->f_mapping->host; in shmem_file_write_iter() local
2866 inode_lock(inode); in shmem_file_write_iter()
2878 inode_unlock(inode); in shmem_file_write_iter()
2931 struct inode *inode = file_inode(in); in shmem_file_splice_read() local
2932 struct address_space *mapping = inode->i_mapping; in shmem_file_splice_read()
2944 if (*ppos >= i_size_read(inode)) in shmem_file_splice_read()
2947 error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio, in shmem_file_splice_read()
2973 isize = i_size_read(inode); in shmem_file_splice_read()
3020 struct inode *inode = mapping->host; in shmem_file_llseek() local
3024 MAX_LFS_FILESIZE, i_size_read(inode)); in shmem_file_llseek()
3028 inode_lock(inode); in shmem_file_llseek()
3030 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence); in shmem_file_llseek()
3033 inode_unlock(inode); in shmem_file_llseek()
3040 struct inode *inode = file_inode(file); in shmem_fallocate() local
3041 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_fallocate()
3042 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_fallocate()
3050 inode_lock(inode); in shmem_fallocate()
3067 spin_lock(&inode->i_lock); in shmem_fallocate()
3068 inode->i_private = &shmem_falloc; in shmem_fallocate()
3069 spin_unlock(&inode->i_lock); in shmem_fallocate()
3074 shmem_truncate_range(inode, offset, offset + len - 1); in shmem_fallocate()
3077 spin_lock(&inode->i_lock); in shmem_fallocate()
3078 inode->i_private = NULL; in shmem_fallocate()
3081 spin_unlock(&inode->i_lock); in shmem_fallocate()
3087 error = inode_newsize_ok(inode, offset + len); in shmem_fallocate()
3091 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { in shmem_fallocate()
3109 spin_lock(&inode->i_lock); in shmem_fallocate()
3110 inode->i_private = &shmem_falloc; in shmem_fallocate()
3111 spin_unlock(&inode->i_lock); in shmem_fallocate()
3134 error = shmem_get_folio(inode, index, &folio, in shmem_fallocate()
3140 shmem_undo_range(inode, in shmem_fallocate()
3178 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) in shmem_fallocate()
3179 i_size_write(inode, offset + len); in shmem_fallocate()
3181 spin_lock(&inode->i_lock); in shmem_fallocate()
3182 inode->i_private = NULL; in shmem_fallocate()
3183 spin_unlock(&inode->i_lock); in shmem_fallocate()
3187 inode_unlock(inode); in shmem_fallocate()
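The shmem_fallocate() references above also show how an in-flight operation is advertised to the rest of the file: the on-stack shmem_falloc descriptor is published through inode->i_private under inode->i_lock before the range is touched and cleared under the same lock afterwards, which is what the shmem_writepage() and shmem_fault() references earlier in this listing read back. The locking shape, reduced to its essentials; example_publish() is hypothetical and the fields of struct shmem_falloc are not part of this listing:

static void example_publish(struct inode *inode, struct shmem_falloc *shmem_falloc)
{
        spin_lock(&inode->i_lock);
        inode->i_private = shmem_falloc;        /* now visible to faults and writeback */
        spin_unlock(&inode->i_lock);

        /* ... punch the hole or instantiate the range ... */

        spin_lock(&inode->i_lock);
        inode->i_private = NULL;                /* operation finished */
        spin_unlock(&inode->i_lock);
}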
3219 shmem_mknod(struct mnt_idmap *idmap, struct inode *dir, in shmem_mknod()
3222 struct inode *inode; in shmem_mknod() local
3225 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE); in shmem_mknod()
3226 if (IS_ERR(inode)) in shmem_mknod()
3227 return PTR_ERR(inode); in shmem_mknod()
3229 error = simple_acl_create(dir, inode); in shmem_mknod()
3232 error = security_inode_init_security(inode, dir, in shmem_mknod()
3245 d_instantiate(dentry, inode); in shmem_mknod()
3250 iput(inode); in shmem_mknod()
3255 shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir, in shmem_tmpfile()
3258 struct inode *inode; in shmem_tmpfile() local
3261 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE); in shmem_tmpfile()
3263 if (IS_ERR(inode)) { in shmem_tmpfile()
3264 error = PTR_ERR(inode); in shmem_tmpfile()
3268 error = security_inode_init_security(inode, dir, in shmem_tmpfile()
3273 error = simple_acl_create(dir, inode); in shmem_tmpfile()
3276 d_tmpfile(file, inode); in shmem_tmpfile()
3281 iput(inode); in shmem_tmpfile()
3285 static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir, in shmem_mkdir()
3297 static int shmem_create(struct mnt_idmap *idmap, struct inode *dir, in shmem_create()
3306 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) in shmem_link()
3308 struct inode *inode = d_inode(old_dentry); in shmem_link() local
3318 if (inode->i_nlink) { in shmem_link()
3319 ret = shmem_reserve_inode(inode->i_sb, NULL); in shmem_link()
3326 if (inode->i_nlink) in shmem_link()
3327 shmem_free_inode(inode->i_sb, 0); in shmem_link()
3333 inode_set_ctime_current(inode)); in shmem_link()
3335 inc_nlink(inode); in shmem_link()
3336 ihold(inode); /* New dentry reference */ in shmem_link()
3338 d_instantiate(dentry, inode); in shmem_link()
3343 static int shmem_unlink(struct inode *dir, struct dentry *dentry) in shmem_unlink()
3345 struct inode *inode = d_inode(dentry); in shmem_unlink() local
3347 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) in shmem_unlink()
3348 shmem_free_inode(inode->i_sb, 0); in shmem_unlink()
3354 inode_set_ctime_current(inode)); in shmem_unlink()
3356 drop_nlink(inode); in shmem_unlink()
3361 static int shmem_rmdir(struct inode *dir, struct dentry *dentry) in shmem_rmdir()
3372 struct inode *old_dir, struct dentry *old_dentry) in shmem_whiteout()
3405 struct inode *old_dir, struct dentry *old_dentry, in shmem_rename2()
3406 struct inode *new_dir, struct dentry *new_dentry, in shmem_rename2()
3409 struct inode *inode = d_inode(old_dentry); in shmem_rename2() local
3410 int they_are_dirs = S_ISDIR(inode->i_mode); in shmem_rename2()
3453 static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir, in shmem_symlink()
3458 struct inode *inode; in shmem_symlink() local
3465 inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0, in shmem_symlink()
3468 if (IS_ERR(inode)) in shmem_symlink()
3469 return PTR_ERR(inode); in shmem_symlink()
3471 error = security_inode_init_security(inode, dir, &dentry->d_name, in shmem_symlink()
3480 inode->i_size = len-1; in shmem_symlink()
3482 inode->i_link = kmemdup(symname, len, GFP_KERNEL); in shmem_symlink()
3483 if (!inode->i_link) { in shmem_symlink()
3487 inode->i_op = &shmem_short_symlink_operations; in shmem_symlink()
3489 inode_nohighmem(inode); in shmem_symlink()
3490 error = shmem_get_folio(inode, 0, &folio, SGP_WRITE); in shmem_symlink()
3493 inode->i_mapping->a_ops = &shmem_aops; in shmem_symlink()
3494 inode->i_op = &shmem_symlink_inode_operations; in shmem_symlink()
3504 d_instantiate(dentry, inode); in shmem_symlink()
3511 iput(inode); in shmem_symlink()
3522 struct inode *inode, in shmem_get_link() argument
3529 folio = filemap_get_folio(inode->i_mapping, 0); in shmem_get_link()
3538 error = shmem_get_folio(inode, 0, &folio, SGP_READ); in shmem_get_link()
3568 struct inode *inode = d_inode(dentry); in shmem_fileattr_set() local
3569 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_fileattr_set()
3579 shmem_set_inode_flags(inode, info->fsflags); in shmem_fileattr_set()
3580 inode_set_ctime_current(inode); in shmem_fileattr_set()
3581 inode_inc_iversion(inode); in shmem_fileattr_set()
3595 static int shmem_initxattrs(struct inode *inode, in shmem_initxattrs() argument
3599 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_initxattrs()
3600 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_initxattrs()
3658 struct dentry *unused, struct inode *inode, in shmem_xattr_handler_get() argument
3661 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_xattr_handler_get()
3669 struct dentry *unused, struct inode *inode, in shmem_xattr_handler_set() argument
3673 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_xattr_handler_set()
3674 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_xattr_handler_set()
3699 inode_set_ctime_current(inode); in shmem_xattr_handler_set()
3700 inode_inc_iversion(inode); in shmem_xattr_handler_set()
3765 static int shmem_match(struct inode *ino, void *vfh) in shmem_match()
3774 static struct dentry *shmem_find_alias(struct inode *inode) in shmem_find_alias() argument
3776 struct dentry *alias = d_find_alias(inode); in shmem_find_alias()
3778 return alias ?: d_find_any_alias(inode); in shmem_find_alias()
3785 struct inode *inode; in shmem_fh_to_dentry() local
3795 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), in shmem_fh_to_dentry()
3797 if (inode) { in shmem_fh_to_dentry()
3798 dentry = shmem_find_alias(inode); in shmem_fh_to_dentry()
3799 iput(inode); in shmem_fh_to_dentry()
3805 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, in shmem_encode_fh() argument
3806 struct inode *parent) in shmem_encode_fh()
3813 if (inode_unhashed(inode)) { in shmem_encode_fh()
3821 if (inode_unhashed(inode)) in shmem_encode_fh()
3822 __insert_inode_hash(inode, in shmem_encode_fh()
3823 inode->i_ino + inode->i_generation); in shmem_encode_fh()
3827 fh[0] = inode->i_generation; in shmem_encode_fh()
3828 fh[1] = inode->i_ino; in shmem_encode_fh()
3829 fh[2] = ((__u64)inode->i_ino) >> 32; in shmem_encode_fh()
4280 struct inode *inode; in shmem_fill_super() local
4365 inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL, S_IFDIR | sbinfo->mode, 0, in shmem_fill_super()
4367 if (IS_ERR(inode)) { in shmem_fill_super()
4368 error = PTR_ERR(inode); in shmem_fill_super()
4371 inode->i_uid = sbinfo->uid; in shmem_fill_super()
4372 inode->i_gid = sbinfo->gid; in shmem_fill_super()
4373 sb->s_root = d_make_root(inode); in shmem_fill_super()
4410 static struct inode *shmem_alloc_inode(struct super_block *sb) in shmem_alloc_inode()
4419 static void shmem_free_in_core_inode(struct inode *inode) in shmem_free_in_core_inode() argument
4421 if (S_ISLNK(inode->i_mode)) in shmem_free_in_core_inode()
4422 kfree(inode->i_link); in shmem_free_in_core_inode()
4423 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); in shmem_free_in_core_inode()
4426 static void shmem_destroy_inode(struct inode *inode) in shmem_destroy_inode() argument
4428 if (S_ISREG(inode->i_mode)) in shmem_destroy_inode()
4429 mpol_free_shared_policy(&SHMEM_I(inode)->policy); in shmem_destroy_inode()
4430 if (S_ISDIR(inode->i_mode)) in shmem_destroy_inode()
4431 simple_offset_destroy(shmem_get_offset_ctx(inode)); in shmem_destroy_inode()
4758 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) in shmem_truncate_range() argument
4760 truncate_inode_pages_range(inode->i_mapping, lstart, lend); in shmem_truncate_range()
4770 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block *sb, struct… in shmem_get_inode()
4773 struct inode *inode = ramfs_get_inode(sb, dir, mode, dev); in shmem_get_inode() local
4774 return inode ? inode : ERR_PTR(-ENOSPC); in shmem_get_inode()
4784 struct inode *inode; in __shmem_file_setup() local
4799 inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL, in __shmem_file_setup()
4802 if (IS_ERR(inode)) { in __shmem_file_setup()
4804 return ERR_CAST(inode); in __shmem_file_setup()
4806 inode->i_flags |= i_flags; in __shmem_file_setup()
4807 inode->i_size = size; in __shmem_file_setup()
4808 clear_nlink(inode); /* It is unlinked */ in __shmem_file_setup()
4809 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size)); in __shmem_file_setup()
4811 res = alloc_file_pseudo(inode, mnt, name, O_RDWR, in __shmem_file_setup()
4814 iput(inode); in __shmem_file_setup()
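__shmem_file_setup() above wraps a freshly created, immediately unlinked inode in a struct file; other kernel code normally reaches it through the exported shmem_file_setup() helper. A hedged usage sketch under that assumption; example_setup() is hypothetical, and shmem_file_setup(), SZ_1M and fput() do not appear in this listing:

static int example_setup(void)
{
        struct file *file;

        file = shmem_file_setup("example-buffer", SZ_1M, VM_NORESERVE);
        if (IS_ERR(file))
                return PTR_ERR(file);

        /* file->f_mapping now backs an unlinked tmpfs object of up to 1 MiB */
        /* ... use it ... */
        fput(file);
        return 0;
}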
4905 struct inode *inode = mapping->host; in shmem_read_folio_gfp() local
4910 error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE, in shmem_read_folio_gfp()
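The final references are from shmem_read_folio_gfp(), which fetches (allocating if necessary, per SGP_CACHE) the folio at the requested index on behalf of callers outside shmem. A hedged calling sketch; example_read() is hypothetical, the exported signature taking a mapping, index and gfp mask is assumed, and folio_put() is not shown in this listing:

static int example_read(struct address_space *mapping, pgoff_t index)
{
        struct folio *folio;

        folio = shmem_read_folio_gfp(mapping, index, GFP_KERNEL);
        if (IS_ERR(folio))
                return PTR_ERR(folio);

        /* the folio comes back with a reference held; drop it when done */
        /* ... copy out of the folio ... */
        folio_put(folio);
        return 0;
}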