/* Diff-viewer export of shmem.c: old revision a3dd14c0d079c214c4de7939dfbb8738887e67e7 vs. new revision 572a3d1e5d3a3e335b92e2c28a63c0b27944480c (old/new hunks interleaved below). */
1/*
2 * Resizable virtual memory filesystem for Linux.
3 *
4 * Copyright (C) 2000 Linus Torvalds.
5 * 2000 Transmeta Corp.
6 * 2000-2001 Christoph Rohland
7 * 2000-2001 SAP AG
8 * 2002 Red Hat Inc.

--- 64 unchanged lines hidden (view full) ---

73#include <linux/highmem.h>
74#include <linux/seq_file.h>
75#include <linux/magic.h>
76#include <linux/syscalls.h>
77#include <linux/fcntl.h>
78#include <uapi/linux/memfd.h>
79#include <linux/rmap.h>
80#include <linux/uuid.h>
1/*
2 * Resizable virtual memory filesystem for Linux.
3 *
4 * Copyright (C) 2000 Linus Torvalds.
5 * 2000 Transmeta Corp.
6 * 2000-2001 Christoph Rohland
7 * 2000-2001 SAP AG
8 * 2002 Red Hat Inc.

--- 64 unchanged lines hidden (view full) ---

73#include <linux/highmem.h>
74#include <linux/seq_file.h>
75#include <linux/magic.h>
76#include <linux/syscalls.h>
77#include <linux/fcntl.h>
78#include <uapi/linux/memfd.h>
79#include <linux/rmap.h>
80#include <linux/uuid.h>
81#include <linux/quotaops.h>
81
82#include <linux/uaccess.h>
83
84#include "internal.h"
85
86#define BLOCKS_PER_PAGE (PAGE_SIZE/512)
87#define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
88
89/* Pretend that each entry is of this size in directory's i_size */
90#define BOGO_DIRENT_SIZE 20
91
82
83#include <linux/uaccess.h>
84
85#include "internal.h"
86
87#define BLOCKS_PER_PAGE (PAGE_SIZE/512)
88#define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
89
90/* Pretend that each entry is of this size in directory's i_size */
91#define BOGO_DIRENT_SIZE 20
92
93/* Pretend that one inode + its dentry occupy this much memory */
94#define BOGO_INODE_SIZE 1024
95
92/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
93#define SHORT_SYMLINK_LEN 128
94
95/*
96 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
97 * inode->i_private (with i_rwsem making sure that it has only one user at
98 * a time): we would prefer not to enlarge the shmem inode just for that.
99 */

--- 11 unchanged lines hidden (view full) ---

111 struct mempolicy *mpol;
112 kuid_t uid;
113 kgid_t gid;
114 umode_t mode;
115 bool full_inums;
116 int huge;
117 int seen;
118 bool noswap;
96/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
97#define SHORT_SYMLINK_LEN 128
98
99/*
100 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
101 * inode->i_private (with i_rwsem making sure that it has only one user at
102 * a time): we would prefer not to enlarge the shmem inode just for that.
103 */

--- 11 unchanged lines hidden (view full) ---

115 struct mempolicy *mpol;
116 kuid_t uid;
117 kgid_t gid;
118 umode_t mode;
119 bool full_inums;
120 int huge;
121 int seen;
122 bool noswap;
123 unsigned short quota_types;
124 struct shmem_quota_limits qlimits;
119#define SHMEM_SEEN_BLOCKS 1
120#define SHMEM_SEEN_INODES 2
121#define SHMEM_SEEN_HUGE 4
122#define SHMEM_SEEN_INUMS 8
123#define SHMEM_SEEN_NOSWAP 16
125#define SHMEM_SEEN_BLOCKS 1
126#define SHMEM_SEEN_INODES 2
127#define SHMEM_SEEN_HUGE 4
128#define SHMEM_SEEN_INUMS 8
129#define SHMEM_SEEN_NOSWAP 16
130#define SHMEM_SEEN_QUOTA 32
124};
125
126#ifdef CONFIG_TMPFS
/*
 * Default ceiling on tmpfs data blocks: half of total RAM pages.
 * NOTE(review): presumably used when the mount options specify no
 * explicit size limit — the option parsing is outside this chunk.
 */
127static unsigned long shmem_default_max_blocks(void)
128{
129 return totalram_pages() / 2;
130}
131
132static unsigned long shmem_default_max_inodes(void)
133{
134 unsigned long nr_pages = totalram_pages();
135
131};
132
133#ifdef CONFIG_TMPFS
/*
 * (New-revision copy.) Default ceiling on tmpfs data blocks: half of
 * total RAM pages; unchanged between the two revisions in this diff.
 */
134static unsigned long shmem_default_max_blocks(void)
135{
136 return totalram_pages() / 2;
137}
138
139static unsigned long shmem_default_max_inodes(void)
140{
141 unsigned long nr_pages = totalram_pages();
142
136 return min(nr_pages - totalhigh_pages(), nr_pages / 2);
143 return min3(nr_pages - totalhigh_pages(), nr_pages / 2,
144 ULONG_MAX / BOGO_INODE_SIZE);
137}
138#endif
139
140static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
141 struct folio **foliop, enum sgp_type sgp,
142 gfp_t gfp, struct vm_area_struct *vma,
143 vm_fault_t *fault_type);
144

--- 49 unchanged lines hidden (view full) ---

194}
195
/*
 * Release the vm_acct memory charge for @pages pages, but only for
 * VM_NORESERVE mappings — only those were charged per-block (the
 * matching charge path is hidden in this diff view).
 */
196static inline void shmem_unacct_blocks(unsigned long flags, long pages)
197{
198 if (flags & VM_NORESERVE)
199 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
200}
201
145}
146#endif
147
148static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
149 struct folio **foliop, enum sgp_type sgp,
150 gfp_t gfp, struct vm_area_struct *vma,
151 vm_fault_t *fault_type);
152

--- 49 unchanged lines hidden (view full) ---

202}
203
/*
 * (New-revision copy.) Release the vm_acct charge for @pages pages when
 * the mapping is VM_NORESERVE; unchanged between the two revisions.
 */
204static inline void shmem_unacct_blocks(unsigned long flags, long pages)
205{
206 if (flags & VM_NORESERVE)
207 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
208}
209
202static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
210static int shmem_inode_acct_block(struct inode *inode, long pages)
203{
204 struct shmem_inode_info *info = SHMEM_I(inode);
205 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
211{
212 struct shmem_inode_info *info = SHMEM_I(inode);
213 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
214 int err = -ENOSPC;
206
207 if (shmem_acct_block(info->flags, pages))
215
216 if (shmem_acct_block(info->flags, pages))
208 return false;
217 return err;
209
218
219 might_sleep(); /* when quotas */
210 if (sbinfo->max_blocks) {
211 if (percpu_counter_compare(&sbinfo->used_blocks,
212 sbinfo->max_blocks - pages) > 0)
213 goto unacct;
220 if (sbinfo->max_blocks) {
221 if (percpu_counter_compare(&sbinfo->used_blocks,
222 sbinfo->max_blocks - pages) > 0)
223 goto unacct;
224
225 err = dquot_alloc_block_nodirty(inode, pages);
226 if (err)
227 goto unacct;
228
214 percpu_counter_add(&sbinfo->used_blocks, pages);
229 percpu_counter_add(&sbinfo->used_blocks, pages);
230 } else {
231 err = dquot_alloc_block_nodirty(inode, pages);
232 if (err)
233 goto unacct;
215 }
216
234 }
235
217 return true;
236 return 0;
218
219unacct:
220 shmem_unacct_blocks(info->flags, pages);
237
238unacct:
239 shmem_unacct_blocks(info->flags, pages);
221 return false;
240 return err;
222}
223
241}
242
224static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
243static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
225{
226 struct shmem_inode_info *info = SHMEM_I(inode);
227 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
228
244{
245 struct shmem_inode_info *info = SHMEM_I(inode);
246 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
247
248 might_sleep(); /* when quotas */
249 dquot_free_block_nodirty(inode, pages);
250
229 if (sbinfo->max_blocks)
230 percpu_counter_sub(&sbinfo->used_blocks, pages);
231 shmem_unacct_blocks(info->flags, pages);
232}
233
234static const struct super_operations shmem_ops;
235const struct address_space_operations shmem_aops;
236static const struct file_operations shmem_file_operations;

--- 12 unchanged lines hidden (view full) ---

/*
 * Return true if @vma is backed by shmem: either anonymous shared
 * memory, or a mapping whose vm_ops point at shmem_vm_ops (tmpfs file).
 */
249bool vma_is_shmem(struct vm_area_struct *vma)
250{
251 return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
252}
253
254static LIST_HEAD(shmem_swaplist);
255static DEFINE_MUTEX(shmem_swaplist_mutex);
256
251 if (sbinfo->max_blocks)
252 percpu_counter_sub(&sbinfo->used_blocks, pages);
253 shmem_unacct_blocks(info->flags, pages);
254}
255
256static const struct super_operations shmem_ops;
257const struct address_space_operations shmem_aops;
258static const struct file_operations shmem_file_operations;

--- 12 unchanged lines hidden (view full) ---

/*
 * (New-revision copy.) True when @vma is anonymous shared memory or a
 * tmpfs file mapping; unchanged between the two revisions in this diff.
 */
271bool vma_is_shmem(struct vm_area_struct *vma)
272{
273 return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
274}
275
276static LIST_HEAD(shmem_swaplist);
277static DEFINE_MUTEX(shmem_swaplist_mutex);
278
279#ifdef CONFIG_TMPFS_QUOTA
280
/*
 * Enable quota tracking on @sb for every quota type whose bit is set in
 * @quota_types (one bit per type, types 0..SHMEM_MAXQUOTAS-1).
 *
 * The DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY flags are set on the
 * superblock's quota options first, then each requested type is loaded
 * via dquot_load_quota_sb() with both usage and limits enforcement
 * enabled, using the QFMT_SHMEM format.
 *
 * Returns 0 on success.  On failure, logs a warning and rolls back the
 * types already enabled (in reverse order) before returning the error
 * from dquot_load_quota_sb().
 */
281static int shmem_enable_quotas(struct super_block *sb,
282 unsigned short quota_types)
283{
284 int type, err = 0;
285
286 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
287 for (type = 0; type < SHMEM_MAXQUOTAS; type++) {
 /* Skip types the caller did not request. */
288 if (!(quota_types & (1 << type)))
289 continue;
290 err = dquot_load_quota_sb(sb, type, QFMT_SHMEM,
291 DQUOT_USAGE_ENABLED |
292 DQUOT_LIMITS_ENABLED);
293 if (err)
294 goto out_err;
295 }
296 return 0;
297
298out_err:
299 pr_warn("tmpfs: failed to enable quota tracking (type=%d, err=%d)\n",
300 type, err);
 /* Undo the types enabled before the failing one. */
301 for (type--; type >= 0; type--)
302 dquot_quota_off(sb, type);
303 return err;
304}
305
/*
 * Counterpart of shmem_enable_quotas(): unconditionally turn off every
 * possible quota type on @sb (dquot_quota_off() on types that were
 * never enabled is expected to be harmless — confirm in quota core).
 */
306static void shmem_disable_quotas(struct super_block *sb)
307{
308 int type;
309
310 for (type = 0; type < SHMEM_MAXQUOTAS; type++)
311 dquot_quota_off(sb, type);
312}
313
/*
 * Return the per-inode dquot pointer array kept in shmem_inode_info.
 * NOTE(review): presumably wired up as the ->get_dquots superblock
 * operation — the dquot_operations table is not visible in this chunk.
 */
314static struct dquot **shmem_get_dquots(struct inode *inode)
315{
316 return SHMEM_I(inode)->i_dquot;
317}
318#endif /* CONFIG_TMPFS_QUOTA */
319
257/*
258 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
259 * produces a novel ino for the newly allocated inode.
260 *
261 * It may also be called when making a hard link to permit the space needed by
262 * each dentry. However, in that case, no new inode number is needed since that
263 * internally draws from another pool of inode numbers (currently global
264 * get_next_ino()). This case is indicated by passing NULL as inop.
265 */
266#define SHMEM_INO_BATCH 1024
267static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
268{
269 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
270 ino_t ino;
271
272 if (!(sb->s_flags & SB_KERNMOUNT)) {
273 raw_spin_lock(&sbinfo->stat_lock);
274 if (sbinfo->max_inodes) {
320/*
321 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
322 * produces a novel ino for the newly allocated inode.
323 *
324 * It may also be called when making a hard link to permit the space needed by
325 * each dentry. However, in that case, no new inode number is needed since that
326 * internally draws from another pool of inode numbers (currently global
327 * get_next_ino()). This case is indicated by passing NULL as inop.
328 */
329#define SHMEM_INO_BATCH 1024
330static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
331{
332 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
333 ino_t ino;
334
335 if (!(sb->s_flags & SB_KERNMOUNT)) {
336 raw_spin_lock(&sbinfo->stat_lock);
337 if (sbinfo->max_inodes) {
275 if (!sbinfo->free_inodes) {
338 if (sbinfo->free_ispace < BOGO_INODE_SIZE) {
276 raw_spin_unlock(&sbinfo->stat_lock);
277 return -ENOSPC;
278 }
339 raw_spin_unlock(&sbinfo->stat_lock);
340 return -ENOSPC;
341 }
279 sbinfo->free_inodes--;
342 sbinfo->free_ispace -= BOGO_INODE_SIZE;
280 }
281 if (inop) {
282 ino = sbinfo->next_ino++;
283 if (unlikely(is_zero_ino(ino)))
284 ino = sbinfo->next_ino++;
285 if (unlikely(!sbinfo->full_inums &&
286 ino > UINT_MAX)) {
287 /*

--- 37 unchanged lines hidden (view full) ---

325 *inop = ino;
326 *next_ino = ++ino;
327 put_cpu();
328 }
329
330 return 0;
331}
332
343 }
344 if (inop) {
345 ino = sbinfo->next_ino++;
346 if (unlikely(is_zero_ino(ino)))
347 ino = sbinfo->next_ino++;
348 if (unlikely(!sbinfo->full_inums &&
349 ino > UINT_MAX)) {
350 /*

--- 37 unchanged lines hidden (view full) ---

388 *inop = ino;
389 *next_ino = ++ino;
390 put_cpu();
391 }
392
393 return 0;
394}
395
333static void shmem_free_inode(struct super_block *sb)
396static void shmem_free_inode(struct super_block *sb, size_t freed_ispace)
334{
335 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
336 if (sbinfo->max_inodes) {
337 raw_spin_lock(&sbinfo->stat_lock);
397{
398 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
399 if (sbinfo->max_inodes) {
400 raw_spin_lock(&sbinfo->stat_lock);
338 sbinfo->free_inodes++;
401 sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace;
339 raw_spin_unlock(&sbinfo->stat_lock);
340 }
341}
342
343/**
344 * shmem_recalc_inode - recalculate the block usage of an inode
345 * @inode: inode to recalc
402 raw_spin_unlock(&sbinfo->stat_lock);
403 }
404}
405
406/**
407 * shmem_recalc_inode - recalculate the block usage of an inode
408 * @inode: inode to recalc
409 * @alloced: the change in number of pages allocated to inode
410 * @swapped: the change in number of pages swapped from inode
346 *
347 * We have to calculate the free blocks since the mm can drop
348 * undirtied hole pages behind our back.
349 *
350 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
351 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
411 *
412 * We have to calculate the free blocks since the mm can drop
413 * undirtied hole pages behind our back.
414 *
415 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
416 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
352 *
353 * It has to be called with the spinlock held.
354 */
417 */
355static void shmem_recalc_inode(struct inode *inode)
418static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
356{
357 struct shmem_inode_info *info = SHMEM_I(inode);
358 long freed;
359
419{
420 struct shmem_inode_info *info = SHMEM_I(inode);
421 long freed;
422
360 freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
361 if (freed > 0) {
423 spin_lock(&info->lock);
424 info->alloced += alloced;
425 info->swapped += swapped;
426 freed = info->alloced - info->swapped -
427 READ_ONCE(inode->i_mapping->nrpages);
428 /*
429 * Special case: whereas normally shmem_recalc_inode() is called
430 * after i_mapping->nrpages has already been adjusted (up or down),
431 * shmem_writepage() has to raise swapped before nrpages is lowered -
432 * to stop a racing shmem_recalc_inode() from thinking that a page has
433 * been freed. Compensate here, to avoid the need for a followup call.
434 */
435 if (swapped > 0)
436 freed += swapped;
437 if (freed > 0)
362 info->alloced -= freed;
438 info->alloced -= freed;
363 inode->i_blocks -= freed * BLOCKS_PER_PAGE;
439 spin_unlock(&info->lock);
440
441 /* The quota case may block */
442 if (freed > 0)
364 shmem_inode_unacct_blocks(inode, freed);
443 shmem_inode_unacct_blocks(inode, freed);
365 }
366}
367
368bool shmem_charge(struct inode *inode, long pages)
369{
444}
445
446bool shmem_charge(struct inode *inode, long pages)
447{
370 struct shmem_inode_info *info = SHMEM_I(inode);
371 unsigned long flags;
448 struct address_space *mapping = inode->i_mapping;
372
449
373 if (!shmem_inode_acct_block(inode, pages))
450 if (shmem_inode_acct_block(inode, pages))
374 return false;
375
376 /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
451 return false;
452
453 /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
377 inode->i_mapping->nrpages += pages;
454 xa_lock_irq(&mapping->i_pages);
455 mapping->nrpages += pages;
456 xa_unlock_irq(&mapping->i_pages);
378
457
379 spin_lock_irqsave(&info->lock, flags);
380 info->alloced += pages;
381 inode->i_blocks += pages * BLOCKS_PER_PAGE;
382 shmem_recalc_inode(inode);
383 spin_unlock_irqrestore(&info->lock, flags);
384
458 shmem_recalc_inode(inode, pages, 0);
385 return true;
386}
387
388void shmem_uncharge(struct inode *inode, long pages)
389{
459 return true;
460}
461
462void shmem_uncharge(struct inode *inode, long pages)
463{
390 struct shmem_inode_info *info = SHMEM_I(inode);
391 unsigned long flags;
392
464 /* pages argument is currently unused: keep it to help debugging */
393 /* nrpages adjustment done by __filemap_remove_folio() or caller */
394
465 /* nrpages adjustment done by __filemap_remove_folio() or caller */
466
395 spin_lock_irqsave(&info->lock, flags);
396 info->alloced -= pages;
397 inode->i_blocks -= pages * BLOCKS_PER_PAGE;
398 shmem_recalc_inode(inode);
399 spin_unlock_irqrestore(&info->lock, flags);
400
401 shmem_inode_unacct_blocks(inode, pages);
467 shmem_recalc_inode(inode, 0, 0);
402}
403
404/*
405 * Replace item expected in xarray by a new item, while holding xa_lock.
406 */
407static int shmem_replace_entry(struct address_space *mapping,
408 pgoff_t index, void *expected, void *replacement)
409{

--- 623 unchanged lines hidden (view full) ---

1033 truncate_inode_folio(mapping, folio);
1034 }
1035 folio_unlock(folio);
1036 }
1037 folio_batch_remove_exceptionals(&fbatch);
1038 folio_batch_release(&fbatch);
1039 }
1040
468}
469
470/*
471 * Replace item expected in xarray by a new item, while holding xa_lock.
472 */
473static int shmem_replace_entry(struct address_space *mapping,
474 pgoff_t index, void *expected, void *replacement)
475{

--- 623 unchanged lines hidden (view full) ---

1099 truncate_inode_folio(mapping, folio);
1100 }
1101 folio_unlock(folio);
1102 }
1103 folio_batch_remove_exceptionals(&fbatch);
1104 folio_batch_release(&fbatch);
1105 }
1106
1041 spin_lock_irq(&info->lock);
1042 info->swapped -= nr_swaps_freed;
1043 shmem_recalc_inode(inode);
1044 spin_unlock_irq(&info->lock);
1107 shmem_recalc_inode(inode, 0, -nr_swaps_freed);
1045}
1046
/*
 * Truncate/punch the byte range [lstart, lend] from @inode: delegate
 * the page/swap-entry removal to shmem_undo_range() (final argument
 * false — its meaning is defined at shmem_undo_range(), hidden in this
 * diff view), then update ctime/mtime and bump the inode version.
 */
1047void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1048{
1049 shmem_undo_range(inode, lstart, lend, false);
1050 inode->i_ctime = inode->i_mtime = current_time(inode);
1051 inode_inc_iversion(inode);
1052}
1053EXPORT_SYMBOL_GPL(shmem_truncate_range);
1054
1055static int shmem_getattr(struct mnt_idmap *idmap,
1056 const struct path *path, struct kstat *stat,
1057 u32 request_mask, unsigned int query_flags)
1058{
1059 struct inode *inode = path->dentry->d_inode;
1060 struct shmem_inode_info *info = SHMEM_I(inode);
1061
1108}
1109
/*
 * (New-revision copy.) Remove [lstart, lend] via shmem_undo_range(),
 * refresh ctime/mtime and bump i_version; unchanged between revisions.
 */
1110void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1111{
1112 shmem_undo_range(inode, lstart, lend, false);
1113 inode->i_ctime = inode->i_mtime = current_time(inode);
1114 inode_inc_iversion(inode);
1115}
1116EXPORT_SYMBOL_GPL(shmem_truncate_range);
1117
1118static int shmem_getattr(struct mnt_idmap *idmap,
1119 const struct path *path, struct kstat *stat,
1120 u32 request_mask, unsigned int query_flags)
1121{
1122 struct inode *inode = path->dentry->d_inode;
1123 struct shmem_inode_info *info = SHMEM_I(inode);
1124
1062 if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
1063 spin_lock_irq(&info->lock);
1064 shmem_recalc_inode(inode);
1065 spin_unlock_irq(&info->lock);
1066 }
1125 if (info->alloced - info->swapped != inode->i_mapping->nrpages)
1126 shmem_recalc_inode(inode, 0, 0);
1127
1067 if (info->fsflags & FS_APPEND_FL)
1068 stat->attributes |= STATX_ATTR_APPEND;
1069 if (info->fsflags & FS_IMMUTABLE_FL)
1070 stat->attributes |= STATX_ATTR_IMMUTABLE;
1071 if (info->fsflags & FS_NODUMP_FL)
1072 stat->attributes |= STATX_ATTR_NODUMP;
1073 stat->attributes_mask |= (STATX_ATTR_APPEND |
1074 STATX_ATTR_IMMUTABLE |

--- 60 unchanged lines hidden (view full) ---

1135 newsize, (loff_t)-1);
1136 /* unmap again to remove racily COWed private pages */
1137 if (oldsize > holebegin)
1138 unmap_mapping_range(inode->i_mapping,
1139 holebegin, 0, 1);
1140 }
1141 }
1142
1128 if (info->fsflags & FS_APPEND_FL)
1129 stat->attributes |= STATX_ATTR_APPEND;
1130 if (info->fsflags & FS_IMMUTABLE_FL)
1131 stat->attributes |= STATX_ATTR_IMMUTABLE;
1132 if (info->fsflags & FS_NODUMP_FL)
1133 stat->attributes |= STATX_ATTR_NODUMP;
1134 stat->attributes_mask |= (STATX_ATTR_APPEND |
1135 STATX_ATTR_IMMUTABLE |

--- 60 unchanged lines hidden (view full) ---

1196 newsize, (loff_t)-1);
1197 /* unmap again to remove racily COWed private pages */
1198 if (oldsize > holebegin)
1199 unmap_mapping_range(inode->i_mapping,
1200 holebegin, 0, 1);
1201 }
1202 }
1203
1204 if (is_quota_modification(idmap, inode, attr)) {
1205 error = dquot_initialize(inode);
1206 if (error)
1207 return error;
1208 }
1209
1210 /* Transfer quota accounting */
1211 if (i_uid_needs_update(idmap, attr, inode) ||
1212 i_gid_needs_update(idmap, attr, inode)) {
1213 error = dquot_transfer(idmap, inode, attr);
1214
1215 if (error)
1216 return error;
1217 }
1218
1143 setattr_copy(idmap, inode, attr);
1144 if (attr->ia_valid & ATTR_MODE)
1145 error = posix_acl_chmod(idmap, dentry, inode->i_mode);
1146 if (!error && update_ctime) {
1147 inode->i_ctime = current_time(inode);
1148 if (update_mtime)
1149 inode->i_mtime = inode->i_ctime;
1150 inode_inc_iversion(inode);
1151 }
1152 return error;
1153}
1154
1155static void shmem_evict_inode(struct inode *inode)
1156{
1157 struct shmem_inode_info *info = SHMEM_I(inode);
1158 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1219 setattr_copy(idmap, inode, attr);
1220 if (attr->ia_valid & ATTR_MODE)
1221 error = posix_acl_chmod(idmap, dentry, inode->i_mode);
1222 if (!error && update_ctime) {
1223 inode->i_ctime = current_time(inode);
1224 if (update_mtime)
1225 inode->i_mtime = inode->i_ctime;
1226 inode_inc_iversion(inode);
1227 }
1228 return error;
1229}
1230
1231static void shmem_evict_inode(struct inode *inode)
1232{
1233 struct shmem_inode_info *info = SHMEM_I(inode);
1234 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1235 size_t freed = 0;
1159
1160 if (shmem_mapping(inode->i_mapping)) {
1161 shmem_unacct_size(info->flags, inode->i_size);
1162 inode->i_size = 0;
1163 mapping_set_exiting(inode->i_mapping);
1164 shmem_truncate_range(inode, 0, (loff_t)-1);
1165 if (!list_empty(&info->shrinklist)) {
1166 spin_lock(&sbinfo->shrinklist_lock);

--- 10 unchanged lines hidden (view full) ---

1177 mutex_lock(&shmem_swaplist_mutex);
1178 /* ...but beware of the race if we peeked too early */
1179 if (!atomic_read(&info->stop_eviction))
1180 list_del_init(&info->swaplist);
1181 mutex_unlock(&shmem_swaplist_mutex);
1182 }
1183 }
1184
1236
1237 if (shmem_mapping(inode->i_mapping)) {
1238 shmem_unacct_size(info->flags, inode->i_size);
1239 inode->i_size = 0;
1240 mapping_set_exiting(inode->i_mapping);
1241 shmem_truncate_range(inode, 0, (loff_t)-1);
1242 if (!list_empty(&info->shrinklist)) {
1243 spin_lock(&sbinfo->shrinklist_lock);

--- 10 unchanged lines hidden (view full) ---

1254 mutex_lock(&shmem_swaplist_mutex);
1255 /* ...but beware of the race if we peeked too early */
1256 if (!atomic_read(&info->stop_eviction))
1257 list_del_init(&info->swaplist);
1258 mutex_unlock(&shmem_swaplist_mutex);
1259 }
1260 }
1261
1185 simple_xattrs_free(&info->xattrs);
1262 simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? &freed : NULL);
1263 shmem_free_inode(inode->i_sb, freed);
1186 WARN_ON(inode->i_blocks);
1264 WARN_ON(inode->i_blocks);
1187 shmem_free_inode(inode->i_sb);
1188 clear_inode(inode);
1265 clear_inode(inode);
1266#ifdef CONFIG_TMPFS_QUOTA
1267 dquot_free_inode(inode);
1268 dquot_drop(inode);
1269#endif
1189}
1190
1191static int shmem_find_swap_entries(struct address_space *mapping,
1192 pgoff_t start, struct folio_batch *fbatch,
1193 pgoff_t *indices, unsigned int type)
1194{
1195 XA_STATE(xas, &mapping->i_pages, start);
1196 struct folio *folio;

--- 227 unchanged lines hidden (view full) ---

1424 */
1425 mutex_lock(&shmem_swaplist_mutex);
1426 if (list_empty(&info->swaplist))
1427 list_add(&info->swaplist, &shmem_swaplist);
1428
1429 if (add_to_swap_cache(folio, swap,
1430 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
1431 NULL) == 0) {
1270}
1271
1272static int shmem_find_swap_entries(struct address_space *mapping,
1273 pgoff_t start, struct folio_batch *fbatch,
1274 pgoff_t *indices, unsigned int type)
1275{
1276 XA_STATE(xas, &mapping->i_pages, start);
1277 struct folio *folio;

--- 227 unchanged lines hidden (view full) ---

1505 */
1506 mutex_lock(&shmem_swaplist_mutex);
1507 if (list_empty(&info->swaplist))
1508 list_add(&info->swaplist, &shmem_swaplist);
1509
1510 if (add_to_swap_cache(folio, swap,
1511 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
1512 NULL) == 0) {
1432 spin_lock_irq(&info->lock);
1433 shmem_recalc_inode(inode);
1434 info->swapped++;
1435 spin_unlock_irq(&info->lock);
1436
1513 shmem_recalc_inode(inode, 0, 1);
1437 swap_shmem_alloc(swap);
1438 shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
1439
1440 mutex_unlock(&shmem_swaplist_mutex);
1441 BUG_ON(folio_mapped(folio));
1442 swap_writepage(&folio->page, wbc);
1443 return 0;
1444 }

--- 138 unchanged lines hidden (view full) ---

1583}
1584
1585static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
1586 pgoff_t index, bool huge)
1587{
1588 struct shmem_inode_info *info = SHMEM_I(inode);
1589 struct folio *folio;
1590 int nr;
1514 swap_shmem_alloc(swap);
1515 shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
1516
1517 mutex_unlock(&shmem_swaplist_mutex);
1518 BUG_ON(folio_mapped(folio));
1519 swap_writepage(&folio->page, wbc);
1520 return 0;
1521 }

--- 138 unchanged lines hidden (view full) ---

1660}
1661
1662static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
1663 pgoff_t index, bool huge)
1664{
1665 struct shmem_inode_info *info = SHMEM_I(inode);
1666 struct folio *folio;
1667 int nr;
1591 int err = -ENOSPC;
1668 int err;
1592
1593 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1594 huge = false;
1595 nr = huge ? HPAGE_PMD_NR : 1;
1596
1669
1670 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1671 huge = false;
1672 nr = huge ? HPAGE_PMD_NR : 1;
1673
1597 if (!shmem_inode_acct_block(inode, nr))
1674 err = shmem_inode_acct_block(inode, nr);
1675 if (err)
1598 goto failed;
1599
1600 if (huge)
1601 folio = shmem_alloc_hugefolio(gfp, info, index);
1602 else
1603 folio = shmem_alloc_folio(gfp, info, index);
1604 if (folio) {
1605 __folio_set_locked(folio);

--- 92 unchanged lines hidden (view full) ---

1698 folio_put_refs(old, 2);
1699 return error;
1700}
1701
1702static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
1703 struct folio *folio, swp_entry_t swap)
1704{
1705 struct address_space *mapping = inode->i_mapping;
1676 goto failed;
1677
1678 if (huge)
1679 folio = shmem_alloc_hugefolio(gfp, info, index);
1680 else
1681 folio = shmem_alloc_folio(gfp, info, index);
1682 if (folio) {
1683 __folio_set_locked(folio);

--- 92 unchanged lines hidden (view full) ---

1776 folio_put_refs(old, 2);
1777 return error;
1778}
1779
1780static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
1781 struct folio *folio, swp_entry_t swap)
1782{
1783 struct address_space *mapping = inode->i_mapping;
1706 struct shmem_inode_info *info = SHMEM_I(inode);
1707 swp_entry_t swapin_error;
1708 void *old;
1709
1710 swapin_error = make_swapin_error_entry();
1711 old = xa_cmpxchg_irq(&mapping->i_pages, index,
1712 swp_to_radix_entry(swap),
1713 swp_to_radix_entry(swapin_error), 0);
1714 if (old != swp_to_radix_entry(swap))
1715 return;
1716
1717 folio_wait_writeback(folio);
1718 delete_from_swap_cache(folio);
1784 swp_entry_t swapin_error;
1785 void *old;
1786
1787 swapin_error = make_swapin_error_entry();
1788 old = xa_cmpxchg_irq(&mapping->i_pages, index,
1789 swp_to_radix_entry(swap),
1790 swp_to_radix_entry(swapin_error), 0);
1791 if (old != swp_to_radix_entry(swap))
1792 return;
1793
1794 folio_wait_writeback(folio);
1795 delete_from_swap_cache(folio);
1719 spin_lock_irq(&info->lock);
1720 /*
1796 /*
1721 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks won't
1722 * be 0 when inode is released and thus trigger WARN_ON(inode->i_blocks) in
1723 * shmem_evict_inode.
1797 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
1798 * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
1799 * in shmem_evict_inode().
1724 */
1800 */
1725 info->alloced--;
1726 info->swapped--;
1727 shmem_recalc_inode(inode);
1728 spin_unlock_irq(&info->lock);
1801 shmem_recalc_inode(inode, -1, -1);
1729 swap_free(swap);
1730}
1731
1732/*
1733 * Swap in the folio pointed to by *foliop.
1734 * Caller has to make sure that *foliop contains a valid swapped folio.
1735 * Returns 0 and the folio in foliop if success. On failure, returns the
1736 * error code and NULL in *foliop.

--- 70 unchanged lines hidden (view full) ---

1807 }
1808
1809 error = shmem_add_to_page_cache(folio, mapping, index,
1810 swp_to_radix_entry(swap), gfp,
1811 charge_mm);
1812 if (error)
1813 goto failed;
1814
1802 swap_free(swap);
1803}
1804
1805/*
1806 * Swap in the folio pointed to by *foliop.
1807 * Caller has to make sure that *foliop contains a valid swapped folio.
1808 * Returns 0 and the folio in foliop if success. On failure, returns the
1809 * error code and NULL in *foliop.

--- 70 unchanged lines hidden (view full) ---

1880 }
1881
1882 error = shmem_add_to_page_cache(folio, mapping, index,
1883 swp_to_radix_entry(swap), gfp,
1884 charge_mm);
1885 if (error)
1886 goto failed;
1887
1815 spin_lock_irq(&info->lock);
1816 info->swapped--;
1817 shmem_recalc_inode(inode);
1818 spin_unlock_irq(&info->lock);
1888 shmem_recalc_inode(inode, 0, -1);
1819
1820 if (sgp == SGP_WRITE)
1821 folio_mark_accessed(folio);
1822
1823 delete_from_swap_cache(folio);
1824 folio_mark_dirty(folio);
1825 swap_free(swap);
1826 put_swap_device(si);

--- 148 unchanged lines hidden (view full) ---

1975 if (sgp == SGP_WRITE)
1976 __folio_set_referenced(folio);
1977
1978 error = shmem_add_to_page_cache(folio, mapping, hindex,
1979 NULL, gfp & GFP_RECLAIM_MASK,
1980 charge_mm);
1981 if (error)
1982 goto unacct;
1889
1890 if (sgp == SGP_WRITE)
1891 folio_mark_accessed(folio);
1892
1893 delete_from_swap_cache(folio);
1894 folio_mark_dirty(folio);
1895 swap_free(swap);
1896 put_swap_device(si);

--- 148 unchanged lines hidden (view full) ---

2045 if (sgp == SGP_WRITE)
2046 __folio_set_referenced(folio);
2047
2048 error = shmem_add_to_page_cache(folio, mapping, hindex,
2049 NULL, gfp & GFP_RECLAIM_MASK,
2050 charge_mm);
2051 if (error)
2052 goto unacct;
1983 folio_add_lru(folio);
1984
2053
1985 spin_lock_irq(&info->lock);
1986 info->alloced += folio_nr_pages(folio);
1987 inode->i_blocks += (blkcnt_t)BLOCKS_PER_PAGE << folio_order(folio);
1988 shmem_recalc_inode(inode);
1989 spin_unlock_irq(&info->lock);
2054 folio_add_lru(folio);
2055 shmem_recalc_inode(inode, folio_nr_pages(folio), 0);
1990 alloced = true;
1991
1992 if (folio_test_pmd_mappable(folio) &&
1993 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1994 folio_next_index(folio) - 1) {
1995 /*
1996 * Part of the large folio is beyond i_size: subject
1997 * to shrink under memory pressure.

--- 32 unchanged lines hidden (view full) ---

2030 }
2031
2032 /* Perhaps the file has been truncated since we checked */
2033 if (sgp <= SGP_CACHE &&
2034 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
2035 if (alloced) {
2036 folio_clear_dirty(folio);
2037 filemap_remove_folio(folio);
2056 alloced = true;
2057
2058 if (folio_test_pmd_mappable(folio) &&
2059 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
2060 folio_next_index(folio) - 1) {
2061 /*
2062 * Part of the large folio is beyond i_size: subject
2063 * to shrink under memory pressure.

--- 32 unchanged lines hidden (view full) ---

2096 }
2097
2098 /* Perhaps the file has been truncated since we checked */
2099 if (sgp <= SGP_CACHE &&
2100 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
2101 if (alloced) {
2102 folio_clear_dirty(folio);
2103 filemap_remove_folio(folio);
2038 spin_lock_irq(&info->lock);
2039 shmem_recalc_inode(inode);
2040 spin_unlock_irq(&info->lock);
2104 shmem_recalc_inode(inode, 0, 0);
2041 }
2042 error = -EINVAL;
2043 goto unlock;
2044 }
2045out:
2046 *foliop = folio;
2047 return 0;
2048

--- 9 unchanged lines hidden (view full) ---

2058 goto alloc_nohuge;
2059 }
2060unlock:
2061 if (folio) {
2062 folio_unlock(folio);
2063 folio_put(folio);
2064 }
2065 if (error == -ENOSPC && !once++) {
2105 }
2106 error = -EINVAL;
2107 goto unlock;
2108 }
2109out:
2110 *foliop = folio;
2111 return 0;
2112

--- 9 unchanged lines hidden (view full) ---

2122 goto alloc_nohuge;
2123 }
2124unlock:
2125 if (folio) {
2126 folio_unlock(folio);
2127 folio_put(folio);
2128 }
2129 if (error == -ENOSPC && !once++) {
2066 spin_lock_irq(&info->lock);
2067 shmem_recalc_inode(inode);
2068 spin_unlock_irq(&info->lock);
2130 shmem_recalc_inode(inode, 0, 0);
2069 goto repeat;
2070 }
2071 if (error == -EEXIST)
2072 goto repeat;
2073 return error;
2074}
2075
2076int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,

--- 244 unchanged lines hidden (view full) ---

2321 /* This is anonymous shared memory if it is unlinked at the time of mmap */
2322 if (inode->i_nlink)
2323 vma->vm_ops = &shmem_vm_ops;
2324 else
2325 vma->vm_ops = &shmem_anon_vm_ops;
2326 return 0;
2327}
2328
2131 goto repeat;
2132 }
2133 if (error == -EEXIST)
2134 goto repeat;
2135 return error;
2136}
2137
2138int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,

--- 244 unchanged lines hidden (view full) ---

2383 /* This is anonymous shared memory if it is unlinked at the time of mmap */
2384 if (inode->i_nlink)
2385 vma->vm_ops = &shmem_vm_ops;
2386 else
2387 vma->vm_ops = &shmem_anon_vm_ops;
2388 return 0;
2389}
2390
2391static int shmem_file_open(struct inode *inode, struct file *file)
2392{
2393 file->f_mode |= FMODE_CAN_ODIRECT;
2394 return generic_file_open(inode, file);
2395}
2396
2329#ifdef CONFIG_TMPFS_XATTR
2330static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2331
2332/*
2333 * chattr's fsflags are unrelated to extended attributes,
2334 * but tmpfs has chosen to enable them under the same config option.
2335 */
2336static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)

--- 13 unchanged lines hidden (view full) ---

2350}
2351#else
2352static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2353{
2354}
2355#define shmem_initxattrs NULL
2356#endif
2357
2397#ifdef CONFIG_TMPFS_XATTR
2398static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2399
2400/*
2401 * chattr's fsflags are unrelated to extended attributes,
2402 * but tmpfs has chosen to enable them under the same config option.
2403 */
2404static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)

--- 13 unchanged lines hidden (view full) ---

2418}
2419#else
2420static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2421{
2422}
2423#define shmem_initxattrs NULL
2424#endif
2425
2358static struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block *sb,
2359 struct inode *dir, umode_t mode, dev_t dev,
2360 unsigned long flags)
2426static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode)
2361{
2427{
2428 return &SHMEM_I(inode)->dir_offsets;
2429}
2430
2431static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
2432 struct super_block *sb,
2433 struct inode *dir, umode_t mode,
2434 dev_t dev, unsigned long flags)
2435{
2362 struct inode *inode;
2363 struct shmem_inode_info *info;
2364 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2365 ino_t ino;
2436 struct inode *inode;
2437 struct shmem_inode_info *info;
2438 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2439 ino_t ino;
2440 int err;
2366
2441
2367 if (shmem_reserve_inode(sb, &ino))
2368 return NULL;
2442 err = shmem_reserve_inode(sb, &ino);
2443 if (err)
2444 return ERR_PTR(err);
2369
2445
2446
2370 inode = new_inode(sb);
2447 inode = new_inode(sb);
2371 if (inode) {
2372 inode->i_ino = ino;
2373 inode_init_owner(idmap, inode, dir, mode);
2374 inode->i_blocks = 0;
2375 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
2376 inode->i_generation = get_random_u32();
2377 info = SHMEM_I(inode);
2378 memset(info, 0, (char *)inode - (char *)info);
2379 spin_lock_init(&info->lock);
2380 atomic_set(&info->stop_eviction, 0);
2381 info->seals = F_SEAL_SEAL;
2382 info->flags = flags & VM_NORESERVE;
2383 info->i_crtime = inode->i_mtime;
2384 info->fsflags = (dir == NULL) ? 0 :
2385 SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
2386 if (info->fsflags)
2387 shmem_set_inode_flags(inode, info->fsflags);
2388 INIT_LIST_HEAD(&info->shrinklist);
2389 INIT_LIST_HEAD(&info->swaplist);
2390 if (sbinfo->noswap)
2391 mapping_set_unevictable(inode->i_mapping);
2392 simple_xattrs_init(&info->xattrs);
2393 cache_no_acl(inode);
2394 mapping_set_large_folios(inode->i_mapping);
2395
2448
2396 switch (mode & S_IFMT) {
2397 default:
2398 inode->i_op = &shmem_special_inode_operations;
2399 init_special_inode(inode, mode, dev);
2400 break;
2401 case S_IFREG:
2402 inode->i_mapping->a_ops = &shmem_aops;
2403 inode->i_op = &shmem_inode_operations;
2404 inode->i_fop = &shmem_file_operations;
2405 mpol_shared_policy_init(&info->policy,
2406 shmem_get_sbmpol(sbinfo));
2407 break;
2408 case S_IFDIR:
2409 inc_nlink(inode);
2410 /* Some things misbehave if size == 0 on a directory */
2411 inode->i_size = 2 * BOGO_DIRENT_SIZE;
2412 inode->i_op = &shmem_dir_inode_operations;
2413 inode->i_fop = &simple_dir_operations;
2414 break;
2415 case S_IFLNK:
2416 /*
2417 * Must not load anything in the rbtree,
2418 * mpol_free_shared_policy will not be called.
2419 */
2420 mpol_shared_policy_init(&info->policy, NULL);
2421 break;
2422 }
2449 if (!inode) {
2450 shmem_free_inode(sb, 0);
2451 return ERR_PTR(-ENOSPC);
2452 }
2423
2453
2424 lockdep_annotate_inode_mutex_key(inode);
2425 } else
2426 shmem_free_inode(sb);
2454 inode->i_ino = ino;
2455 inode_init_owner(idmap, inode, dir, mode);
2456 inode->i_blocks = 0;
2457 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
2458 inode->i_generation = get_random_u32();
2459 info = SHMEM_I(inode);
2460 memset(info, 0, (char *)inode - (char *)info);
2461 spin_lock_init(&info->lock);
2462 atomic_set(&info->stop_eviction, 0);
2463 info->seals = F_SEAL_SEAL;
2464 info->flags = flags & VM_NORESERVE;
2465 info->i_crtime = inode->i_mtime;
2466 info->fsflags = (dir == NULL) ? 0 :
2467 SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
2468 if (info->fsflags)
2469 shmem_set_inode_flags(inode, info->fsflags);
2470 INIT_LIST_HEAD(&info->shrinklist);
2471 INIT_LIST_HEAD(&info->swaplist);
2472 INIT_LIST_HEAD(&info->swaplist);
2473 if (sbinfo->noswap)
2474 mapping_set_unevictable(inode->i_mapping);
2475 simple_xattrs_init(&info->xattrs);
2476 cache_no_acl(inode);
2477 mapping_set_large_folios(inode->i_mapping);
2478
2479 switch (mode & S_IFMT) {
2480 default:
2481 inode->i_op = &shmem_special_inode_operations;
2482 init_special_inode(inode, mode, dev);
2483 break;
2484 case S_IFREG:
2485 inode->i_mapping->a_ops = &shmem_aops;
2486 inode->i_op = &shmem_inode_operations;
2487 inode->i_fop = &shmem_file_operations;
2488 mpol_shared_policy_init(&info->policy,
2489 shmem_get_sbmpol(sbinfo));
2490 break;
2491 case S_IFDIR:
2492 inc_nlink(inode);
2493 /* Some things misbehave if size == 0 on a directory */
2494 inode->i_size = 2 * BOGO_DIRENT_SIZE;
2495 inode->i_op = &shmem_dir_inode_operations;
2496 inode->i_fop = &simple_offset_dir_operations;
2497 simple_offset_init(shmem_get_offset_ctx(inode));
2498 break;
2499 case S_IFLNK:
2500 /*
2501 * Must not load anything in the rbtree,
2502 * mpol_free_shared_policy will not be called.
2503 */
2504 mpol_shared_policy_init(&info->policy, NULL);
2505 break;
2506 }
2507
2508 lockdep_annotate_inode_mutex_key(inode);
2427 return inode;
2428}
2429
2509 return inode;
2510}
2511
2512#ifdef CONFIG_TMPFS_QUOTA
2513static struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2514 struct super_block *sb, struct inode *dir,
2515 umode_t mode, dev_t dev, unsigned long flags)
2516{
2517 int err;
2518 struct inode *inode;
2519
2520 inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
2521 if (IS_ERR(inode))
2522 return inode;
2523
2524 err = dquot_initialize(inode);
2525 if (err)
2526 goto errout;
2527
2528 err = dquot_alloc_inode(inode);
2529 if (err) {
2530 dquot_drop(inode);
2531 goto errout;
2532 }
2533 return inode;
2534
2535errout:
2536 inode->i_flags |= S_NOQUOTA;
2537 iput(inode);
2538 return ERR_PTR(err);
2539}
2540#else
2541static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2542 struct super_block *sb, struct inode *dir,
2543 umode_t mode, dev_t dev, unsigned long flags)
2544{
2545 return __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
2546}
2547#endif /* CONFIG_TMPFS_QUOTA */
2548
2430#ifdef CONFIG_USERFAULTFD
2431int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
2432 struct vm_area_struct *dst_vma,
2433 unsigned long dst_addr,
2434 unsigned long src_addr,
2435 uffd_flags_t flags,
2436 struct folio **foliop)
2437{
2438 struct inode *inode = file_inode(dst_vma->vm_file);
2439 struct shmem_inode_info *info = SHMEM_I(inode);
2440 struct address_space *mapping = inode->i_mapping;
2441 gfp_t gfp = mapping_gfp_mask(mapping);
2442 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2443 void *page_kaddr;
2444 struct folio *folio;
2445 int ret;
2446 pgoff_t max_off;
2447
2549#ifdef CONFIG_USERFAULTFD
2550int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
2551 struct vm_area_struct *dst_vma,
2552 unsigned long dst_addr,
2553 unsigned long src_addr,
2554 uffd_flags_t flags,
2555 struct folio **foliop)
2556{
2557 struct inode *inode = file_inode(dst_vma->vm_file);
2558 struct shmem_inode_info *info = SHMEM_I(inode);
2559 struct address_space *mapping = inode->i_mapping;
2560 gfp_t gfp = mapping_gfp_mask(mapping);
2561 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2562 void *page_kaddr;
2563 struct folio *folio;
2564 int ret;
2565 pgoff_t max_off;
2566
2448 if (!shmem_inode_acct_block(inode, 1)) {
2567 if (shmem_inode_acct_block(inode, 1)) {
2449 /*
2450 * We may have got a page, returned -ENOENT triggering a retry,
2451 * and now we find ourselves with -ENOMEM. Release the page, to
2452 * avoid a BUG_ON in our caller.
2453 */
2454 if (unlikely(*foliop)) {
2455 folio_put(*foliop);
2456 *foliop = NULL;

--- 65 unchanged lines hidden (view full) ---

2522 if (ret)
2523 goto out_release;
2524
2525 ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
2526 &folio->page, true, flags);
2527 if (ret)
2528 goto out_delete_from_cache;
2529
2568 /*
2569 * We may have got a page, returned -ENOENT triggering a retry,
2570 * and now we find ourselves with -ENOMEM. Release the page, to
2571 * avoid a BUG_ON in our caller.
2572 */
2573 if (unlikely(*foliop)) {
2574 folio_put(*foliop);
2575 *foliop = NULL;

--- 65 unchanged lines hidden (view full) ---

2641 if (ret)
2642 goto out_release;
2643
2644 ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
2645 &folio->page, true, flags);
2646 if (ret)
2647 goto out_delete_from_cache;
2648
2530 spin_lock_irq(&info->lock);
2531 info->alloced++;
2532 inode->i_blocks += BLOCKS_PER_PAGE;
2533 shmem_recalc_inode(inode);
2534 spin_unlock_irq(&info->lock);
2535
2649 shmem_recalc_inode(inode, 1, 0);
2536 folio_unlock(folio);
2537 return 0;
2538out_delete_from_cache:
2539 filemap_remove_folio(folio);
2540out_release:
2541 folio_unlock(folio);
2542 folio_put(folio);
2543out_unacct_blocks:

--- 182 unchanged lines hidden (view full) ---

2726 cond_resched();
2727 }
2728
2729 *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2730 file_accessed(file);
2731 return retval ? retval : error;
2732}
2733
2650 folio_unlock(folio);
2651 return 0;
2652out_delete_from_cache:
2653 filemap_remove_folio(folio);
2654out_release:
2655 folio_unlock(folio);
2656 folio_put(folio);
2657out_unacct_blocks:

--- 182 unchanged lines hidden (view full) ---

2840 cond_resched();
2841 }
2842
2843 *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2844 file_accessed(file);
2845 return retval ? retval : error;
2846}
2847
2848static ssize_t shmem_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2849{
2850 struct file *file = iocb->ki_filp;
2851 struct inode *inode = file->f_mapping->host;
2852 ssize_t ret;
2853
2854 inode_lock(inode);
2855 ret = generic_write_checks(iocb, from);
2856 if (ret <= 0)
2857 goto unlock;
2858 ret = file_remove_privs(file);
2859 if (ret)
2860 goto unlock;
2861 ret = file_update_time(file);
2862 if (ret)
2863 goto unlock;
2864 ret = generic_perform_write(iocb, from);
2865unlock:
2866 inode_unlock(inode);
2867 return ret;
2868}
2869
2734static bool zero_pipe_buf_get(struct pipe_inode_info *pipe,
2735 struct pipe_buffer *buf)
2736{
2737 return true;
2738}
2739
2740static void zero_pipe_buf_release(struct pipe_inode_info *pipe,
2741 struct pipe_buffer *buf)

--- 308 unchanged lines hidden (view full) ---

3050 if (sbinfo->max_blocks) {
3051 buf->f_blocks = sbinfo->max_blocks;
3052 buf->f_bavail =
3053 buf->f_bfree = sbinfo->max_blocks -
3054 percpu_counter_sum(&sbinfo->used_blocks);
3055 }
3056 if (sbinfo->max_inodes) {
3057 buf->f_files = sbinfo->max_inodes;
2870static bool zero_pipe_buf_get(struct pipe_inode_info *pipe,
2871 struct pipe_buffer *buf)
2872{
2873 return true;
2874}
2875
2876static void zero_pipe_buf_release(struct pipe_inode_info *pipe,
2877 struct pipe_buffer *buf)

--- 308 unchanged lines hidden (view full) ---

3186 if (sbinfo->max_blocks) {
3187 buf->f_blocks = sbinfo->max_blocks;
3188 buf->f_bavail =
3189 buf->f_bfree = sbinfo->max_blocks -
3190 percpu_counter_sum(&sbinfo->used_blocks);
3191 }
3192 if (sbinfo->max_inodes) {
3193 buf->f_files = sbinfo->max_inodes;
3058 buf->f_ffree = sbinfo->free_inodes;
3194 buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE;
3059 }
3060 /* else leave those fields 0 like simple_statfs */
3061
3062 buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
3063
3064 return 0;
3065}
3066
3067/*
3068 * File creation. Allocate an inode, and we're done..
3069 */
3070static int
3071shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
3072 struct dentry *dentry, umode_t mode, dev_t dev)
3073{
3074 struct inode *inode;
3195 }
3196 /* else leave those fields 0 like simple_statfs */
3197
3198 buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
3199
3200 return 0;
3201}
3202
3203/*
3204 * File creation. Allocate an inode, and we're done..
3205 */
3206static int
3207shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
3208 struct dentry *dentry, umode_t mode, dev_t dev)
3209{
3210 struct inode *inode;
3075 int error = -ENOSPC;
3211 int error;
3076
3077 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE);
3212
3213 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE);
3078 if (inode) {
3079 error = simple_acl_create(dir, inode);
3080 if (error)
3081 goto out_iput;
3082 error = security_inode_init_security(inode, dir,
3083 &dentry->d_name,
3084 shmem_initxattrs, NULL);
3085 if (error && error != -EOPNOTSUPP)
3086 goto out_iput;
3087
3214
3088 error = 0;
3089 dir->i_size += BOGO_DIRENT_SIZE;
3090 dir->i_ctime = dir->i_mtime = current_time(dir);
3091 inode_inc_iversion(dir);
3092 d_instantiate(dentry, inode);
3093 dget(dentry); /* Extra count - pin the dentry in core */
3094 }
3215 if (IS_ERR(inode))
3216 return PTR_ERR(inode);
3217
3218 error = simple_acl_create(dir, inode);
3219 if (error)
3220 goto out_iput;
3221 error = security_inode_init_security(inode, dir,
3222 &dentry->d_name,
3223 shmem_initxattrs, NULL);
3224 if (error && error != -EOPNOTSUPP)
3225 goto out_iput;
3226
3227 error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3228 if (error)
3229 goto out_iput;
3230
3231 dir->i_size += BOGO_DIRENT_SIZE;
3232 dir->i_ctime = dir->i_mtime = current_time(dir);
3233 inode_inc_iversion(dir);
3234 d_instantiate(dentry, inode);
3235 dget(dentry); /* Extra count - pin the dentry in core */
3095 return error;
3236 return error;
3237
3096out_iput:
3097 iput(inode);
3098 return error;
3099}
3100
3101static int
3102shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3103 struct file *file, umode_t mode)
3104{
3105 struct inode *inode;
3238out_iput:
3239 iput(inode);
3240 return error;
3241}
3242
3243static int
3244shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3245 struct file *file, umode_t mode)
3246{
3247 struct inode *inode;
3106 int error = -ENOSPC;
3248 int error;
3107
3108 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
3249
3250 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
3109 if (inode) {
3110 error = security_inode_init_security(inode, dir,
3111 NULL,
3112 shmem_initxattrs, NULL);
3113 if (error && error != -EOPNOTSUPP)
3114 goto out_iput;
3115 error = simple_acl_create(dir, inode);
3116 if (error)
3117 goto out_iput;
3118 d_tmpfile(file, inode);
3251
3252 if (IS_ERR(inode)) {
3253 error = PTR_ERR(inode);
3254 goto err_out;
3119 }
3255 }
3256
3257 error = security_inode_init_security(inode, dir,
3258 NULL,
3259 shmem_initxattrs, NULL);
3260 if (error && error != -EOPNOTSUPP)
3261 goto out_iput;
3262 error = simple_acl_create(dir, inode);
3263 if (error)
3264 goto out_iput;
3265 d_tmpfile(file, inode);
3266
3267err_out:
3120 return finish_open_simple(file, error);
3121out_iput:
3122 iput(inode);
3123 return error;
3124}
3125
3126static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
3127 struct dentry *dentry, umode_t mode)

--- 29 unchanged lines hidden (view full) ---

3157 * first link must skip that, to get the accounting right.
3158 */
3159 if (inode->i_nlink) {
3160 ret = shmem_reserve_inode(inode->i_sb, NULL);
3161 if (ret)
3162 goto out;
3163 }
3164
3268 return finish_open_simple(file, error);
3269out_iput:
3270 iput(inode);
3271 return error;
3272}
3273
3274static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
3275 struct dentry *dentry, umode_t mode)

--- 29 unchanged lines hidden (view full) ---

3305 * first link must skip that, to get the accounting right.
3306 */
3307 if (inode->i_nlink) {
3308 ret = shmem_reserve_inode(inode->i_sb, NULL);
3309 if (ret)
3310 goto out;
3311 }
3312
3313 ret = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3314 if (ret) {
3315 if (inode->i_nlink)
3316 shmem_free_inode(inode->i_sb, 0);
3317 goto out;
3318 }
3319
3165 dir->i_size += BOGO_DIRENT_SIZE;
3166 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
3167 inode_inc_iversion(dir);
3168 inc_nlink(inode);
3169 ihold(inode); /* New dentry reference */
3170 dget(dentry); /* Extra pinning count for the created dentry */
3171 d_instantiate(dentry, inode);
3172out:
3173 return ret;
3174}
3175
3176static int shmem_unlink(struct inode *dir, struct dentry *dentry)
3177{
3178 struct inode *inode = d_inode(dentry);
3179
3180 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
3320 dir->i_size += BOGO_DIRENT_SIZE;
3321 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
3322 inode_inc_iversion(dir);
3323 inc_nlink(inode);
3324 ihold(inode); /* New dentry reference */
3325 dget(dentry); /* Extra pinning count for the created dentry */
3326 d_instantiate(dentry, inode);
3327out:
3328 return ret;
3329}
3330
3331static int shmem_unlink(struct inode *dir, struct dentry *dentry)
3332{
3333 struct inode *inode = d_inode(dentry);
3334
3335 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
3181 shmem_free_inode(inode->i_sb);
3336 shmem_free_inode(inode->i_sb, 0);
3182
3337
3338 simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
3339
3183 dir->i_size -= BOGO_DIRENT_SIZE;
3184 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
3185 inode_inc_iversion(dir);
3186 drop_nlink(inode);
3187 dput(dentry); /* Undo the count from "create" - this does all the work */
3188 return 0;
3189}
3190

--- 42 unchanged lines hidden (view full) ---

3233 */
3234static int shmem_rename2(struct mnt_idmap *idmap,
3235 struct inode *old_dir, struct dentry *old_dentry,
3236 struct inode *new_dir, struct dentry *new_dentry,
3237 unsigned int flags)
3238{
3239 struct inode *inode = d_inode(old_dentry);
3240 int they_are_dirs = S_ISDIR(inode->i_mode);
3340 dir->i_size -= BOGO_DIRENT_SIZE;
3341 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
3342 inode_inc_iversion(dir);
3343 drop_nlink(inode);
3344 dput(dentry); /* Undo the count from "create" - this does all the work */
3345 return 0;
3346}
3347

--- 42 unchanged lines hidden (view full) ---

3390 */
3391static int shmem_rename2(struct mnt_idmap *idmap,
3392 struct inode *old_dir, struct dentry *old_dentry,
3393 struct inode *new_dir, struct dentry *new_dentry,
3394 unsigned int flags)
3395{
3396 struct inode *inode = d_inode(old_dentry);
3397 int they_are_dirs = S_ISDIR(inode->i_mode);
3398 int error;
3241
3242 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3243 return -EINVAL;
3244
3245 if (flags & RENAME_EXCHANGE)
3399
3400 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3401 return -EINVAL;
3402
3403 if (flags & RENAME_EXCHANGE)
3246 return simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
3404 return simple_offset_rename_exchange(old_dir, old_dentry,
3405 new_dir, new_dentry);
3247
3248 if (!simple_empty(new_dentry))
3249 return -ENOTEMPTY;
3250
3251 if (flags & RENAME_WHITEOUT) {
3406
3407 if (!simple_empty(new_dentry))
3408 return -ENOTEMPTY;
3409
3410 if (flags & RENAME_WHITEOUT) {
3252 int error;
3253
3254 error = shmem_whiteout(idmap, old_dir, old_dentry);
3255 if (error)
3256 return error;
3257 }
3258
3411 error = shmem_whiteout(idmap, old_dir, old_dentry);
3412 if (error)
3413 return error;
3414 }
3415
3416 simple_offset_remove(shmem_get_offset_ctx(old_dir), old_dentry);
3417 error = simple_offset_add(shmem_get_offset_ctx(new_dir), old_dentry);
3418 if (error)
3419 return error;
3420
3259 if (d_really_is_positive(new_dentry)) {
3260 (void) shmem_unlink(new_dir, new_dentry);
3261 if (they_are_dirs) {
3262 drop_nlink(d_inode(new_dentry));
3263 drop_nlink(old_dir);
3264 }
3265 } else if (they_are_dirs) {
3266 drop_nlink(old_dir);

--- 19 unchanged lines hidden (view full) ---

3286 struct folio *folio;
3287
3288 len = strlen(symname) + 1;
3289 if (len > PAGE_SIZE)
3290 return -ENAMETOOLONG;
3291
3292 inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
3293 VM_NORESERVE);
3421 if (d_really_is_positive(new_dentry)) {
3422 (void) shmem_unlink(new_dir, new_dentry);
3423 if (they_are_dirs) {
3424 drop_nlink(d_inode(new_dentry));
3425 drop_nlink(old_dir);
3426 }
3427 } else if (they_are_dirs) {
3428 drop_nlink(old_dir);

--- 19 unchanged lines hidden (view full) ---

3448 struct folio *folio;
3449
3450 len = strlen(symname) + 1;
3451 if (len > PAGE_SIZE)
3452 return -ENAMETOOLONG;
3453
3454 inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
3455 VM_NORESERVE);
3294 if (!inode)
3295 return -ENOSPC;
3296
3456
3457 if (IS_ERR(inode))
3458 return PTR_ERR(inode);
3459
3297 error = security_inode_init_security(inode, dir, &dentry->d_name,
3298 shmem_initxattrs, NULL);
3460 error = security_inode_init_security(inode, dir, &dentry->d_name,
3461 shmem_initxattrs, NULL);
3299 if (error && error != -EOPNOTSUPP) {
3300 iput(inode);
3301 return error;
3302 }
3462 if (error && error != -EOPNOTSUPP)
3463 goto out_iput;
3303
3464
3465 error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3466 if (error)
3467 goto out_iput;
3468
3304 inode->i_size = len-1;
3305 if (len <= SHORT_SYMLINK_LEN) {
3306 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3307 if (!inode->i_link) {
3469 inode->i_size = len-1;
3470 if (len <= SHORT_SYMLINK_LEN) {
3471 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3472 if (!inode->i_link) {
3308 iput(inode);
3309 return -ENOMEM;
3473 error = -ENOMEM;
3474 goto out_remove_offset;
3310 }
3311 inode->i_op = &shmem_short_symlink_operations;
3312 } else {
3313 inode_nohighmem(inode);
3314 error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
3475 }
3476 inode->i_op = &shmem_short_symlink_operations;
3477 } else {
3478 inode_nohighmem(inode);
3479 error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
3315 if (error) {
3316 iput(inode);
3317 return error;
3318 }
3480 if (error)
3481 goto out_remove_offset;
3319 inode->i_mapping->a_ops = &shmem_aops;
3320 inode->i_op = &shmem_symlink_inode_operations;
3321 memcpy(folio_address(folio), symname, len);
3322 folio_mark_uptodate(folio);
3323 folio_mark_dirty(folio);
3324 folio_unlock(folio);
3325 folio_put(folio);
3326 }
3327 dir->i_size += BOGO_DIRENT_SIZE;
3328 dir->i_ctime = dir->i_mtime = current_time(dir);
3329 inode_inc_iversion(dir);
3330 d_instantiate(dentry, inode);
3331 dget(dentry);
3332 return 0;
3482 inode->i_mapping->a_ops = &shmem_aops;
3483 inode->i_op = &shmem_symlink_inode_operations;
3484 memcpy(folio_address(folio), symname, len);
3485 folio_mark_uptodate(folio);
3486 folio_mark_dirty(folio);
3487 folio_unlock(folio);
3488 folio_put(folio);
3489 }
3490 dir->i_size += BOGO_DIRENT_SIZE;
3491 dir->i_ctime = dir->i_mtime = current_time(dir);
3492 inode_inc_iversion(dir);
3493 d_instantiate(dentry, inode);
3494 dget(dentry);
3495 return 0;
3496
3497out_remove_offset:
3498 simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
3499out_iput:
3500 iput(inode);
3501 return error;
3333}
3334
3335static void shmem_put_link(void *arg)
3336{
3337 folio_mark_accessed(arg);
3338 folio_put(arg);
3339}
3340

--- 71 unchanged lines hidden (view full) ---

3412/*
3413 * Callback for security_inode_init_security() for acquiring xattrs.
3414 */
3415static int shmem_initxattrs(struct inode *inode,
3416 const struct xattr *xattr_array,
3417 void *fs_info)
3418{
3419 struct shmem_inode_info *info = SHMEM_I(inode);
3502}
3503
3504static void shmem_put_link(void *arg)
3505{
3506 folio_mark_accessed(arg);
3507 folio_put(arg);
3508}
3509

--- 71 unchanged lines hidden (view full) ---

3581/*
3582 * Callback for security_inode_init_security() for acquiring xattrs.
3583 */
3584static int shmem_initxattrs(struct inode *inode,
3585 const struct xattr *xattr_array,
3586 void *fs_info)
3587{
3588 struct shmem_inode_info *info = SHMEM_I(inode);
3589 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3420 const struct xattr *xattr;
3421 struct simple_xattr *new_xattr;
3590 const struct xattr *xattr;
3591 struct simple_xattr *new_xattr;
3592 size_t ispace = 0;
3422 size_t len;
3423
3593 size_t len;
3594
3595 if (sbinfo->max_inodes) {
3596 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3597 ispace += simple_xattr_space(xattr->name,
3598 xattr->value_len + XATTR_SECURITY_PREFIX_LEN);
3599 }
3600 if (ispace) {
3601 raw_spin_lock(&sbinfo->stat_lock);
3602 if (sbinfo->free_ispace < ispace)
3603 ispace = 0;
3604 else
3605 sbinfo->free_ispace -= ispace;
3606 raw_spin_unlock(&sbinfo->stat_lock);
3607 if (!ispace)
3608 return -ENOSPC;
3609 }
3610 }
3611
3424 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3425 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3426 if (!new_xattr)
3612 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3613 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3614 if (!new_xattr)
3427 return -ENOMEM;
3615 break;
3428
3429 len = strlen(xattr->name) + 1;
3430 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3616
3617 len = strlen(xattr->name) + 1;
3618 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3431 GFP_KERNEL);
3619 GFP_KERNEL_ACCOUNT);
3432 if (!new_xattr->name) {
3433 kvfree(new_xattr);
3620 if (!new_xattr->name) {
3621 kvfree(new_xattr);
3434 return -ENOMEM;
3622 break;
3435 }
3436
3437 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3438 XATTR_SECURITY_PREFIX_LEN);
3439 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3440 xattr->name, len);
3441
3442 simple_xattr_add(&info->xattrs, new_xattr);
3443 }
3444
3623 }
3624
3625 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3626 XATTR_SECURITY_PREFIX_LEN);
3627 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3628 xattr->name, len);
3629
3630 simple_xattr_add(&info->xattrs, new_xattr);
3631 }
3632
3633 if (xattr->name != NULL) {
3634 if (ispace) {
3635 raw_spin_lock(&sbinfo->stat_lock);
3636 sbinfo->free_ispace += ispace;
3637 raw_spin_unlock(&sbinfo->stat_lock);
3638 }
3639 simple_xattrs_free(&info->xattrs, NULL);
3640 return -ENOMEM;
3641 }
3642
3445 return 0;
3446}
3447
3448static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3449 struct dentry *unused, struct inode *inode,
3450 const char *name, void *buffer, size_t size)
3451{
3452 struct shmem_inode_info *info = SHMEM_I(inode);

--- 4 unchanged lines hidden (view full) ---

3457
3458static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3459 struct mnt_idmap *idmap,
3460 struct dentry *unused, struct inode *inode,
3461 const char *name, const void *value,
3462 size_t size, int flags)
3463{
3464 struct shmem_inode_info *info = SHMEM_I(inode);
3643 return 0;
3644}
3645
3646static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3647 struct dentry *unused, struct inode *inode,
3648 const char *name, void *buffer, size_t size)
3649{
3650 struct shmem_inode_info *info = SHMEM_I(inode);

--- 4 unchanged lines hidden (view full) ---

3655
3656static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3657 struct mnt_idmap *idmap,
3658 struct dentry *unused, struct inode *inode,
3659 const char *name, const void *value,
3660 size_t size, int flags)
3661{
3662 struct shmem_inode_info *info = SHMEM_I(inode);
3465 int err;
3663 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3664 struct simple_xattr *old_xattr;
3665 size_t ispace = 0;
3466
3467 name = xattr_full_name(handler, name);
3666
3667 name = xattr_full_name(handler, name);
3468 err = simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3469 if (!err) {
3668 if (value && sbinfo->max_inodes) {
3669 ispace = simple_xattr_space(name, size);
3670 raw_spin_lock(&sbinfo->stat_lock);
3671 if (sbinfo->free_ispace < ispace)
3672 ispace = 0;
3673 else
3674 sbinfo->free_ispace -= ispace;
3675 raw_spin_unlock(&sbinfo->stat_lock);
3676 if (!ispace)
3677 return -ENOSPC;
3678 }
3679
3680 old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags);
3681 if (!IS_ERR(old_xattr)) {
3682 ispace = 0;
3683 if (old_xattr && sbinfo->max_inodes)
3684 ispace = simple_xattr_space(old_xattr->name,
3685 old_xattr->size);
3686 simple_xattr_free(old_xattr);
3687 old_xattr = NULL;
3470 inode->i_ctime = current_time(inode);
3471 inode_inc_iversion(inode);
3472 }
3688 inode->i_ctime = current_time(inode);
3689 inode_inc_iversion(inode);
3690 }
3473 return err;
3691 if (ispace) {
3692 raw_spin_lock(&sbinfo->stat_lock);
3693 sbinfo->free_ispace += ispace;
3694 raw_spin_unlock(&sbinfo->stat_lock);
3695 }
3696 return PTR_ERR(old_xattr);
3474}
3475
3476static const struct xattr_handler shmem_security_xattr_handler = {
3477 .prefix = XATTR_SECURITY_PREFIX,
3478 .get = shmem_xattr_handler_get,
3479 .set = shmem_xattr_handler_set,
3480};
3481
3482static const struct xattr_handler shmem_trusted_xattr_handler = {
3483 .prefix = XATTR_TRUSTED_PREFIX,
3484 .get = shmem_xattr_handler_get,
3485 .set = shmem_xattr_handler_set,
3486};
3487
3697}
3698
3699static const struct xattr_handler shmem_security_xattr_handler = {
3700 .prefix = XATTR_SECURITY_PREFIX,
3701 .get = shmem_xattr_handler_get,
3702 .set = shmem_xattr_handler_set,
3703};
3704
3705static const struct xattr_handler shmem_trusted_xattr_handler = {
3706 .prefix = XATTR_TRUSTED_PREFIX,
3707 .get = shmem_xattr_handler_get,
3708 .set = shmem_xattr_handler_set,
3709};
3710
3711static const struct xattr_handler shmem_user_xattr_handler = {
3712 .prefix = XATTR_USER_PREFIX,
3713 .get = shmem_xattr_handler_get,
3714 .set = shmem_xattr_handler_set,
3715};
3716
3488static const struct xattr_handler *shmem_xattr_handlers[] = {
3489 &shmem_security_xattr_handler,
3490 &shmem_trusted_xattr_handler,
3717static const struct xattr_handler *shmem_xattr_handlers[] = {
3718 &shmem_security_xattr_handler,
3719 &shmem_trusted_xattr_handler,
3720 &shmem_user_xattr_handler,
3491 NULL
3492};
3493
3494static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3495{
3496 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3497 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3498}
3499#endif /* CONFIG_TMPFS_XATTR */
3500
3501static const struct inode_operations shmem_short_symlink_operations = {
3502 .getattr = shmem_getattr,
3721 NULL
3722};
3723
3724static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3725{
3726 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3727 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3728}
3729#endif /* CONFIG_TMPFS_XATTR */
3730
3731static const struct inode_operations shmem_short_symlink_operations = {
3732 .getattr = shmem_getattr,
3733 .setattr = shmem_setattr,
3503 .get_link = simple_get_link,
3504#ifdef CONFIG_TMPFS_XATTR
3505 .listxattr = shmem_listxattr,
3506#endif
3507};
3508
3509static const struct inode_operations shmem_symlink_inode_operations = {
3510 .getattr = shmem_getattr,
3734 .get_link = simple_get_link,
3735#ifdef CONFIG_TMPFS_XATTR
3736 .listxattr = shmem_listxattr,
3737#endif
3738};
3739
3740static const struct inode_operations shmem_symlink_inode_operations = {
3741 .getattr = shmem_getattr,
3742 .setattr = shmem_setattr,
3511 .get_link = shmem_get_link,
3512#ifdef CONFIG_TMPFS_XATTR
3513 .listxattr = shmem_listxattr,
3514#endif
3515};
3516
3517static struct dentry *shmem_get_parent(struct dentry *child)
3518{

--- 83 unchanged lines hidden (view full) ---

3602 Opt_mpol,
3603 Opt_nr_blocks,
3604 Opt_nr_inodes,
3605 Opt_size,
3606 Opt_uid,
3607 Opt_inode32,
3608 Opt_inode64,
3609 Opt_noswap,
3743 .get_link = shmem_get_link,
3744#ifdef CONFIG_TMPFS_XATTR
3745 .listxattr = shmem_listxattr,
3746#endif
3747};
3748
3749static struct dentry *shmem_get_parent(struct dentry *child)
3750{

--- 83 unchanged lines hidden (view full) ---

3834 Opt_mpol,
3835 Opt_nr_blocks,
3836 Opt_nr_inodes,
3837 Opt_size,
3838 Opt_uid,
3839 Opt_inode32,
3840 Opt_inode64,
3841 Opt_noswap,
3842 Opt_quota,
3843 Opt_usrquota,
3844 Opt_grpquota,
3845 Opt_usrquota_block_hardlimit,
3846 Opt_usrquota_inode_hardlimit,
3847 Opt_grpquota_block_hardlimit,
3848 Opt_grpquota_inode_hardlimit,
3610};
3611
3612static const struct constant_table shmem_param_enums_huge[] = {
3613 {"never", SHMEM_HUGE_NEVER },
3614 {"always", SHMEM_HUGE_ALWAYS },
3615 {"within_size", SHMEM_HUGE_WITHIN_SIZE },
3616 {"advise", SHMEM_HUGE_ADVISE },
3617 {}

--- 6 unchanged lines hidden (view full) ---

3624 fsparam_string("mpol", Opt_mpol),
3625 fsparam_string("nr_blocks", Opt_nr_blocks),
3626 fsparam_string("nr_inodes", Opt_nr_inodes),
3627 fsparam_string("size", Opt_size),
3628 fsparam_u32 ("uid", Opt_uid),
3629 fsparam_flag ("inode32", Opt_inode32),
3630 fsparam_flag ("inode64", Opt_inode64),
3631 fsparam_flag ("noswap", Opt_noswap),
3849};
3850
3851static const struct constant_table shmem_param_enums_huge[] = {
3852 {"never", SHMEM_HUGE_NEVER },
3853 {"always", SHMEM_HUGE_ALWAYS },
3854 {"within_size", SHMEM_HUGE_WITHIN_SIZE },
3855 {"advise", SHMEM_HUGE_ADVISE },
3856 {}

--- 6 unchanged lines hidden (view full) ---

3863 fsparam_string("mpol", Opt_mpol),
3864 fsparam_string("nr_blocks", Opt_nr_blocks),
3865 fsparam_string("nr_inodes", Opt_nr_inodes),
3866 fsparam_string("size", Opt_size),
3867 fsparam_u32 ("uid", Opt_uid),
3868 fsparam_flag ("inode32", Opt_inode32),
3869 fsparam_flag ("inode64", Opt_inode64),
3870 fsparam_flag ("noswap", Opt_noswap),
3871#ifdef CONFIG_TMPFS_QUOTA
3872 fsparam_flag ("quota", Opt_quota),
3873 fsparam_flag ("usrquota", Opt_usrquota),
3874 fsparam_flag ("grpquota", Opt_grpquota),
3875 fsparam_string("usrquota_block_hardlimit", Opt_usrquota_block_hardlimit),
3876 fsparam_string("usrquota_inode_hardlimit", Opt_usrquota_inode_hardlimit),
3877 fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit),
3878 fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit),
3879#endif
3632 {}
3633};
3634
3635static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3636{
3637 struct shmem_options *ctx = fc->fs_private;
3638 struct fs_parse_result result;
3639 unsigned long long size;
3640 char *rest;
3641 int opt;
3880 {}
3881};
3882
3883static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3884{
3885 struct shmem_options *ctx = fc->fs_private;
3886 struct fs_parse_result result;
3887 unsigned long long size;
3888 char *rest;
3889 int opt;
3890 kuid_t kuid;
3891 kgid_t kgid;
3642
3643 opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3644 if (opt < 0)
3645 return opt;
3646
3647 switch (opt) {
3648 case Opt_size:
3649 size = memparse(param->string, &rest);

--- 5 unchanged lines hidden (view full) ---

3655 }
3656 if (*rest)
3657 goto bad_value;
3658 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3659 ctx->seen |= SHMEM_SEEN_BLOCKS;
3660 break;
3661 case Opt_nr_blocks:
3662 ctx->blocks = memparse(param->string, &rest);
3892
3893 opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3894 if (opt < 0)
3895 return opt;
3896
3897 switch (opt) {
3898 case Opt_size:
3899 size = memparse(param->string, &rest);

--- 5 unchanged lines hidden (view full) ---

3905 }
3906 if (*rest)
3907 goto bad_value;
3908 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3909 ctx->seen |= SHMEM_SEEN_BLOCKS;
3910 break;
3911 case Opt_nr_blocks:
3912 ctx->blocks = memparse(param->string, &rest);
3663 if (*rest || ctx->blocks > S64_MAX)
3913 if (*rest || ctx->blocks > LONG_MAX)
3664 goto bad_value;
3665 ctx->seen |= SHMEM_SEEN_BLOCKS;
3666 break;
3667 case Opt_nr_inodes:
3668 ctx->inodes = memparse(param->string, &rest);
3914 goto bad_value;
3915 ctx->seen |= SHMEM_SEEN_BLOCKS;
3916 break;
3917 case Opt_nr_inodes:
3918 ctx->inodes = memparse(param->string, &rest);
3669 if (*rest)
3919 if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE)
3670 goto bad_value;
3671 ctx->seen |= SHMEM_SEEN_INODES;
3672 break;
3673 case Opt_mode:
3674 ctx->mode = result.uint_32 & 07777;
3675 break;
3676 case Opt_uid:
3920 goto bad_value;
3921 ctx->seen |= SHMEM_SEEN_INODES;
3922 break;
3923 case Opt_mode:
3924 ctx->mode = result.uint_32 & 07777;
3925 break;
3926 case Opt_uid:
3677 ctx->uid = make_kuid(current_user_ns(), result.uint_32);
3678 if (!uid_valid(ctx->uid))
3927 kuid = make_kuid(current_user_ns(), result.uint_32);
3928 if (!uid_valid(kuid))
3679 goto bad_value;
3929 goto bad_value;
3930
3931 /*
3932 * The requested uid must be representable in the
3933 * filesystem's idmapping.
3934 */
3935 if (!kuid_has_mapping(fc->user_ns, kuid))
3936 goto bad_value;
3937
3938 ctx->uid = kuid;
3680 break;
3681 case Opt_gid:
3939 break;
3940 case Opt_gid:
3682 ctx->gid = make_kgid(current_user_ns(), result.uint_32);
3683 if (!gid_valid(ctx->gid))
3941 kgid = make_kgid(current_user_ns(), result.uint_32);
3942 if (!gid_valid(kgid))
3684 goto bad_value;
3943 goto bad_value;
3944
3945 /*
3946 * The requested gid must be representable in the
3947 * filesystem's idmapping.
3948 */
3949 if (!kgid_has_mapping(fc->user_ns, kgid))
3950 goto bad_value;
3951
3952 ctx->gid = kgid;
3685 break;
3686 case Opt_huge:
3687 ctx->huge = result.uint_32;
3688 if (ctx->huge != SHMEM_HUGE_NEVER &&
3689 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3690 has_transparent_hugepage()))
3691 goto unsupported_parameter;
3692 ctx->seen |= SHMEM_SEEN_HUGE;

--- 22 unchanged lines hidden (view full) ---

3715 case Opt_noswap:
3716 if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) {
3717 return invalfc(fc,
3718 "Turning off swap in unprivileged tmpfs mounts unsupported");
3719 }
3720 ctx->noswap = true;
3721 ctx->seen |= SHMEM_SEEN_NOSWAP;
3722 break;
3953 break;
3954 case Opt_huge:
3955 ctx->huge = result.uint_32;
3956 if (ctx->huge != SHMEM_HUGE_NEVER &&
3957 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3958 has_transparent_hugepage()))
3959 goto unsupported_parameter;
3960 ctx->seen |= SHMEM_SEEN_HUGE;

--- 22 unchanged lines hidden (view full) ---

3983 case Opt_noswap:
3984 if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) {
3985 return invalfc(fc,
3986 "Turning off swap in unprivileged tmpfs mounts unsupported");
3987 }
3988 ctx->noswap = true;
3989 ctx->seen |= SHMEM_SEEN_NOSWAP;
3990 break;
3991 case Opt_quota:
3992 if (fc->user_ns != &init_user_ns)
3993 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
3994 ctx->seen |= SHMEM_SEEN_QUOTA;
3995 ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP);
3996 break;
3997 case Opt_usrquota:
3998 if (fc->user_ns != &init_user_ns)
3999 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4000 ctx->seen |= SHMEM_SEEN_QUOTA;
4001 ctx->quota_types |= QTYPE_MASK_USR;
4002 break;
4003 case Opt_grpquota:
4004 if (fc->user_ns != &init_user_ns)
4005 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4006 ctx->seen |= SHMEM_SEEN_QUOTA;
4007 ctx->quota_types |= QTYPE_MASK_GRP;
4008 break;
4009 case Opt_usrquota_block_hardlimit:
4010 size = memparse(param->string, &rest);
4011 if (*rest || !size)
4012 goto bad_value;
4013 if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4014 return invalfc(fc,
4015 "User quota block hardlimit too large.");
4016 ctx->qlimits.usrquota_bhardlimit = size;
4017 break;
4018 case Opt_grpquota_block_hardlimit:
4019 size = memparse(param->string, &rest);
4020 if (*rest || !size)
4021 goto bad_value;
4022 if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4023 return invalfc(fc,
4024 "Group quota block hardlimit too large.");
4025 ctx->qlimits.grpquota_bhardlimit = size;
4026 break;
4027 case Opt_usrquota_inode_hardlimit:
4028 size = memparse(param->string, &rest);
4029 if (*rest || !size)
4030 goto bad_value;
4031 if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4032 return invalfc(fc,
4033 "User quota inode hardlimit too large.");
4034 ctx->qlimits.usrquota_ihardlimit = size;
4035 break;
4036 case Opt_grpquota_inode_hardlimit:
4037 size = memparse(param->string, &rest);
4038 if (*rest || !size)
4039 goto bad_value;
4040 if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4041 return invalfc(fc,
4042 "Group quota inode hardlimit too large.");
4043 ctx->qlimits.grpquota_ihardlimit = size;
4044 break;
3723 }
3724 return 0;
3725
3726unsupported_parameter:
3727 return invalfc(fc, "Unsupported parameter '%s'", param->key);
3728bad_value:
3729 return invalfc(fc, "Bad value for '%s'", param->key);
3730}

--- 39 unchanged lines hidden (view full) ---

3770 return err;
3771 }
3772 }
3773 return 0;
3774}
3775
3776/*
3777 * Reconfigure a shmem filesystem.
4045 }
4046 return 0;
4047
4048unsupported_parameter:
4049 return invalfc(fc, "Unsupported parameter '%s'", param->key);
4050bad_value:
4051 return invalfc(fc, "Bad value for '%s'", param->key);
4052}

--- 39 unchanged lines hidden (view full) ---

4092 return err;
4093 }
4094 }
4095 return 0;
4096}
4097
4098/*
4099 * Reconfigure a shmem filesystem.
3778 *
3779 * Note that we disallow change from limited->unlimited blocks/inodes while any
3780 * are in use; but we must separately disallow unlimited->limited, because in
3781 * that case we have no record of how much is already in use.
3782 */
3783static int shmem_reconfigure(struct fs_context *fc)
3784{
3785 struct shmem_options *ctx = fc->fs_private;
3786 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
4100 */
4101static int shmem_reconfigure(struct fs_context *fc)
4102{
4103 struct shmem_options *ctx = fc->fs_private;
4104 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
3787 unsigned long inodes;
4105 unsigned long used_isp;
3788 struct mempolicy *mpol = NULL;
3789 const char *err;
3790
3791 raw_spin_lock(&sbinfo->stat_lock);
4106 struct mempolicy *mpol = NULL;
4107 const char *err;
4108
4109 raw_spin_lock(&sbinfo->stat_lock);
3792 inodes = sbinfo->max_inodes - sbinfo->free_inodes;
4110 used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace;
3793
3794 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3795 if (!sbinfo->max_blocks) {
3796 err = "Cannot retroactively limit size";
3797 goto out;
3798 }
3799 if (percpu_counter_compare(&sbinfo->used_blocks,
3800 ctx->blocks) > 0) {
3801 err = "Too small a size for current use";
3802 goto out;
3803 }
3804 }
3805 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3806 if (!sbinfo->max_inodes) {
3807 err = "Cannot retroactively limit inodes";
3808 goto out;
3809 }
4111
4112 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
4113 if (!sbinfo->max_blocks) {
4114 err = "Cannot retroactively limit size";
4115 goto out;
4116 }
4117 if (percpu_counter_compare(&sbinfo->used_blocks,
4118 ctx->blocks) > 0) {
4119 err = "Too small a size for current use";
4120 goto out;
4121 }
4122 }
4123 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
4124 if (!sbinfo->max_inodes) {
4125 err = "Cannot retroactively limit inodes";
4126 goto out;
4127 }
3810 if (ctx->inodes < inodes) {
4128 if (ctx->inodes * BOGO_INODE_SIZE < used_isp) {
3811 err = "Too few inodes for current use";
3812 goto out;
3813 }
3814 }
3815
3816 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
3817 sbinfo->next_ino > UINT_MAX) {
3818 err = "Current inum too high to switch to 32-bit inums";
3819 goto out;
3820 }
3821 if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) {
3822 err = "Cannot disable swap on remount";
3823 goto out;
3824 }
3825 if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) {
3826 err = "Cannot enable swap on remount if it was disabled on first mount";
3827 goto out;
3828 }
3829
4129 err = "Too few inodes for current use";
4130 goto out;
4131 }
4132 }
4133
4134 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
4135 sbinfo->next_ino > UINT_MAX) {
4136 err = "Current inum too high to switch to 32-bit inums";
4137 goto out;
4138 }
4139 if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) {
4140 err = "Cannot disable swap on remount";
4141 goto out;
4142 }
4143 if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) {
4144 err = "Cannot enable swap on remount if it was disabled on first mount";
4145 goto out;
4146 }
4147
4148 if (ctx->seen & SHMEM_SEEN_QUOTA &&
4149 !sb_any_quota_loaded(fc->root->d_sb)) {
4150 err = "Cannot enable quota on remount";
4151 goto out;
4152 }
4153
4154#ifdef CONFIG_TMPFS_QUOTA
4155#define CHANGED_LIMIT(name) \
4156 (ctx->qlimits.name## hardlimit && \
4157 (ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit))
4158
4159 if (CHANGED_LIMIT(usrquota_b) || CHANGED_LIMIT(usrquota_i) ||
4160 CHANGED_LIMIT(grpquota_b) || CHANGED_LIMIT(grpquota_i)) {
4161 err = "Cannot change global quota limit on remount";
4162 goto out;
4163 }
4164#endif /* CONFIG_TMPFS_QUOTA */
4165
3830 if (ctx->seen & SHMEM_SEEN_HUGE)
3831 sbinfo->huge = ctx->huge;
3832 if (ctx->seen & SHMEM_SEEN_INUMS)
3833 sbinfo->full_inums = ctx->full_inums;
3834 if (ctx->seen & SHMEM_SEEN_BLOCKS)
3835 sbinfo->max_blocks = ctx->blocks;
3836 if (ctx->seen & SHMEM_SEEN_INODES) {
3837 sbinfo->max_inodes = ctx->inodes;
4166 if (ctx->seen & SHMEM_SEEN_HUGE)
4167 sbinfo->huge = ctx->huge;
4168 if (ctx->seen & SHMEM_SEEN_INUMS)
4169 sbinfo->full_inums = ctx->full_inums;
4170 if (ctx->seen & SHMEM_SEEN_BLOCKS)
4171 sbinfo->max_blocks = ctx->blocks;
4172 if (ctx->seen & SHMEM_SEEN_INODES) {
4173 sbinfo->max_inodes = ctx->inodes;
3838 sbinfo->free_inodes = ctx->inodes - inodes;
4174 sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp;
3839 }
3840
3841 /*
3842 * Preserve previous mempolicy unless mpol remount option was specified.
3843 */
3844 if (ctx->mpol) {
3845 mpol = sbinfo->mpol;
3846 sbinfo->mpol = ctx->mpol; /* transfers initial ref */

--- 66 unchanged lines hidden (view full) ---

3913}
3914
3915#endif /* CONFIG_TMPFS */
3916
3917static void shmem_put_super(struct super_block *sb)
3918{
3919 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3920
4175 }
4176
4177 /*
4178 * Preserve previous mempolicy unless mpol remount option was specified.
4179 */
4180 if (ctx->mpol) {
4181 mpol = sbinfo->mpol;
4182 sbinfo->mpol = ctx->mpol; /* transfers initial ref */

--- 66 unchanged lines hidden (view full) ---

4249}
4250
4251#endif /* CONFIG_TMPFS */
4252
4253static void shmem_put_super(struct super_block *sb)
4254{
4255 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
4256
4257#ifdef CONFIG_TMPFS_QUOTA
4258 shmem_disable_quotas(sb);
4259#endif
3921 free_percpu(sbinfo->ino_batch);
3922 percpu_counter_destroy(&sbinfo->used_blocks);
3923 mpol_put(sbinfo->mpol);
3924 kfree(sbinfo);
3925 sb->s_fs_info = NULL;
3926}
3927
3928static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
3929{
3930 struct shmem_options *ctx = fc->fs_private;
3931 struct inode *inode;
3932 struct shmem_sb_info *sbinfo;
4260 free_percpu(sbinfo->ino_batch);
4261 percpu_counter_destroy(&sbinfo->used_blocks);
4262 mpol_put(sbinfo->mpol);
4263 kfree(sbinfo);
4264 sb->s_fs_info = NULL;
4265}
4266
4267static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
4268{
4269 struct shmem_options *ctx = fc->fs_private;
4270 struct inode *inode;
4271 struct shmem_sb_info *sbinfo;
4272 int error = -ENOMEM;
3933
3934 /* Round up to L1_CACHE_BYTES to resist false sharing */
3935 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3936 L1_CACHE_BYTES), GFP_KERNEL);
3937 if (!sbinfo)
4273
4274 /* Round up to L1_CACHE_BYTES to resist false sharing */
4275 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
4276 L1_CACHE_BYTES), GFP_KERNEL);
4277 if (!sbinfo)
3938 return -ENOMEM;
4278 return error;
3939
3940 sb->s_fs_info = sbinfo;
3941
3942#ifdef CONFIG_TMPFS
3943 /*
3944 * Per default we only allow half of the physical ram per
3945 * tmpfs instance, limiting inodes to one per page of lowmem;
3946 * but the internal instance is left unlimited.

--- 10 unchanged lines hidden (view full) ---

3957 sb->s_flags |= SB_NOUSER;
3958 }
3959 sb->s_export_op = &shmem_export_ops;
3960 sb->s_flags |= SB_NOSEC | SB_I_VERSION;
3961#else
3962 sb->s_flags |= SB_NOUSER;
3963#endif
3964 sbinfo->max_blocks = ctx->blocks;
4279
4280 sb->s_fs_info = sbinfo;
4281
4282#ifdef CONFIG_TMPFS
4283 /*
4284 * Per default we only allow half of the physical ram per
4285 * tmpfs instance, limiting inodes to one per page of lowmem;
4286 * but the internal instance is left unlimited.

--- 10 unchanged lines hidden (view full) ---

4297 sb->s_flags |= SB_NOUSER;
4298 }
4299 sb->s_export_op = &shmem_export_ops;
4300 sb->s_flags |= SB_NOSEC | SB_I_VERSION;
4301#else
4302 sb->s_flags |= SB_NOUSER;
4303#endif
4304 sbinfo->max_blocks = ctx->blocks;
3965 sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
4305 sbinfo->max_inodes = ctx->inodes;
4306 sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE;
3966 if (sb->s_flags & SB_KERNMOUNT) {
3967 sbinfo->ino_batch = alloc_percpu(ino_t);
3968 if (!sbinfo->ino_batch)
3969 goto failed;
3970 }
3971 sbinfo->uid = ctx->uid;
3972 sbinfo->gid = ctx->gid;
3973 sbinfo->full_inums = ctx->full_inums;

--- 17 unchanged lines hidden (view full) ---

3991#ifdef CONFIG_TMPFS_XATTR
3992 sb->s_xattr = shmem_xattr_handlers;
3993#endif
3994#ifdef CONFIG_TMPFS_POSIX_ACL
3995 sb->s_flags |= SB_POSIXACL;
3996#endif
3997 uuid_gen(&sb->s_uuid);
3998
4307 if (sb->s_flags & SB_KERNMOUNT) {
4308 sbinfo->ino_batch = alloc_percpu(ino_t);
4309 if (!sbinfo->ino_batch)
4310 goto failed;
4311 }
4312 sbinfo->uid = ctx->uid;
4313 sbinfo->gid = ctx->gid;
4314 sbinfo->full_inums = ctx->full_inums;

--- 17 unchanged lines hidden (view full) ---

4332#ifdef CONFIG_TMPFS_XATTR
4333 sb->s_xattr = shmem_xattr_handlers;
4334#endif
4335#ifdef CONFIG_TMPFS_POSIX_ACL
4336 sb->s_flags |= SB_POSIXACL;
4337#endif
4338 uuid_gen(&sb->s_uuid);
4339
4340#ifdef CONFIG_TMPFS_QUOTA
4341 if (ctx->seen & SHMEM_SEEN_QUOTA) {
4342 sb->dq_op = &shmem_quota_operations;
4343 sb->s_qcop = &dquot_quotactl_sysfile_ops;
4344 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
4345
4346 /* Copy the default limits from ctx into sbinfo */
4347 memcpy(&sbinfo->qlimits, &ctx->qlimits,
4348 sizeof(struct shmem_quota_limits));
4349
4350 if (shmem_enable_quotas(sb, ctx->quota_types))
4351 goto failed;
4352 }
4353#endif /* CONFIG_TMPFS_QUOTA */
4354
3999 inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL, S_IFDIR | sbinfo->mode, 0,
4000 VM_NORESERVE);
4355 inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL, S_IFDIR | sbinfo->mode, 0,
4356 VM_NORESERVE);
4001 if (!inode)
4357 if (IS_ERR(inode)) {
4358 error = PTR_ERR(inode);
4002 goto failed;
4359 goto failed;
4360 }
4003 inode->i_uid = sbinfo->uid;
4004 inode->i_gid = sbinfo->gid;
4005 sb->s_root = d_make_root(inode);
4006 if (!sb->s_root)
4007 goto failed;
4008 return 0;
4009
4010failed:
4011 shmem_put_super(sb);
4361 inode->i_uid = sbinfo->uid;
4362 inode->i_gid = sbinfo->gid;
4363 sb->s_root = d_make_root(inode);
4364 if (!sb->s_root)
4365 goto failed;
4366 return 0;
4367
4368failed:
4369 shmem_put_super(sb);
4012 return -ENOMEM;
4370 return error;
4013}
4014
4015static int shmem_get_tree(struct fs_context *fc)
4016{
4017 return get_tree_nodev(fc, shmem_fill_super);
4018}
4019
4020static void shmem_free_fc(struct fs_context *fc)

--- 33 unchanged lines hidden (view full) ---

4054 kfree(inode->i_link);
4055 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
4056}
4057
4058static void shmem_destroy_inode(struct inode *inode)
4059{
4060 if (S_ISREG(inode->i_mode))
4061 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
4371}
4372
4373static int shmem_get_tree(struct fs_context *fc)
4374{
4375 return get_tree_nodev(fc, shmem_fill_super);
4376}
4377
4378static void shmem_free_fc(struct fs_context *fc)

--- 33 unchanged lines hidden (view full) ---

4412 kfree(inode->i_link);
4413 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
4414}
4415
4416static void shmem_destroy_inode(struct inode *inode)
4417{
4418 if (S_ISREG(inode->i_mode))
4419 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
4420 if (S_ISDIR(inode->i_mode))
4421 simple_offset_destroy(shmem_get_offset_ctx(inode));
4062}
4063
4064static void shmem_init_inode(void *foo)
4065{
4066 struct shmem_inode_info *info = foo;
4067 inode_init_once(&info->vfs_inode);
4068}
4069

--- 27 unchanged lines hidden (view full) ---

4097 .migrate_folio = migrate_folio,
4098#endif
4099 .error_remove_page = shmem_error_remove_page,
4100};
4101EXPORT_SYMBOL(shmem_aops);
4102
4103static const struct file_operations shmem_file_operations = {
4104 .mmap = shmem_mmap,
4422}
4423
4424static void shmem_init_inode(void *foo)
4425{
4426 struct shmem_inode_info *info = foo;
4427 inode_init_once(&info->vfs_inode);
4428}
4429

--- 27 unchanged lines hidden (view full) ---

4457 .migrate_folio = migrate_folio,
4458#endif
4459 .error_remove_page = shmem_error_remove_page,
4460};
4461EXPORT_SYMBOL(shmem_aops);
4462
4463static const struct file_operations shmem_file_operations = {
4464 .mmap = shmem_mmap,
4105 .open = generic_file_open,
4465 .open = shmem_file_open,
4106 .get_unmapped_area = shmem_get_unmapped_area,
4107#ifdef CONFIG_TMPFS
4108 .llseek = shmem_file_llseek,
4109 .read_iter = shmem_file_read_iter,
4466 .get_unmapped_area = shmem_get_unmapped_area,
4467#ifdef CONFIG_TMPFS
4468 .llseek = shmem_file_llseek,
4469 .read_iter = shmem_file_read_iter,
4110 .write_iter = generic_file_write_iter,
4470 .write_iter = shmem_file_write_iter,
4111 .fsync = noop_fsync,
4112 .splice_read = shmem_file_splice_read,
4113 .splice_write = iter_file_splice_write,
4114 .fallocate = shmem_fallocate,
4115#endif
4116};
4117
4118static const struct inode_operations shmem_inode_operations = {

--- 15 unchanged lines hidden (view full) ---

4134 .link = shmem_link,
4135 .unlink = shmem_unlink,
4136 .symlink = shmem_symlink,
4137 .mkdir = shmem_mkdir,
4138 .rmdir = shmem_rmdir,
4139 .mknod = shmem_mknod,
4140 .rename = shmem_rename2,
4141 .tmpfile = shmem_tmpfile,
4471 .fsync = noop_fsync,
4472 .splice_read = shmem_file_splice_read,
4473 .splice_write = iter_file_splice_write,
4474 .fallocate = shmem_fallocate,
4475#endif
4476};
4477
4478static const struct inode_operations shmem_inode_operations = {

--- 15 unchanged lines hidden (view full) ---

4494 .link = shmem_link,
4495 .unlink = shmem_unlink,
4496 .symlink = shmem_symlink,
4497 .mkdir = shmem_mkdir,
4498 .rmdir = shmem_rmdir,
4499 .mknod = shmem_mknod,
4500 .rename = shmem_rename2,
4501 .tmpfile = shmem_tmpfile,
4502 .get_offset_ctx = shmem_get_offset_ctx,
4142#endif
4143#ifdef CONFIG_TMPFS_XATTR
4144 .listxattr = shmem_listxattr,
4145 .fileattr_get = shmem_fileattr_get,
4146 .fileattr_set = shmem_fileattr_set,
4147#endif
4148#ifdef CONFIG_TMPFS_POSIX_ACL
4149 .setattr = shmem_setattr,

--- 15 unchanged lines hidden (view full) ---

4165static const struct super_operations shmem_ops = {
4166 .alloc_inode = shmem_alloc_inode,
4167 .free_inode = shmem_free_in_core_inode,
4168 .destroy_inode = shmem_destroy_inode,
4169#ifdef CONFIG_TMPFS
4170 .statfs = shmem_statfs,
4171 .show_options = shmem_show_options,
4172#endif
4503#endif
4504#ifdef CONFIG_TMPFS_XATTR
4505 .listxattr = shmem_listxattr,
4506 .fileattr_get = shmem_fileattr_get,
4507 .fileattr_set = shmem_fileattr_set,
4508#endif
4509#ifdef CONFIG_TMPFS_POSIX_ACL
4510 .setattr = shmem_setattr,

--- 15 unchanged lines hidden (view full) ---

4526static const struct super_operations shmem_ops = {
4527 .alloc_inode = shmem_alloc_inode,
4528 .free_inode = shmem_free_in_core_inode,
4529 .destroy_inode = shmem_destroy_inode,
4530#ifdef CONFIG_TMPFS
4531 .statfs = shmem_statfs,
4532 .show_options = shmem_show_options,
4533#endif
4534#ifdef CONFIG_TMPFS_QUOTA
4535 .get_dquots = shmem_get_dquots,
4536#endif
4173 .evict_inode = shmem_evict_inode,
4174 .drop_inode = generic_delete_inode,
4175 .put_super = shmem_put_super,
4176#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4177 .nr_cached_objects = shmem_unused_huge_count,
4178 .free_cached_objects = shmem_unused_huge_scan,
4179#endif
4180};

--- 49 unchanged lines hidden (view full) ---

4230};
4231
4232void __init shmem_init(void)
4233{
4234 int error;
4235
4236 shmem_init_inodecache();
4237
4537 .evict_inode = shmem_evict_inode,
4538 .drop_inode = generic_delete_inode,
4539 .put_super = shmem_put_super,
4540#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4541 .nr_cached_objects = shmem_unused_huge_count,
4542 .free_cached_objects = shmem_unused_huge_scan,
4543#endif
4544};

--- 49 unchanged lines hidden (view full) ---

4594};
4595
4596void __init shmem_init(void)
4597{
4598 int error;
4599
4600 shmem_init_inodecache();
4601
4602#ifdef CONFIG_TMPFS_QUOTA
4603 error = register_quota_format(&shmem_quota_format);
4604 if (error < 0) {
4605 pr_err("Could not register quota format\n");
4606 goto out3;
4607 }
4608#endif
4609
4238 error = register_filesystem(&shmem_fs_type);
4239 if (error) {
4240 pr_err("Could not register tmpfs\n");
4241 goto out2;
4242 }
4243
4244 shm_mnt = kern_mount(&shmem_fs_type);
4245 if (IS_ERR(shm_mnt)) {

--- 8 unchanged lines hidden (view full) ---

4254 else
4255 shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
4256#endif
4257 return;
4258
4259out1:
4260 unregister_filesystem(&shmem_fs_type);
4261out2:
4610 error = register_filesystem(&shmem_fs_type);
4611 if (error) {
4612 pr_err("Could not register tmpfs\n");
4613 goto out2;
4614 }
4615
4616 shm_mnt = kern_mount(&shmem_fs_type);
4617 if (IS_ERR(shm_mnt)) {

--- 8 unchanged lines hidden (view full) ---

4626 else
4627 shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
4628#endif
4629 return;
4630
4631out1:
4632 unregister_filesystem(&shmem_fs_type);
4633out2:
4634#ifdef CONFIG_TMPFS_QUOTA
4635 unregister_quota_format(&shmem_quota_format);
4636out3:
4637#endif
4262 shmem_destroy_inodecache();
4263 shm_mnt = ERR_PTR(error);
4264}
4265
4266#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
4267static ssize_t shmem_enabled_show(struct kobject *kobj,
4268 struct kobj_attribute *attr, char *buf)
4269{

--- 103 unchanged lines hidden (view full) ---

4373{
4374 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
4375}
4376EXPORT_SYMBOL_GPL(shmem_truncate_range);
4377
4378#define shmem_vm_ops generic_file_vm_ops
4379#define shmem_anon_vm_ops generic_file_vm_ops
4380#define shmem_file_operations ramfs_file_operations
4638 shmem_destroy_inodecache();
4639 shm_mnt = ERR_PTR(error);
4640}
4641
4642#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
4643static ssize_t shmem_enabled_show(struct kobject *kobj,
4644 struct kobj_attribute *attr, char *buf)
4645{

--- 103 unchanged lines hidden (view full) ---

4749{
4750 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
4751}
4752EXPORT_SYMBOL_GPL(shmem_truncate_range);
4753
4754#define shmem_vm_ops generic_file_vm_ops
4755#define shmem_anon_vm_ops generic_file_vm_ops
4756#define shmem_file_operations ramfs_file_operations
4381#define shmem_get_inode(idmap, sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
4382#define shmem_acct_size(flags, size) 0
4383#define shmem_unacct_size(flags, size) do {} while (0)
4384
4757#define shmem_acct_size(flags, size) 0
4758#define shmem_unacct_size(flags, size) do {} while (0)
4759
4760static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block *sb, struct inode *dir,
4761 umode_t mode, dev_t dev, unsigned long flags)
4762{
4763 struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
4764 return inode ? inode : ERR_PTR(-ENOSPC);
4765}
4766
4385#endif /* CONFIG_SHMEM */
4386
4387/* common code */
4388
4389static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4390 unsigned long flags, unsigned int i_flags)
4391{
4392 struct inode *inode;

--- 8 unchanged lines hidden (view full) ---

4401 if (shmem_acct_size(flags, size))
4402 return ERR_PTR(-ENOMEM);
4403
4404 if (is_idmapped_mnt(mnt))
4405 return ERR_PTR(-EINVAL);
4406
4407 inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
4408 S_IFREG | S_IRWXUGO, 0, flags);
4767#endif /* CONFIG_SHMEM */
4768
4769/* common code */
4770
4771static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4772 unsigned long flags, unsigned int i_flags)
4773{
4774 struct inode *inode;

--- 8 unchanged lines hidden (view full) ---

4783 if (shmem_acct_size(flags, size))
4784 return ERR_PTR(-ENOMEM);
4785
4786 if (is_idmapped_mnt(mnt))
4787 return ERR_PTR(-EINVAL);
4788
4789 inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
4790 S_IFREG | S_IRWXUGO, 0, flags);
4409 if (unlikely(!inode)) {
4791
4792 if (IS_ERR(inode)) {
4410 shmem_unacct_size(flags, size);
4793 shmem_unacct_size(flags, size);
4411 return ERR_PTR(-ENOSPC);
4794 return ERR_CAST(inode);
4412 }
4413 inode->i_flags |= i_flags;
4414 inode->i_size = size;
4415 clear_nlink(inode); /* It is unlinked */
4416 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4417 if (!IS_ERR(res))
4418 res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
4419 &shmem_file_operations);

--- 131 unchanged lines hidden ---
4795 }
4796 inode->i_flags |= i_flags;
4797 inode->i_size = size;
4798 clear_nlink(inode); /* It is unlinked */
4799 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4800 if (!IS_ERR(res))
4801 res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
4802 &shmem_file_operations);

--- 131 unchanged lines hidden ---