shmem.c: diff of mm/shmem.c between two revisions:
  A = shmem.c at commit 9c9fa97a8edbc3668dfc7a25de516e80c146e86f
  B = shmem.c at commit 19deb7695e072deaff025e03de40c61b525bd57e
Unlabelled lines below are identical in both revisions; lines prefixed A:/B:
and blocks headed "A only:"/"B only:" differ between them (revision order
taken from the original side-by-side view).
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.

--- 23 unchanged lines hidden ---

#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>
#include <linux/hugetlb.h>
#include <linux/frontswap.h>
A:	#include <linux/fs_parser.h>

#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It

--- 54 unchanged lines hidden ---

struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};
A only:
struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	int huge;
	int seen;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{

--- 471 unchanged lines hidden ---

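Note: shmem_default_max_blocks() above caps an ordinary tmpfs instance at half
of physical RAM, counted in pages. A userspace sketch of the same arithmetic,
with sysconf(3) standing in for the kernel's totalram_pages() (illustrative
only, not kernel code):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long pages = sysconf(_SC_PHYS_PAGES);		/* ~ totalram_pages() */
	long page_kib = sysconf(_SC_PAGESIZE) >> 10;	/* page size in KiB */

	printf("default tmpfs limit: %ld pages (%ld MiB)\n",
	       pages / 2, pages / 2 * page_kib / 1024);
	return 0;
}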
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
	unsigned long i = 0;
A:	unsigned long nr = compound_nr(page);
B:	unsigned long nr = 1UL << compound_order(page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
	VM_BUG_ON(expected && PageTransHuge(page));

	page_ref_add(page, nr);

--- 5 unchanged lines hidden ---

	xas_lock_irq(&xas);
	entry = xas_find_conflict(&xas);
	if (entry != expected)
		xas_set_err(&xas, -EEXIST);
	xas_create_range(&xas);
	if (xas_error(&xas))
		goto unlock;
next:
A:	xas_store(&xas, page);
B:	xas_store(&xas, page + i);
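Note: in the loop above both revisions fill every xarray slot spanned by a
compound page, but B stores each tail page (page + i) while A stores the head
page in every slot, consistent with keeping only head pages in i_pages; and
compound_nr(page) is simply 1UL << compound_order(page). A minimal userspace
model of the resulting slot layout (hypothetical addresses, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int order = 9;			/* compound_order() of a PMD-sized THP */
	unsigned long nr = 1UL << order;	/* == compound_nr(page) == 512 */
	unsigned long head = 0x1000;		/* stand-in for the head struct page * */

	for (unsigned long i = 0; i < nr; i++) {
		if (i > 1 && i < nr - 1)
			continue;		/* show first two and last slot only */
		printf("slot %3lu: A stores %#lx, B stores %#lx\n",
		       i, head, head + i);
	}
	return 0;
}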
	if (++i < nr) {
		xas_next(&xas);
		goto next;
	}
	if (PageTransHuge(page)) {
		count_vm_event(THP_FILE_ALLOC);
		__inc_node_page_state(page, NR_SHMEM_THPS);
	}

--- 833 unchanged lines hidden ---

	hindex = round_down(index, HPAGE_PMD_NR);
	if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
								XA_PRESENT))
		return NULL;

	shmem_pseudo_vma_init(&pvma, info, hindex);
A:	page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
A:			       HPAGE_PMD_ORDER, &pvma, 0, numa_node_id());
B:	page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
B:			       HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
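Note: the visible difference here is the trailing argument to
alloc_pages_vma(); B's tree apparently takes a final bool hugepage parameter
that A's does not. The hindex computation above rounds the faulting index down
to a PMD-sized bucket; a runnable sketch of that alignment (HPAGE_PMD_NR
assumed 512, i.e. a 2 MiB PMD over 4 KiB base pages):

#include <stdio.h>

#define HPAGE_PMD_NR 512UL	/* assumes 4 KiB base pages, 2 MiB PMD */

int main(void)
{
	unsigned long index = 1000;
	unsigned long hindex = index & ~(HPAGE_PMD_NR - 1);	/* round_down() */

	printf("index %lu -> huge range [%lu, %lu]\n",
	       index, hindex, hindex + HPAGE_PMD_NR - 1);
	return 0;
}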
	shmem_pseudo_vma_destroy(&pvma);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)

--- 236 unchanged lines hidden ---

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache.
 *
A: * vmf and fault_type are only supplied by shmem_fault:
B: * fault_mm and fault_type are only supplied by shmem_fault:
 * otherwise they are NULL.
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
	struct vm_area_struct *vma, struct vm_fault *vmf,
	vm_fault_t *fault_type)
{
	struct address_space *mapping = inode->i_mapping;

--- 133 unchanged lines hidden ---

						PageTransHuge(page));
		goto unacct;
	}
	mem_cgroup_commit_charge(page, memcg, false,
				 PageTransHuge(page));
	lru_cache_add_anon(page);

	spin_lock_irq(&info->lock);
A:	info->alloced += compound_nr(page);
B:	info->alloced += 1 << compound_order(page);
	inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
	alloced = true;

	if (PageTransHuge(page) &&
	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
			hindex + HPAGE_PMD_NR - 1) {

--- 24 unchanged lines hidden ---

	 * Let SGP_WRITE caller clear ends if write does not fill page;
	 * but SGP_FALLOC on a page fallocated earlier must initialize
	 * it now, lest undo on failure cancel our earlier guarantee.
	 */
	if (sgp != SGP_WRITE && !PageUptodate(page)) {
		struct page *head = compound_head(page);
		int i;

A:		for (i = 0; i < compound_nr(head); i++) {
B:		for (i = 0; i < (1 << compound_order(head)); i++) {
			clear_highpage(head + i);
			flush_dcache_page(head + i);
		}
		SetPageUptodate(head);
	}

	/* Perhaps the file has been truncated since we checked */
	if (sgp <= SGP_CACHE &&

--- 10 unchanged lines hidden ---

	}
	*pagep = page + index - hindex;
	return 0;

	/*
	 * Error recovery.
	 */
unacct:
A:	shmem_inode_unacct_blocks(inode, compound_nr(page));
B:	shmem_inode_unacct_blocks(inode, 1 << compound_order(page));

	if (PageTransHuge(page)) {
		unlock_page(page);
		put_page(page);
		goto alloc_nohuge;
	}
unlock:
	if (page) {

--- 1395 unchanged lines hidden ---

}

static const struct export_operations shmem_export_ops = {
	.get_parent     = shmem_get_parent,
	.encode_fh      = shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};

A only (parameter tables for the new mount API):
enum shmem_param {
	Opt_gid,
	Opt_huge,
	Opt_mode,
	Opt_mpol,
	Opt_nr_blocks,
	Opt_nr_inodes,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec shmem_param_specs[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_enum  ("huge",		Opt_huge),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("mpol",		Opt_mpol),
	fsparam_string("nr_blocks",	Opt_nr_blocks),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};

static const struct fs_parameter_enum shmem_param_enums[] = {
	{ Opt_huge,	"never",	SHMEM_HUGE_NEVER },
	{ Opt_huge,	"always",	SHMEM_HUGE_ALWAYS },
	{ Opt_huge,	"within_size",	SHMEM_HUGE_WITHIN_SIZE },
	{ Opt_huge,	"advise",	SHMEM_HUGE_ADVISE },
	{}
};

const struct fs_parameter_description shmem_fs_parameters = {
	.name		= "tmpfs",
	.specs		= shmem_param_specs,
	.enums		= shmem_param_enums,
};
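Note: these tables are what fs_parse() consults from shmem_parse_one() below.
A hypothetical userspace mount driving that path through the new mount-API
syscalls (fsopen(2)/fsconfig(2)/fsmount(2)/move_mount(2), via raw syscall(2);
assumes a kernel and libc headers new enough to define SYS_fsopen and friends;
error handling omitted, "/mnt" is an arbitrary target):

#define _GNU_SOURCE
#include <fcntl.h>		/* AT_FDCWD */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>	/* FSCONFIG_*, MOVE_MOUNT_* */

int main(void)
{
	int fsfd = syscall(SYS_fsopen, "tmpfs", 0);

	/* each key/value below lands in shmem_parse_one() via fs_parse() */
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "50%", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "huge", "within_size", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

	int mfd = syscall(SYS_fsmount, fsfd, 0, 0);
	return syscall(SYS_move_mount, mfd, "", AT_FDCWD, "/mnt",
		       MOVE_MOUNT_F_EMPTY_PATH);
}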
A only (one parameter per call, handed in by fs_parse()):
static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
{
	struct shmem_options *ctx = fc->fs_private;
	struct fs_parse_result result;
	unsigned long long size;
	char *rest;
	int opt;

	opt = fs_parse(fc, &shmem_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_size:
		size = memparse(param->string, &rest);
		if (*rest == '%') {
			size <<= PAGE_SHIFT;
			size *= totalram_pages();
			do_div(size, 100);
			rest++;
		}
		if (*rest)
			goto bad_value;
		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
		ctx->seen |= SHMEM_SEEN_BLOCKS;
		break;
	case Opt_nr_blocks:
		ctx->blocks = memparse(param->string, &rest);
		if (*rest)
			goto bad_value;
		ctx->seen |= SHMEM_SEEN_BLOCKS;
		break;
	case Opt_nr_inodes:
		ctx->inodes = memparse(param->string, &rest);
		if (*rest)
			goto bad_value;
		ctx->seen |= SHMEM_SEEN_INODES;
		break;
	case Opt_mode:
		ctx->mode = result.uint_32 & 07777;
		break;
	case Opt_uid:
		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(ctx->uid))
			goto bad_value;
		break;
	case Opt_gid:
		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(ctx->gid))
			goto bad_value;
		break;
	case Opt_huge:
		ctx->huge = result.uint_32;
		if (ctx->huge != SHMEM_HUGE_NEVER &&
		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
		      has_transparent_hugepage()))
			goto unsupported_parameter;
		ctx->seen |= SHMEM_SEEN_HUGE;
		break;
	case Opt_mpol:
		if (IS_ENABLED(CONFIG_NUMA)) {
			mpol_put(ctx->mpol);
			ctx->mpol = NULL;
			if (mpol_parse_str(param->string, &ctx->mpol))
				goto bad_value;
			break;
		}
		goto unsupported_parameter;
	}
	return 0;

unsupported_parameter:
	return invalf(fc, "tmpfs: Unsupported parameter '%s'", param->key);
bad_value:
	return invalf(fc, "tmpfs: Bad value for '%s'", param->key);
}
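Note: Opt_size accepts memparse() suffixes (k/m/g and larger) plus a
tmpfs-specific '%' meaning percent of total RAM, as handled above. A
self-contained userspace model of that parsing (simplified suffix set;
sysconf(3) stands in for totalram_pages()):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static unsigned long long parse_size(const char *s)
{
	char *rest;
	unsigned long long size = strtoull(s, &rest, 0);

	switch (*rest) {	/* subset of kernel memparse() suffixes */
	case 'g': case 'G': size <<= 10;	/* fall through */
	case 'm': case 'M': size <<= 10;	/* fall through */
	case 'k': case 'K': size <<= 10; rest++;
	}
	if (*rest == '%') {	/* percent of physical RAM, as in Opt_size */
		size *= (unsigned long long)sysconf(_SC_PHYS_PAGES)
			* sysconf(_SC_PAGESIZE);
		size /= 100;
	}
	return size;
}

int main(void)
{
	printf("size=512m -> %llu bytes\n", parse_size("512m"));
	printf("size=50%%  -> %llu bytes\n", parse_size("50%"));
	return 0;
}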

A only (monolithic option string, split and fed to vfs_parse_fs_string()):
static int shmem_parse_options(struct fs_context *fc, void *data)
{
	char *options = data;

	while (options != NULL) {
		char *this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (*this_char) {
			char *value = strchr(this_char,'=');
			size_t len = 0;
			int err;

			if (value) {
				*value++ = '\0';
				len = strlen(value);
			}
			err = vfs_parse_fs_string(fc, this_char, value, len);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

B only (legacy parser, writing straight into the superblock info):
static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
			       bool remount)
{
	char *this_char, *value, *rest;
	struct mempolicy *mpol = NULL;
	uid_t uid;
	gid_t gid;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			pr_err("tmpfs: No value for mount option '%s'\n",
			       this_char);
			goto error;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages();
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			sbinfo->max_blocks =
				DIV_ROUND_UP(size, PAGE_SIZE);
		} else if (!strcmp(this_char,"nr_blocks")) {
			sbinfo->max_blocks = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			sbinfo->max_inodes = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (remount)
				continue;
			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (remount)
				continue;
			uid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
			sbinfo->uid = make_kuid(current_user_ns(), uid);
			if (!uid_valid(sbinfo->uid))
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (remount)
				continue;
			gid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
			sbinfo->gid = make_kgid(current_user_ns(), gid);
			if (!gid_valid(sbinfo->gid))
				goto bad_val;
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
		} else if (!strcmp(this_char, "huge")) {
			int huge;
			huge = shmem_parse_huge(value);
			if (huge < 0)
				goto bad_val;
			if (!has_transparent_hugepage() &&
					huge != SHMEM_HUGE_NEVER)
				goto bad_val;
			sbinfo->huge = huge;
#endif
#ifdef CONFIG_NUMA
		} else if (!strcmp(this_char,"mpol")) {
			mpol_put(mpol);
			mpol = NULL;
			if (mpol_parse_str(value, &mpol))
				goto bad_val;
#endif
		} else {
			pr_err("tmpfs: Bad mount option %s\n", this_char);
			goto error;
		}
	}
	sbinfo->mpol = mpol;
	return 0;

bad_val:
	pr_err("tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
error:
	mpol_put(mpol);
	return 1;

}

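Note: both versions share the splitting quirk visible above: options are
comma-separated, yet an mpol nodelist such as "mpol=bind:0,2" legitimately
contains commas, so a comma followed by a digit is not treated as a separator.
A runnable extraction of just that loop:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "size=1G,mpol=bind:0,2,huge=always";
	char *options = buf;

	while (options != NULL) {
		char *this_char = options;

		for (;;) {	/* same NUL-termination dance as above */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit((unsigned char)*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (*this_char)
			printf("option: %s\n", this_char);
	}
	return 0;
}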
A only (fs_context reconfigure):
/*
 * Reconfigure a shmem filesystem.
 *
 * Note that we disallow change from limited->unlimited blocks/inodes while any
 * are in use; but we must separately disallow unlimited->limited, because in
 * that case we have no record of how much is already in use.
 */
static int shmem_reconfigure(struct fs_context *fc)
{
	struct shmem_options *ctx = fc->fs_private;
	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
	unsigned long inodes;
	const char *err;

	spin_lock(&sbinfo->stat_lock);
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
		if (!sbinfo->max_blocks) {
			err = "Cannot retroactively limit size";
			goto out;
		}
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   ctx->blocks) > 0) {
			err = "Too small a size for current use";
			goto out;
		}
	}
	if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
		if (!sbinfo->max_inodes) {
			err = "Cannot retroactively limit inodes";
			goto out;
		}
		if (ctx->inodes < inodes) {
			err = "Too few inodes for current use";
			goto out;
		}
	}

	if (ctx->seen & SHMEM_SEEN_HUGE)
		sbinfo->huge = ctx->huge;
	if (ctx->seen & SHMEM_SEEN_BLOCKS)
		sbinfo->max_blocks = ctx->blocks;
	if (ctx->seen & SHMEM_SEEN_INODES) {
		sbinfo->max_inodes = ctx->inodes;
		sbinfo->free_inodes = ctx->inodes - inodes;
	}

	/*
	 * Preserve previous mempolicy unless mpol remount option was specified.
	 */
	if (ctx->mpol) {
		mpol_put(sbinfo->mpol);
		sbinfo->mpol = ctx->mpol;	/* transfers initial ref */
		ctx->mpol = NULL;
	}
	spin_unlock(&sbinfo->stat_lock);
	return 0;
out:
	spin_unlock(&sbinfo->stat_lock);
	return invalf(fc, "tmpfs: %s", err);
}

B only (legacy remount):
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	struct shmem_sb_info config = *sbinfo;
	unsigned long inodes;
	int error = -EINVAL;

	config.mpol = NULL;
	if (shmem_parse_options(data, &config, true))
		return error;

	spin_lock(&sbinfo->stat_lock);
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
		goto out;
	if (config.max_inodes < inodes)
		goto out;
	/*
	 * Those tests disallow limited->unlimited while any are in use;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (config.max_blocks && !sbinfo->max_blocks)
		goto out;
	if (config.max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->huge = config.huge;
	sbinfo->max_blocks  = config.max_blocks;
	sbinfo->max_inodes  = config.max_inodes;
	sbinfo->free_inodes = config.max_inodes - inodes;

	/*
	 * Preserve previous mempolicy unless mpol remount option was specified.
	 */
	if (config.mpol) {
		mpol_put(sbinfo->mpol);
		sbinfo->mpol = config.mpol;	/* transfers initial ref */
	}
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}
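Note: in A, remount also travels the fs_context path, and shmem_reconfigure()
only touches limits the caller actually set (the SHMEM_SEEN_* bits), whereas
B's shmem_remount_fs() re-parses everything into a scratch shmem_sb_info. A
hypothetical reconfigure from userspace via fspick(2) (same SYS_* availability
assumptions as the earlier mount sketch; error handling omitted):

#define _GNU_SOURCE
#include <fcntl.h>		/* AT_FDCWD */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>

int main(void)
{
	int fd = syscall(SYS_fspick, AT_FDCWD, "/mnt", 0);

	syscall(SYS_fsconfig, fd, FSCONFIG_SET_STRING, "size", "2G", 0);
	return syscall(SYS_fsconfig, fd, FSCONFIG_CMD_RECONFIGURE,
		       NULL, NULL, 0);
}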

static int shmem_show_options(struct seq_file *seq, struct dentry *root)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",

--- 24 unchanged lines hidden ---

	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	percpu_counter_destroy(&sbinfo->used_blocks);
	mpol_put(sbinfo->mpol);
	kfree(sbinfo);
	sb->s_fs_info = NULL;
}

A:	static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
B:	int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
A:	struct shmem_options *ctx = fc->fs_private;
	struct inode *inode;
	struct shmem_sb_info *sbinfo;
	int err = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

B:	sbinfo->mode = 0777 | S_ISVTX;
B:	sbinfo->uid = current_fsuid();
B:	sbinfo->gid = current_fsgid();
	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & SB_KERNMOUNT)) {
A:		if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
A:			ctx->blocks = shmem_default_max_blocks();
A:		if (!(ctx->seen & SHMEM_SEEN_INODES))
A:			ctx->inodes = shmem_default_max_inodes();
B:		sbinfo->max_blocks = shmem_default_max_blocks();
B:		sbinfo->max_inodes = shmem_default_max_inodes();
B:		if (shmem_parse_options(data, sbinfo, false)) {
B:			err = -EINVAL;
B:			goto failed;
B:		}
	} else {
		sb->s_flags |= SB_NOUSER;
	}
	sb->s_export_op = &shmem_export_ops;
	sb->s_flags |= SB_NOSEC;
#else
	sb->s_flags |= SB_NOUSER;
#endif
A:	sbinfo->max_blocks = ctx->blocks;
A:	sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
A:	sbinfo->uid = ctx->uid;
A:	sbinfo->gid = ctx->gid;
A:	sbinfo->mode = ctx->mode;
A:	sbinfo->huge = ctx->huge;
A:	sbinfo->mpol = ctx->mpol;
A:	ctx->mpol = NULL;

	spin_lock_init(&sbinfo->stat_lock);
	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
		goto failed;
B:	sbinfo->free_inodes = sbinfo->max_inodes;
	spin_lock_init(&sbinfo->shrinklist_lock);
	INIT_LIST_HEAD(&sbinfo->shrinklist);

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;

--- 16 unchanged lines hidden ---

		goto failed;
	return 0;

failed:
	shmem_put_super(sb);
	return err;
}

A only (fs_context plumbing):
static int shmem_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, shmem_fill_super);
}

static void shmem_free_fc(struct fs_context *fc)
{
	struct shmem_options *ctx = fc->fs_private;

	if (ctx) {
		mpol_put(ctx->mpol);
		kfree(ctx);
	}
}

static const struct fs_context_operations shmem_fs_context_ops = {
	.free			= shmem_free_fc,
	.get_tree		= shmem_get_tree,
#ifdef CONFIG_TMPFS
	.parse_monolithic	= shmem_parse_options,
	.parse_param		= shmem_parse_one,
	.reconfigure		= shmem_reconfigure,
#endif
};

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *info;
	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!info)
		return NULL;

--- 100 unchanged lines hidden ---

};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.free_inode	= shmem_free_in_core_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
B:	.remount_fs	= shmem_remount_fs,
	.show_options	= shmem_show_options,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
	.nr_cached_objects	= shmem_unused_huge_count,
	.free_cached_objects	= shmem_unused_huge_scan,

--- 4 unchanged lines hidden ---

	.fault		= shmem_fault,
	.map_pages	= filemap_map_pages,
#ifdef CONFIG_NUMA
	.set_policy     = shmem_set_policy,
	.get_policy     = shmem_get_policy,
#endif
};

A only (fs_context initialisation):
int shmem_init_fs_context(struct fs_context *fc)
{
	struct shmem_options *ctx;

	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->mode = 0777 | S_ISVTX;
	ctx->uid = current_fsuid();
	ctx->gid = current_fsgid();

	fc->fs_private = ctx;
	fc->ops = &shmem_fs_context_ops;
	return 0;
}

B only (legacy mount entry point):
static struct dentry *shmem_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type shmem_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
A:	.init_fs_context = shmem_init_fs_context,
A:	#ifdef CONFIG_TMPFS
A:	.parameters	= &shmem_fs_parameters,
A:	#endif
B:	.mount		= shmem_mount,
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

int __init shmem_init(void)
{
	int error;

--- 127 unchanged lines hidden ---
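Note: with either file_system_type registration above, a plain mount(2) with a
comma-separated option string still works; in A it is routed through
.parse_monolithic = shmem_parse_options(), in B it reaches shmem_fill_super()
and its shmem_parse_options() directly. Minimal example (needs CAP_SYS_ADMIN;
"/mnt" is an arbitrary target):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("tmpfs", "/mnt", "tmpfs", 0, "size=256m,mode=1777"))
		perror("mount");
	return 0;
}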

 * This is intended for small system where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
A:	.init_fs_context = ramfs_init_fs_context,
A:	.parameters	= &ramfs_fs_parameters,
B:	.mount		= ramfs_mount,
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

int __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

--- 192 unchanged lines hidden ---