shmem.c (7711fb7dac1ab77fd1b4d948f4647a569e4a1ae2) | shmem.c (0b5071dd323da2e277bce7e68749dc0a5fba4703) |
---|---|
1/* 2 * Resizable virtual memory filesystem for Linux. 3 * 4 * Copyright (C) 2000 Linus Torvalds. 5 * 2000 Transmeta Corp. 6 * 2000-2001 Christoph Rohland 7 * 2000-2001 SAP AG 8 * 2002 Red Hat Inc. --- 93 unchanged lines hidden --- 102struct shmem_falloc { 103 wait_queue_head_t *waitq; /* faults into hole wait for punch to end */ 104 pgoff_t start; /* start of range currently being fallocated */ 105 pgoff_t next; /* the next page offset to be fallocated */ 106 pgoff_t nr_falloced; /* how many new pages have been fallocated */ 107 pgoff_t nr_unswapped; /* how often writepage refused to swap out */ 108}; 109 | 1/* 2 * Resizable virtual memory filesystem for Linux. 3 * 4 * Copyright (C) 2000 Linus Torvalds. 5 * 2000 Transmeta Corp. 6 * 2000-2001 Christoph Rohland 7 * 2000-2001 SAP AG 8 * 2002 Red Hat Inc. --- 93 unchanged lines hidden --- 102struct shmem_falloc { 103 wait_queue_head_t *waitq; /* faults into hole wait for punch to end */ 104 pgoff_t start; /* start of range currently being fallocated */ 105 pgoff_t next; /* the next page offset to be fallocated */ 106 pgoff_t nr_falloced; /* how many new pages have been fallocated */ 107 pgoff_t nr_unswapped; /* how often writepage refused to swap out */ 108}; 109 |
| 110struct shmem_options { 111 unsigned long long blocks; 112 unsigned long long inodes; 113 struct mempolicy *mpol; 114 kuid_t uid; 115 kgid_t gid; 116 umode_t mode; 117 int huge; 118 int seen; 119#define SHMEM_SEEN_BLOCKS 1 120#define SHMEM_SEEN_INODES 2 121#define SHMEM_SEEN_HUGE 4 122}; 123 |
110#ifdef CONFIG_TMPFS 111static unsigned long shmem_default_max_blocks(void) 112{ 113 return totalram_pages() / 2; 114} 115 116static unsigned long shmem_default_max_inodes(void) 117{ --- 3226 unchanged lines hidden --- 3344} 3345 3346static const struct export_operations shmem_export_ops = { 3347 .get_parent = shmem_get_parent, 3348 .encode_fh = shmem_encode_fh, 3349 .fh_to_dentry = shmem_fh_to_dentry, 3350}; 3351 | 124#ifdef CONFIG_TMPFS 125static unsigned long shmem_default_max_blocks(void) 126{ 127 return totalram_pages() / 2; 128} 129 130static unsigned long shmem_default_max_inodes(void) 131{ --- 3226 unchanged lines hidden --- 3358} 3359 3360static const struct export_operations shmem_export_ops = { 3361 .get_parent = shmem_get_parent, 3362 .encode_fh = shmem_encode_fh, 3363 .fh_to_dentry = shmem_fh_to_dentry, 3364}; 3365 |
3352static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, 3353 bool remount) | 3366static int shmem_parse_options(char *options, struct shmem_options *ctx) |
3354{ 3355 char *this_char, *value, *rest; 3356 struct mempolicy *mpol = NULL; 3357 uid_t uid; 3358 gid_t gid; 3359 3360 while (options != NULL) { 3361 this_char = options; --- 28 unchanged lines hidden --- 3390 if (*rest == '%') { 3391 size <<= PAGE_SHIFT; 3392 size *= totalram_pages(); 3393 do_div(size, 100); 3394 rest++; 3395 } 3396 if (*rest) 3397 goto bad_val; | 3367{ 3368 char *this_char, *value, *rest; 3369 struct mempolicy *mpol = NULL; 3370 uid_t uid; 3371 gid_t gid; 3372 3373 while (options != NULL) { 3374 this_char = options; --- 28 unchanged lines hidden --- 3403 if (*rest == '%') { 3404 size <<= PAGE_SHIFT; 3405 size *= totalram_pages(); 3406 do_div(size, 100); 3407 rest++; 3408 } 3409 if (*rest) 3410 goto bad_val; |
3398 sbinfo->max_blocks = 3399 DIV_ROUND_UP(size, PAGE_SIZE); | 3411 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE); 3412 ctx->seen |= SHMEM_SEEN_BLOCKS; |
3400 } else if (!strcmp(this_char,"nr_blocks")) { | 3413 } else if (!strcmp(this_char,"nr_blocks")) { |
3401 sbinfo->max_blocks = memparse(value, &rest); | 3414 ctx->blocks = memparse(value, &rest); |
3402 if (*rest) 3403 goto bad_val; | 3415 if (*rest) 3416 goto bad_val; |
| 3417 ctx->seen |= SHMEM_SEEN_BLOCKS; |
3404 } else if (!strcmp(this_char,"nr_inodes")) { | 3418 } else if (!strcmp(this_char,"nr_inodes")) { |
3405 sbinfo->max_inodes = memparse(value, &rest); | 3419 ctx->inodes = memparse(value, &rest); |
3406 if (*rest) 3407 goto bad_val; | 3420 if (*rest) 3421 goto bad_val; |
| 3422 ctx->seen |= SHMEM_SEEN_INODES; |
3408 } else if (!strcmp(this_char,"mode")) { | 3423 } else if (!strcmp(this_char,"mode")) { |
3409 if (remount) 3410 continue; 3411 sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; | 3424 ctx->mode = simple_strtoul(value, &rest, 8) & 07777; |
3412 if (*rest) 3413 goto bad_val; 3414 } else if (!strcmp(this_char,"uid")) { | 3425 if (*rest) 3426 goto bad_val; 3427 } else if (!strcmp(this_char,"uid")) { |
3415 if (remount) 3416 continue; | |
3417 uid = simple_strtoul(value, &rest, 0); 3418 if (*rest) 3419 goto bad_val; | 3428 uid = simple_strtoul(value, &rest, 0); 3429 if (*rest) 3430 goto bad_val; |
3420 sbinfo->uid = make_kuid(current_user_ns(), uid); 3421 if (!uid_valid(sbinfo->uid)) | 3431 ctx->uid = make_kuid(current_user_ns(), uid); 3432 if (!uid_valid(ctx->uid)) |
3422 goto bad_val; 3423 } else if (!strcmp(this_char,"gid")) { | 3433 goto bad_val; 3434 } else if (!strcmp(this_char,"gid")) { |
3424 if (remount) 3425 continue; | |
3426 gid = simple_strtoul(value, &rest, 0); 3427 if (*rest) 3428 goto bad_val; | 3435 gid = simple_strtoul(value, &rest, 0); 3436 if (*rest) 3437 goto bad_val; |
3429 sbinfo->gid = make_kgid(current_user_ns(), gid); 3430 if (!gid_valid(sbinfo->gid)) | 3438 ctx->gid = make_kgid(current_user_ns(), gid); 3439 if (!gid_valid(ctx->gid)) |
3431 goto bad_val; 3432#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 3433 } else if (!strcmp(this_char, "huge")) { 3434 int huge; 3435 huge = shmem_parse_huge(value); 3436 if (huge < 0) 3437 goto bad_val; 3438 if (!has_transparent_hugepage() && 3439 huge != SHMEM_HUGE_NEVER) 3440 goto bad_val; | 3440 goto bad_val; 3441#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 3442 } else if (!strcmp(this_char, "huge")) { 3443 int huge; 3444 huge = shmem_parse_huge(value); 3445 if (huge < 0) 3446 goto bad_val; 3447 if (!has_transparent_hugepage() && 3448 huge != SHMEM_HUGE_NEVER) 3449 goto bad_val; |
3441 sbinfo->huge = huge; | 3450 ctx->huge = huge; 3451 ctx->seen |= SHMEM_SEEN_HUGE; |
3442#endif 3443#ifdef CONFIG_NUMA 3444 } else if (!strcmp(this_char,"mpol")) { 3445 mpol_put(mpol); 3446 mpol = NULL; 3447 if (mpol_parse_str(value, &mpol)) 3448 goto bad_val; 3449#endif 3450 } else { 3451 pr_err("tmpfs: Bad mount option %s\n", this_char); 3452 goto error; 3453 } 3454 } | 3452#endif 3453#ifdef CONFIG_NUMA 3454 } else if (!strcmp(this_char,"mpol")) { 3455 mpol_put(mpol); 3456 mpol = NULL; 3457 if (mpol_parse_str(value, &mpol)) 3458 goto bad_val; 3459#endif 3460 } else { 3461 pr_err("tmpfs: Bad mount option %s\n", this_char); 3462 goto error; 3463 } 3464 } |
3455 sbinfo->mpol = mpol; | 3465 ctx->mpol = mpol; |
3456 return 0; 3457 3458bad_val: 3459 pr_err("tmpfs: Bad value '%s' for mount option '%s'\n", 3460 value, this_char); 3461error: 3462 mpol_put(mpol); 3463 return 1; 3464 3465} 3466 3467static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) 3468{ 3469 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); | 3466 return 0; 3467 3468bad_val: 3469 pr_err("tmpfs: Bad value '%s' for mount option '%s'\n", 3470 value, this_char); 3471error: 3472 mpol_put(mpol); 3473 return 1; 3474 3475} 3476 3477static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) 3478{ 3479 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); |
3470 struct shmem_sb_info config = *sbinfo; | 3480 struct shmem_options ctx = {.seen = 0}; |
3471 unsigned long inodes; 3472 int error = -EINVAL; 3473 | 3481 unsigned long inodes; 3482 int error = -EINVAL; 3483 |
3474 config.mpol = NULL; 3475 if (shmem_parse_options(data, &config, true)) | 3484 if (shmem_parse_options(data, &ctx)) |
3476 return error; 3477 3478 spin_lock(&sbinfo->stat_lock); 3479 inodes = sbinfo->max_inodes - sbinfo->free_inodes; | 3485 return error; 3486 3487 spin_lock(&sbinfo->stat_lock); 3488 inodes = sbinfo->max_inodes - sbinfo->free_inodes; |
3480 if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0) 3481 goto out; 3482 if (config.max_inodes < inodes) 3483 goto out; | |
3484 /* 3485 * Those tests disallow limited->unlimited while any are in use; 3486 * but we must separately disallow unlimited->limited, because 3487 * in that case we have no record of how much is already in use. 3488 */ | 3489 /* 3490 * Those tests disallow limited->unlimited while any are in use; 3491 * but we must separately disallow unlimited->limited, because 3492 * in that case we have no record of how much is already in use. 3493 */ |
3489 if (config.max_blocks && !sbinfo->max_blocks) 3490 goto out; 3491 if (config.max_inodes && !sbinfo->max_inodes) 3492 goto out; | 3494 if ((ctx.seen & SHMEM_SEEN_BLOCKS) && ctx.blocks) { 3495 if (!sbinfo->max_blocks) 3496 goto out; 3497 if (percpu_counter_compare(&sbinfo->used_blocks, 3498 ctx.blocks) > 0) 3499 goto out; 3500 } 3501 if ((ctx.seen & SHMEM_SEEN_INODES) && ctx.inodes) { 3502 if (!sbinfo->max_inodes) 3503 goto out; 3504 if (ctx.inodes < inodes) 3505 goto out; 3506 } |
3493 3494 error = 0; | 3507 3508 error = 0; |
3495 sbinfo->huge = config.huge; 3496 sbinfo->max_blocks = config.max_blocks; 3497 sbinfo->max_inodes = config.max_inodes; 3498 sbinfo->free_inodes = config.max_inodes - inodes; | 3509 if (ctx.seen & SHMEM_SEEN_HUGE) 3510 sbinfo->huge = ctx.huge; 3511 if (ctx.seen & SHMEM_SEEN_BLOCKS) 3512 sbinfo->max_blocks = ctx.blocks; 3513 if (ctx.seen & SHMEM_SEEN_INODES) { 3514 sbinfo->max_inodes = ctx.inodes; 3515 sbinfo->free_inodes = ctx.inodes - inodes; 3516 } |
3499 3500 /* 3501 * Preserve previous mempolicy unless mpol remount option was specified. 3502 */ | 3517 3518 /* 3519 * Preserve previous mempolicy unless mpol remount option was specified. 3520 */ |
3503 if (config.mpol) { | 3521 if (ctx.mpol) { |
3504 mpol_put(sbinfo->mpol); | 3522 mpol_put(sbinfo->mpol); |
3505 sbinfo->mpol = config.mpol; /* transfers initial ref */ | 3523 sbinfo->mpol = ctx.mpol; /* transfers initial ref */ |
3506 } 3507out: 3508 spin_unlock(&sbinfo->stat_lock); 3509 return error; 3510} 3511 3512static int shmem_show_options(struct seq_file *seq, struct dentry *root) 3513{ --- 28 unchanged lines hidden (view full) --- 3542 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 3543 3544 percpu_counter_destroy(&sbinfo->used_blocks); 3545 mpol_put(sbinfo->mpol); 3546 kfree(sbinfo); 3547 sb->s_fs_info = NULL; 3548} 3549 | 3524 } 3525out: 3526 spin_unlock(&sbinfo->stat_lock); 3527 return error; 3528} 3529 3530static int shmem_show_options(struct seq_file *seq, struct dentry *root) 3531{ --- 28 unchanged lines hidden (view full) --- 3560 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 3561 3562 percpu_counter_destroy(&sbinfo->used_blocks); 3563 mpol_put(sbinfo->mpol); 3564 kfree(sbinfo); 3565 sb->s_fs_info = NULL; 3566} 3567 |
3550int shmem_fill_super(struct super_block *sb, void *data, int silent) | 3568static int shmem_fill_super(struct super_block *sb, void *data, int silent) |
3551{ 3552 struct inode *inode; 3553 struct shmem_sb_info *sbinfo; | 3569{ 3570 struct inode *inode; 3571 struct shmem_sb_info *sbinfo; |
| 3572 struct shmem_options ctx = {.mode = 0777 | S_ISVTX, 3573 .uid = current_fsuid(), 3574 .gid = current_fsgid()}; |
3554 int err = -ENOMEM; 3555 3556 /* Round up to L1_CACHE_BYTES to resist false sharing */ 3557 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), 3558 L1_CACHE_BYTES), GFP_KERNEL); 3559 if (!sbinfo) 3560 return -ENOMEM; 3561 | 3575 int err = -ENOMEM; 3576 3577 /* Round up to L1_CACHE_BYTES to resist false sharing */ 3578 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), 3579 L1_CACHE_BYTES), GFP_KERNEL); 3580 if (!sbinfo) 3581 return -ENOMEM; 3582 |
3562 sbinfo->mode = 0777 | S_ISVTX; 3563 sbinfo->uid = current_fsuid(); 3564 sbinfo->gid = current_fsgid(); | |
3565 sb->s_fs_info = sbinfo; 3566 3567#ifdef CONFIG_TMPFS 3568 /* 3569 * Per default we only allow half of the physical ram per 3570 * tmpfs instance, limiting inodes to one per page of lowmem; 3571 * but the internal instance is left unlimited. 3572 */ 3573 if (!(sb->s_flags & SB_KERNMOUNT)) { | 3583 sb->s_fs_info = sbinfo; 3584 3585#ifdef CONFIG_TMPFS 3586 /* 3587 * Per default we only allow half of the physical ram per 3588 * tmpfs instance, limiting inodes to one per page of lowmem; 3589 * but the internal instance is left unlimited. 3590 */ 3591 if (!(sb->s_flags & SB_KERNMOUNT)) { |
3574 sbinfo->max_blocks = shmem_default_max_blocks(); 3575 sbinfo->max_inodes = shmem_default_max_inodes(); 3576 if (shmem_parse_options(data, sbinfo, false)) { | 3592 ctx.blocks = shmem_default_max_blocks(); 3593 ctx.inodes = shmem_default_max_inodes(); 3594 if (shmem_parse_options(data, &ctx)) { |
3577 err = -EINVAL; 3578 goto failed; 3579 } 3580 } else { 3581 sb->s_flags |= SB_NOUSER; 3582 } 3583 sb->s_export_op = &shmem_export_ops; 3584 sb->s_flags |= SB_NOSEC; 3585#else 3586 sb->s_flags |= SB_NOUSER; 3587#endif | 3595 err = -EINVAL; 3596 goto failed; 3597 } 3598 } else { 3599 sb->s_flags |= SB_NOUSER; 3600 } 3601 sb->s_export_op = &shmem_export_ops; 3602 sb->s_flags |= SB_NOSEC; 3603#else 3604 sb->s_flags |= SB_NOUSER; 3605#endif |
| 3606 sbinfo->max_blocks = ctx.blocks; 3607 sbinfo->free_inodes = sbinfo->max_inodes = ctx.inodes; 3608 sbinfo->uid = ctx.uid; 3609 sbinfo->gid = ctx.gid; 3610 sbinfo->mode = ctx.mode; 3611 sbinfo->huge = ctx.huge; 3612 sbinfo->mpol = ctx.mpol; |
3588 3589 spin_lock_init(&sbinfo->stat_lock); 3590 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) 3591 goto failed; | 3613 3614 spin_lock_init(&sbinfo->stat_lock); 3615 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) 3616 goto failed; |
3592 sbinfo->free_inodes = sbinfo->max_inodes; | |
3593 spin_lock_init(&sbinfo->shrinklist_lock); 3594 INIT_LIST_HEAD(&sbinfo->shrinklist); 3595 3596 sb->s_maxbytes = MAX_LFS_FILESIZE; 3597 sb->s_blocksize = PAGE_SIZE; 3598 sb->s_blocksize_bits = PAGE_SHIFT; 3599 sb->s_magic = TMPFS_MAGIC; 3600 sb->s_op = &shmem_ops; --- 153 unchanged lines hidden --- 3754 .fault = shmem_fault, 3755 .map_pages = filemap_map_pages, 3756#ifdef CONFIG_NUMA 3757 .set_policy = shmem_set_policy, 3758 .get_policy = shmem_get_policy, 3759#endif 3760}; 3761 | 3617 spin_lock_init(&sbinfo->shrinklist_lock); 3618 INIT_LIST_HEAD(&sbinfo->shrinklist); 3619 3620 sb->s_maxbytes = MAX_LFS_FILESIZE; 3621 sb->s_blocksize = PAGE_SIZE; 3622 sb->s_blocksize_bits = PAGE_SHIFT; 3623 sb->s_magic = TMPFS_MAGIC; 3624 sb->s_op = &shmem_ops; --- 153 unchanged lines hidden --- 3778 .fault = shmem_fault, 3779 .map_pages = filemap_map_pages, 3780#ifdef CONFIG_NUMA 3781 .set_policy = shmem_set_policy, 3782 .get_policy = shmem_get_policy, 3783#endif 3784}; 3785 |
3762static struct dentry *shmem_mount(struct file_system_type *fs_type, | 3786struct dentry *shmem_mount(struct file_system_type *fs_type, |
3763 int flags, const char *dev_name, void *data) 3764{ 3765 return mount_nodev(fs_type, flags, data, shmem_fill_super); 3766} 3767 3768static struct file_system_type shmem_fs_type = { 3769 .owner = THIS_MODULE, 3770 .name = "tmpfs", --- 345 unchanged lines hidden --- | 3787 int flags, const char *dev_name, void *data) 3788{ 3789 return mount_nodev(fs_type, flags, data, shmem_fill_super); 3790} 3791 3792static struct file_system_type shmem_fs_type = { 3793 .owner = THIS_MODULE, 3794 .name = "tmpfs", --- 345 unchanged lines hidden --- |
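
The heart of the change above is that `shmem_parse_options()` no longer writes into the live `shmem_sb_info`: it fills a separate `struct shmem_options`, and the `SHMEM_SEEN_*` bits record which options were explicitly given. `shmem_fill_super()` then seeds the context with defaults and copies everything across, while `shmem_remount_fs()` applies only the fields that were actually seen. The snippet below is a minimal, self-contained user-space sketch of that parse-then-apply pattern, not kernel code; the names `demo_options`, `demo_sb`, `parse_options()` and `apply_remount()` are invented for illustration, and the real code additionally handles percentage sizes, huge/mpol, uid/gid, locking and the limited/unlimited corner cases.

```c
/*
 * Sketch of the "parse into a context, then apply only what was seen"
 * pattern. All names here are illustrative stand-ins, not the kernel API.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_SEEN_BLOCKS 1
#define DEMO_SEEN_INODES 2

struct demo_options {		/* parse result, analogous to shmem_options */
	unsigned long blocks;
	unsigned long inodes;
	int seen;
};

struct demo_sb {		/* live state, analogous to shmem_sb_info */
	unsigned long max_blocks;
	unsigned long max_inodes;
	unsigned long free_inodes;
};

/* Parse comma-separated "key=value" options into ctx; 0 on success. */
static int parse_options(char *options, struct demo_options *ctx)
{
	char *opt;

	for (opt = strtok(options, ","); opt; opt = strtok(NULL, ",")) {
		char *value = strchr(opt, '=');

		if (!value)
			return 1;
		*value++ = '\0';
		if (!strcmp(opt, "nr_blocks")) {
			ctx->blocks = strtoul(value, NULL, 0);
			ctx->seen |= DEMO_SEEN_BLOCKS;
		} else if (!strcmp(opt, "nr_inodes")) {
			ctx->inodes = strtoul(value, NULL, 0);
			ctx->seen |= DEMO_SEEN_INODES;
		} else {
			return 1;
		}
	}
	return 0;
}

/*
 * Apply only the explicitly given options, in the spirit of the new
 * shmem_remount_fs(); returns -1 if the new inode limit would fall
 * below what is already in use.
 */
static int apply_remount(struct demo_sb *sb, const struct demo_options *ctx)
{
	unsigned long inodes_in_use = sb->max_inodes - sb->free_inodes;

	if ((ctx->seen & DEMO_SEEN_INODES) && ctx->inodes < inodes_in_use)
		return -1;
	if (ctx->seen & DEMO_SEEN_BLOCKS)
		sb->max_blocks = ctx->blocks;
	if (ctx->seen & DEMO_SEEN_INODES) {
		sb->max_inodes = ctx->inodes;
		sb->free_inodes = ctx->inodes - inodes_in_use;
	}
	return 0;
}

int main(void)
{
	struct demo_sb sb = { .max_blocks = 1024, .max_inodes = 256,
			      .free_inodes = 200 };
	struct demo_options ctx = { .seen = 0 };
	char opts[] = "nr_blocks=2048";	/* nr_inodes deliberately not given */

	if (parse_options(opts, &ctx) || apply_remount(&sb, &ctx))
		return EXIT_FAILURE;

	/* max_inodes stays 256 because its "seen" bit was never set. */
	printf("max_blocks=%lu max_inodes=%lu\n", sb.max_blocks, sb.max_inodes);
	return 0;
}
```

This separation is also why the old `if (remount) continue;` special cases for mode, uid and gid disappear in the diff: remount simply never copies those fields out of the context, while `shmem_fill_super()` pre-fills the context with its defaults (0777 | S_ISVTX, current fsuid/fsgid, and the default block/inode limits for non-kernel mounts) before letting the parsed options override them.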