xref: /openbmc/linux/fs/jfs/super.c (revision 171fa692)
/*
 *   Copyright (C) International Business Machines Corp., 2000-2004
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 *   This program is free software;  you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program;  if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/completion.h>
#include <linux/vfs.h>
#include <linux/quotaops.h>
#include <linux/mount.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/posix_acl.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>

#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_inode.h"
#include "jfs_metapage.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_acl.h"
#include "jfs_debug.h"
#include "jfs_xattr.h"
#include "jfs_dinode.h"

MODULE_DESCRIPTION("The Journaled Filesystem (JFS)");
MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
MODULE_LICENSE("GPL");

static struct kmem_cache *jfs_inode_cachep;

static const struct super_operations jfs_super_operations;
static const struct export_operations jfs_export_operations;
static struct file_system_type jfs_fs_type;

#define MAX_COMMIT_THREADS 64
static int commit_threads;
module_param(commit_threads, int, 0);
MODULE_PARM_DESC(commit_threads, "Number of commit threads");

static struct task_struct *jfsCommitThread[MAX_COMMIT_THREADS];
struct task_struct *jfsIOthread;
struct task_struct *jfsSyncThread;

#ifdef CONFIG_JFS_DEBUG
int jfsloglevel = JFS_LOGLEVEL_WARN;
module_param(jfsloglevel, int, 0644);
MODULE_PARM_DESC(jfsloglevel, "Specify JFS loglevel (0, 1 or 2)");
#endif

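/*
 * jfs_handle_error - act on a filesystem error according to the mount-time
 * error policy: panic, remount read-only, or continue.  In every case the
 * superblock is first marked dirty (FM_DIRTY) so fsck is forced on the next
 * mount.  Nothing is done if the filesystem is already read-only.
 */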
static void jfs_handle_error(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	if (sb->s_flags & MS_RDONLY)
		return;

	updateSuper(sb, FM_DIRTY);

	if (sbi->flag & JFS_ERR_PANIC)
		panic("JFS (device %s): panic forced after error\n",
			sb->s_id);
	else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
		jfs_err("ERROR: (device %s): remounting filesystem as read-only",
			sb->s_id);
		sb->s_flags |= MS_RDONLY;
	}

	/* nothing is done for continue beyond marking the superblock dirty */
}

void jfs_error(struct super_block *sb, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_err("ERROR: (device %s): %ps: %pV\n",
	       sb->s_id, __builtin_return_address(0), &vaf);

	va_end(args);

	jfs_handle_error(sb);
}

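/*
 * Inode allocation and teardown.  jfs_alloc_inode() hands out a
 * jfs_inode_info from the jfs_ip slab; jfs_destroy_inode() drops any
 * active allocation group reference and frees the inode via RCU.
 */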
static struct inode *jfs_alloc_inode(struct super_block *sb)
{
	struct jfs_inode_info *jfs_inode;

	jfs_inode = kmem_cache_alloc(jfs_inode_cachep, GFP_NOFS);
	if (!jfs_inode)
		return NULL;
#ifdef CONFIG_QUOTA
	memset(&jfs_inode->i_dquot, 0, sizeof(jfs_inode->i_dquot));
#endif
	return &jfs_inode->vfs_inode;
}

static void jfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct jfs_inode_info *ji = JFS_IP(inode);
	kmem_cache_free(jfs_inode_cachep, ji);
}

static void jfs_destroy_inode(struct inode *inode)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	BUG_ON(!list_empty(&ji->anon_inode_list));

	spin_lock_irq(&ji->ag_lock);
	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
		atomic_dec(&bmap->db_active[ji->active_ag]);
		ji->active_ag = -1;
	}
	spin_unlock_irq(&ji->ag_lock);
	call_rcu(&inode->i_rcu, jfs_i_callback);
}

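/*
 * jfs_statfs - fill in a kstatfs from the in-core block map and inode map.
 * f_files/f_ffree are estimates: JFS allocates inode extents on demand, so
 * the theoretical maximum is derived from the free block count (capped at
 * 32 bits) rather than from the number of inodes currently allocated.
 */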
static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb);
	s64 maxinodes;
	struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap;

	jfs_info("In jfs_statfs");
	buf->f_type = JFS_SUPER_MAGIC;
	buf->f_bsize = sbi->bsize;
	buf->f_blocks = sbi->bmap->db_mapsize;
	buf->f_bfree = sbi->bmap->db_nfree;
	buf->f_bavail = sbi->bmap->db_nfree;
	/*
	 * If we really return the number of allocated & free inodes, some
	 * applications will fail because they won't see enough free inodes.
	 * We'll try to calculate some guess as to how many inodes we can
	 * really allocate
	 *
	 * buf->f_files = atomic_read(&imap->im_numinos);
	 * buf->f_ffree = atomic_read(&imap->im_numfree);
	 */
	maxinodes = min((s64) atomic_read(&imap->im_numinos) +
			((sbi->bmap->db_nfree >> imap->im_l2nbperiext)
			 << L2INOSPEREXT), (s64) 0xffffffffLL);
	buf->f_files = maxinodes;
	buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) -
				    atomic_read(&imap->im_numfree));
	buf->f_fsid.val[0] = (u32)crc32_le(0, sbi->uuid, sizeof(sbi->uuid)/2);
	buf->f_fsid.val[1] = (u32)crc32_le(0, sbi->uuid + sizeof(sbi->uuid)/2,
					sizeof(sbi->uuid)/2);

	buf->f_namelen = JFS_NAME_MAX;
	return 0;
}

#ifdef CONFIG_QUOTA
static int jfs_quota_off(struct super_block *sb, int type);
static int jfs_quota_on(struct super_block *sb, int type, int format_id,
			const struct path *path);

static void jfs_quota_off_umount(struct super_block *sb)
{
	int type;

	for (type = 0; type < MAXQUOTAS; type++)
		jfs_quota_off(sb, type);
}

static const struct quotactl_ops jfs_quotactl_ops = {
	.quota_on	= jfs_quota_on,
	.quota_off	= jfs_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#else
static inline void jfs_quota_off_umount(struct super_block *sb)
{
}
#endif

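/*
 * jfs_put_super - final unmount: turn quotas off, release the log and
 * metadata inodes via jfs_umount(), unload the NLS table and drop the
 * direct-mapping inode before freeing the superblock info.
 */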
static void jfs_put_super(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int rc;

	jfs_info("In jfs_put_super");

	jfs_quota_off_umount(sb);

	rc = jfs_umount(sb);
	if (rc)
		jfs_err("jfs_umount failed with return code %d", rc);

	unload_nls(sbi->nls_tab);

	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	iput(sbi->direct_inode);

	kfree(sbi);
}

enum {
	Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
	Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
	Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask,
	Opt_discard, Opt_nodiscard, Opt_discard_minblk
};

static const match_table_t tokens = {
	{Opt_integrity, "integrity"},
	{Opt_nointegrity, "nointegrity"},
	{Opt_iocharset, "iocharset=%s"},
	{Opt_resize, "resize=%u"},
	{Opt_resize_nosize, "resize"},
	{Opt_errors, "errors=%s"},
	{Opt_ignore, "noquota"},
	{Opt_ignore, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_uid, "uid=%u"},
	{Opt_gid, "gid=%u"},
	{Opt_umask, "umask=%u"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_discard_minblk, "discard=%u"},
	{Opt_err, NULL}
};

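/*
 * parse_options - parse the mount option string into *flag and the
 * superblock info.  Returns 1 on success and 0 on error; on the error
 * path any NLS table loaded for iocharset= is unloaded again.
 */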
static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
			 int *flag)
{
	void *nls_map = (void *)-1;	/* -1: no change;  NULL: none */
	char *p;
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	*newLVSize = 0;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		substring_t args[MAX_OPT_ARGS];
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_integrity:
			*flag &= ~JFS_NOINTEGRITY;
			break;
		case Opt_nointegrity:
			*flag |= JFS_NOINTEGRITY;
			break;
		case Opt_ignore:
			/* Silently ignore the quota options */
			/* Don't do anything ;-) */
			break;
		case Opt_iocharset:
			if (nls_map && nls_map != (void *) -1)
				unload_nls(nls_map);
			if (!strcmp(args[0].from, "none"))
				nls_map = NULL;
			else {
				nls_map = load_nls(args[0].from);
				if (!nls_map) {
					pr_err("JFS: charset not found\n");
					goto cleanup;
				}
			}
			break;
		case Opt_resize:
		{
			char *resize = args[0].from;
			int rc = kstrtoll(resize, 0, newLVSize);

			if (rc)
				goto cleanup;
			break;
		}
		case Opt_resize_nosize:
		{
			*newLVSize = sb->s_bdev->bd_inode->i_size >>
				sb->s_blocksize_bits;
			if (*newLVSize == 0)
				pr_err("JFS: Cannot determine volume size\n");
			break;
		}
		case Opt_errors:
		{
			char *errors = args[0].from;
			if (!errors || !*errors)
				goto cleanup;
			if (!strcmp(errors, "continue")) {
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_CONTINUE;
			} else if (!strcmp(errors, "remount-ro")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_REMOUNT_RO;
			} else if (!strcmp(errors, "panic")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag |= JFS_ERR_PANIC;
			} else {
				pr_err("JFS: %s is an invalid error handler\n",
				       errors);
				goto cleanup;
			}
			break;
		}

#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			*flag |= JFS_USRQUOTA;
			break;
		case Opt_grpquota:
			*flag |= JFS_GRPQUOTA;
			break;
#else
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_quota:
			pr_err("JFS: quota operations not supported\n");
			break;
#endif
		case Opt_uid:
		{
			char *uid = args[0].from;
			uid_t val;
			int rc = kstrtouint(uid, 0, &val);

			if (rc)
				goto cleanup;
			sbi->uid = make_kuid(current_user_ns(), val);
			if (!uid_valid(sbi->uid))
				goto cleanup;
			break;
		}

		case Opt_gid:
		{
			char *gid = args[0].from;
			gid_t val;
			int rc = kstrtouint(gid, 0, &val);

			if (rc)
				goto cleanup;
			sbi->gid = make_kgid(current_user_ns(), val);
			if (!gid_valid(sbi->gid))
				goto cleanup;
			break;
		}

		case Opt_umask:
		{
			char *umask = args[0].from;
			int rc = kstrtouint(umask, 8, &sbi->umask);

			if (rc)
				goto cleanup;
			if (sbi->umask & ~0777) {
				pr_err("JFS: Invalid value of umask\n");
				goto cleanup;
			}
			break;
		}

		case Opt_discard:
		{
			struct request_queue *q = bdev_get_queue(sb->s_bdev);
			/* if set to 1, even copying files will cause
			 * trimming :O
			 * -> user has more control over the online trimming
			 */
			sbi->minblks_trim = 64;
			if (blk_queue_discard(q))
				*flag |= JFS_DISCARD;
			else
				pr_err("JFS: discard option not supported on device\n");
			break;
		}

		case Opt_nodiscard:
			*flag &= ~JFS_DISCARD;
			break;

		case Opt_discard_minblk:
		{
			struct request_queue *q = bdev_get_queue(sb->s_bdev);
			char *minblks_trim = args[0].from;
			int rc;
			if (blk_queue_discard(q)) {
				*flag |= JFS_DISCARD;
				rc = kstrtouint(minblks_trim, 0,
						&sbi->minblks_trim);
				if (rc)
					goto cleanup;
			} else
				pr_err("JFS: discard option not supported on device\n");
			break;
		}

		default:
			printk("jfs: Unrecognized mount option \"%s\" or missing value\n",
			       p);
			goto cleanup;
		}
	}

	if (nls_map != (void *) -1) {
		/* Discard old (if remount) */
		unload_nls(sbi->nls_tab);
		sbi->nls_tab = nls_map;
	}
	return 1;

cleanup:
	if (nls_map && nls_map != (void *) -1)
		unload_nls(nls_map);
	return 0;
}

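/*
 * jfs_remount - handle mount -o remount.  A resize= request is applied
 * first, then transitions between read-only and read-write (re)start or
 * shut down the journal and resume or suspend quotas as needed.  Toggling
 * nointegrity on a read-write mount forces a log restart as well.
 */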
static int jfs_remount(struct super_block *sb, int *flags, char *data)
{
	s64 newLVSize = 0;
	int rc = 0;
	int flag = JFS_SBI(sb)->flag;
	int ret;

	sync_filesystem(sb);
	if (!parse_options(data, sb, &newLVSize, &flag))
		return -EINVAL;

	if (newLVSize) {
		if (sb->s_flags & MS_RDONLY) {
			pr_err("JFS: resize requires volume to be mounted read-write\n");
			return -EROFS;
		}
		rc = jfs_extendfs(sb, newLVSize, 0);
		if (rc)
			return rc;
	}

	if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
		/*
		 * Invalidate any previously read metadata.  fsck may have
		 * changed the on-disk data since we mounted r/o
		 */
		truncate_inode_pages(JFS_SBI(sb)->direct_inode->i_mapping, 0);

		JFS_SBI(sb)->flag = flag;
		ret = jfs_mount_rw(sb, 1);

		/* mark the fs r/w for quota activity */
		sb->s_flags &= ~MS_RDONLY;

		dquot_resume(sb, -1);
		return ret;
	}
	if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) {
		rc = dquot_suspend(sb, -1);
		if (rc < 0)
			return rc;
		rc = jfs_umount_rw(sb);
		JFS_SBI(sb)->flag = flag;
		return rc;
	}
	if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY))
		if (!(sb->s_flags & MS_RDONLY)) {
			rc = jfs_umount_rw(sb);
			if (rc)
				return rc;

			JFS_SBI(sb)->flag = flag;
			ret = jfs_mount_rw(sb, 1);
			return ret;
		}
	JFS_SBI(sb)->flag = flag;

	return 0;
}

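/*
 * jfs_fill_super - read an on-disk JFS volume and set up the VFS
 * superblock: allocate jfs_sb_info, parse options, create the
 * direct-mapping inode used for metadata I/O, mount the aggregate (and
 * the log on read-write mounts) and finally look up the root inode.
 */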
static int jfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jfs_sb_info *sbi;
	struct inode *inode;
	int rc;
	s64 newLVSize = 0;
	int flag, ret = -EINVAL;

	jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags);

	sbi = kzalloc(sizeof(struct jfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;
	sb->s_max_links = JFS_LINK_MAX;
	sbi->sb = sb;
	sbi->uid = INVALID_UID;
	sbi->gid = INVALID_GID;
	sbi->umask = -1;

	/* initialize the mount flag and determine the default error handler */
	flag = JFS_ERR_REMOUNT_RO;

	if (!parse_options((char *) data, sb, &newLVSize, &flag))
		goto out_kfree;
	sbi->flag = flag;

#ifdef CONFIG_JFS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif

	if (newLVSize) {
		pr_err("resize option for remount only\n");
		goto out_kfree;
	}

	/*
	 * Initialize blocksize to 4K.
	 */
	sb_set_blocksize(sb, PSIZE);

	/*
	 * Set method vectors.
	 */
	sb->s_op = &jfs_super_operations;
	sb->s_export_op = &jfs_export_operations;
	sb->s_xattr = jfs_xattr_handlers;
#ifdef CONFIG_QUOTA
	sb->dq_op = &dquot_operations;
	sb->s_qcop = &jfs_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
#endif

	/*
	 * Initialize direct-mapping inode/address-space
	 */
	inode = new_inode(sb);
	if (inode == NULL) {
		ret = -ENOMEM;
		goto out_unload;
	}
	inode->i_ino = 0;
	inode->i_size = sb->s_bdev->bd_inode->i_size;
	inode->i_mapping->a_ops = &jfs_metapage_aops;
	hlist_add_fake(&inode->i_hash);
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

	sbi->direct_inode = inode;

	rc = jfs_mount(sb);
	if (rc) {
		if (!silent)
			jfs_err("jfs_mount failed w/return code = %d", rc);
		goto out_mount_failed;
	}
	if (sb->s_flags & MS_RDONLY)
		sbi->log = NULL;
	else {
		rc = jfs_mount_rw(sb, 0);
		if (rc) {
			if (!silent) {
				jfs_err("jfs_mount_rw failed, return code = %d",
					rc);
			}
			goto out_no_rw;
		}
	}

	sb->s_magic = JFS_SUPER_MAGIC;

	if (sbi->mntflag & JFS_OS2)
		sb->s_d_op = &jfs_ci_dentry_operations;

	inode = jfs_iget(sb, ROOT_I);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto out_no_rw;
	}
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto out_no_root;

	/* logical blocks are represented by 40 bits in pxd_t, etc. */
	sb->s_maxbytes = ((u64) sb->s_blocksize) << 40;
#if BITS_PER_LONG == 32
	/*
	 * Page cache is indexed by long.
	 * I would use MAX_LFS_FILESIZE, but it's only half as big
	 */
	sb->s_maxbytes = min(((u64) PAGE_SIZE << 32) - 1,
			     (u64)sb->s_maxbytes);
#endif
	sb->s_time_gran = 1;
	return 0;

out_no_root:
	jfs_err("jfs_read_super: get root dentry failed");

out_no_rw:
	rc = jfs_umount(sb);
	if (rc)
		jfs_err("jfs_umount failed with return code %d", rc);
out_mount_failed:
	filemap_write_and_wait(sbi->direct_inode->i_mapping);
	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	make_bad_inode(sbi->direct_inode);
	iput(sbi->direct_inode);
	sbi->direct_inode = NULL;
out_unload:
	unload_nls(sbi->nls_tab);
out_kfree:
	kfree(sbi);
	return ret;
}

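/*
 * Freeze/thaw support.  jfs_freeze() quiesces the transaction manager,
 * shuts the log down and marks the superblock clean; jfs_unfreeze()
 * marks it mounted again and restarts the log and transactions.
 */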
static int jfs_freeze(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	int rc = 0;

	if (!(sb->s_flags & MS_RDONLY)) {
		txQuiesce(sb);
		rc = lmLogShutdown(log);
		if (rc) {
			jfs_error(sb, "lmLogShutdown failed\n");

			/* let operations fail rather than hang */
			txResume(sb);

			return rc;
		}
		rc = updateSuper(sb, FM_CLEAN);
		if (rc) {
			jfs_err("jfs_freeze: updateSuper failed");
			/*
			 * Don't fail here. Everything succeeded except
			 * marking the superblock clean, so there's really
			 * no harm in leaving it frozen for now.
			 */
		}
	}
	return 0;
}

static int jfs_unfreeze(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	int rc = 0;

	if (!(sb->s_flags & MS_RDONLY)) {
		rc = updateSuper(sb, FM_MOUNT);
		if (rc) {
			jfs_error(sb, "updateSuper failed\n");
			goto out;
		}
		rc = lmLogInit(log);
		if (rc)
			jfs_error(sb, "lmLogInit failed\n");
out:
		txResume(sb);
	}
	return rc;
}

static struct dentry *jfs_do_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, jfs_fill_super);
}

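/*
 * jfs_sync_fs - flush dirty dquots and the journal.  A NULL log means the
 * filesystem is mounted read-only and there is nothing to write.
 */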
static int jfs_sync_fs(struct super_block *sb, int wait)
{
	struct jfs_log *log = JFS_SBI(sb)->log;

	/* log == NULL indicates read-only mount */
	if (log) {
		/*
		 * Write quota structures to quota file, sync_blockdev() will
		 * write them to disk later
		 */
		dquot_writeback_dquots(sb, -1);
		jfs_flush_journal(log, wait);
		jfs_syncpt(log, 0);
	}

	return 0;
}

static int jfs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct jfs_sb_info *sbi = JFS_SBI(root->d_sb);

	if (uid_valid(sbi->uid))
		seq_printf(seq, ",uid=%d", from_kuid(&init_user_ns, sbi->uid));
	if (gid_valid(sbi->gid))
		seq_printf(seq, ",gid=%d", from_kgid(&init_user_ns, sbi->gid));
	if (sbi->umask != -1)
		seq_printf(seq, ",umask=%03o", sbi->umask);
	if (sbi->flag & JFS_NOINTEGRITY)
		seq_puts(seq, ",nointegrity");
	if (sbi->flag & JFS_DISCARD)
		seq_printf(seq, ",discard=%u", sbi->minblks_trim);
	if (sbi->nls_tab)
		seq_printf(seq, ",iocharset=%s", sbi->nls_tab->charset);
	if (sbi->flag & JFS_ERR_CONTINUE)
		seq_printf(seq, ",errors=continue");
	if (sbi->flag & JFS_ERR_PANIC)
		seq_printf(seq, ",errors=panic");

#ifdef CONFIG_QUOTA
	if (sbi->flag & JFS_USRQUOTA)
		seq_puts(seq, ",usrquota");

	if (sbi->flag & JFS_GRPQUOTA)
		seq_puts(seq, ",grpquota");
#endif

	return 0;
}

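/*
 * Quota file I/O helpers.  Both the read and write paths below map quota
 * file offsets to disk blocks with jfs_get_block() and go through buffer
 * heads directly rather than through the page cache.
 */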
#ifdef CONFIG_QUOTA

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
			      size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = i_blocksize(inode);
		err = jfs_get_block(inode, blk, &tmp_bh, 0);
		if (err)
			return err;
		if (!buffer_mapped(&tmp_bh))	/* A hole? */
			memset(data, 0, tocopy);
		else {
			bh = sb_bread(sb, tmp_bh.b_blocknr);
			if (!bh)
				return -EIO;
			memcpy(data, bh->b_data+offset, tocopy);
			brelse(bh);
		}
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile */
static ssize_t jfs_quota_write(struct super_block *sb, int type,
			       const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t towrite = len;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;

	inode_lock(inode);
	while (towrite > 0) {
		tocopy = sb->s_blocksize - offset < towrite ?
				sb->s_blocksize - offset : towrite;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = i_blocksize(inode);
		err = jfs_get_block(inode, blk, &tmp_bh, 1);
		if (err)
			goto out;
		if (offset || tocopy != sb->s_blocksize)
			bh = sb_bread(sb, tmp_bh.b_blocknr);
		else
			bh = sb_getblk(sb, tmp_bh.b_blocknr);
		if (!bh) {
			err = -EIO;
			goto out;
		}
		lock_buffer(bh);
		memcpy(bh->b_data+offset, data, tocopy);
		flush_dcache_page(bh->b_page);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		brelse(bh);
		offset = 0;
		towrite -= tocopy;
		data += tocopy;
		blk++;
	}
out:
	if (len == towrite) {
		inode_unlock(inode);
		return err;
	}
	if (inode->i_size < off+len-towrite)
		i_size_write(inode, off+len-towrite);
	inode->i_version++;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
	inode_unlock(inode);
	return len - towrite;
}

static struct dquot **jfs_get_dquots(struct inode *inode)
{
	return JFS_IP(inode)->i_dquot;
}

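/*
 * jfs_quota_on/jfs_quota_off - enable or disable quotas on top of the
 * generic dquot code, marking the quota file immutable and noatime (in
 * both the VFS inode and the on-disk mode2 flags) while it is in use.
 */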
static int jfs_quota_on(struct super_block *sb, int type, int format_id,
			const struct path *path)
{
	int err;
	struct inode *inode;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	inode = d_inode(path->dentry);
	inode_lock(inode);
	JFS_IP(inode)->mode2 |= JFS_NOATIME_FL | JFS_IMMUTABLE_FL;
	inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
			S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	mark_inode_dirty(inode);

	return 0;
}

static int jfs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	if (!inode || !igrab(inode))
		goto out;

	err = dquot_quota_off(sb, type);
	if (err)
		goto out_put;

	inode_lock(inode);
	JFS_IP(inode)->mode2 &= ~(JFS_NOATIME_FL | JFS_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	mark_inode_dirty(inode);
out_put:
	iput(inode);
	return err;
out:
	return dquot_quota_off(sb, type);
}
#endif

static const struct super_operations jfs_super_operations = {
	.alloc_inode	= jfs_alloc_inode,
	.destroy_inode	= jfs_destroy_inode,
	.dirty_inode	= jfs_dirty_inode,
	.write_inode	= jfs_write_inode,
	.evict_inode	= jfs_evict_inode,
	.put_super	= jfs_put_super,
	.sync_fs	= jfs_sync_fs,
	.freeze_fs	= jfs_freeze,
	.unfreeze_fs	= jfs_unfreeze,
	.statfs		= jfs_statfs,
	.remount_fs	= jfs_remount,
	.show_options	= jfs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= jfs_quota_read,
	.quota_write	= jfs_quota_write,
	.get_dquots	= jfs_get_dquots,
#endif
};

static const struct export_operations jfs_export_operations = {
	.fh_to_dentry	= jfs_fh_to_dentry,
	.fh_to_parent	= jfs_fh_to_parent,
	.get_parent	= jfs_get_parent,
};

static struct file_system_type jfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "jfs",
	.mount		= jfs_do_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("jfs");

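/*
 * init_once - slab constructor for jfs_inode_info: zero the structure and
 * initialize its locks, lists and the embedded VFS inode.
 */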
static void init_once(void *foo)
{
	struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;

	memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
	INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
	init_rwsem(&jfs_ip->rdwrlock);
	mutex_init(&jfs_ip->commit_mutex);
	init_rwsem(&jfs_ip->xattr_sem);
	spin_lock_init(&jfs_ip->ag_lock);
	jfs_ip->active_ag = -1;
	inode_init_once(&jfs_ip->vfs_inode);
}

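/*
 * init_jfs_fs - module init: create the inode slab, initialize the
 * metapage and transaction managers, start the I/O, commit and sync
 * kernel threads, then register the filesystem.  Each step is unwound
 * in reverse order if a later one fails.
 */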
static int __init init_jfs_fs(void)
{
	int i;
	int rc;

	jfs_inode_cachep =
	    kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
			    SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
			    init_once);
	if (jfs_inode_cachep == NULL)
		return -ENOMEM;

	/*
	 * Metapage initialization
	 */
	rc = metapage_init();
	if (rc) {
		jfs_err("metapage_init failed w/rc = %d", rc);
		goto free_slab;
	}

	/*
	 * Transaction Manager initialization
	 */
	rc = txInit();
	if (rc) {
		jfs_err("txInit failed w/rc = %d", rc);
		goto free_metapage;
	}

	/*
	 * I/O completion thread (endio)
	 */
	jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO");
	if (IS_ERR(jfsIOthread)) {
		rc = PTR_ERR(jfsIOthread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto end_txmngr;
	}

	if (commit_threads < 1)
		commit_threads = num_online_cpus();
	if (commit_threads > MAX_COMMIT_THREADS)
		commit_threads = MAX_COMMIT_THREADS;

	for (i = 0; i < commit_threads; i++) {
		jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL,
						 "jfsCommit");
		if (IS_ERR(jfsCommitThread[i])) {
			rc = PTR_ERR(jfsCommitThread[i]);
			jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
			commit_threads = i;
			goto kill_committask;
		}
	}

	jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync");
	if (IS_ERR(jfsSyncThread)) {
		rc = PTR_ERR(jfsSyncThread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto kill_committask;
	}

#ifdef PROC_FS_JFS
	jfs_proc_init();
#endif

	rc = register_filesystem(&jfs_fs_type);
	if (!rc)
		return 0;

#ifdef PROC_FS_JFS
	jfs_proc_clean();
#endif
	kthread_stop(jfsSyncThread);
kill_committask:
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsIOthread);
end_txmngr:
	txExit();
free_metapage:
	metapage_exit();
free_slab:
	kmem_cache_destroy(jfs_inode_cachep);
	return rc;
}

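/*
 * exit_jfs_fs - module exit: shut down the transaction and metapage
 * managers, stop the worker threads, unregister the filesystem and wait
 * for outstanding RCU inode frees before destroying the inode slab.
 */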
static void __exit exit_jfs_fs(void)
{
	int i;

	jfs_info("exit_jfs_fs called");

	txExit();
	metapage_exit();

	kthread_stop(jfsIOthread);
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsSyncThread);
#ifdef PROC_FS_JFS
	jfs_proc_clean();
#endif
	unregister_filesystem(&jfs_fs_type);

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(jfs_inode_cachep);
}

module_init(init_jfs_fs)
module_exit(exit_jfs_fs)