/*
 * the_nilfs.c - the_nilfs shared structure.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/random.h>
#include <linux/crc32.h>
#include "nilfs.h"
#include "segment.h"
#include "alloc.h"
#include "cpfile.h"
#include "sufile.h"
#include "dat.h"
#include "segbuf.h"


static int nilfs_valid_sb(struct nilfs_super_block *sbp);

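/**
 * nilfs_set_last_segment - record the position of the latest log
 * @nilfs: nilfs object
 * @start_blocknr: start block number of the latest log (partial segment)
 * @seq: sequence number of the latest log
 * @cno: checkpoint number of the latest log
 *
 * Records where the most recently written log begins and marks the super
 * block dirty when the sequence number has advanced since the last super
 * block update, so that the new cursor position gets written back.
 */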
void nilfs_set_last_segment(struct the_nilfs *nilfs,
			    sector_t start_blocknr, u64 seq, __u64 cno)
{
	spin_lock(&nilfs->ns_last_segment_lock);
	nilfs->ns_last_pseg = start_blocknr;
	nilfs->ns_last_seq = seq;
	nilfs->ns_last_cno = cno;

	if (!nilfs_sb_dirty(nilfs)) {
		if (nilfs->ns_prev_seq == nilfs->ns_last_seq)
			goto stay_cursor;

		set_nilfs_sb_dirty(nilfs);
	}
	nilfs->ns_prev_seq = nilfs->ns_last_seq;

 stay_cursor:
	spin_unlock(&nilfs->ns_last_segment_lock);
}

/**
 * alloc_nilfs - allocate a nilfs object
 * @bdev: block device to which the_nilfs is related
 *
 * Return Value: On success, pointer to the_nilfs is returned.
 * On error, NULL is returned.
 */
struct the_nilfs *alloc_nilfs(struct block_device *bdev)
{
	struct the_nilfs *nilfs;

	nilfs = kzalloc(sizeof(*nilfs), GFP_KERNEL);
	if (!nilfs)
		return NULL;

	nilfs->ns_bdev = bdev;
	atomic_set(&nilfs->ns_ndirtyblks, 0);
	init_rwsem(&nilfs->ns_sem);
	mutex_init(&nilfs->ns_snapshot_mount_mutex);
	INIT_LIST_HEAD(&nilfs->ns_dirty_files);
	INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
	spin_lock_init(&nilfs->ns_inode_lock);
	spin_lock_init(&nilfs->ns_next_gen_lock);
	spin_lock_init(&nilfs->ns_last_segment_lock);
	nilfs->ns_cptree = RB_ROOT;
	spin_lock_init(&nilfs->ns_cptree_lock);
	init_rwsem(&nilfs->ns_segctor_sem);

	return nilfs;
}

/**
 * destroy_nilfs - destroy nilfs object
 * @nilfs: nilfs object to be released
 */
void destroy_nilfs(struct the_nilfs *nilfs)
{
	might_sleep();
	if (nilfs_init(nilfs)) {
		brelse(nilfs->ns_sbh[0]);
		brelse(nilfs->ns_sbh[1]);
	}
	kfree(nilfs);
}

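/*
 * nilfs_load_super_root - load the super root and the metadata files it holds
 * @nilfs: nilfs object
 * @sb: super block instance
 * @sr_block: disk block number of the super root block
 *
 * Reads the super root block and constructs the three metadata file inodes
 * recorded in it: the DAT, the checkpoint file and the segment usage file.
 * On success the inodes are attached to @nilfs; on failure any inodes
 * created so far are released again.
 */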
static int nilfs_load_super_root(struct the_nilfs *nilfs,
				 struct super_block *sb, sector_t sr_block)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	struct nilfs_super_block **sbp = nilfs->ns_sbp;
	struct nilfs_inode *rawi;
	unsigned dat_entry_size, segment_usage_size, checkpoint_size;
	unsigned inode_size;
	int err;

	err = nilfs_read_super_root_block(nilfs, sr_block, &bh_sr, 1);
	if (unlikely(err))
		return err;

	down_read(&nilfs->ns_sem);
	dat_entry_size = le16_to_cpu(sbp[0]->s_dat_entry_size);
	checkpoint_size = le16_to_cpu(sbp[0]->s_checkpoint_size);
	segment_usage_size = le16_to_cpu(sbp[0]->s_segment_usage_size);
	up_read(&nilfs->ns_sem);

	inode_size = nilfs->ns_inode_size;

	rawi = (void *)bh_sr->b_data + NILFS_SR_DAT_OFFSET(inode_size);
	err = nilfs_dat_read(sb, dat_entry_size, rawi, &nilfs->ns_dat);
	if (err)
		goto failed;

	rawi = (void *)bh_sr->b_data + NILFS_SR_CPFILE_OFFSET(inode_size);
	err = nilfs_cpfile_read(sb, checkpoint_size, rawi, &nilfs->ns_cpfile);
	if (err)
		goto failed_dat;

	rawi = (void *)bh_sr->b_data + NILFS_SR_SUFILE_OFFSET(inode_size);
	err = nilfs_sufile_read(sb, segment_usage_size, rawi,
				&nilfs->ns_sufile);
	if (err)
		goto failed_cpfile;

	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
	nilfs->ns_nongc_ctime = le64_to_cpu(raw_sr->sr_nongc_ctime);

 failed:
	brelse(bh_sr);
	return err;

 failed_cpfile:
	iput(nilfs->ns_cpfile);

 failed_dat:
	iput(nilfs->ns_dat);
	goto failed;
}

static void nilfs_init_recovery_info(struct nilfs_recovery_info *ri)
{
	memset(ri, 0, sizeof(*ri));
	INIT_LIST_HEAD(&ri->ri_used_segments);
}

static void nilfs_clear_recovery_info(struct nilfs_recovery_info *ri)
{
	nilfs_dispose_segment_list(&ri->ri_used_segments);
}

/**
 * nilfs_store_log_cursor - load log cursor from a super block
 * @nilfs: nilfs object
 * @sbp: buffer storing super block to be read
 *
 * nilfs_store_log_cursor() reads the last position of the log
 * containing a super root from a given super block, and initializes
 * relevant information on the nilfs object in preparation for log
 * scanning and recovery.
 */
static int nilfs_store_log_cursor(struct the_nilfs *nilfs,
				  struct nilfs_super_block *sbp)
{
	int ret = 0;

	nilfs->ns_last_pseg = le64_to_cpu(sbp->s_last_pseg);
	nilfs->ns_last_cno = le64_to_cpu(sbp->s_last_cno);
	nilfs->ns_last_seq = le64_to_cpu(sbp->s_last_seq);

	nilfs->ns_prev_seq = nilfs->ns_last_seq;
	nilfs->ns_seg_seq = nilfs->ns_last_seq;
	nilfs->ns_segnum =
		nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg);
	nilfs->ns_cno = nilfs->ns_last_cno + 1;
	if (nilfs->ns_segnum >= nilfs->ns_nsegments) {
		printk(KERN_ERR "NILFS: invalid last segment number.\n");
		ret = -EINVAL;
	}
	return ret;
}

/**
 * load_nilfs - load and recover the nilfs
 * @nilfs: the_nilfs structure to be loaded
 * @sb: super block instance used to recover past segments
 *
 * load_nilfs() searches for and loads the latest super root,
 * attaches the last segment, and does recovery if needed.
 * The caller must call this exclusively for simultaneous mounts.
 */
int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
{
	struct nilfs_recovery_info ri;
	unsigned int s_flags = sb->s_flags;
	int really_read_only = bdev_read_only(nilfs->ns_bdev);
	int valid_fs = nilfs_valid_fs(nilfs);
	int err;

	if (!valid_fs) {
		printk(KERN_WARNING "NILFS warning: mounting unchecked fs\n");
		if (s_flags & MS_RDONLY) {
			printk(KERN_INFO "NILFS: INFO: recovery "
			       "required for readonly filesystem.\n");
			printk(KERN_INFO "NILFS: write access will "
			       "be enabled during recovery.\n");
		}
	}

	nilfs_init_recovery_info(&ri);

	err = nilfs_search_super_root(nilfs, &ri);
	if (unlikely(err)) {
		struct nilfs_super_block **sbp = nilfs->ns_sbp;
		int blocksize;

		if (err != -EINVAL)
			goto scan_error;

		if (!nilfs_valid_sb(sbp[1])) {
			printk(KERN_WARNING
			       "NILFS warning: unable to fall back to spare "
			       "super block\n");
			goto scan_error;
		}
		printk(KERN_INFO
		       "NILFS: try rollback from an earlier position\n");

		/*
		 * restore super block with its spare and reconfigure
		 * relevant states of the nilfs object.
		 */
		memcpy(sbp[0], sbp[1], nilfs->ns_sbsize);
		nilfs->ns_crc_seed = le32_to_cpu(sbp[0]->s_crc_seed);
		nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime);

		/* verify consistency between two super blocks */
		blocksize = BLOCK_SIZE << le32_to_cpu(sbp[0]->s_log_block_size);
		if (blocksize != nilfs->ns_blocksize) {
			printk(KERN_WARNING
			       "NILFS warning: blocksize differs between "
			       "two super blocks (%d != %d)\n",
			       blocksize, nilfs->ns_blocksize);
			goto scan_error;
		}

		err = nilfs_store_log_cursor(nilfs, sbp[0]);
		if (err)
			goto scan_error;

		/* drop clean flag to allow roll-forward and recovery */
		nilfs->ns_mount_state &= ~NILFS_VALID_FS;
		valid_fs = 0;

		err = nilfs_search_super_root(nilfs, &ri);
		if (err)
			goto scan_error;
	}

	err = nilfs_load_super_root(nilfs, sb, ri.ri_super_root);
	if (unlikely(err)) {
		printk(KERN_ERR "NILFS: error loading super root.\n");
		goto failed;
	}

	if (valid_fs)
		goto skip_recovery;

	if (s_flags & MS_RDONLY) {
		__u64 features;

		if (nilfs_test_opt(nilfs, NORECOVERY)) {
			printk(KERN_INFO "NILFS: norecovery option specified. "
			       "skipping roll-forward recovery\n");
			goto skip_recovery;
		}
		features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) &
			~NILFS_FEATURE_COMPAT_RO_SUPP;
		if (features) {
			printk(KERN_ERR "NILFS: couldn't proceed with "
			       "recovery because of unsupported optional "
			       "features (%llx)\n",
			       (unsigned long long)features);
			err = -EROFS;
			goto failed_unload;
		}
		if (really_read_only) {
			printk(KERN_ERR "NILFS: write access "
			       "unavailable, cannot proceed.\n");
			err = -EROFS;
			goto failed_unload;
		}
		sb->s_flags &= ~MS_RDONLY;
	} else if (nilfs_test_opt(nilfs, NORECOVERY)) {
		printk(KERN_ERR "NILFS: recovery cancelled because norecovery "
		       "option was specified for a read/write mount\n");
		err = -EINVAL;
		goto failed_unload;
	}

	err = nilfs_salvage_orphan_logs(nilfs, sb, &ri);
	if (err)
		goto failed_unload;

	down_write(&nilfs->ns_sem);
	nilfs->ns_mount_state |= NILFS_VALID_FS; /* set "clean" flag */
	err = nilfs_cleanup_super(sb);
	up_write(&nilfs->ns_sem);

	if (err) {
		printk(KERN_ERR "NILFS: failed to update super block. "
		       "recovery unfinished.\n");
		goto failed_unload;
	}
	printk(KERN_INFO "NILFS: recovery complete.\n");

 skip_recovery:
	nilfs_clear_recovery_info(&ri);
	sb->s_flags = s_flags;
	return 0;

 scan_error:
	printk(KERN_ERR "NILFS: error searching super root.\n");
	goto failed;

 failed_unload:
	iput(nilfs->ns_cpfile);
	iput(nilfs->ns_sufile);
	iput(nilfs->ns_dat);

 failed:
	nilfs_clear_recovery_info(&ri);
	sb->s_flags = s_flags;
	return err;
}

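/*
 * nilfs_max_size - compute the maximum file size
 * @blkbits: block size shift
 *
 * The limit is the smaller of the page cache limit (MAX_LFS_FILESIZE)
 * and the range addressable by the bmap key space for the given block
 * size.
 */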
static unsigned long long nilfs_max_size(unsigned int blkbits)
{
	unsigned int max_bits;
	unsigned long long res = MAX_LFS_FILESIZE; /* page cache limit */

	max_bits = blkbits + NILFS_BMAP_KEY_BIT; /* bmap size limit */
	if (max_bits < 64)
		res = min_t(unsigned long long, res, (1ULL << max_bits) - 1);
	return res;
}

/**
 * nilfs_nrsvsegs - calculate the number of reserved segments
 * @nilfs: nilfs object
 * @nsegs: total number of segments
 */
unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs)
{
	return max_t(unsigned long, NILFS_MIN_NRSVSEGS,
		     DIV_ROUND_UP(nsegs * nilfs->ns_r_segments_percentage,
				  100));
}

void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs)
{
	nilfs->ns_nsegments = nsegs;
	nilfs->ns_nrsvsegs = nilfs_nrsvsegs(nilfs, nsegs);
}

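/*
 * nilfs_store_disk_layout - validate and cache disk layout parameters
 * @nilfs: nilfs object
 * @sbp: super block to read the layout from
 *
 * Copies the on-disk layout parameters (super block size, inode size,
 * blocks per segment, reserved segment percentage, and so on) into
 * @nilfs, rejecting values that fall outside the supported range.
 */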
static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
				   struct nilfs_super_block *sbp)
{
	if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) {
		printk(KERN_ERR "NILFS: unsupported revision "
		       "(superblock rev.=%d.%d, current rev.=%d.%d). "
		       "Please check the version of mkfs.nilfs.\n",
		       le32_to_cpu(sbp->s_rev_level),
		       le16_to_cpu(sbp->s_minor_rev_level),
		       NILFS_CURRENT_REV, NILFS_MINOR_REV);
		return -EINVAL;
	}
	nilfs->ns_sbsize = le16_to_cpu(sbp->s_bytes);
	if (nilfs->ns_sbsize > BLOCK_SIZE)
		return -EINVAL;

	nilfs->ns_inode_size = le16_to_cpu(sbp->s_inode_size);
	if (nilfs->ns_inode_size > nilfs->ns_blocksize) {
		printk(KERN_ERR "NILFS: too large inode size: %d bytes.\n",
		       nilfs->ns_inode_size);
		return -EINVAL;
	} else if (nilfs->ns_inode_size < NILFS_MIN_INODE_SIZE) {
		printk(KERN_ERR "NILFS: too small inode size: %d bytes.\n",
		       nilfs->ns_inode_size);
		return -EINVAL;
	}

	nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino);

	nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
	if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {
		printk(KERN_ERR "NILFS: too short segment.\n");
		return -EINVAL;
	}

	nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block);
	nilfs->ns_r_segments_percentage =
		le32_to_cpu(sbp->s_r_segments_percentage);
	if (nilfs->ns_r_segments_percentage < 1 ||
	    nilfs->ns_r_segments_percentage > 99) {
		printk(KERN_ERR "NILFS: invalid reserved segments percentage.\n");
		return -EINVAL;
	}

	nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments));
	nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
	return 0;
}

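/*
 * nilfs_valid_sb - check whether a super block buffer looks valid
 * @sbp: super block to check
 *
 * Returns nonzero if @sbp carries the NILFS magic number and its CRC
 * matches.  The checksum is computed with the s_sum field treated as
 * zero, which is what the zero-filled 4-byte buffer below stands in for.
 */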
static int nilfs_valid_sb(struct nilfs_super_block *sbp)
{
	static unsigned char sum[4];
	const int sumoff = offsetof(struct nilfs_super_block, s_sum);
	size_t bytes;
	u32 crc;

	if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
		return 0;
	bytes = le16_to_cpu(sbp->s_bytes);
	if (bytes > BLOCK_SIZE)
		return 0;
	crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
		       sumoff);
	crc = crc32_le(crc, sum, 4);
	crc = crc32_le(crc, (unsigned char *)sbp + sumoff + 4,
		       bytes - sumoff - 4);
	return crc == le32_to_cpu(sbp->s_sum);
}

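/*
 * nilfs_sb2_bad_offset - check the location of the secondary super block
 * @sbp: super block describing the segment layout
 * @offset: byte offset of the secondary super block on the device
 *
 * Returns nonzero if @offset falls inside the area covered by segments,
 * i.e. if the secondary super block could be overwritten by log writes.
 */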
static int nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset)
{
	return offset < ((le64_to_cpu(sbp->s_nsegments) *
			  le32_to_cpu(sbp->s_blocks_per_segment)) <<
			 (le32_to_cpu(sbp->s_log_block_size) + 10));
}

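/*
 * nilfs_release_super_block - release the buffers of both super blocks
 * @nilfs: nilfs object
 */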
static void nilfs_release_super_block(struct the_nilfs *nilfs)
{
	int i;

	for (i = 0; i < 2; i++) {
		if (nilfs->ns_sbp[i]) {
			brelse(nilfs->ns_sbh[i]);
			nilfs->ns_sbh[i] = NULL;
			nilfs->ns_sbp[i] = NULL;
		}
	}
}

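/*
 * nilfs_fall_back_super_block - drop the primary super block and use the spare
 * @nilfs: nilfs object
 *
 * Releases the buffer of the primary super block and moves the spare
 * (secondary) super block into the primary slot.
 */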
void nilfs_fall_back_super_block(struct the_nilfs *nilfs)
{
	brelse(nilfs->ns_sbh[0]);
	nilfs->ns_sbh[0] = nilfs->ns_sbh[1];
	nilfs->ns_sbp[0] = nilfs->ns_sbp[1];
	nilfs->ns_sbh[1] = NULL;
	nilfs->ns_sbp[1] = NULL;
}

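/*
 * nilfs_swap_super_block - swap the primary and secondary super block slots
 * @nilfs: nilfs object
 */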
void nilfs_swap_super_block(struct the_nilfs *nilfs)
{
	struct buffer_head *tsbh = nilfs->ns_sbh[0];
	struct nilfs_super_block *tsbp = nilfs->ns_sbp[0];

	nilfs->ns_sbh[0] = nilfs->ns_sbh[1];
	nilfs->ns_sbp[0] = nilfs->ns_sbp[1];
	nilfs->ns_sbh[1] = tsbh;
	nilfs->ns_sbp[1] = tsbp;
}

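/*
 * nilfs_load_super_block - read and select a valid super block
 * @nilfs: nilfs object
 * @sb: super block instance
 * @blocksize: block size used to read the super blocks
 * @sbpp: place to store a pointer to the selected super block
 *
 * Reads both the primary and the secondary super block from the device,
 * validates them, and keeps the valid one with the larger checkpoint
 * number in slot 0, swapping the two if necessary.
 */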
static int nilfs_load_super_block(struct the_nilfs *nilfs,
				  struct super_block *sb, int blocksize,
				  struct nilfs_super_block **sbpp)
{
	struct nilfs_super_block **sbp = nilfs->ns_sbp;
	struct buffer_head **sbh = nilfs->ns_sbh;
	u64 sb2off = NILFS_SB2_OFFSET_BYTES(nilfs->ns_bdev->bd_inode->i_size);
	int valid[2], swp = 0;

	sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize,
					&sbh[0]);
	sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]);

	if (!sbp[0]) {
		if (!sbp[1]) {
			printk(KERN_ERR "NILFS: unable to read superblock\n");
			return -EIO;
		}
		printk(KERN_WARNING
		       "NILFS warning: unable to read primary superblock "
		       "(blocksize = %d)\n", blocksize);
	} else if (!sbp[1]) {
		printk(KERN_WARNING
		       "NILFS warning: unable to read secondary superblock "
		       "(blocksize = %d)\n", blocksize);
	}

	/*
	 * Compare two super blocks and set 1 in swp if the secondary
	 * super block is valid and newer.  Otherwise, set 0 in swp.
	 */
	valid[0] = nilfs_valid_sb(sbp[0]);
	valid[1] = nilfs_valid_sb(sbp[1]);
	swp = valid[1] && (!valid[0] ||
			   le64_to_cpu(sbp[1]->s_last_cno) >
			   le64_to_cpu(sbp[0]->s_last_cno));

	if (valid[swp] && nilfs_sb2_bad_offset(sbp[swp], sb2off)) {
		brelse(sbh[1]);
		sbh[1] = NULL;
		sbp[1] = NULL;
		valid[1] = 0;
		swp = 0;
	}
	if (!valid[swp]) {
		nilfs_release_super_block(nilfs);
		printk(KERN_ERR "NILFS: Can't find nilfs on dev %s.\n",
		       sb->s_id);
		return -EINVAL;
	}

	if (!valid[!swp])
		printk(KERN_WARNING "NILFS warning: broken superblock. "
		       "using spare superblock (blocksize = %d).\n", blocksize);
	if (swp)
		nilfs_swap_super_block(nilfs);

	nilfs->ns_sbwcount = 0;
	nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime);
	nilfs->ns_prot_seq = le64_to_cpu(sbp[valid[1] & !swp]->s_last_seq);
	*sbpp = sbp[0];
	return 0;
}

/**
 * init_nilfs - initialize a NILFS instance.
 * @nilfs: the_nilfs structure
 * @sb: super block
 * @data: mount options
 *
 * init_nilfs() performs common initialization per block device (e.g.
 * reading the super block, getting disk layout information, initializing
 * shared fields in the_nilfs).
 *
 * Return Value: On success, 0 is returned. On error, a negative error
 * code is returned.
 */
int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
{
	struct nilfs_super_block *sbp;
	int blocksize;
	int err;

	down_write(&nilfs->ns_sem);

	blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE);
	if (!blocksize) {
		printk(KERN_ERR "NILFS: unable to set blocksize\n");
		err = -EINVAL;
		goto out;
	}
	err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
	if (err)
		goto out;

	err = nilfs_store_magic_and_option(sb, sbp, data);
	if (err)
		goto failed_sbh;

	err = nilfs_check_feature_compatibility(sb, sbp);
	if (err)
		goto failed_sbh;

	blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
	if (blocksize < NILFS_MIN_BLOCK_SIZE ||
	    blocksize > NILFS_MAX_BLOCK_SIZE) {
		printk(KERN_ERR "NILFS: couldn't mount because of unsupported "
		       "filesystem blocksize %d\n", blocksize);
		err = -EINVAL;
		goto failed_sbh;
	}
	if (sb->s_blocksize != blocksize) {
		int hw_blocksize = bdev_logical_block_size(sb->s_bdev);

		if (blocksize < hw_blocksize) {
			printk(KERN_ERR
			       "NILFS: blocksize %d too small for device "
			       "(sector-size = %d).\n",
			       blocksize, hw_blocksize);
			err = -EINVAL;
			goto failed_sbh;
		}
		nilfs_release_super_block(nilfs);
		sb_set_blocksize(sb, blocksize);

		err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
		if (err)
			goto out;
			/* not failed_sbh; sbh is released automatically
			   when reloading fails. */
	}
	nilfs->ns_blocksize_bits = sb->s_blocksize_bits;
	nilfs->ns_blocksize = blocksize;

	get_random_bytes(&nilfs->ns_next_generation,
			 sizeof(nilfs->ns_next_generation));

	err = nilfs_store_disk_layout(nilfs, sbp);
	if (err)
		goto failed_sbh;

	sb->s_maxbytes = nilfs_max_size(sb->s_blocksize_bits);

	nilfs->ns_mount_state = le16_to_cpu(sbp->s_state);

	err = nilfs_store_log_cursor(nilfs, sbp);
	if (err)
		goto failed_sbh;

	set_nilfs_init(nilfs);
	err = 0;
 out:
	up_write(&nilfs->ns_sem);
	return err;

 failed_sbh:
	nilfs_release_super_block(nilfs);
	goto out;
}

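/**
 * nilfs_discard_segments - issue discard requests for a set of segments
 * @nilfs: nilfs object
 * @segnump: array of segment numbers to discard
 * @nsegs: number of segments in the array
 *
 * Converts each segment into a device sector range and issues discard
 * (TRIM) requests, merging the ranges of adjacent segments into a single
 * request where possible.
 */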
int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
			    size_t nsegs)
{
	sector_t seg_start, seg_end;
	sector_t start = 0, nblocks = 0;
	unsigned int sects_per_block;
	__u64 *sn;
	int ret = 0;

	sects_per_block = (1 << nilfs->ns_blocksize_bits) /
		bdev_logical_block_size(nilfs->ns_bdev);
	for (sn = segnump; sn < segnump + nsegs; sn++) {
		nilfs_get_segment_range(nilfs, *sn, &seg_start, &seg_end);

		if (!nblocks) {
			start = seg_start;
			nblocks = seg_end - seg_start + 1;
		} else if (start + nblocks == seg_start) {
			nblocks += seg_end - seg_start + 1;
		} else {
			ret = blkdev_issue_discard(nilfs->ns_bdev,
						   start * sects_per_block,
						   nblocks * sects_per_block,
						   GFP_NOFS, 0);
			if (ret < 0)
				return ret;
			/*
			 * Start a new extent from the current segment so
			 * that it is not skipped when the requested
			 * segments are not contiguous.
			 */
			start = seg_start;
			nblocks = seg_end - seg_start + 1;
		}
	}
	if (nblocks)
		ret = blkdev_issue_discard(nilfs->ns_bdev,
					   start * sects_per_block,
					   nblocks * sects_per_block,
					   GFP_NOFS, 0);
	return ret;
}

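/*
 * nilfs_count_free_blocks - count the number of free (unallocated) blocks
 * @nilfs: nilfs object
 * @nblocks: place to store the result
 *
 * The estimate is the number of clean segments reported by the segment
 * usage file multiplied by the number of blocks per segment.
 */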
int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
{
	unsigned long ncleansegs;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	*nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
	return 0;
}

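/*
 * nilfs_near_disk_full - check whether the file system is nearly full
 * @nilfs: nilfs object
 *
 * Returns nonzero when the number of clean segments drops to the reserved
 * segment count plus the segments estimated to be consumed by the blocks
 * that are currently dirty in memory.
 */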
int nilfs_near_disk_full(struct the_nilfs *nilfs)
{
	unsigned long ncleansegs, nincsegs;

	ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
	nincsegs = atomic_read(&nilfs->ns_ndirtyblks) /
		nilfs->ns_blocks_per_segment + 1;

	return ncleansegs <= nilfs->ns_nrsvsegs + nincsegs;
}

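/**
 * nilfs_lookup_root - look up a root (checkpoint) by checkpoint number
 * @nilfs: nilfs object
 * @cno: checkpoint number
 *
 * Searches the checkpoint tree for the root corresponding to @cno and,
 * if found, takes a reference on it.  The caller is responsible for
 * dropping that reference with nilfs_put_root().  A minimal usage sketch:
 *
 *	root = nilfs_lookup_root(nilfs, cno);
 *	if (root) {
 *		...
 *		nilfs_put_root(root);
 *	}
 *
 * Returns the root object, or NULL if no root exists for @cno.
 */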
struct nilfs_root *nilfs_lookup_root(struct the_nilfs *nilfs, __u64 cno)
{
	struct rb_node *n;
	struct nilfs_root *root;

	spin_lock(&nilfs->ns_cptree_lock);
	n = nilfs->ns_cptree.rb_node;
	while (n) {
		root = rb_entry(n, struct nilfs_root, rb_node);

		if (cno < root->cno) {
			n = n->rb_left;
		} else if (cno > root->cno) {
			n = n->rb_right;
		} else {
			atomic_inc(&root->count);
			spin_unlock(&nilfs->ns_cptree_lock);
			return root;
		}
	}
	spin_unlock(&nilfs->ns_cptree_lock);

	return NULL;
}

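/**
 * nilfs_find_or_create_root - look up or create the root for a checkpoint
 * @nilfs: nilfs object
 * @cno: checkpoint number
 *
 * Returns the existing root for @cno with an extra reference, or inserts a
 * newly allocated one into the checkpoint tree.  The allocation is done
 * outside the tree lock, and the tree is re-checked under the lock so that
 * a root inserted concurrently is reused and the new allocation is freed.
 *
 * Returns the root object, or NULL if memory allocation fails.
 */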
struct nilfs_root *
nilfs_find_or_create_root(struct the_nilfs *nilfs, __u64 cno)
{
	struct rb_node **p, *parent;
	struct nilfs_root *root, *new;

	root = nilfs_lookup_root(nilfs, cno);
	if (root)
		return root;

	new = kmalloc(sizeof(*root), GFP_KERNEL);
	if (!new)
		return NULL;

	spin_lock(&nilfs->ns_cptree_lock);

	p = &nilfs->ns_cptree.rb_node;
	parent = NULL;

	while (*p) {
		parent = *p;
		root = rb_entry(parent, struct nilfs_root, rb_node);

		if (cno < root->cno) {
			p = &(*p)->rb_left;
		} else if (cno > root->cno) {
			p = &(*p)->rb_right;
		} else {
			atomic_inc(&root->count);
			spin_unlock(&nilfs->ns_cptree_lock);
			kfree(new);
			return root;
		}
	}

	new->cno = cno;
	new->ifile = NULL;
	new->nilfs = nilfs;
	atomic_set(&new->count, 1);
	atomic64_set(&new->inodes_count, 0);
	atomic64_set(&new->blocks_count, 0);

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, &nilfs->ns_cptree);

	spin_unlock(&nilfs->ns_cptree_lock);

	return new;
}

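/**
 * nilfs_put_root - release a reference on a root object
 * @root: root object obtained from nilfs_lookup_root() or
 *	nilfs_find_or_create_root()
 *
 * Drops one reference.  When the last reference is gone, the root is
 * removed from the checkpoint tree, its ifile inode (if any) is released,
 * and the object is freed.
 */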
void nilfs_put_root(struct nilfs_root *root)
{
	if (atomic_dec_and_test(&root->count)) {
		struct the_nilfs *nilfs = root->nilfs;

		spin_lock(&nilfs->ns_cptree_lock);
		rb_erase(&root->rb_node, &nilfs->ns_cptree);
		spin_unlock(&nilfs->ns_cptree_lock);
		if (root->ifile)
			iput(root->ifile);

		kfree(root);
	}
}