1 /*
2  *  linux/fs/ufs/inode.c
3  *
4  * Copyright (C) 1998
5  * Daniel Pirkl <daniel.pirkl@email.cz>
6  * Charles University, Faculty of Mathematics and Physics
7  *
8  *  from
9  *
10  *  linux/fs/ext2/inode.c
11  *
12  * Copyright (C) 1992, 1993, 1994, 1995
13  * Remy Card (card@masi.ibp.fr)
14  * Laboratoire MASI - Institut Blaise Pascal
15  * Universite Pierre et Marie Curie (Paris VI)
16  *
17  *  from
18  *
19  *  linux/fs/minix/inode.c
20  *
21  *  Copyright (C) 1991, 1992  Linus Torvalds
22  *
23  *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
24  *  Big-endian to little-endian byte-swapping/bitmaps by
25  *        David S. Miller (davem@caip.rutgers.edu), 1995
26  */
27 
28 #include <asm/uaccess.h>
29 
30 #include <linux/errno.h>
31 #include <linux/fs.h>
32 #include <linux/time.h>
33 #include <linux/stat.h>
34 #include <linux/string.h>
35 #include <linux/mm.h>
36 #include <linux/buffer_head.h>
37 #include <linux/writeback.h>
38 
39 #include "ufs_fs.h"
40 #include "ufs.h"
41 #include "swab.h"
42 #include "util.h"
43 
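/*
 * Translate a logical block number into the chain of array indices needed to
 * reach it: offsets[0] indexes the inode's direct block array (or selects the
 * single/double/triple indirect block), and each further entry indexes one
 * level of indirect block.  Returns the number of offsets filled in, or 0 if
 * the block is out of range.
 *
 * Rough worked example (assuming UFS_NDADDR == 12 direct blocks and
 * s_apb == 2048 pointers per indirect block): i_block 5 maps to {5},
 * i_block 100 maps to {UFS_IND_BLOCK, 88}, and i_block 3000 maps to
 * {UFS_DIND_BLOCK, 0, 940}.
 */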
44 static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
45 {
46 	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
47 	int ptrs = uspi->s_apb;
48 	int ptrs_bits = uspi->s_apbshift;
49 	const long direct_blocks = UFS_NDADDR,
50 		indirect_blocks = ptrs,
51 		double_blocks = (1 << (ptrs_bits * 2));
52 	int n = 0;
53 
54 
55 	UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
56 	if (i_block < direct_blocks) {
57 		offsets[n++] = i_block;
58 	} else if ((i_block -= direct_blocks) < indirect_blocks) {
59 		offsets[n++] = UFS_IND_BLOCK;
60 		offsets[n++] = i_block;
61 	} else if ((i_block -= indirect_blocks) < double_blocks) {
62 		offsets[n++] = UFS_DIND_BLOCK;
63 		offsets[n++] = i_block >> ptrs_bits;
64 		offsets[n++] = i_block & (ptrs - 1);
65 	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
66 		offsets[n++] = UFS_TIND_BLOCK;
67 		offsets[n++] = i_block >> (ptrs_bits * 2);
68 		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
69 		offsets[n++] = i_block & (ptrs - 1);
70 	} else {
71 		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
72 	}
73 	return n;
74 }
75 
76 typedef struct {
77 	void	*p;
78 	union {
79 		__fs32	key32;
80 		__fs64	key64;
81 	};
82 	struct buffer_head *bh;
83 } Indirect;
84 
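/*
 * grow_chain32()/grow_chain64() add one entry to the Indirect chain while
 * guarding against concurrent truncation: the new key is sampled under
 * ufsi->meta_lock (a seqlock) and the whole chain built so far is re-checked
 * against the current pointer values.  A return value of 0 means a pointer
 * changed under us and the caller must restart the lookup.
 */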
85 static inline int grow_chain32(struct ufs_inode_info *ufsi,
86 			       struct buffer_head *bh, __fs32 *v,
87 			       Indirect *from, Indirect *to)
88 {
89 	Indirect *p;
90 	unsigned seq;
91 	to->bh = bh;
92 	do {
93 		seq = read_seqbegin(&ufsi->meta_lock);
94 		to->key32 = *(__fs32 *)(to->p = v);
95 		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
96 			;
97 	} while (read_seqretry(&ufsi->meta_lock, seq));
98 	return (p > to);
99 }
100 
101 static inline int grow_chain64(struct ufs_inode_info *ufsi,
102 			       struct buffer_head *bh, __fs64 *v,
103 			       Indirect *from, Indirect *to)
104 {
105 	Indirect *p;
106 	unsigned seq;
107 	to->bh = bh;
108 	do {
109 		seq = read_seqbegin(&ufsi->meta_lock);
110 		to->key64 = *(__fs64 *)(to->p = v);
111 		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
112 			;
113 	} while (read_seqretry(&ufsi->meta_lock, seq));
114 	return (p > to);
115 }
116 
117 /*
118  * Returns the location of the fragment from
119  * the beginning of the filesystem.
120  */
121 
122 static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
123 {
124 	struct ufs_inode_info *ufsi = UFS_I(inode);
125 	struct super_block *sb = inode->i_sb;
126 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
127 	u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
128 	int shift = uspi->s_apbshift-uspi->s_fpbshift;
129 	Indirect chain[4], *q = chain;
130 	unsigned *p;
131 	unsigned flags = UFS_SB(sb)->s_flags;
132 	u64 res = 0;
133 
134 	UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
135 		uspi->s_fpbshift, uspi->s_apbmask,
136 		(unsigned long long)mask);
137 
138 	if (depth == 0)
139 		goto no_block;
140 
141 again:
142 	p = offsets;
143 
144 	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
145 		goto ufs2;
146 
147 	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
148 		goto changed;
149 	if (!q->key32)
150 		goto no_block;
151 	while (--depth) {
152 		__fs32 *ptr;
153 		struct buffer_head *bh;
154 		unsigned n = *p++;
155 
156 		bh = sb_bread(sb, uspi->s_sbbase +
157 				  fs32_to_cpu(sb, q->key32) + (n>>shift));
158 		if (!bh)
159 			goto no_block;
160 		ptr = (__fs32 *)bh->b_data + (n & mask);
161 		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
162 			goto changed;
163 		if (!q->key32)
164 			goto no_block;
165 	}
166 	res = fs32_to_cpu(sb, q->key32);
167 	goto found;
168 
169 ufs2:
170 	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
171 		goto changed;
172 	if (!q->key64)
173 		goto no_block;
174 
175 	while (--depth) {
176 		__fs64 *ptr;
177 		struct buffer_head *bh;
178 		unsigned n = *p++;
179 
180 		bh = sb_bread(sb, uspi->s_sbbase +
181 				  fs64_to_cpu(sb, q->key64) + (n>>shift));
182 		if (!bh)
183 			goto no_block;
184 		ptr = (__fs64 *)bh->b_data + (n & mask);
185 		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
186 			goto changed;
187 		if (!q->key64)
188 			goto no_block;
189 	}
190 	res = fs64_to_cpu(sb, q->key64);
191 found:
192 	res += uspi->s_sbbase;
193 no_block:
194 	while (q > chain) {
195 		brelse(q->bh);
196 		q--;
197 	}
198 	return res;
199 
200 changed:
201 	while (q > chain) {
202 		brelse(q->bh);
203 		q--;
204 	}
205 	goto again;
206 }
207 
/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @fragment: number of the `fragment' that holds the pointer
 *   to the newly allocated fragment(s)
 * @new_fragment: number of the newly allocated fragment(s)
 * @required: how many fragment(s) we require
 * @err: set to an error code if something goes wrong
 * @phys: pointer to where we store the physical number of the newly allocated
 *   fragments; NULL if we are not allocating data (e.g. indirect blocks)
 * @new: set if we allocate a new block
 * @locked_page: for ufs_new_fragments()
 */
221 static u64
222 ufs_inode_getfrag(struct inode *inode, u64 fragment,
223 		  sector_t new_fragment, unsigned int required, int *err,
224 		  long *phys, int *new, struct page *locked_page)
225 {
226 	struct ufs_inode_info *ufsi = UFS_I(inode);
227 	struct super_block *sb = inode->i_sb;
228 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
229 	unsigned blockoff, lastblockoff;
230 	u64 tmp, goal, lastfrag, block, lastblock;
231 	void *p, *p2;
232 
233 	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, required %u, "
234 	     "metadata %d\n", inode->i_ino, (unsigned long long)fragment,
235 	     (unsigned long long)new_fragment, required, !phys);
236 
237         /* TODO : to be done for write support
238         if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
239              goto ufs2;
240          */
241 
242 	block = ufs_fragstoblks (fragment);
243 	blockoff = ufs_fragnum (fragment);
244 	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
245 
246 	goal = 0;
247 
248 	tmp = ufs_data_ptr_to_cpu(sb, p);
249 
250 	lastfrag = ufsi->i_lastfrag;
251 	if (tmp && fragment < lastfrag)
252 		goto out;
253 
254 	lastblock = ufs_fragstoblks (lastfrag);
255 	lastblockoff = ufs_fragnum (lastfrag);
	/*
	 * We will extend the file into a new block beyond the last allocated block
	 */
	if (lastblock < block) {
		/*
		 * We must reallocate the last allocated block
		 */
263 		if (lastblockoff) {
264 			p2 = ufs_get_direct_data_ptr(uspi, ufsi, lastblock);
265 			tmp = ufs_new_fragments(inode, p2, lastfrag,
266 						ufs_data_ptr_to_cpu(sb, p2),
267 						uspi->s_fpb - lastblockoff,
268 						err, locked_page);
269 			if (!tmp)
270 				return 0;
271 			lastfrag = ufsi->i_lastfrag;
272 		}
273 		tmp = ufs_data_ptr_to_cpu(sb,
274 					 ufs_get_direct_data_ptr(uspi, ufsi,
275 								 lastblock));
276 		if (tmp)
277 			goal = tmp + uspi->s_fpb;
278 		tmp = ufs_new_fragments (inode, p, fragment - blockoff,
279 					 goal, required + blockoff,
280 					 err,
281 					 phys != NULL ? locked_page : NULL);
282 	} else if (lastblock == block) {
	/*
	 * We will extend the last allocated block
	 */
286 		tmp = ufs_new_fragments(inode, p, fragment -
287 					(blockoff - lastblockoff),
288 					ufs_data_ptr_to_cpu(sb, p),
289 					required +  (blockoff - lastblockoff),
290 					err, phys != NULL ? locked_page : NULL);
291 	} else /* (lastblock > block) */ {
	/*
	 * We will allocate a new block before the last allocated block
	 */
295 		if (block) {
296 			tmp = ufs_data_ptr_to_cpu(sb,
297 						 ufs_get_direct_data_ptr(uspi, ufsi, block - 1));
298 			if (tmp)
299 				goal = tmp + uspi->s_fpb;
300 		}
301 		tmp = ufs_new_fragments(inode, p, fragment - blockoff,
302 					goal, uspi->s_fpb, err,
303 					phys != NULL ? locked_page : NULL);
304 	}
305 	if (!tmp) {
306 		*err = -ENOSPC;
307 		return 0;
308 	}
309 
310 	if (phys) {
311 		*err = 0;
312 		*new = 1;
313 	}
314 	inode->i_ctime = CURRENT_TIME_SEC;
315 	if (IS_SYNC(inode))
316 		ufs_sync_inode (inode);
317 	mark_inode_dirty(inode);
318 out:
319 	return tmp + uspi->s_sbbase;
320 
321      /* This part : To be implemented ....
322         Required only for writing, not required for READ-ONLY.
323 ufs2:
324 
325 	u2_block = ufs_fragstoblks(fragment);
326 	u2_blockoff = ufs_fragnum(fragment);
327 	p = ufsi->i_u1.u2_i_data + block;
328 	goal = 0;
329 
330 repeat2:
331 	tmp = fs32_to_cpu(sb, *p);
332 	lastfrag = ufsi->i_lastfrag;
333 
334      */
335 }
336 
/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @ind_block: block number of the indirect block
 * @index: number of pointer within the indirect block
 * @new_fragment: number of the newly allocated fragment
 *  (the block will hold this fragment and also the following uspi->s_fpb - 1)
 * @err: see ufs_inode_getfrag()
 * @phys: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_page: see ufs_inode_getfrag()
 */
349 static u64
350 ufs_inode_getblock(struct inode *inode, u64 ind_block,
351 		  unsigned index, sector_t new_fragment, int *err,
352 		  long *phys, int *new, struct page *locked_page)
353 {
354 	struct super_block *sb = inode->i_sb;
355 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
356 	int shift = uspi->s_apbshift - uspi->s_fpbshift;
357 	u64 tmp = 0, goal;
358 	struct buffer_head *bh;
359 	void *p;
360 
361 	if (!ind_block)
362 		return 0;
363 
364 	bh = sb_bread(sb, ind_block + (index >> shift));
365 	if (unlikely(!bh))
366 		return 0;
367 
368 	index &= uspi->s_apbmask >> uspi->s_fpbshift;
369 	if (uspi->fs_magic == UFS2_MAGIC)
370 		p = (__fs64 *)bh->b_data + index;
371 	else
372 		p = (__fs32 *)bh->b_data + index;
373 
374 	tmp = ufs_data_ptr_to_cpu(sb, p);
375 	if (tmp)
376 		goto out;
377 
378 	if (index && (uspi->fs_magic == UFS2_MAGIC ?
379 		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
380 		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
381 		goal = tmp + uspi->s_fpb;
382 	else
383 		goal = bh->b_blocknr + uspi->s_fpb;
384 	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
385 				uspi->s_fpb, err, locked_page);
386 	if (!tmp)
387 		goto out;
388 
389 	if (new)
390 		*new = 1;
391 
392 	mark_buffer_dirty(bh);
393 	if (IS_SYNC(inode))
394 		sync_dirty_buffer(bh);
395 	inode->i_ctime = CURRENT_TIME_SEC;
396 	mark_inode_dirty(inode);
397 out:
398 	brelse (bh);
399 	UFSD("EXIT\n");
400 	if (tmp)
401 		tmp += uspi->s_sbbase;
402 	return tmp;
403 }
404 
/**
 * ufs_getfrag_block() - `get_block_t' function: the interface between UFS and
 * readpage, writepage, etc.
 */
409 
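/*
 * For reads (create == 0) this simply maps the fragment via ufs_frag_map().
 * For writes it walks the branch described by ufs_block_to_path(): depth 1
 * allocates a direct fragment; deeper paths first allocate or locate the
 * indirect block(s) with ufs_inode_getfrag()/ufs_inode_getblock(), and only
 * the final ufs_inode_getblock() step allocates the data fragment itself.
 */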
410 static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
411 {
412 	struct super_block * sb = inode->i_sb;
413 	struct ufs_sb_info * sbi = UFS_SB(sb);
414 	struct ufs_sb_private_info * uspi = sbi->s_uspi;
415 	struct buffer_head * bh;
416 	int ret, err, new;
417 	unsigned offsets[4];
418 	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
419 	unsigned long ptr,phys;
420 	u64 phys64 = 0;
421 	unsigned frag = fragment & uspi->s_fpbmask;
422 	unsigned mask = uspi->s_apbmask >> uspi->s_fpbshift;
423 
424 	if (!create) {
425 		phys64 = ufs_frag_map(inode, offsets, depth);
426 		if (phys64) {
427 			phys64 += frag;
428 			map_bh(bh_result, sb, phys64);
429 		}
430 		return 0;
431 	}
432 
	/* From here on we are allocating, i.e. create != 0 */
434 
435 	err = -EIO;
436 	new = 0;
437 	ret = 0;
438 	bh = NULL;
439 
440 	mutex_lock(&UFS_I(inode)->truncate_mutex);
441 
442 	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
443 	if (!depth)
444 		goto abort_too_big;
445 
446 	err = 0;
447 	ptr = fragment;
448 
449 	if (depth == 1) {
450 		phys64 = ufs_inode_getfrag(inode, ptr, fragment, 1, &err, &phys,
451 					&new, bh_result->b_page);
452 		if (phys64) {
453 			phys64 += frag;
454 			phys = phys64;
455 		}
456 		goto out;
457 	}
458 	ptr -= UFS_NDIR_FRAGMENT;
459 	if (depth == 2) {
460 		phys64 = ufs_inode_getfrag(inode,
461 					UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift),
462 					fragment, uspi->s_fpb, &err, NULL, NULL,
463 					bh_result->b_page);
464 		goto get_indirect;
465 	}
466 	ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
467 	if (depth == 3) {
468 		phys64 = ufs_inode_getfrag(inode,
469 					UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift),
470 					fragment, uspi->s_fpb, &err, NULL, NULL,
471 					bh_result->b_page);
472 		goto get_double;
473 	}
474 	ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
475 	phys64 = ufs_inode_getfrag(inode,
476 				UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift),
477 				fragment, uspi->s_fpb, &err, NULL, NULL,
478 				bh_result->b_page);
479 	phys64 = ufs_inode_getblock(inode, phys64, offsets[1],
480 				fragment, &err, NULL, NULL, NULL);
481 get_double:
482 	phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 2],
483 				fragment, &err, NULL, NULL, NULL);
484 get_indirect:
485 	phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
486 				fragment, &err, &phys, &new, bh_result->b_page);
487 	if (phys64) {
488 		phys64 += frag;
489 		phys = phys64;
490 	}
491 out:
492 	if (err)
493 		goto abort;
494 	if (new)
495 		set_buffer_new(bh_result);
496 	map_bh(bh_result, sb, phys);
497 abort:
498 	mutex_unlock(&UFS_I(inode)->truncate_mutex);
499 
500 	return err;
501 
502 abort_too_big:
503 	ufs_warning(sb, "ufs_get_block", "block > big");
504 	goto abort;
505 }
506 
507 static int ufs_writepage(struct page *page, struct writeback_control *wbc)
508 {
509 	return block_write_full_page(page,ufs_getfrag_block,wbc);
510 }
511 
512 static int ufs_readpage(struct file *file, struct page *page)
513 {
514 	return block_read_full_page(page,ufs_getfrag_block);
515 }
516 
517 int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
518 {
519 	return __block_write_begin(page, pos, len, ufs_getfrag_block);
520 }
521 
522 static void ufs_truncate_blocks(struct inode *);
523 
524 static void ufs_write_failed(struct address_space *mapping, loff_t to)
525 {
526 	struct inode *inode = mapping->host;
527 
528 	if (to > inode->i_size) {
529 		truncate_pagecache(inode, inode->i_size);
530 		ufs_truncate_blocks(inode);
531 	}
532 }
533 
534 static int ufs_write_begin(struct file *file, struct address_space *mapping,
535 			loff_t pos, unsigned len, unsigned flags,
536 			struct page **pagep, void **fsdata)
537 {
538 	int ret;
539 
540 	ret = block_write_begin(mapping, pos, len, flags, pagep,
541 				ufs_getfrag_block);
542 	if (unlikely(ret))
543 		ufs_write_failed(mapping, pos + len);
544 
545 	return ret;
546 }
547 
548 static int ufs_write_end(struct file *file, struct address_space *mapping,
549 			loff_t pos, unsigned len, unsigned copied,
550 			struct page *page, void *fsdata)
551 {
552 	int ret;
553 
554 	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
555 	if (ret < len)
556 		ufs_write_failed(mapping, pos + len);
557 	return ret;
558 }
559 
560 static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
561 {
562 	return generic_block_bmap(mapping,block,ufs_getfrag_block);
563 }
564 
565 const struct address_space_operations ufs_aops = {
566 	.readpage = ufs_readpage,
567 	.writepage = ufs_writepage,
568 	.write_begin = ufs_write_begin,
569 	.write_end = ufs_write_end,
570 	.bmap = ufs_bmap
571 };
572 
573 static void ufs_set_inode_ops(struct inode *inode)
574 {
575 	if (S_ISREG(inode->i_mode)) {
576 		inode->i_op = &ufs_file_inode_operations;
577 		inode->i_fop = &ufs_file_operations;
578 		inode->i_mapping->a_ops = &ufs_aops;
579 	} else if (S_ISDIR(inode->i_mode)) {
580 		inode->i_op = &ufs_dir_inode_operations;
581 		inode->i_fop = &ufs_dir_operations;
582 		inode->i_mapping->a_ops = &ufs_aops;
583 	} else if (S_ISLNK(inode->i_mode)) {
584 		if (!inode->i_blocks) {
585 			inode->i_op = &ufs_fast_symlink_inode_operations;
586 			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
587 		} else {
588 			inode->i_op = &ufs_symlink_inode_operations;
589 			inode->i_mapping->a_ops = &ufs_aops;
590 		}
591 	} else
592 		init_special_inode(inode, inode->i_mode,
593 				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
594 }
595 
596 static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
597 {
598 	struct ufs_inode_info *ufsi = UFS_I(inode);
599 	struct super_block *sb = inode->i_sb;
600 	umode_t mode;
601 
602 	/*
603 	 * Copy data to the in-core inode.
604 	 */
605 	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
606 	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
607 	if (inode->i_nlink == 0) {
608 		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
609 		return -1;
610 	}
611 
612 	/*
613 	 * Linux now has 32-bit uid and gid, so we can support EFT.
614 	 */
615 	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
616 	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));
617 
618 	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
619 	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
620 	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
621 	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
622 	inode->i_mtime.tv_nsec = 0;
623 	inode->i_atime.tv_nsec = 0;
624 	inode->i_ctime.tv_nsec = 0;
625 	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
626 	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
627 	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
628 	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
629 	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
630 
631 
632 	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
633 		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
634 		       sizeof(ufs_inode->ui_u2.ui_addr));
635 	} else {
636 		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
637 		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
638 		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
639 	}
640 	return 0;
641 }
642 
643 static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
644 {
645 	struct ufs_inode_info *ufsi = UFS_I(inode);
646 	struct super_block *sb = inode->i_sb;
647 	umode_t mode;
648 
649 	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
650 	/*
651 	 * Copy data to the in-core inode.
652 	 */
653 	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
654 	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
655 	if (inode->i_nlink == 0) {
656 		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
657 		return -1;
658 	}
659 
	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
663 	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
664 	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));
665 
666 	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
667 	inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
668 	inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
669 	inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
670 	inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
671 	inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
672 	inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
673 	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
674 	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
675 	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
676 	/*
677 	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
678 	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
679 	*/
680 
681 	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
682 		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
683 		       sizeof(ufs2_inode->ui_u2.ui_addr));
684 	} else {
685 		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
686 		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
687 		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
688 	}
689 	return 0;
690 }
691 
692 struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
693 {
694 	struct ufs_inode_info *ufsi;
695 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
696 	struct buffer_head * bh;
697 	struct inode *inode;
698 	int err;
699 
700 	UFSD("ENTER, ino %lu\n", ino);
701 
702 	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
703 		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
704 			    ino);
705 		return ERR_PTR(-EIO);
706 	}
707 
708 	inode = iget_locked(sb, ino);
709 	if (!inode)
710 		return ERR_PTR(-ENOMEM);
711 	if (!(inode->i_state & I_NEW))
712 		return inode;
713 
714 	ufsi = UFS_I(inode);
715 
716 	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
717 	if (!bh) {
718 		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
719 			    inode->i_ino);
720 		goto bad_inode;
721 	}
722 	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
723 		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
724 
725 		err = ufs2_read_inode(inode,
726 				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
727 	} else {
728 		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;
729 
730 		err = ufs1_read_inode(inode,
731 				      ufs_inode + ufs_inotofsbo(inode->i_ino));
732 	}
733 
734 	if (err)
735 		goto bad_inode;
736 	inode->i_version++;
737 	ufsi->i_lastfrag =
738 		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
739 	ufsi->i_dir_start_lookup = 0;
740 	ufsi->i_osync = 0;
741 
742 	ufs_set_inode_ops(inode);
743 
744 	brelse(bh);
745 
746 	UFSD("EXIT\n");
747 	unlock_new_inode(inode);
748 	return inode;
749 
750 bad_inode:
751 	iget_failed(inode);
752 	return ERR_PTR(-EIO);
753 }
754 
755 static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
756 {
757 	struct super_block *sb = inode->i_sb;
758  	struct ufs_inode_info *ufsi = UFS_I(inode);
759 
760 	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
761 	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
762 
763 	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
764 	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));
765 
766 	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
767 	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
768 	ufs_inode->ui_atime.tv_usec = 0;
769 	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
770 	ufs_inode->ui_ctime.tv_usec = 0;
771 	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
772 	ufs_inode->ui_mtime.tv_usec = 0;
773 	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
774 	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
775 	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);
776 
777 	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
778 		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
779 		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
780 	}
781 
782 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
783 		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
784 		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
785 	} else if (inode->i_blocks) {
786 		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
787 		       sizeof(ufs_inode->ui_u2.ui_addr));
788 	}
789 	else {
790 		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
791 		       sizeof(ufs_inode->ui_u2.ui_symlink));
792 	}
793 
794 	if (!inode->i_nlink)
795 		memset (ufs_inode, 0, sizeof(struct ufs_inode));
796 }
797 
798 static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
799 {
800 	struct super_block *sb = inode->i_sb;
801  	struct ufs_inode_info *ufsi = UFS_I(inode);
802 
803 	UFSD("ENTER\n");
804 	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
805 	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
806 
807 	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
808 	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));
809 
810 	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
811 	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
812 	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
813 	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
814 	ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
815 	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
816 	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);
817 
818 	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
819 	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
820 	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);
821 
822 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
823 		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
824 		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
825 	} else if (inode->i_blocks) {
826 		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
827 		       sizeof(ufs_inode->ui_u2.ui_addr));
828 	} else {
829 		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
830 		       sizeof(ufs_inode->ui_u2.ui_symlink));
831  	}
832 
833 	if (!inode->i_nlink)
834 		memset (ufs_inode, 0, sizeof(struct ufs2_inode));
835 	UFSD("EXIT\n");
836 }
837 
838 static int ufs_update_inode(struct inode * inode, int do_sync)
839 {
840 	struct super_block *sb = inode->i_sb;
841 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
842 	struct buffer_head * bh;
843 
844 	UFSD("ENTER, ino %lu\n", inode->i_ino);
845 
846 	if (inode->i_ino < UFS_ROOTINO ||
847 	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
848 		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
849 		return -1;
850 	}
851 
852 	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
853 	if (!bh) {
854 		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
855 		return -1;
856 	}
857 	if (uspi->fs_magic == UFS2_MAGIC) {
858 		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
859 
860 		ufs2_update_inode(inode,
861 				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
862 	} else {
863 		struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;
864 
865 		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
866 	}
867 
868 	mark_buffer_dirty(bh);
869 	if (do_sync)
870 		sync_dirty_buffer(bh);
871 	brelse (bh);
872 
873 	UFSD("EXIT\n");
874 	return 0;
875 }
876 
877 int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
878 {
879 	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
880 }
881 
882 int ufs_sync_inode (struct inode *inode)
883 {
884 	return ufs_update_inode (inode, 1);
885 }
886 
887 void ufs_evict_inode(struct inode * inode)
888 {
889 	int want_delete = 0;
890 
891 	if (!inode->i_nlink && !is_bad_inode(inode))
892 		want_delete = 1;
893 
894 	truncate_inode_pages_final(&inode->i_data);
895 	if (want_delete) {
896 		inode->i_size = 0;
897 		if (inode->i_blocks)
898 			ufs_truncate_blocks(inode);
899 	}
900 
901 	invalidate_inode_buffers(inode);
902 	clear_inode(inode);
903 
904 	if (want_delete)
905 		ufs_free_inode(inode);
906 }
907 
908 struct to_free {
909 	struct inode *inode;
910 	u64 to;
911 	unsigned count;
912 };
913 
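/*
 * free_data() batches block frees: consecutive calls whose ranges are
 * physically contiguous are merged and released with a single
 * ufs_free_blocks() call; a call with count == 0 flushes whatever run has
 * been accumulated so far.
 */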
914 static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
915 {
916 	if (ctx->count && ctx->to != from) {
917 		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
918 		ctx->count = 0;
919 	}
920 	ctx->count += count;
921 	ctx->to = from + count;
922 }
923 
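/*
 * First block/fragment index past the current i_size (i.e. i_size rounded up
 * to a block/fragment boundary); everything from this index onwards may be
 * freed by truncation.
 */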
924 #define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
925 #define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
926 
927 static void ufs_trunc_direct(struct inode *inode)
928 {
929 	struct ufs_inode_info *ufsi = UFS_I(inode);
930 	struct super_block * sb;
931 	struct ufs_sb_private_info * uspi;
932 	void *p;
933 	u64 frag1, frag2, frag3, frag4, block1, block2;
934 	struct to_free ctx = {.inode = inode};
935 	unsigned i, tmp;
936 
937 	UFSD("ENTER: ino %lu\n", inode->i_ino);
938 
939 	sb = inode->i_sb;
940 	uspi = UFS_SB(sb)->s_uspi;
941 
942 	frag1 = DIRECT_FRAGMENT;
943 	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
944 	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
945 	frag3 = frag4 & ~uspi->s_fpbmask;
946 	block1 = block2 = 0;
947 	if (frag2 > frag3) {
948 		frag2 = frag4;
949 		frag3 = frag4 = 0;
950 	} else if (frag2 < frag3) {
951 		block1 = ufs_fragstoblks (frag2);
952 		block2 = ufs_fragstoblks (frag3);
953 	}
954 
955 	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
956 	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
957 	     (unsigned long long)frag1, (unsigned long long)frag2,
958 	     (unsigned long long)block1, (unsigned long long)block2,
959 	     (unsigned long long)frag3, (unsigned long long)frag4);
960 
961 	if (frag1 >= frag2)
962 		goto next1;
963 
	/*
	 * Free the first fragments, up to a block boundary
	 */
967 	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
968 	tmp = ufs_data_ptr_to_cpu(sb, p);
969 	if (!tmp )
970 		ufs_panic (sb, "ufs_trunc_direct", "internal error");
971 	frag2 -= frag1;
972 	frag1 = ufs_fragnum (frag1);
973 
974 	ufs_free_fragments(inode, tmp + frag1, frag2);
975 
976 next1:
977 	/*
978 	 * Free whole blocks
979 	 */
980 	for (i = block1 ; i < block2; i++) {
981 		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
982 		tmp = ufs_data_ptr_to_cpu(sb, p);
983 		if (!tmp)
984 			continue;
985 		write_seqlock(&ufsi->meta_lock);
986 		ufs_data_ptr_clear(uspi, p);
987 		write_sequnlock(&ufsi->meta_lock);
988 
989 		free_data(&ctx, tmp, uspi->s_fpb);
990 	}
991 
992 	free_data(&ctx, 0, 0);
993 
994 	if (frag3 >= frag4)
995 		goto next3;
996 
	/*
	 * Free the last fragments
	 */
1000 	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
1001 	tmp = ufs_data_ptr_to_cpu(sb, p);
1002 	if (!tmp )
1003 		ufs_panic(sb, "ufs_truncate_direct", "internal error");
1004 	frag4 = ufs_fragnum (frag4);
1005 	write_seqlock(&ufsi->meta_lock);
1006 	ufs_data_ptr_clear(uspi, p);
1007 	write_sequnlock(&ufsi->meta_lock);
1008 
1009 	ufs_free_fragments (inode, tmp, frag4);
1010  next3:
1011 
1012 	UFSD("EXIT: ino %lu\n", inode->i_ino);
1013 }
1014 
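/*
 * Release an entire indirect subtree rooted at ind_block: for depth > 1 the
 * function recurses into each child pointer, at depth 1 it frees the data
 * blocks (batched through free_data()), and finally the indirect block itself
 * is forgotten and freed.
 */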
1015 static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
1016 {
1017 	struct super_block *sb = inode->i_sb;
1018 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1019 	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
1020 	unsigned i;
1021 
1022 	if (!ubh)
1023 		return;
1024 
1025 	if (--depth) {
1026 		for (i = 0; i < uspi->s_apb; i++) {
1027 			void *p = ubh_get_data_ptr(uspi, ubh, i);
1028 			u64 block = ufs_data_ptr_to_cpu(sb, p);
1029 			if (block)
1030 				free_full_branch(inode, block, depth);
1031 		}
1032 	} else {
1033 		struct to_free ctx = {.inode = inode};
1034 
1035 		for (i = 0; i < uspi->s_apb; i++) {
1036 			void *p = ubh_get_data_ptr(uspi, ubh, i);
1037 			u64 block = ufs_data_ptr_to_cpu(sb, p);
1038 			if (block)
1039 				free_data(&ctx, block, uspi->s_fpb);
1040 		}
1041 		free_data(&ctx, 0, 0);
1042 	}
1043 
1044 	ubh_bforget(ubh);
1045 	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
1046 }
1047 
1048 static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
1049 {
1050 	struct super_block *sb = inode->i_sb;
1051 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1052 	unsigned i;
1053 
1054 	if (--depth) {
1055 		for (i = from; i < uspi->s_apb ; i++) {
1056 			void *p = ubh_get_data_ptr(uspi, ubh, i);
1057 			u64 block = ufs_data_ptr_to_cpu(sb, p);
1058 			if (block) {
1059 				write_seqlock(&UFS_I(inode)->meta_lock);
1060 				ufs_data_ptr_clear(uspi, p);
1061 				write_sequnlock(&UFS_I(inode)->meta_lock);
1062 				ubh_mark_buffer_dirty(ubh);
1063 				free_full_branch(inode, block, depth);
1064 			}
1065 		}
1066 	} else {
1067 		struct to_free ctx = {.inode = inode};
1068 
1069 		for (i = from; i < uspi->s_apb; i++) {
1070 			void *p = ubh_get_data_ptr(uspi, ubh, i);
1071 			u64 block = ufs_data_ptr_to_cpu(sb, p);
1072 			if (block) {
1073 				write_seqlock(&UFS_I(inode)->meta_lock);
1074 				ufs_data_ptr_clear(uspi, p);
1075 				write_sequnlock(&UFS_I(inode)->meta_lock);
1076 				ubh_mark_buffer_dirty(ubh);
1077 				free_data(&ctx, block, uspi->s_fpb);
1078 			}
1079 		}
1080 		free_data(&ctx, 0, 0);
1081 	}
1082 	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
1083 		ubh_sync_block(ubh);
1084 	ubh_brelse(ubh);
1085 }
1086 
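/*
 * Called before truncation: make sure the fragment that will contain the new
 * end-of-file is actually allocated, and, when that fragment lies in the
 * indirect range (where whole blocks are allocated at a time), zero the
 * trailing fragments of that block.
 */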
1087 static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
1088 {
1089 	int err = 0;
1090 	struct super_block *sb = inode->i_sb;
1091 	struct address_space *mapping = inode->i_mapping;
1092 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1093 	unsigned i, end;
1094 	sector_t lastfrag;
1095 	struct page *lastpage;
1096 	struct buffer_head *bh;
1097 	u64 phys64;
1098 
1099 	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
1100 
1101 	if (!lastfrag)
1102 		goto out;
1103 
1104 	lastfrag--;
1105 
1106 	lastpage = ufs_get_locked_page(mapping, lastfrag >>
1107 				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
1108        if (IS_ERR(lastpage)) {
1109                err = -EIO;
1110                goto out;
1111        }
1112 
1113        end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
1114        bh = page_buffers(lastpage);
1115        for (i = 0; i < end; ++i)
1116                bh = bh->b_this_page;
1117 
1118 
1119        err = ufs_getfrag_block(inode, lastfrag, bh, 1);
1120 
1121        if (unlikely(err))
1122 	       goto out_unlock;
1123 
1124        if (buffer_new(bh)) {
1125 	       clear_buffer_new(bh);
1126 	       unmap_underlying_metadata(bh->b_bdev,
1127 					 bh->b_blocknr);
	       /*
		* We do not zero the fragment: if it was mapped to a hole,
		* it already contains zeroes.
		*/
1132 	       set_buffer_uptodate(bh);
1133 	       mark_buffer_dirty(bh);
1134 	       set_page_dirty(lastpage);
1135        }
1136 
1137        if (lastfrag >= UFS_IND_FRAGMENT) {
1138 	       end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
1139 	       phys64 = bh->b_blocknr + 1;
1140 	       for (i = 0; i < end; ++i) {
1141 		       bh = sb_getblk(sb, i + phys64);
1142 		       lock_buffer(bh);
1143 		       memset(bh->b_data, 0, sb->s_blocksize);
1144 		       set_buffer_uptodate(bh);
1145 		       mark_buffer_dirty(bh);
1146 		       unlock_buffer(bh);
1147 		       sync_dirty_buffer(bh);
1148 		       brelse(bh);
1149 	       }
1150        }
1151 out_unlock:
1152        ufs_put_locked_page(lastpage);
1153 out:
1154        return err;
1155 }
1156 
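/*
 * Free everything beyond the new i_size: direct fragments via
 * ufs_trunc_direct(), then the partially-kept indirect branch along offsets[]
 * via free_branch_tail(), and finally any remaining fully-freed indirect
 * trees via free_full_branch().  Block pointers are cleared under meta_lock
 * so that a concurrent ufs_frag_map() can detect the change and retry.
 */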
1157 static void __ufs_truncate_blocks(struct inode *inode)
1158 {
1159 	struct ufs_inode_info *ufsi = UFS_I(inode);
1160 	struct super_block *sb = inode->i_sb;
1161 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1162 	unsigned offsets[4];
1163 	int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets);
1164 	int depth2;
1165 	unsigned i;
1166 	struct ufs_buffer_head *ubh[3];
1167 	void *p;
1168 	u64 block;
1169 
1170 	if (!depth)
1171 		return;
1172 
1173 	/* find the last non-zero in offsets[] */
1174 	for (depth2 = depth - 1; depth2; depth2--)
1175 		if (offsets[depth2])
1176 			break;
1177 
1178 	mutex_lock(&ufsi->truncate_mutex);
1179 	if (depth == 1) {
1180 		ufs_trunc_direct(inode);
1181 		offsets[0] = UFS_IND_BLOCK;
1182 	} else {
1183 		/* get the blocks that should be partially emptied */
1184 		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]);
1185 		for (i = 0; i < depth2; i++) {
1186 			offsets[i]++;	/* next branch is fully freed */
1187 			block = ufs_data_ptr_to_cpu(sb, p);
1188 			if (!block)
1189 				break;
1190 			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
1191 			if (!ubh[i]) {
1192 				write_seqlock(&ufsi->meta_lock);
1193 				ufs_data_ptr_clear(uspi, p);
1194 				write_sequnlock(&ufsi->meta_lock);
1195 				break;
1196 			}
1197 			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]);
1198 		}
1199 		while (i--)
1200 			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
1201 	}
1202 	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
1203 		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
1204 		block = ufs_data_ptr_to_cpu(sb, p);
1205 		if (block) {
1206 			write_seqlock(&ufsi->meta_lock);
1207 			ufs_data_ptr_clear(uspi, p);
1208 			write_sequnlock(&ufsi->meta_lock);
1209 			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
1210 		}
1211 	}
1212 	ufsi->i_lastfrag = DIRECT_FRAGMENT;
1213 	mark_inode_dirty(inode);
1214 	mutex_unlock(&ufsi->truncate_mutex);
1215 }
1216 
1217 static int ufs_truncate(struct inode *inode, loff_t size)
1218 {
1219 	int err = 0;
1220 
1221 	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
1222 	     inode->i_ino, (unsigned long long)size,
1223 	     (unsigned long long)i_size_read(inode));
1224 
1225 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1226 	      S_ISLNK(inode->i_mode)))
1227 		return -EINVAL;
1228 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1229 		return -EPERM;
1230 
1231 	err = ufs_alloc_lastblock(inode, size);
1232 
1233 	if (err)
1234 		goto out;
1235 
1236 	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);
1237 
1238 	truncate_setsize(inode, size);
1239 
1240 	__ufs_truncate_blocks(inode);
1241 	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
1242 	mark_inode_dirty(inode);
1243 out:
1244 	UFSD("EXIT: err %d\n", err);
1245 	return err;
1246 }
1247 
1248 void ufs_truncate_blocks(struct inode *inode)
1249 {
1250 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1251 	      S_ISLNK(inode->i_mode)))
1252 		return;
1253 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1254 		return;
1255 	__ufs_truncate_blocks(inode);
1256 }
1257 
1258 int ufs_setattr(struct dentry *dentry, struct iattr *attr)
1259 {
1260 	struct inode *inode = d_inode(dentry);
1261 	unsigned int ia_valid = attr->ia_valid;
1262 	int error;
1263 
1264 	error = inode_change_ok(inode, attr);
1265 	if (error)
1266 		return error;
1267 
1268 	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
1269 		error = ufs_truncate(inode, attr->ia_size);
1270 		if (error)
1271 			return error;
1272 	}
1273 
1274 	setattr_copy(inode, attr);
1275 	mark_inode_dirty(inode);
1276 	return 0;
1277 }
1278 
1279 const struct inode_operations ufs_file_inode_operations = {
1280 	.setattr = ufs_setattr,
1281 };
1282