/*
 *  linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 *  from
 *
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <asm/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;
	int ptrs_bits = uspi->s_apbshift;
	const long direct_blocks = UFS_NDADDR,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;

	UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
	}
	return n;
}
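
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the tree shape built above bounds the largest logical block
 * an inode can address.  With UFS_NDADDR direct pointers and "ptrs"
 * pointers per indirect block (ptrs == uspi->s_apb == 1 << ptrs_bits),
 * the capacity is UFS_NDADDR + ptrs + ptrs^2 + ptrs^3 blocks; e.g.
 * ptrs_bits = 10 gives 12 + 1024 + 1048576 + 1073741824.
 */
static unsigned long long example_addressable_blocks(int ptrs_bits)
{
	unsigned long long ptrs = 1ULL << ptrs_bits;

	return UFS_NDADDR + ptrs + ptrs * ptrs + ptrs * ptrs * ptrs;
}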

typedef struct {
	void	*p;
	union {
		__fs32	key32;
		__fs64	key64;
	};
	struct buffer_head *bh;
} Indirect;

static inline int grow_chain32(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs32 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key32 = *(__fs32 *)(to->p = v);
		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

static inline int grow_chain64(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs64 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key64 = *(__fs64 *)(to->p = v);
		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}
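
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the retry loop in grow_chain32/64 above is the standard
 * seqlock reader pattern -- snapshot the data optimistically, then
 * retry if a writer (here, truncate clearing block pointers under
 * write_seqlock(&ufsi->meta_lock)) ran in the meantime.  A minimal
 * form, for a seqlock_t protecting a single value:
 */
static u32 example_seqlock_read(seqlock_t *lock, const u32 *value)
{
	unsigned seq;
	u32 snapshot;

	do {
		seq = read_seqbegin(lock);	/* sample writer counter */
		snapshot = *value;		/* speculative read      */
	} while (read_seqretry(lock, seq));	/* writer ran: try again */

	return snapshot;
}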

/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 */

static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
	int shift = uspi->s_apbshift-uspi->s_fpbshift;
	Indirect chain[4], *q = chain;
	unsigned *p;
	unsigned flags = UFS_SB(sb)->s_flags;
	u64 res = 0;

	UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
		uspi->s_fpbshift, uspi->s_apbmask,
		(unsigned long long)mask);

	if (depth == 0)
		goto no_block;

again:
	p = offsets;

	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;

	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
		goto changed;
	if (!q->key32)
		goto no_block;
	while (--depth) {
		__fs32 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs32_to_cpu(sb, q->key32) + (n>>shift));
		if (!bh)
			goto no_block;
		ptr = (__fs32 *)bh->b_data + (n & mask);
		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key32)
			goto no_block;
	}
	res = fs32_to_cpu(sb, q->key32);
	goto found;

ufs2:
	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
		goto changed;
	if (!q->key64)
		goto no_block;

	while (--depth) {
		__fs64 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs64_to_cpu(sb, q->key64) + (n>>shift));
		if (!bh)
			goto no_block;
		ptr = (__fs64 *)bh->b_data + (n & mask);
		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key64)
			goto no_block;
	}
	res = fs64_to_cpu(sb, q->key64);
found:
	res += uspi->s_sbbase;
no_block:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	return res;

changed:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	goto again;
}
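
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): buffers above are read in fragment-sized units, so each
 * offsets[] entry n is split in two -- n >> shift picks which fragment
 * of the indirect block to read (shift = s_apbshift - s_fpbshift), and
 * n & mask picks the pointer within that fragment
 * (mask = s_apbmask >> s_fpbshift).  With hypothetical values
 * s_apbshift = 11 and s_fpbshift = 3:
 */
static void example_split_indirect_index(unsigned n,
					 unsigned *frag_in_block,
					 unsigned *ptr_in_frag)
{
	const int apbshift = 11;		/* 2048 pointers per block   */
	const int fpbshift = 3;			/* 8 fragments per block     */
	const int shift = apbshift - fpbshift;	/* 256 pointers per fragment */
	const unsigned mask = (1U << shift) - 1;

	*frag_in_block = n >> shift;	/* e.g. n = 600 -> fragment 2 */
	*ptr_in_frag = n & mask;	/* e.g. n = 600 -> entry 88   */
}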

/*
 * Unpacking tails: we have a file with a partial final block and
 * have been asked to extend it.  If the fragment being written
 * is within the same block, we need to extend the tail just to cover
 * that fragment.  Otherwise the tail is extended to a full block.
 *
 * Note that we might need to create a _new_ tail, but that will
 * be handled elsewhere; this is strictly for resizing old
 * ones.
 */
static bool
ufs_extend_tail(struct inode *inode, u64 writes_to,
		  int *err, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned lastfrag = ufsi->i_lastfrag;	/* it's a short file, so unsigned is enough */
	unsigned block = ufs_fragstoblks(lastfrag);
	unsigned new_size;
	void *p;
	u64 tmp;

	if (writes_to < (lastfrag | uspi->s_fpbmask))
		new_size = (writes_to & uspi->s_fpbmask) + 1;
	else
		new_size = uspi->s_fpb;

	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
				new_size, err, locked_page);
	return tmp != 0;
}
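
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the new_size computation above, with a worked example.
 * Assuming s_fpb = 8 fragments per block (s_fpbmask = 7) and
 * i_lastfrag = 13, the tail block holds fragments 8..12 and
 * "lastfrag | s_fpbmask" is 15, the last fragment of that block:
 *   writes_to = 14 (same block)   -> new_size = (14 & 7) + 1 = 7 frags
 *   writes_to = 20 (beyond block) -> new_size = 8, the full block
 */
static unsigned example_tail_size(u64 writes_to, unsigned lastfrag)
{
	const unsigned fpb = 8, fpbmask = fpb - 1;	/* hypothetical */

	if (writes_to < (lastfrag | fpbmask))
		return (writes_to & fpbmask) + 1;
	return fpb;
}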

/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @index: index of the block pointer within the inode's array
 * @new_fragment: number of the newly allocated fragment(s)
 * @err: set if something goes wrong
 * @phys: pointer to where we save the physical number of the newly allocated
 *   fragments; NULL if we are not allocating data (indirect blocks, for example)
 * @new: set if we allocate a new block
 * @locked_page: for ufs_new_fragments()
 */
static u64
ufs_inode_getfrag(struct inode *inode, unsigned index,
		  sector_t new_fragment, int *err,
		  long *phys, int *new, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 tmp, goal, lastfrag;
	unsigned nfrags = uspi->s_fpb;
	void *p;

	/* TODO: to be done for write support
	if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
	     goto ufs2;
	 */

	p = ufs_get_direct_data_ptr(uspi, ufsi, index);
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	lastfrag = ufsi->i_lastfrag;

	/* will that be a new tail? */
	if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
		nfrags = (new_fragment & uspi->s_fpbmask) + 1;

	goal = 0;
	if (index) {
		goal = ufs_data_ptr_to_cpu(sb,
				 ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
		if (goal)
			goal += uspi->s_fpb;
	}
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
				goal, uspi->s_fpb, err,
				phys != NULL ? locked_page : NULL);

	if (!tmp) {
		*err = -ENOSPC;
		return 0;
	}

	if (phys) {
		*err = 0;
		*new = 1;
	}
	inode->i_ctime = CURRENT_TIME_SEC;
	if (IS_SYNC(inode))
		ufs_sync_inode (inode);
	mark_inode_dirty(inode);
out:
	return tmp + uspi->s_sbbase;

	/* This part is yet to be implemented;
	   it is required only for writing, not for read-only mounts.
ufs2:

	u2_block = ufs_fragstoblks(fragment);
	u2_blockoff = ufs_fragnum(fragment);
	p = ufsi->i_u1.u2_i_data + block;
	goal = 0;

repeat2:
	tmp = fs32_to_cpu(sb, *p);
	lastfrag = ufsi->i_lastfrag;

	*/
}
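
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the "goal" computed above is the classic goal-directed
 * allocation hint -- ask the allocator for the fragment right after
 * the previously allocated block so the file stays contiguous on
 * disk.  "prev" stands for the value of the index - 1 pointer and
 * "fpb" for uspi->s_fpb; zero means "no preference".
 */
static u64 example_alloc_goal(u64 prev, unsigned fpb)
{
	return prev ? prev + fpb : 0;
}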

/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @ind_block: block number of the indirect block
 * @index: the number of the pointer within the indirect block
 * @new_fragment: number of the newly allocated fragment
 *  (block will hold this fragment and also uspi->s_fpb-1)
 * @err: see ufs_inode_getfrag()
 * @phys: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_page: see ufs_inode_getfrag()
 */
static u64
ufs_inode_getblock(struct inode *inode, u64 ind_block,
		  unsigned index, sector_t new_fragment, int *err,
		  long *phys, int *new, struct page *locked_page)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	u64 tmp = 0, goal;
	struct buffer_head *bh;
	void *p;

	if (!ind_block)
		return 0;

	bh = sb_bread(sb, ind_block + (index >> shift));
	if (unlikely(!bh))
		return 0;

	index &= uspi->s_apbmask >> uspi->s_fpbshift;
	if (uspi->fs_magic == UFS2_MAGIC)
		p = (__fs64 *)bh->b_data + index;
	else
		p = (__fs32 *)bh->b_data + index;

	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	if (index && (uspi->fs_magic == UFS2_MAGIC ?
		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_page);
	if (!tmp)
		goto out;

	if (new)
		*new = 1;

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
out:
	brelse (bh);
	UFSD("EXIT\n");
	if (tmp)
		tmp += uspi->s_sbbase;
	return tmp;
}

/**
 * ufs_getfrag_block() - `get_block_t' function, the interface between UFS
 * and readpage, writepage and so on
 */

static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
	struct super_block * sb = inode->i_sb;
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi = sbi->s_uspi;
	struct buffer_head * bh;
	int ret, err, new;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
	unsigned long phys;
	u64 phys64 = 0;
	unsigned frag = fragment & uspi->s_fpbmask;

	if (!create) {
		phys64 = ufs_frag_map(inode, offsets, depth);
		if (phys64) {
			phys64 += frag;
			map_bh(bh_result, sb, phys64);
		}
		return 0;
	}

	/* This code is entered only when writing ... ? */

	err = -EIO;
	new = 0;
	ret = 0;
	bh = NULL;

	mutex_lock(&UFS_I(inode)->truncate_mutex);

	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
	if (!depth)
		goto abort_too_big;

	err = 0;

	if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
		unsigned lastfrag = UFS_I(inode)->i_lastfrag;
		unsigned tailfrags = lastfrag & uspi->s_fpbmask;
		if (tailfrags && fragment >= lastfrag) {
			if (!ufs_extend_tail(inode, fragment,
					     &err, bh_result->b_page))
				goto abort;
		}
	}

	if (depth == 1) {
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, &phys, &new, bh_result->b_page);
		if (phys64) {
			phys64 += frag;
			phys = phys64;
		}
		goto out;
	}
	if (depth == 2) {
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, NULL, NULL, bh_result->b_page);
		goto get_indirect;
	}
	if (depth == 3) {
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, NULL, NULL, bh_result->b_page);
		goto get_double;
	}
	phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
				   &err, NULL, NULL, bh_result->b_page);
	phys64 = ufs_inode_getblock(inode, phys64, offsets[1],
				fragment, &err, NULL, NULL, NULL);
get_double:
	phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 2],
				fragment, &err, NULL, NULL, NULL);
get_indirect:
	phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
				fragment, &err, &phys, &new, bh_result->b_page);
	if (phys64) {
		phys64 += frag;
		phys = phys64;
	}
out:
	if (err)
		goto abort;
	if (new)
		set_buffer_new(bh_result);
	map_bh(bh_result, sb, phys);
abort:
	mutex_unlock(&UFS_I(inode)->truncate_mutex);

	return err;

abort_too_big:
	ufs_warning(sb, "ufs_get_block", "block > big");
	goto abort;
}
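
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the get_block_t contract that ufs_getfrag_block() fulfils
 * for the generic code below.  On success the callee maps bh_result to
 * a physical block with map_bh(), flags freshly allocated blocks with
 * set_buffer_new(), leaves holes unmapped when !create, and returns 0
 * or a -errno.  A toy 1:1 mapping over a hypothetical fixed layout:
 */
static int example_get_block(struct inode *inode, sector_t block,
			     struct buffer_head *bh_result, int create)
{
	const sector_t first_data_block = 64;	/* hypothetical layout   */
	const sector_t max_blocks = 1024;	/* hypothetical size cap */

	if (block >= max_blocks)
		return -EFBIG;
	/* every logical block sits at a fixed offset on the device */
	map_bh(bh_result, inode->i_sb, first_data_block + block);
	return 0;
}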

static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page,ufs_getfrag_block,wbc);
}

static int ufs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page,ufs_getfrag_block);
}

int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, ufs_getfrag_block);
}

static void ufs_truncate_blocks(struct inode *);

static void ufs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ufs_truncate_blocks(inode);
	}
}

static int ufs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				ufs_getfrag_block);
	if (unlikely(ret))
		ufs_write_failed(mapping, pos + len);

	return ret;
}

static int ufs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret < len)
		ufs_write_failed(mapping, pos + len);
	return ret;
}

static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,ufs_getfrag_block);
}

const struct address_space_operations ufs_aops = {
	.readpage = ufs_readpage,
	.writepage = ufs_writepage,
	.write_begin = ufs_write_begin,
	.write_end = ufs_write_end,
	.bmap = ufs_bmap
};

static void ufs_set_inode_ops(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ufs_file_inode_operations;
		inode->i_fop = &ufs_file_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ufs_dir_inode_operations;
		inode->i_fop = &ufs_dir_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks) {
			inode->i_op = &ufs_fast_symlink_inode_operations;
			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
		} else {
			inode->i_op = &ufs_symlink_inode_operations;
			inode->i_mapping->a_ops = &ufs_aops;
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}

static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
	if (inode->i_nlink == 0) {
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
		return -1;
	}

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));

	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
	if (inode->i_nlink == 0) {
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
		return -1;
	}

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));

	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
	inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
	inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
	inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
	inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
	inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
	inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
	/*
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	*/

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
		       sizeof(ufs2_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
	struct ufs_inode_info *ufsi;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;
	struct inode *inode;
	int err;

	UFSD("ENTER, ino %lu\n", ino);

	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
			    ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ufsi = UFS_I(inode);

	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
			    inode->i_ino);
		goto bad_inode;
	}
	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		err = ufs2_read_inode(inode,
				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		err = ufs1_read_inode(inode,
				      ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	if (err)
		goto bad_inode;
	inode->i_version++;
	ufsi->i_lastfrag =
		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
	ufsi->i_dir_start_lookup = 0;
	ufsi->i_osync = 0;

	ufs_set_inode_ops(inode);

	brelse(bh);

	UFSD("EXIT\n");
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(-EIO);
}

static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atime.tv_usec = 0;
	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctime.tv_usec = 0;
	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtime.tv_usec = 0;
	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs_inode));
}

static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	UFSD("ENTER\n");
	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);

	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs2_inode));
	UFSD("EXIT\n");
}

static int ufs_update_inode(struct inode * inode, int do_sync)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
		return -1;
	}

	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
		return -1;
	}
	if (uspi->fs_magic == UFS2_MAGIC) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		ufs2_update_inode(inode,
				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;

		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse (bh);

	UFSD("EXIT\n");
	return 0;
}

int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ufs_sync_inode (struct inode *inode)
{
	return ufs_update_inode (inode, 1);
}

void ufs_evict_inode(struct inode * inode)
{
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode))
		want_delete = 1;

	truncate_inode_pages_final(&inode->i_data);
	if (want_delete) {
		inode->i_size = 0;
		if (inode->i_blocks)
			ufs_truncate_blocks(inode);
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	if (want_delete)
		ufs_free_inode(inode);
}

struct to_free {
	struct inode *inode;
	u64 to;
	unsigned count;
};

static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
{
	if (ctx->count && ctx->to != from) {
		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
		ctx->count = 0;
	}
	ctx->count += count;
	ctx->to = from + count;
}
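
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): free_data() batches contiguous runs so ufs_free_blocks()
 * is called once per run rather than once per block; the final
 * free_data(&ctx, 0, 0) call flushes whatever run is still pending.
 * The same pattern in isolation, with the freeing replaced by a
 * hypothetical emit() callback:
 *
 *   add(5,2) add(7,2) add(12,2) add(0,0)  ->  emit(5,4), emit(12,2)
 */
struct example_run_ctx {
	u64 to;					/* first block past the run */
	unsigned count;				/* length of the run        */
	void (*emit)(u64 start, unsigned count);
};

static void example_add_run(struct example_run_ctx *ctx, u64 from,
			    unsigned count)
{
	if (ctx->count && ctx->to != from) {	/* run broken: flush it  */
		ctx->emit(ctx->to - ctx->count, ctx->count);
		ctx->count = 0;
	}
	ctx->count += count;			/* extend (or start) run */
	ctx->to = from + count;
}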

#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)

static void ufs_trunc_direct(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	void *p;
	u64 frag1, frag2, frag3, frag4, block1, block2;
	struct to_free ctx = {.inode = inode};
	unsigned i, tmp;

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	frag1 = DIRECT_FRAGMENT;
	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
	frag3 = frag4 & ~uspi->s_fpbmask;
	block1 = block2 = 0;
	if (frag2 > frag3) {
		frag2 = frag4;
		frag3 = frag4 = 0;
	} else if (frag2 < frag3) {
		block1 = ufs_fragstoblks (frag2);
		block2 = ufs_fragstoblks (frag3);
	}

	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
	     (unsigned long long)frag1, (unsigned long long)frag2,
	     (unsigned long long)block1, (unsigned long long)block2,
	     (unsigned long long)frag3, (unsigned long long)frag4);

	if (frag1 >= frag2)
		goto next1;

	/*
	 * Free the fragments in the first (partial) block
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic (sb, "ufs_trunc_direct", "internal error");
	frag2 -= frag1;
	frag1 = ufs_fragnum (frag1);

	ufs_free_fragments(inode, tmp + frag1, frag2);

next1:
	/*
	 * Free whole blocks
	 */
	for (i = block1 ; i < block2; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		tmp = ufs_data_ptr_to_cpu(sb, p);
		if (!tmp)
			continue;
		write_seqlock(&ufsi->meta_lock);
		ufs_data_ptr_clear(uspi, p);
		write_sequnlock(&ufsi->meta_lock);

		free_data(&ctx, tmp, uspi->s_fpb);
	}

	free_data(&ctx, 0, 0);

	if (frag3 >= frag4)
		goto next3;

	/*
	 * Free the fragments in the last (partial) block
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic(sb, "ufs_truncate_direct", "internal error");
	frag4 = ufs_fragnum (frag4);
	write_seqlock(&ufsi->meta_lock);
	ufs_data_ptr_clear(uspi, p);
	write_sequnlock(&ufsi->meta_lock);

	ufs_free_fragments (inode, tmp, frag4);
 next3:

	UFSD("EXIT: ino %lu\n", inode->i_ino);
}
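
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the frag1..frag4 boundary arithmetic above, isolated.  With
 * 8 fragments per block (fpbmask = 7), truncating down to frag1 = 13
 * with the old tail at frag4 = 29 yields frag2 = 16 and frag3 = 24:
 * free fragments 13..15 of the first partial block, the whole blocks
 * covering 16..23, then fragments 24..28 of the last partial block.
 */
static void example_trunc_boundaries(u64 frag1, u64 frag4,
				     u64 *frag2, u64 *frag3)
{
	const u64 fpbmask = 7;	/* hypothetical: 8 fragments per block */

	/* round frag1 up to the next block boundary */
	*frag2 = (frag1 & fpbmask) ? ((frag1 | fpbmask) + 1) : frag1;
	/* round frag4 down to a block boundary */
	*frag3 = frag4 & ~fpbmask;
}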

static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
	unsigned i;

	if (!ubh)
		return;

	if (--depth) {
		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_full_branch(inode, block, depth);
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_data(&ctx, block, uspi->s_fpb);
		}
		free_data(&ctx, 0, 0);
	}

	ubh_bforget(ubh);
	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
}

static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i;

	if (--depth) {
		for (i = from; i < uspi->s_apb ; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_full_branch(inode, block, depth);
			}
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_data(&ctx, block, uspi->s_fpb);
			}
		}
		free_data(&ctx, 0, 0);
	}
	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
		ubh_sync_block(ubh);
	ubh_brelse(ubh);
}

static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
{
	int err = 0;
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i, end;
	sector_t lastfrag;
	struct page *lastpage;
	struct buffer_head *bh;
	u64 phys64;

	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (!lastfrag)
		goto out;

	lastfrag--;

	lastpage = ufs_get_locked_page(mapping, lastfrag >>
				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
	if (IS_ERR(lastpage)) {
		err = -EIO;
		goto out;
	}

	end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
	bh = page_buffers(lastpage);
	for (i = 0; i < end; ++i)
		bh = bh->b_this_page;

	err = ufs_getfrag_block(inode, lastfrag, bh, 1);

	if (unlikely(err))
		goto out_unlock;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev,
					  bh->b_blocknr);
		/*
		 * we do not zero out the fragment: if it was mapped
		 * to a hole, it already contains zeroes
		 */
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		set_page_dirty(lastpage);
	}

	if (lastfrag >= UFS_IND_FRAGMENT) {
		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
		phys64 = bh->b_blocknr + 1;
		for (i = 0; i < end; ++i) {
			bh = sb_getblk(sb, i + phys64);
			lock_buffer(bh);
			memset(bh->b_data, 0, sb->s_blocksize);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			unlock_buffer(bh);
			sync_dirty_buffer(bh);
			brelse(bh);
		}
	}
out_unlock:
	ufs_put_locked_page(lastpage);
out:
	return err;
}

static void __ufs_truncate_blocks(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets);
	int depth2;
	unsigned i;
	struct ufs_buffer_head *ubh[3];
	void *p;
	u64 block;

	if (!depth)
		return;

	/* find the last non-zero entry in offsets[] */
	for (depth2 = depth - 1; depth2; depth2--)
		if (offsets[depth2])
			break;

	mutex_lock(&ufsi->truncate_mutex);
	if (depth == 1) {
		ufs_trunc_direct(inode);
		offsets[0] = UFS_IND_BLOCK;
	} else {
		/* get the blocks that should be partially emptied */
		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]);
		for (i = 0; i < depth2; i++) {
			offsets[i]++;	/* next branch is fully freed */
			block = ufs_data_ptr_to_cpu(sb, p);
			if (!block)
				break;
			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
			if (!ubh[i]) {
				write_seqlock(&ufsi->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&ufsi->meta_lock);
				break;
			}
			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]);
		}
		while (i--)
			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
	}
	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		block = ufs_data_ptr_to_cpu(sb, p);
		if (block) {
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);
			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
		}
	}
	ufsi->i_lastfrag = DIRECT_FRAGMENT;
	mark_inode_dirty(inode);
	mutex_unlock(&ufsi->truncate_mutex);
}

static int ufs_truncate(struct inode *inode, loff_t size)
{
	int err = 0;

	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
	     inode->i_ino, (unsigned long long)size,
	     (unsigned long long)i_size_read(inode));

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	err = ufs_alloc_lastblock(inode, size);

	if (err)
		goto out;

	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);

	truncate_setsize(inode, size);

	__ufs_truncate_blocks(inode);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
out:
	UFSD("EXIT: err %d\n", err);
	return err;
}

void ufs_truncate_blocks(struct inode *inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;
	__ufs_truncate_blocks(inode);
}

int ufs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	unsigned int ia_valid = attr->ia_valid;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
		error = ufs_truncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations ufs_file_inode_operations = {
	.setattr = ufs_setattr,
};
1268