xref: /openbmc/linux/fs/ufs/inode.c (revision f53bd142)
1 /*
2  *  linux/fs/ufs/inode.c
3  *
4  * Copyright (C) 1998
5  * Daniel Pirkl <daniel.pirkl@email.cz>
6  * Charles University, Faculty of Mathematics and Physics
7  *
8  *  from
9  *
10  *  linux/fs/ext2/inode.c
11  *
12  * Copyright (C) 1992, 1993, 1994, 1995
13  * Remy Card (card@masi.ibp.fr)
14  * Laboratoire MASI - Institut Blaise Pascal
15  * Universite Pierre et Marie Curie (Paris VI)
16  *
17  *  from
18  *
19  *  linux/fs/minix/inode.c
20  *
21  *  Copyright (C) 1991, 1992  Linus Torvalds
22  *
23  *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
24  *  Big-endian to little-endian byte-swapping/bitmaps by
25  *        David S. Miller (davem@caip.rutgers.edu), 1995
26  */
27 
28 #include <asm/uaccess.h>
29 
30 #include <linux/errno.h>
31 #include <linux/fs.h>
32 #include <linux/time.h>
33 #include <linux/stat.h>
34 #include <linux/string.h>
35 #include <linux/mm.h>
36 #include <linux/buffer_head.h>
37 #include <linux/writeback.h>
38 
39 #include "ufs_fs.h"
40 #include "ufs.h"
41 #include "swab.h"
42 #include "util.h"
43 
44 static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
45 {
46 	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
47 	int ptrs = uspi->s_apb;
48 	int ptrs_bits = uspi->s_apbshift;
49 	const long direct_blocks = UFS_NDADDR,
50 		indirect_blocks = ptrs,
51 		double_blocks = (1 << (ptrs_bits * 2));
52 	int n = 0;
53 
54 
55 	UFSD("ptrs = uspi->s_apb = %d, double_blocks = %ld\n", ptrs, double_blocks);
56 	if (i_block < direct_blocks) {
57 		offsets[n++] = i_block;
58 	} else if ((i_block -= direct_blocks) < indirect_blocks) {
59 		offsets[n++] = UFS_IND_BLOCK;
60 		offsets[n++] = i_block;
61 	} else if ((i_block -= indirect_blocks) < double_blocks) {
62 		offsets[n++] = UFS_DIND_BLOCK;
63 		offsets[n++] = i_block >> ptrs_bits;
64 		offsets[n++] = i_block & (ptrs - 1);
65 	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
66 		offsets[n++] = UFS_TIND_BLOCK;
67 		offsets[n++] = i_block >> (ptrs_bits * 2);
68 		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
69 		offsets[n++] = i_block & (ptrs - 1);
70 	} else {
71 		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
72 	}
73 	return n;
74 }
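/*
 * Worked example of the mapping above (illustrative only; assumes a
 * UFS1-style layout with 4-byte block pointers and s_apb = 2048,
 * s_apbshift = 11, and UFS_NDADDR = 12 direct blocks):
 *
 *	i_block = 5    -> offsets = { 5 }                  (direct)
 *	i_block = 12   -> offsets = { UFS_IND_BLOCK, 0 }   (single indirect)
 *	i_block = 5000 -> 5000 - 12 - 2048 = 2940, so
 *	                  offsets = { UFS_DIND_BLOCK,
 *	                              2940 >> 11 = 1,
 *	                              2940 & 2047 = 892 }  (double indirect)
 */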
75 
76 typedef struct {
77 	void	*p;
78 	union {
79 		__fs32	key32;
80 		__fs64	key64;
81 	};
82 	struct buffer_head *bh;
83 } Indirect;
84 
85 static inline int grow_chain32(struct ufs_inode_info *ufsi,
86 			       struct buffer_head *bh, __fs32 *v,
87 			       Indirect *from, Indirect *to)
88 {
89 	Indirect *p;
90 	unsigned seq;
91 	to->bh = bh;
92 	do {
93 		seq = read_seqbegin(&ufsi->meta_lock);
94 		to->key32 = *(__fs32 *)(to->p = v);
95 		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
96 			;
97 	} while (read_seqretry(&ufsi->meta_lock, seq));
98 	return (p > to);
99 }
100 
101 static inline int grow_chain64(struct ufs_inode_info *ufsi,
102 			       struct buffer_head *bh, __fs64 *v,
103 			       Indirect *from, Indirect *to)
104 {
105 	Indirect *p;
106 	unsigned seq;
107 	to->bh = bh;
108 	do {
109 		seq = read_seqbegin(&ufsi->meta_lock);
110 		to->key64 = *(__fs64 *)(to->p = v);
111 		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
112 			;
113 	} while (read_seqretry(&ufsi->meta_lock, seq));
114 	return (p > to);
115 }
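/*
 * grow_chain32()/grow_chain64() take a lockless snapshot of the next
 * pointer in the chain and then re-verify every pointer read so far
 * under ufsi->meta_lock (a seqlock).  If a concurrent truncate changed
 * any of them, read_seqretry() forces a fresh snapshot, and a zero
 * return (p <= to) tells the caller to restart the whole walk.
 */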
116 
117 /*
118  * Returns the location of the fragment from
119  * the beginning of the filesystem.
120  */
121 
122 static u64 ufs_frag_map(struct inode *inode, sector_t frag)
123 {
124 	struct ufs_inode_info *ufsi = UFS_I(inode);
125 	struct super_block *sb = inode->i_sb;
126 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
127 	u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
128 	int shift = uspi->s_apbshift-uspi->s_fpbshift;
129 	unsigned offsets[4], *p;
130 	Indirect chain[4], *q = chain;
131 	int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets);
132 	unsigned flags = UFS_SB(sb)->s_flags;
133 	u64 res = 0;
134 
135 	UFSD(": frag = %llu  depth = %d\n", (unsigned long long)frag, depth);
136 	UFSD(": uspi->s_fpbshift = %d, uspi->s_apbmask = %x, mask = %llx\n",
137 		uspi->s_fpbshift, uspi->s_apbmask,
138 		(unsigned long long)mask);
139 
140 	if (depth == 0)
141 		goto no_block;
142 
143 again:
144 	p = offsets;
145 
146 	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
147 		goto ufs2;
148 
149 	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
150 		goto changed;
151 	if (!q->key32)
152 		goto no_block;
153 	while (--depth) {
154 		__fs32 *ptr;
155 		struct buffer_head *bh;
156 		unsigned n = *p++;
157 
158 		bh = sb_bread(sb, uspi->s_sbbase +
159 				  fs32_to_cpu(sb, q->key32) + (n>>shift));
160 		if (!bh)
161 			goto no_block;
162 		ptr = (__fs32 *)bh->b_data + (n & mask);
163 		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
164 			goto changed;
165 		if (!q->key32)
166 			goto no_block;
167 	}
168 	res = fs32_to_cpu(sb, q->key32);
169 	goto found;
170 
171 ufs2:
172 	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
173 		goto changed;
174 	if (!q->key64)
175 		goto no_block;
176 
177 	while (--depth) {
178 		__fs64 *ptr;
179 		struct buffer_head *bh;
180 		unsigned n = *p++;
181 
182 		bh = sb_bread(sb, uspi->s_sbbase +
183 				  fs64_to_cpu(sb, q->key64) + (n>>shift));
184 		if (!bh)
185 			goto no_block;
186 		ptr = (__fs64 *)bh->b_data + (n & mask);
187 		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
188 			goto changed;
189 		if (!q->key64)
190 			goto no_block;
191 	}
192 	res = fs64_to_cpu(sb, q->key64);
193 found:
194 	res += uspi->s_sbbase + (frag & uspi->s_fpbmask);
195 no_block:
196 	while (q > chain) {
197 		brelse(q->bh);
198 		q--;
199 	}
200 	return res;
201 
202 changed:
203 	while (q > chain) {
204 		brelse(q->bh);
205 		q--;
206 	}
207 	goto again;
208 }
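/*
 * Note on the shift/mask arithmetic above: an indirect block spans
 * s_fpb fragments, so for a pointer index n within one indirect level,
 * n >> (s_apbshift - s_fpbshift) selects which fragment of the
 * indirect block to read, and n & (s_apbmask >> s_fpbshift) indexes
 * the pointer inside that fragment.  The final physical address adds
 * back the fragment offset within the data block, frag & s_fpbmask.
 */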
209 
210 /**
211  * ufs_inode_getfrag() - allocate new fragment(s)
212  * @inode: pointer to inode
213  * @fragment: number of the `fragment' that holds the pointer
214  *   to the newly allocated fragment(s)
215  * @new_fragment: number of the newly allocated fragment(s)
216  * @required: how many fragments we require
217  * @err: set to an error code if something goes wrong
218  * @phys: pointer to where we store the physical number of the newly allocated
219  *   fragments; NULL when allocating metadata (e.g. indirect blocks), not data
220  * @new: set if we allocated a new block
221  * @locked_page: for ufs_new_fragments()
222  */
223 static struct buffer_head *
224 ufs_inode_getfrag(struct inode *inode, u64 fragment,
225 		  sector_t new_fragment, unsigned int required, int *err,
226 		  long *phys, int *new, struct page *locked_page)
227 {
228 	struct ufs_inode_info *ufsi = UFS_I(inode);
229 	struct super_block *sb = inode->i_sb;
230 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
231 	struct buffer_head * result;
232 	unsigned blockoff, lastblockoff;
233 	u64 tmp, goal, lastfrag, block, lastblock;
234 	void *p, *p2;
235 
236 	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, required %u, "
237 	     "metadata %d\n", inode->i_ino, (unsigned long long)fragment,
238 	     (unsigned long long)new_fragment, required, !phys);
239 
240         /* TODO : to be done for write support
241         if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
242              goto ufs2;
243          */
244 
245 	block = ufs_fragstoblks (fragment);
246 	blockoff = ufs_fragnum (fragment);
247 	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
248 
249 	goal = 0;
250 
251 repeat:
252 	tmp = ufs_data_ptr_to_cpu(sb, p);
253 
254 	lastfrag = ufsi->i_lastfrag;
255 	if (tmp && fragment < lastfrag) {
256 		if (!phys) {
257 			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
258 			if (tmp == ufs_data_ptr_to_cpu(sb, p)) {
259 				UFSD("EXIT, result %llu\n",
260 				     (unsigned long long)tmp + blockoff);
261 				return result;
262 			}
263 			brelse (result);
264 			goto repeat;
265 		} else {
266 			*phys = uspi->s_sbbase + tmp + blockoff;
267 			return NULL;
268 		}
269 	}
270 
271 	lastblock = ufs_fragstoblks (lastfrag);
272 	lastblockoff = ufs_fragnum (lastfrag);
273 	/*
274 	 * We will extend the file into a new block beyond the last allocated block
275 	 */
276 	if (lastblock < block) {
277 		/*
278 		 * We must reallocate the last allocated block
279 		 */
280 		if (lastblockoff) {
281 			p2 = ufs_get_direct_data_ptr(uspi, ufsi, lastblock);
282 			tmp = ufs_new_fragments(inode, p2, lastfrag,
283 						ufs_data_ptr_to_cpu(sb, p2),
284 						uspi->s_fpb - lastblockoff,
285 						err, locked_page);
286 			if (!tmp) {
287 				if (lastfrag != ufsi->i_lastfrag)
288 					goto repeat;
289 				else
290 					return NULL;
291 			}
292 			lastfrag = ufsi->i_lastfrag;
293 
294 		}
295 		tmp = ufs_data_ptr_to_cpu(sb,
296 					 ufs_get_direct_data_ptr(uspi, ufsi,
297 								 lastblock));
298 		if (tmp)
299 			goal = tmp + uspi->s_fpb;
300 		tmp = ufs_new_fragments (inode, p, fragment - blockoff,
301 					 goal, required + blockoff,
302 					 err,
303 					 phys != NULL ? locked_page : NULL);
304 	} else if (lastblock == block) {
305 	/*
306 	 * We will extend the last allocated block
307 	 */
308 		tmp = ufs_new_fragments(inode, p, fragment -
309 					(blockoff - lastblockoff),
310 					ufs_data_ptr_to_cpu(sb, p),
311 					required +  (blockoff - lastblockoff),
312 					err, phys != NULL ? locked_page : NULL);
313 	} else /* (lastblock > block) */ {
314 	/*
315 	 * We will allocate a new block before the last allocated block
316 	 */
317 		if (block) {
318 			tmp = ufs_data_ptr_to_cpu(sb,
319 						 ufs_get_direct_data_ptr(uspi, ufsi, block - 1));
320 			if (tmp)
321 				goal = tmp + uspi->s_fpb;
322 		}
323 		tmp = ufs_new_fragments(inode, p, fragment - blockoff,
324 					goal, uspi->s_fpb, err,
325 					phys != NULL ? locked_page : NULL);
326 	}
327 	if (!tmp) {
328 		if ((!blockoff && ufs_data_ptr_to_cpu(sb, p)) ||
329 		    (blockoff && lastfrag != ufsi->i_lastfrag))
330 			goto repeat;
331 		*err = -ENOSPC;
332 		return NULL;
333 	}
334 
335 	if (!phys) {
336 		result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
337 	} else {
338 		*phys = uspi->s_sbbase + tmp + blockoff;
339 		result = NULL;
340 		*err = 0;
341 		*new = 1;
342 	}
343 
344 	inode->i_ctime = CURRENT_TIME_SEC;
345 	if (IS_SYNC(inode))
346 		ufs_sync_inode (inode);
347 	mark_inode_dirty(inode);
348 	UFSD("EXIT, result %llu\n", (unsigned long long)tmp + blockoff);
349 	return result;
350 
351      /* This part : To be implemented ....
352         Required only for writing, not required for READ-ONLY.
353 ufs2:
354 
355 	u2_block = ufs_fragstoblks(fragment);
356 	u2_blockoff = ufs_fragnum(fragment);
357 	p = ufsi->i_u1.u2_i_data + block;
358 	goal = 0;
359 
360 repeat2:
361 	tmp = fs32_to_cpu(sb, *p);
362 	lastfrag = ufsi->i_lastfrag;
363 
364      */
365 }
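/*
 * To summarize ufs_inode_getfrag(): for a direct fragment it either
 * returns an already mapped fragment or allocates via one of three
 * cases -- extending the file into a new block past the last allocated
 * block (reallocating a partially used last block first), growing the
 * last allocated block itself, or filling a hole before the last
 * allocated block.  The `goal' passed to ufs_new_fragments() biases
 * the allocator next to a neighbouring block when one is known.
 */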
366 
367 /**
368  * ufs_inode_getblock() - allocate new block
369  * @inode: pointer to inode
370  * @bh: pointer to the block that holds the "pointer" to the newly allocated block
371  * @fragment: number of the `fragment' that holds the pointer
372  *   to the newly allocated block
373  * @new_fragment: number of the newly allocated fragment
374  *  (the block will hold this fragment plus the uspi->s_fpb-1 that follow)
375  * @err: see ufs_inode_getfrag()
376  * @phys: see ufs_inode_getfrag()
377  * @new: see ufs_inode_getfrag()
378  * @locked_page: see ufs_inode_getfrag()
379  */
380 static struct buffer_head *
381 ufs_inode_getblock(struct inode *inode, struct buffer_head *bh,
382 		  u64 fragment, sector_t new_fragment, int *err,
383 		  long *phys, int *new, struct page *locked_page)
384 {
385 	struct super_block *sb = inode->i_sb;
386 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
387 	struct buffer_head * result;
388 	unsigned blockoff;
389 	u64 tmp, goal, block;
390 	void *p;
391 
392 	block = ufs_fragstoblks (fragment);
393 	blockoff = ufs_fragnum (fragment);
394 
395 	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, metadata %d\n",
396 	     inode->i_ino, (unsigned long long)fragment,
397 	     (unsigned long long)new_fragment, !phys);
398 
399 	result = NULL;
400 	if (!bh)
401 		goto out;
402 	if (!buffer_uptodate(bh)) {
403 		ll_rw_block (READ, 1, &bh);
404 		wait_on_buffer (bh);
405 		if (!buffer_uptodate(bh))
406 			goto out;
407 	}
408 	if (uspi->fs_magic == UFS2_MAGIC)
409 		p = (__fs64 *)bh->b_data + block;
410 	else
411 		p = (__fs32 *)bh->b_data + block;
412 repeat:
413 	tmp = ufs_data_ptr_to_cpu(sb, p);
414 	if (tmp) {
415 		if (!phys) {
416 			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
417 			if (tmp == ufs_data_ptr_to_cpu(sb, p))
418 				goto out;
419 			brelse (result);
420 			goto repeat;
421 		} else {
422 			*phys = uspi->s_sbbase + tmp + blockoff;
423 			goto out;
424 		}
425 	}
426 
427 	if (block && (uspi->fs_magic == UFS2_MAGIC ?
428 		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[block-1])) :
429 		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[block-1]))))
430 		goal = tmp + uspi->s_fpb;
431 	else
432 		goal = bh->b_blocknr + uspi->s_fpb;
433 	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
434 				uspi->s_fpb, err, locked_page);
435 	if (!tmp) {
436 		if (ufs_data_ptr_to_cpu(sb, p))
437 			goto repeat;
438 		goto out;
439 	}
440 
441 
442 	if (!phys) {
443 		result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
444 	} else {
445 		*phys = uspi->s_sbbase + tmp + blockoff;
446 		*new = 1;
447 	}
448 
449 	mark_buffer_dirty(bh);
450 	if (IS_SYNC(inode))
451 		sync_dirty_buffer(bh);
452 	inode->i_ctime = CURRENT_TIME_SEC;
453 	mark_inode_dirty(inode);
454 	UFSD("result %llu\n", (unsigned long long)tmp + blockoff);
455 out:
456 	brelse (bh);
457 	UFSD("EXIT\n");
458 	return result;
459 }
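/*
 * Note the allocation goal above: if the preceding pointer in this
 * indirect block is already mapped, allocate right after it; otherwise
 * fall back to allocating near the indirect block itself
 * (bh->b_blocknr + uspi->s_fpb), keeping metadata and data close.
 */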
460 
461 /**
462  * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
463  * readpage, writepage and so on
464  */
465 
466 static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
467 {
468 	struct super_block * sb = inode->i_sb;
469 	struct ufs_sb_info * sbi = UFS_SB(sb);
470 	struct ufs_sb_private_info * uspi = sbi->s_uspi;
471 	struct buffer_head * bh;
472 	int ret, err, new;
473 	unsigned long ptr,phys;
474 	u64 phys64 = 0;
475 
476 	if (!create) {
477 		phys64 = ufs_frag_map(inode, fragment);
478 		UFSD("phys64 = %llu\n", (unsigned long long)phys64);
479 		if (phys64)
480 			map_bh(bh_result, sb, phys64);
481 		return 0;
482 	}
483 
484         /* This code is entered only when writing ....? */
485 
486 	err = -EIO;
487 	new = 0;
488 	ret = 0;
489 	bh = NULL;
490 
491 	mutex_lock(&UFS_I(inode)->truncate_mutex);
492 
493 	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
494 	if (fragment >
495 	    ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb)
496 	     << uspi->s_fpbshift))
497 		goto abort_too_big;
498 
499 	err = 0;
500 	ptr = fragment;
501 
502 	/*
503 	 * ok, these macros clean the logic up a bit and make
504 	 * it much more readable:
505 	 */
506 #define GET_INODE_DATABLOCK(x) \
507 	ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new,\
508 			  bh_result->b_page)
509 #define GET_INODE_PTR(x) \
510 	ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL,\
511 			  bh_result->b_page)
512 #define GET_INDIRECT_DATABLOCK(x) \
513 	ufs_inode_getblock(inode, bh, x, fragment,	\
514 			  &err, &phys, &new, bh_result->b_page)
515 #define GET_INDIRECT_PTR(x) \
516 	ufs_inode_getblock(inode, bh, x, fragment,	\
517 			  &err, NULL, NULL, NULL)
518 
519 	if (ptr < UFS_NDIR_FRAGMENT) {
520 		bh = GET_INODE_DATABLOCK(ptr);
521 		goto out;
522 	}
523 	ptr -= UFS_NDIR_FRAGMENT;
524 	if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) {
525 		bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift));
526 		goto get_indirect;
527 	}
528 	ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
529 	if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) {
530 		bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift));
531 		goto get_double;
532 	}
533 	ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
534 	bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift));
535 	bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask);
536 get_double:
537 	bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask);
538 get_indirect:
539 	bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask);
540 
541 #undef GET_INODE_DATABLOCK
542 #undef GET_INODE_PTR
543 #undef GET_INDIRECT_DATABLOCK
544 #undef GET_INDIRECT_PTR
545 
546 out:
547 	if (err)
548 		goto abort;
549 	if (new)
550 		set_buffer_new(bh_result);
551 	map_bh(bh_result, sb, phys);
552 abort:
553 	mutex_unlock(&UFS_I(inode)->truncate_mutex);
554 
555 	return err;
556 
557 abort_too_big:
558 	ufs_warning(sb, "ufs_get_block", "block > big");
559 	goto abort;
560 }
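/*
 * Worked example of the range decomposition above (illustrative only;
 * assumes s_fpb = 8 fragments per block and s_apb = 2048 pointers per
 * block, i.e. s_fpbshift = 3 and s_apbshift = 11): UFS_NDIR_FRAGMENT
 * is then 12 << 3 = 96, and the single-indirect range covers the next
 * 2048 << 3 = 16384 fragments.  For fragment = 100, ptr becomes
 * 100 - 96 = 4, so we take GET_INODE_PTR(UFS_IND_FRAGMENT + (4 >> 11))
 * = GET_INODE_PTR(UFS_IND_FRAGMENT), followed by
 * GET_INDIRECT_DATABLOCK(4 & 2047) = GET_INDIRECT_DATABLOCK(4).
 */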
561 
562 static int ufs_writepage(struct page *page, struct writeback_control *wbc)
563 {
564 	return block_write_full_page(page,ufs_getfrag_block,wbc);
565 }
566 
567 static int ufs_readpage(struct file *file, struct page *page)
568 {
569 	return block_read_full_page(page,ufs_getfrag_block);
570 }
571 
572 int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
573 {
574 	return __block_write_begin(page, pos, len, ufs_getfrag_block);
575 }
576 
577 static void ufs_truncate_blocks(struct inode *);
578 
579 static void ufs_write_failed(struct address_space *mapping, loff_t to)
580 {
581 	struct inode *inode = mapping->host;
582 
583 	if (to > inode->i_size) {
584 		truncate_pagecache(inode, inode->i_size);
585 		ufs_truncate_blocks(inode);
586 	}
587 }
588 
589 static int ufs_write_begin(struct file *file, struct address_space *mapping,
590 			loff_t pos, unsigned len, unsigned flags,
591 			struct page **pagep, void **fsdata)
592 {
593 	int ret;
594 
595 	ret = block_write_begin(mapping, pos, len, flags, pagep,
596 				ufs_getfrag_block);
597 	if (unlikely(ret))
598 		ufs_write_failed(mapping, pos + len);
599 
600 	return ret;
601 }
602 
603 static int ufs_write_end(struct file *file, struct address_space *mapping,
604 			loff_t pos, unsigned len, unsigned copied,
605 			struct page *page, void *fsdata)
606 {
607 	int ret;
608 
609 	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
610 	if (ret < len)
611 		ufs_write_failed(mapping, pos + len);
612 	return ret;
613 }
614 
615 static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
616 {
617 	return generic_block_bmap(mapping,block,ufs_getfrag_block);
618 }
619 
620 const struct address_space_operations ufs_aops = {
621 	.readpage = ufs_readpage,
622 	.writepage = ufs_writepage,
623 	.write_begin = ufs_write_begin,
624 	.write_end = ufs_write_end,
625 	.bmap = ufs_bmap
626 };
627 
628 static void ufs_set_inode_ops(struct inode *inode)
629 {
630 	if (S_ISREG(inode->i_mode)) {
631 		inode->i_op = &ufs_file_inode_operations;
632 		inode->i_fop = &ufs_file_operations;
633 		inode->i_mapping->a_ops = &ufs_aops;
634 	} else if (S_ISDIR(inode->i_mode)) {
635 		inode->i_op = &ufs_dir_inode_operations;
636 		inode->i_fop = &ufs_dir_operations;
637 		inode->i_mapping->a_ops = &ufs_aops;
638 	} else if (S_ISLNK(inode->i_mode)) {
639 		if (!inode->i_blocks) {
640 			inode->i_op = &ufs_fast_symlink_inode_operations;
641 			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
642 		} else {
643 			inode->i_op = &ufs_symlink_inode_operations;
644 			inode->i_mapping->a_ops = &ufs_aops;
645 		}
646 	} else
647 		init_special_inode(inode, inode->i_mode,
648 				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
649 }
650 
651 static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
652 {
653 	struct ufs_inode_info *ufsi = UFS_I(inode);
654 	struct super_block *sb = inode->i_sb;
655 	umode_t mode;
656 
657 	/*
658 	 * Copy data to the in-core inode.
659 	 */
660 	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
661 	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
662 	if (inode->i_nlink == 0) {
663 		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
664 		return -1;
665 	}
666 
667 	/*
668 	 * Linux now has 32-bit uid and gid, so we can support EFT.
669 	 */
670 	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
671 	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));
672 
673 	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
674 	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
675 	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
676 	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
677 	inode->i_mtime.tv_nsec = 0;
678 	inode->i_atime.tv_nsec = 0;
679 	inode->i_ctime.tv_nsec = 0;
680 	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
681 	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
682 	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
683 	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
684 	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
685 
686 
687 	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
688 		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
689 		       sizeof(ufs_inode->ui_u2.ui_addr));
690 	} else {
691 		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
692 		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
693 		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
694 	}
695 	return 0;
696 }
697 
698 static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
699 {
700 	struct ufs_inode_info *ufsi = UFS_I(inode);
701 	struct super_block *sb = inode->i_sb;
702 	umode_t mode;
703 
704 	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
705 	/*
706 	 * Copy data to the in-core inode.
707 	 */
708 	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
709 	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
710 	if (inode->i_nlink == 0) {
711 		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
712 		return -1;
713 	}
714 
715 	/*
716 	 * Linux now has 32-bit uid and gid, so we can support EFT.
717 	 */
718 	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
719 	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));
720 
721 	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
722 	inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
723 	inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
724 	inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
725 	inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
726 	inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
727 	inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
728 	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
729 	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
730 	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
731 	/*
732 	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
733 	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
734 	*/
735 
736 	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
737 		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
738 		       sizeof(ufs2_inode->ui_u2.ui_addr));
739 	} else {
740 		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
741 		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
742 		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
743 	}
744 	return 0;
745 }
746 
747 struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
748 {
749 	struct ufs_inode_info *ufsi;
750 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
751 	struct buffer_head * bh;
752 	struct inode *inode;
753 	int err;
754 
755 	UFSD("ENTER, ino %lu\n", ino);
756 
757 	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
758 		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
759 			    ino);
760 		return ERR_PTR(-EIO);
761 	}
762 
763 	inode = iget_locked(sb, ino);
764 	if (!inode)
765 		return ERR_PTR(-ENOMEM);
766 	if (!(inode->i_state & I_NEW))
767 		return inode;
768 
769 	ufsi = UFS_I(inode);
770 
771 	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
772 	if (!bh) {
773 		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
774 			    inode->i_ino);
775 		goto bad_inode;
776 	}
777 	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
778 		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
779 
780 		err = ufs2_read_inode(inode,
781 				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
782 	} else {
783 		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;
784 
785 		err = ufs1_read_inode(inode,
786 				      ufs_inode + ufs_inotofsbo(inode->i_ino));
787 	}
788 
789 	if (err)
790 		goto bad_inode;
791 	inode->i_version++;
792 	ufsi->i_lastfrag =
793 		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
794 	ufsi->i_dir_start_lookup = 0;
795 	ufsi->i_osync = 0;
796 
797 	ufs_set_inode_ops(inode);
798 
799 	brelse(bh);
800 
801 	UFSD("EXIT\n");
802 	unlock_new_inode(inode);
803 	return inode;
804 
805 bad_inode:
806 	iget_failed(inode);
807 	return ERR_PTR(-EIO);
808 }
809 
810 static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
811 {
812 	struct super_block *sb = inode->i_sb;
813  	struct ufs_inode_info *ufsi = UFS_I(inode);
814 
815 	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
816 	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
817 
818 	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
819 	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));
820 
821 	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
822 	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
823 	ufs_inode->ui_atime.tv_usec = 0;
824 	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
825 	ufs_inode->ui_ctime.tv_usec = 0;
826 	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
827 	ufs_inode->ui_mtime.tv_usec = 0;
828 	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
829 	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
830 	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);
831 
832 	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
833 		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
834 		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
835 	}
836 
837 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
838 		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
839 		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
840 	} else if (inode->i_blocks) {
841 		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
842 		       sizeof(ufs_inode->ui_u2.ui_addr));
843 	}
844 	else {
845 		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
846 		       sizeof(ufs_inode->ui_u2.ui_symlink));
847 	}
848 
849 	if (!inode->i_nlink)
850 		memset (ufs_inode, 0, sizeof(struct ufs_inode));
851 }
852 
853 static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
854 {
855 	struct super_block *sb = inode->i_sb;
856  	struct ufs_inode_info *ufsi = UFS_I(inode);
857 
858 	UFSD("ENTER\n");
859 	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
860 	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
861 
862 	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
863 	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));
864 
865 	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
866 	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
867 	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
868 	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
869 	ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
870 	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
871 	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);
872 
873 	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
874 	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
875 	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);
876 
877 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
878 		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
879 		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
880 	} else if (inode->i_blocks) {
881 		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
882 		       sizeof(ufs_inode->ui_u2.ui_addr));
883 	} else {
884 		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
885 		       sizeof(ufs_inode->ui_u2.ui_symlink));
886  	}
887 
888 	if (!inode->i_nlink)
889 		memset (ufs_inode, 0, sizeof(struct ufs2_inode));
890 	UFSD("EXIT\n");
891 }
892 
893 static int ufs_update_inode(struct inode * inode, int do_sync)
894 {
895 	struct super_block *sb = inode->i_sb;
896 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
897 	struct buffer_head * bh;
898 
899 	UFSD("ENTER, ino %lu\n", inode->i_ino);
900 
901 	if (inode->i_ino < UFS_ROOTINO ||
902 	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
903 		ufs_warning (sb, "ufs_update_inode", "bad inode number (%lu)\n", inode->i_ino);
904 		return -1;
905 	}
906 
907 	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
908 	if (!bh) {
909 		ufs_warning (sb, "ufs_update_inode", "unable to read inode %lu\n", inode->i_ino);
910 		return -1;
911 	}
912 	if (uspi->fs_magic == UFS2_MAGIC) {
913 		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
914 
915 		ufs2_update_inode(inode,
916 				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
917 	} else {
918 		struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;
919 
920 		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
921 	}
922 
923 	mark_buffer_dirty(bh);
924 	if (do_sync)
925 		sync_dirty_buffer(bh);
926 	brelse (bh);
927 
928 	UFSD("EXIT\n");
929 	return 0;
930 }
931 
932 int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
933 {
934 	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
935 }
936 
937 int ufs_sync_inode (struct inode *inode)
938 {
939 	return ufs_update_inode (inode, 1);
940 }
941 
942 void ufs_evict_inode(struct inode * inode)
943 {
944 	int want_delete = 0;
945 
946 	if (!inode->i_nlink && !is_bad_inode(inode))
947 		want_delete = 1;
948 
949 	truncate_inode_pages_final(&inode->i_data);
950 	if (want_delete) {
951 		inode->i_size = 0;
952 		if (inode->i_blocks)
953 			ufs_truncate_blocks(inode);
954 	}
955 
956 	invalidate_inode_buffers(inode);
957 	clear_inode(inode);
958 
959 	if (want_delete)
960 		ufs_free_inode(inode);
961 }
962 
963 struct to_free {
964 	struct inode *inode;
965 	u64 to;
966 	unsigned count;
967 };
968 
969 static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
970 {
971 	if (ctx->count && ctx->to != from) {
972 		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
973 		ctx->count = 0;
974 	}
975 	ctx->count += count;
976 	ctx->to = from + count;
977 }
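/*
 * free_data() batches contiguous runs of fragments: as long as each
 * call continues where the previous one ended (from == ctx->to), the
 * run just grows; on a discontinuity the accumulated run is released
 * with ufs_free_blocks() and a new run starts.  The final
 * free_data(&ctx, 0, 0) call flushes whatever is still pending.
 */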
978 
979 #define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
980 #define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
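/*
 * E.g. with s_bsize = 8192 and s_fsize = 1024 (illustrative values),
 * an i_size of 20000 bytes gives DIRECT_BLOCK = 3 and
 * DIRECT_FRAGMENT = 20: the number of blocks/fragments needed to hold
 * i_size, rounded up.
 */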
981 
982 static void ufs_trunc_direct(struct inode *inode)
983 {
984 	struct ufs_inode_info *ufsi = UFS_I(inode);
985 	struct super_block * sb;
986 	struct ufs_sb_private_info * uspi;
987 	void *p;
988 	u64 frag1, frag2, frag3, frag4, block1, block2;
989 	struct to_free ctx = {.inode = inode};
990 	unsigned i, tmp;
991 
992 	UFSD("ENTER: ino %lu\n", inode->i_ino);
993 
994 	sb = inode->i_sb;
995 	uspi = UFS_SB(sb)->s_uspi;
996 
997 	frag1 = DIRECT_FRAGMENT;
998 	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
999 	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
1000 	frag3 = frag4 & ~uspi->s_fpbmask;
1001 	block1 = block2 = 0;
1002 	if (frag2 > frag3) {
1003 		frag2 = frag4;
1004 		frag3 = frag4 = 0;
1005 	} else if (frag2 < frag3) {
1006 		block1 = ufs_fragstoblks (frag2);
1007 		block2 = ufs_fragstoblks (frag3);
1008 	}
1009 
1010 	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
1011 	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
1012 	     (unsigned long long)frag1, (unsigned long long)frag2,
1013 	     (unsigned long long)block1, (unsigned long long)block2,
1014 	     (unsigned long long)frag3, (unsigned long long)frag4);
1015 
1016 	if (frag1 >= frag2)
1017 		goto next1;
1018 
1019 	/*
1020 	 * Free the leading fragments, up to the next block boundary
1021 	 */
1022 	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
1023 	tmp = ufs_data_ptr_to_cpu(sb, p);
1024 	if (!tmp)
1025 		ufs_panic (sb, "ufs_trunc_direct", "internal error");
1026 	frag2 -= frag1;
1027 	frag1 = ufs_fragnum (frag1);
1028 
1029 	ufs_free_fragments(inode, tmp + frag1, frag2);
1030 
1031 next1:
1032 	/*
1033 	 * Free whole blocks
1034 	 */
1035 	for (i = block1 ; i < block2; i++) {
1036 		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
1037 		tmp = ufs_data_ptr_to_cpu(sb, p);
1038 		if (!tmp)
1039 			continue;
1040 		write_seqlock(&ufsi->meta_lock);
1041 		ufs_data_ptr_clear(uspi, p);
1042 		write_sequnlock(&ufsi->meta_lock);
1043 
1044 		free_data(&ctx, tmp, uspi->s_fpb);
1045 	}
1046 
1047 	free_data(&ctx, 0, 0);
1048 
1049 	if (frag3 >= frag4)
1050 		goto next3;
1051 
1052 	/*
1053 	 * Free the fragments of the last, partially used block
1054 	 */
1055 	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
1056 	tmp = ufs_data_ptr_to_cpu(sb, p);
1057 	if (!tmp)
1058 		ufs_panic(sb, "ufs_trunc_direct", "internal error");
1059 	frag4 = ufs_fragnum (frag4);
1060 	write_seqlock(&ufsi->meta_lock);
1061 	ufs_data_ptr_clear(uspi, p);
1062 	write_sequnlock(&ufsi->meta_lock);
1063 
1064 	ufs_free_fragments (inode, tmp, frag4);
1065  next3:
1066 
1067 	UFSD("EXIT: ino %lu\n", inode->i_ino);
1068 }
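/*
 * The frag1..frag4 arithmetic above partitions the range being freed
 * into a partial head block [frag1, frag2), whole blocks
 * [block1, block2), and a partial tail block [frag3, frag4).  E.g.
 * with s_fpb = 8, truncating from lastfrag = 43 down to frag1 = 13
 * gives frag2 = 16, block1 = 2, block2 = 5, frag3 = 40, frag4 = 43.
 */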
1069 
1070 static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
1071 {
1072 	struct super_block *sb = inode->i_sb;
1073 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1074 	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
1075 	unsigned i;
1076 
1077 	if (!ubh)
1078 		return;
1079 
1080 	if (--depth) {
1081 		for (i = 0; i < uspi->s_apb; i++) {
1082 			void *p = ubh_get_data_ptr(uspi, ubh, i);
1083 			u64 block = ufs_data_ptr_to_cpu(sb, p);
1084 			if (block)
1085 				free_full_branch(inode, block, depth);
1086 		}
1087 	} else {
1088 		struct to_free ctx = {.inode = inode};
1089 
1090 		for (i = 0; i < uspi->s_apb; i++) {
1091 			void *p = ubh_get_data_ptr(uspi, ubh, i);
1092 			u64 block = ufs_data_ptr_to_cpu(sb, p);
1093 			if (block)
1094 				free_data(&ctx, block, uspi->s_fpb);
1095 		}
1096 		free_data(&ctx, 0, 0);
1097 	}
1098 
1099 	ubh_bforget(ubh);
1100 	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
1101 }
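/*
 * free_full_branch() is called once ind_block is no longer reachable
 * from the inode: it recursively frees everything the indirect block
 * points to (coalescing data runs via free_data()), forgets the
 * buffer, and finally frees the indirect block itself.
 */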
1102 
1103 static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
1104 {
1105 	struct super_block *sb = inode->i_sb;
1106 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1107 	unsigned i;
1108 
1109 	if (--depth) {
1110 		for (i = from; i < uspi->s_apb ; i++) {
1111 			void *p = ubh_get_data_ptr(uspi, ubh, i);
1112 			u64 block = ufs_data_ptr_to_cpu(sb, p);
1113 			if (block) {
1114 				write_seqlock(&UFS_I(inode)->meta_lock);
1115 				ufs_data_ptr_clear(uspi, p);
1116 				write_sequnlock(&UFS_I(inode)->meta_lock);
1117 				ubh_mark_buffer_dirty(ubh);
1118 				free_full_branch(inode, block, depth);
1119 			}
1120 		}
1121 	} else {
1122 		struct to_free ctx = {.inode = inode};
1123 
1124 		for (i = from; i < uspi->s_apb; i++) {
1125 			void *p = ubh_get_data_ptr(uspi, ubh, i);
1126 			u64 block = ufs_data_ptr_to_cpu(sb, p);
1127 			if (block) {
1128 				write_seqlock(&UFS_I(inode)->meta_lock);
1129 				ufs_data_ptr_clear(uspi, p);
1130 				write_sequnlock(&UFS_I(inode)->meta_lock);
1131 				ubh_mark_buffer_dirty(ubh);
1132 				free_data(&ctx, block, uspi->s_fpb);
1133 			}
1134 		}
1135 		free_data(&ctx, 0, 0);
1136 	}
1137 	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
1138 		ubh_sync_block(ubh);
1139 	ubh_brelse(ubh);
1140 }
1141 
1142 static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
1143 {
1144 	int err = 0;
1145 	struct super_block *sb = inode->i_sb;
1146 	struct address_space *mapping = inode->i_mapping;
1147 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1148 	unsigned i, end;
1149 	sector_t lastfrag;
1150 	struct page *lastpage;
1151 	struct buffer_head *bh;
1152 	u64 phys64;
1153 
1154 	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
1155 
1156 	if (!lastfrag)
1157 		goto out;
1158 
1159 	lastfrag--;
1160 
1161 	lastpage = ufs_get_locked_page(mapping, lastfrag >>
1162 				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
1163 	if (IS_ERR(lastpage)) {
1164 		err = -EIO;
1165 		goto out;
1166 	}
1167 
1168 	end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
1169 	bh = page_buffers(lastpage);
1170 	for (i = 0; i < end; ++i)
1171 		bh = bh->b_this_page;
1172 
1173 	err = ufs_getfrag_block(inode, lastfrag, bh, 1);
1174 
1175 	if (unlikely(err))
1176 		goto out_unlock;
1177 
1178 	if (buffer_new(bh)) {
1179 		clear_buffer_new(bh);
1180 		unmap_underlying_metadata(bh->b_bdev,
1181 					  bh->b_blocknr);
1182 		/*
1183 		 * we do not zero out the fragment here:
1184 		 * if it was mapped to a hole, it already
1185 		 * contains zeroes
1186 		 */
1187 		set_buffer_uptodate(bh);
1188 		mark_buffer_dirty(bh);
1189 		set_page_dirty(lastpage);
1190 	}
1191 
1192 	if (lastfrag >= UFS_IND_FRAGMENT) {
1193 		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
1194 		phys64 = bh->b_blocknr + 1;
1195 		for (i = 0; i < end; ++i) {
1196 			bh = sb_getblk(sb, i + phys64);
1197 			lock_buffer(bh);
1198 			memset(bh->b_data, 0, sb->s_blocksize);
1199 			set_buffer_uptodate(bh);
1200 			mark_buffer_dirty(bh);
1201 			unlock_buffer(bh);
1202 			sync_dirty_buffer(bh);
1203 			brelse(bh);
1204 		}
1205 	}
1206 out_unlock:
1207 	ufs_put_locked_page(lastpage);
1208 out:
1209 	return err;
1210 }
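/*
 * ufs_alloc_lastblock() is called by ufs_truncate() before i_size
 * changes: it maps (allocating if necessary) the fragment that will
 * become the last one, and once the file extends past the direct
 * range, where space is handed out in whole blocks, it zeroes the
 * remaining fragments of that block on disk.
 */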
1211 
1212 static void __ufs_truncate_blocks(struct inode *inode)
1213 {
1214 	struct ufs_inode_info *ufsi = UFS_I(inode);
1215 	struct super_block *sb = inode->i_sb;
1216 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1217 	unsigned offsets[4];
1218 	int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets);
1219 	int depth2;
1220 	unsigned i;
1221 	struct ufs_buffer_head *ubh[3];
1222 	void *p;
1223 	u64 block;
1224 
1225 	if (!depth)
1226 		return;
1227 
1228 	/* find the last non-zero in offsets[] */
1229 	for (depth2 = depth - 1; depth2; depth2--)
1230 		if (offsets[depth2])
1231 			break;
1232 
1233 	mutex_lock(&ufsi->truncate_mutex);
1234 	if (depth == 1) {
1235 		ufs_trunc_direct(inode);
1236 		offsets[0] = UFS_IND_BLOCK;
1237 	} else {
1238 		/* get the blocks that should be partially emptied */
1239 		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]);
1240 		for (i = 0; i < depth2; i++) {
1241 			offsets[i]++;	/* next branch is fully freed */
1242 			block = ufs_data_ptr_to_cpu(sb, p);
1243 			if (!block)
1244 				break;
1245 			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
1246 			if (!ubh[i]) {
1247 				write_seqlock(&ufsi->meta_lock);
1248 				ufs_data_ptr_clear(uspi, p);
1249 				write_sequnlock(&ufsi->meta_lock);
1250 				break;
1251 			}
1252 			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]);
1253 		}
1254 		while (i--)
1255 			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
1256 	}
1257 	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
1258 		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
1259 		block = ufs_data_ptr_to_cpu(sb, p);
1260 		if (block) {
1261 			write_seqlock(&ufsi->meta_lock);
1262 			ufs_data_ptr_clear(uspi, p);
1263 			write_sequnlock(&ufsi->meta_lock);
1264 			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
1265 		}
1266 	}
1267 	ufsi->i_lastfrag = DIRECT_FRAGMENT;
1268 	mark_inode_dirty(inode);
1269 	mutex_unlock(&ufsi->truncate_mutex);
1270 }
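/*
 * __ufs_truncate_blocks() splits the work: ufs_trunc_direct() or the
 * ubh[] walk trims the partially kept branch along offsets[], with
 * free_branch_tail() emptying each level past the cut, and the final
 * loop releases every higher top-level branch in full via
 * free_full_branch().  i_lastfrag is then pulled back to match the
 * new i_size.
 */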
1271 
1272 static int ufs_truncate(struct inode *inode, loff_t size)
1273 {
1274 	int err = 0;
1275 
1276 	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
1277 	     inode->i_ino, (unsigned long long)size,
1278 	     (unsigned long long)i_size_read(inode));
1279 
1280 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1281 	      S_ISLNK(inode->i_mode)))
1282 		return -EINVAL;
1283 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1284 		return -EPERM;
1285 
1286 	err = ufs_alloc_lastblock(inode, size);
1287 
1288 	if (err)
1289 		goto out;
1290 
1291 	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);
1292 
1293 	truncate_setsize(inode, size);
1294 
1295 	__ufs_truncate_blocks(inode);
1296 	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
1297 	mark_inode_dirty(inode);
1298 out:
1299 	UFSD("EXIT: err %d\n", err);
1300 	return err;
1301 }
1302 
1303 void ufs_truncate_blocks(struct inode *inode)
1304 {
1305 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1306 	      S_ISLNK(inode->i_mode)))
1307 		return;
1308 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1309 		return;
1310 	__ufs_truncate_blocks(inode);
1311 }
1312 
1313 int ufs_setattr(struct dentry *dentry, struct iattr *attr)
1314 {
1315 	struct inode *inode = d_inode(dentry);
1316 	unsigned int ia_valid = attr->ia_valid;
1317 	int error;
1318 
1319 	error = inode_change_ok(inode, attr);
1320 	if (error)
1321 		return error;
1322 
1323 	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
1324 		error = ufs_truncate(inode, attr->ia_size);
1325 		if (error)
1326 			return error;
1327 	}
1328 
1329 	setattr_copy(inode, attr);
1330 	mark_inode_dirty(inode);
1331 	return 0;
1332 }
1333 
1334 const struct inode_operations ufs_file_inode_operations = {
1335 	.setattr = ufs_setattr,
1336 };
1337