/*
 *  linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 *  from
 *
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <asm/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;
	int ptrs_bits = uspi->s_apbshift;
	const long direct_blocks = UFS_NDADDR,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;


	UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
	}
	return n;
}

typedef struct {
	void	*p;
	union {
		__fs32	key32;
		__fs64	key64;
	};
	struct buffer_head *bh;
} Indirect;

static inline int grow_chain32(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs32 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key32 = *(__fs32 *)(to->p = v);
		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

static inline int grow_chain64(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs64 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key64 = *(__fs64 *)(to->p = v);
		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
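 *
 * The chain of indirect blocks is re-validated against the inode's
 * meta_lock seqlock at every step (see grow_chain32/grow_chain64); if a
 * concurrent truncate changes a pointer we have already read, the whole
 * lookup is restarted from the inode.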
 */

static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 mask = (u64) uspi->s_apbmask >> uspi->s_fpbshift;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	Indirect chain[4], *q = chain;
	unsigned *p;
	unsigned flags = UFS_SB(sb)->s_flags;
	u64 res = 0;

	UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
	     uspi->s_fpbshift, uspi->s_apbmask,
	     (unsigned long long)mask);

	if (depth == 0)
		goto no_block;

again:
	p = offsets;

	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;

	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
		goto changed;
	if (!q->key32)
		goto no_block;
	while (--depth) {
		__fs32 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs32_to_cpu(sb, q->key32) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs32 *)bh->b_data + (n & mask);
		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key32)
			goto no_block;
	}
	res = fs32_to_cpu(sb, q->key32);
	goto found;

ufs2:
	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
		goto changed;
	if (!q->key64)
		goto no_block;

	while (--depth) {
		__fs64 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs64_to_cpu(sb, q->key64) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs64 *)bh->b_data + (n & mask);
		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key64)
			goto no_block;
	}
	res = fs64_to_cpu(sb, q->key64);
found:
	res += uspi->s_sbbase;
no_block:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	return res;

changed:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	goto again;
}

/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @fragment: number of the `fragment' that holds the pointer
 *	to the newly allocated fragment(s)
 * @new_fragment: number of the newly allocated fragment(s)
 * @required: how many fragments we require
 * @err: set to an error code if something goes wrong
 * @phys: where we store the physical number of the newly allocated fragments;
 *	NULL when we are allocating metadata (indirect blocks, for example)
 *	rather than data
 * @new: set to 1 when a new block has been allocated
 * @locked_page: for ufs_new_fragments()
 */
static struct buffer_head *
ufs_inode_getfrag(struct inode *inode, u64 fragment,
		  sector_t new_fragment, unsigned int required, int *err,
		  long *phys, int *new, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head *result;
	unsigned blockoff, lastblockoff;
	u64 tmp, goal, lastfrag, block, lastblock;
	void *p, *p2;

	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, required %u, "
	     "metadata %d\n", inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)new_fragment, required, !phys);

	/* TODO : to be done for write support
	if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;
	 */

	block = ufs_fragstoblks (fragment);
	blockoff = ufs_fragnum (fragment);
	p = ufs_get_direct_data_ptr(uspi, ufsi, block);

	goal = 0;

	tmp = ufs_data_ptr_to_cpu(sb, p);

	lastfrag = ufsi->i_lastfrag;
	if (tmp && fragment < lastfrag) {
		if (!phys) {
			return sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
		} else {
			*phys = uspi->s_sbbase + tmp + blockoff;
			return NULL;
		}
	}

	lastblock = ufs_fragstoblks (lastfrag);
	lastblockoff = ufs_fragnum (lastfrag);
	/*
	 * We will extend file into new block beyond last allocated block
	 */
	if (lastblock < block) {
		/*
		 * We must reallocate last allocated block
		 */
		if (lastblockoff) {
			p2 = ufs_get_direct_data_ptr(uspi, ufsi, lastblock);
			tmp = ufs_new_fragments(inode, p2, lastfrag,
						ufs_data_ptr_to_cpu(sb, p2),
						uspi->s_fpb - lastblockoff,
						err, locked_page);
			if (!tmp)
				return NULL;
			lastfrag = ufsi->i_lastfrag;
		}
		tmp = ufs_data_ptr_to_cpu(sb,
					  ufs_get_direct_data_ptr(uspi, ufsi,
								  lastblock));
		if (tmp)
			goal = tmp + uspi->s_fpb;
		tmp = ufs_new_fragments (inode, p, fragment - blockoff,
					 goal, required + blockoff,
					 err,
					 phys != NULL ? locked_page : NULL);
	} else if (lastblock == block) {
		/*
		 * We will extend last allocated block
		 */
		tmp = ufs_new_fragments(inode, p, fragment -
					(blockoff - lastblockoff),
					ufs_data_ptr_to_cpu(sb, p),
					required + (blockoff - lastblockoff),
					err, phys != NULL ? locked_page : NULL);
	} else /* (lastblock > block) */ {
		/*
		 * We will allocate new block before last allocated block
		 */
		if (block) {
			tmp = ufs_data_ptr_to_cpu(sb,
						  ufs_get_direct_data_ptr(uspi, ufsi, block - 1));
			if (tmp)
				goal = tmp + uspi->s_fpb;
		}
		tmp = ufs_new_fragments(inode, p, fragment - blockoff,
					goal, uspi->s_fpb, err,
					phys != NULL ? locked_page : NULL);
	}
	if (!tmp) {
		*err = -ENOSPC;
		return NULL;
	}

	if (!phys) {
		result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
	} else {
		*phys = uspi->s_sbbase + tmp + blockoff;
		result = NULL;
		*err = 0;
		*new = 1;
	}

	inode->i_ctime = CURRENT_TIME_SEC;
	if (IS_SYNC(inode))
		ufs_sync_inode (inode);
	mark_inode_dirty(inode);
	return result;

     /* This part : To be implemented ....
	Required only for writing, not required for READ-ONLY.
ufs2:

	u2_block = ufs_fragstoblks(fragment);
	u2_blockoff = ufs_fragnum(fragment);
	p = ufsi->i_u1.u2_i_data + block;
	goal = 0;

repeat2:
	tmp = fs32_to_cpu(sb, *p);
	lastfrag = ufsi->i_lastfrag;

     */
}

/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @bh: pointer to the block that holds the "pointer" to the newly allocated block
 * @fragment: number of the `fragment' that holds the pointer
 *	to the newly allocated block
 * @new_fragment: number of the newly allocated fragment
 *	(the block will hold this fragment and the following uspi->s_fpb-1 ones)
 * @err: see ufs_inode_getfrag()
 * @phys: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_page: see ufs_inode_getfrag()
 */
static struct buffer_head *
ufs_inode_getblock(struct inode *inode, struct buffer_head *bh,
		   u64 fragment, sector_t new_fragment, int *err,
		   long *phys, int *new, struct page *locked_page)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head *result;
	unsigned blockoff;
	u64 tmp, goal, block;
	void *p;

	block = ufs_fragstoblks (fragment);
	blockoff = ufs_fragnum (fragment);

	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, metadata %d\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)new_fragment, !phys);

	result = NULL;
	if (!bh)
		goto out;
	if (!buffer_uptodate(bh)) {
		ll_rw_block (READ, 1, &bh);
		wait_on_buffer (bh);
		if (!buffer_uptodate(bh))
			goto out;
	}
	if (uspi->fs_magic == UFS2_MAGIC)
		p = (__fs64 *)bh->b_data + block;
	else
		p = (__fs32 *)bh->b_data + block;

	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp) {
		if (!phys)
			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
		else
			*phys = uspi->s_sbbase + tmp + blockoff;
		goto out;
	}

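	/*
	 * Pick an allocation goal: right after the block referenced by the
	 * previous slot of this indirect block, or, failing that, right
	 * after the indirect block itself.
	 */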
	if (block && (uspi->fs_magic == UFS2_MAGIC ?
		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[block-1])) :
		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[block-1]))))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_page);
	if (!tmp)
		goto out;

	if (!phys) {
		result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
	} else {
		*phys = uspi->s_sbbase + tmp + blockoff;
		*new = 1;
	}

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
out:
	brelse (bh);
	UFSD("EXIT\n");
	return result;
}

/**
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * readpage, writepage and so on
 */

static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_info *sbi = UFS_SB(sb);
	struct ufs_sb_private_info *uspi = sbi->s_uspi;
	struct buffer_head *bh;
	int ret, err, new;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
	unsigned long ptr, phys;
	u64 phys64 = 0;

	if (!create) {
		phys64 = ufs_frag_map(inode, offsets, depth);
		if (phys64) {
			phys64 += fragment & uspi->s_fpbmask;
			map_bh(bh_result, sb, phys64);
		}
		return 0;
	}

	/* This code entered only while writing ....? */

	err = -EIO;
	new = 0;
	ret = 0;
	bh = NULL;

	mutex_lock(&UFS_I(inode)->truncate_mutex);

	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
	if (!depth)
		goto abort_too_big;

	err = 0;
	ptr = fragment;

	/*
	 * ok, these macros clean the logic up a bit and make
	 * it much more readable:
	 */
#define GET_INODE_DATABLOCK(x) \
		ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new,\
				  bh_result->b_page)
#define GET_INODE_PTR(x) \
		ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL,\
				  bh_result->b_page)
#define GET_INDIRECT_DATABLOCK(x) \
		ufs_inode_getblock(inode, bh, x, fragment,	\
				   &err, &phys, &new, bh_result->b_page)
#define GET_INDIRECT_PTR(x) \
		ufs_inode_getblock(inode, bh, x, fragment,	\
				   &err, NULL, NULL, NULL)

	if (depth == 1) {
		bh = GET_INODE_DATABLOCK(ptr);
		goto out;
	}
	ptr -= UFS_NDIR_FRAGMENT;
	if (depth == 2) {
		bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift));
		goto get_indirect;
	}
	ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
	if (depth == 3) {
		bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift));
		goto get_double;
	}
	ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
	bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift));
	bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask);
get_double:
	bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask);
get_indirect:
	bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask);

#undef GET_INODE_DATABLOCK
#undef GET_INODE_PTR
#undef GET_INDIRECT_DATABLOCK
#undef GET_INDIRECT_PTR

out:
	if (err)
		goto abort;
	if (new)
		set_buffer_new(bh_result);
	map_bh(bh_result, sb, phys);
abort:
	mutex_unlock(&UFS_I(inode)->truncate_mutex);

	return err;

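	/* ufs_block_to_path() returned 0: the fragment lies beyond what the
	 * direct/indirect block scheme can address */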
abort_too_big:
	ufs_warning(sb, "ufs_get_block", "block > big");
	goto abort;
}

static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, ufs_getfrag_block, wbc);
}

static int ufs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, ufs_getfrag_block);
}

int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, ufs_getfrag_block);
}

static void ufs_truncate_blocks(struct inode *);

static void ufs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ufs_truncate_blocks(inode);
	}
}

static int ufs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				ufs_getfrag_block);
	if (unlikely(ret))
		ufs_write_failed(mapping, pos + len);

	return ret;
}

static int ufs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret < len)
		ufs_write_failed(mapping, pos + len);
	return ret;
}

static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, ufs_getfrag_block);
}

const struct address_space_operations ufs_aops = {
	.readpage = ufs_readpage,
	.writepage = ufs_writepage,
	.write_begin = ufs_write_begin,
	.write_end = ufs_write_end,
	.bmap = ufs_bmap
};

static void ufs_set_inode_ops(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ufs_file_inode_operations;
		inode->i_fop = &ufs_file_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ufs_dir_inode_operations;
		inode->i_fop = &ufs_dir_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks) {
			inode->i_op = &ufs_fast_symlink_inode_operations;
			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
		} else {
			inode->i_op = &ufs_symlink_inode_operations;
			inode->i_mapping->a_ops = &ufs_aops;
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}

static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
	if (inode->i_nlink == 0) {
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
		return -1;
	}

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));

	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);


	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
	if (inode->i_nlink == 0) {
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
		return -1;
	}

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));

	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
	inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
	inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
	inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
	inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
	inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
	inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
	/*
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	*/

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
		       sizeof(ufs2_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
	struct ufs_inode_info *ufsi;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head *bh;
	struct inode *inode;
	int err;

	UFSD("ENTER, ino %lu\n", ino);

	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
			    ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ufsi = UFS_I(inode);

	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
			    inode->i_ino);
		goto bad_inode;
	}
	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		err = ufs2_read_inode(inode,
				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		err = ufs1_read_inode(inode,
				      ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	if (err)
		goto bad_inode;
	inode->i_version++;
	ufsi->i_lastfrag =
		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
	ufsi->i_dir_start_lookup = 0;
	ufsi->i_osync = 0;

	ufs_set_inode_ops(inode);

	brelse(bh);

	UFSD("EXIT\n");
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(-EIO);
}

static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atime.tv_usec = 0;
	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctime.tv_usec = 0;
	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtime.tv_usec = 0;
	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs_inode));
}

static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	UFSD("ENTER\n");
	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);

	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs2_inode));
	UFSD("EXIT\n");
}

static int ufs_update_inode(struct inode *inode, int do_sync)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head *bh;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
		return -1;
	}

	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
		return -1;
	}
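	/* the buffer holds a whole filesystem block of on-disk inodes;
	 * ufs_inotofsbo() gives this inode's index within that block */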
	if (uspi->fs_magic == UFS2_MAGIC) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		ufs2_update_inode(inode,
				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;

		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse (bh);

	UFSD("EXIT\n");
	return 0;
}

int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ufs_sync_inode (struct inode *inode)
{
	return ufs_update_inode (inode, 1);
}

void ufs_evict_inode(struct inode *inode)
{
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode))
		want_delete = 1;

	truncate_inode_pages_final(&inode->i_data);
	if (want_delete) {
		inode->i_size = 0;
		if (inode->i_blocks)
			ufs_truncate_blocks(inode);
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	if (want_delete)
		ufs_free_inode(inode);
}

struct to_free {
	struct inode *inode;
	u64 to;
	unsigned count;
};

static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
{
	if (ctx->count && ctx->to != from) {
		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
		ctx->count = 0;
	}
	ctx->count += count;
	ctx->to = from + count;
}

#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)

static void ufs_trunc_direct(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb;
	struct ufs_sb_private_info *uspi;
	void *p;
	u64 frag1, frag2, frag3, frag4, block1, block2;
	struct to_free ctx = {.inode = inode};
	unsigned i, tmp;

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	frag1 = DIRECT_FRAGMENT;
	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
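	/*
	 * Round frag2 up and frag3 down to block boundaries: whole blocks in
	 * [frag2, frag3) are freed as blocks below, while the partial ranges
	 * [frag1, frag2) and [frag3, frag4) are freed as fragments.
	 */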
	frag2 = ((frag1 & uspi->s_fpbmask) ?
		 ((frag1 | uspi->s_fpbmask) + 1) : frag1);
	frag3 = frag4 & ~uspi->s_fpbmask;
	block1 = block2 = 0;
	if (frag2 > frag3) {
		frag2 = frag4;
		frag3 = frag4 = 0;
	} else if (frag2 < frag3) {
		block1 = ufs_fragstoblks (frag2);
		block2 = ufs_fragstoblks (frag3);
	}

	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
	     (unsigned long long)frag1, (unsigned long long)frag2,
	     (unsigned long long)block1, (unsigned long long)block2,
	     (unsigned long long)frag3, (unsigned long long)frag4);

	if (frag1 >= frag2)
		goto next1;

	/*
	 * Free first free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic (sb, "ufs_trunc_direct", "internal error");
	frag2 -= frag1;
	frag1 = ufs_fragnum (frag1);

	ufs_free_fragments(inode, tmp + frag1, frag2);

next1:
	/*
	 * Free whole blocks
	 */
	for (i = block1; i < block2; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		tmp = ufs_data_ptr_to_cpu(sb, p);
		if (!tmp)
			continue;
		write_seqlock(&ufsi->meta_lock);
		ufs_data_ptr_clear(uspi, p);
		write_sequnlock(&ufsi->meta_lock);

		free_data(&ctx, tmp, uspi->s_fpb);
	}

	free_data(&ctx, 0, 0);

	if (frag3 >= frag4)
		goto next3;

	/*
	 * Free last free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic(sb, "ufs_truncate_direct", "internal error");
	frag4 = ufs_fragnum (frag4);
	write_seqlock(&ufsi->meta_lock);
	ufs_data_ptr_clear(uspi, p);
	write_sequnlock(&ufsi->meta_lock);

	ufs_free_fragments (inode, tmp, frag4);
next3:

	UFSD("EXIT: ino %lu\n", inode->i_ino);
}

static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
	unsigned i;

	if (!ubh)
		return;

	if (--depth) {
		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_full_branch(inode, block, depth);
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_data(&ctx, block, uspi->s_fpb);
		}
		free_data(&ctx, 0, 0);
	}

	ubh_bforget(ubh);
	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
}

static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i;

	if (--depth) {
		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_full_branch(inode, block, depth);
			}
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_data(&ctx, block, uspi->s_fpb);
			}
		}
		free_data(&ctx, 0, 0);
	}
	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
		ubh_sync_block(ubh);
	ubh_brelse(ubh);
}

static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
{
	int err = 0;
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i, end;
	sector_t lastfrag;
	struct page *lastpage;
	struct buffer_head *bh;
	u64 phys64;

	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (!lastfrag)
		goto out;

	lastfrag--;

	lastpage = ufs_get_locked_page(mapping, lastfrag >>
				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
	if (IS_ERR(lastpage)) {
		err = -EIO;
		goto out;
	}

	end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
	bh = page_buffers(lastpage);
	for (i = 0; i < end; ++i)
		bh = bh->b_this_page;


	err = ufs_getfrag_block(inode, lastfrag, bh, 1);

	if (unlikely(err))
		goto out_unlock;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev,
					  bh->b_blocknr);
		/*
		 * we do not zero out the fragment: if it was mapped
		 * to a hole, it already contains zeroes
		 */
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		set_page_dirty(lastpage);
	}

	if (lastfrag >= UFS_IND_FRAGMENT) {
		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
		phys64 = bh->b_blocknr + 1;
		for (i = 0; i < end; ++i) {
			bh = sb_getblk(sb, i + phys64);
			lock_buffer(bh);
			memset(bh->b_data, 0, sb->s_blocksize);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			unlock_buffer(bh);
			sync_dirty_buffer(bh);
			brelse(bh);
		}
	}
out_unlock:
	ufs_put_locked_page(lastpage);
out:
	return err;
}

static void __ufs_truncate_blocks(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets);
	int depth2;
	unsigned i;
	struct ufs_buffer_head *ubh[3];
	void *p;
	u64 block;

	if (!depth)
		return;

	/* find the last non-zero in offsets[] */
	for (depth2 = depth - 1; depth2; depth2--)
		if (offsets[depth2])
			break;

	mutex_lock(&ufsi->truncate_mutex);
	if (depth == 1) {
		ufs_trunc_direct(inode);
		offsets[0] = UFS_IND_BLOCK;
	} else {
		/* get the blocks that should be partially emptied */
		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]);
		for (i = 0; i < depth2; i++) {
			offsets[i]++; /* next branch is fully freed */
			block = ufs_data_ptr_to_cpu(sb, p);
			if (!block)
				break;
			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
			if (!ubh[i]) {
				write_seqlock(&ufsi->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&ufsi->meta_lock);
				break;
			}
			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]);
		}
		while (i--)
			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
	}
	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		block = ufs_data_ptr_to_cpu(sb, p);
		if (block) {
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);
			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
		}
	}
	ufsi->i_lastfrag = DIRECT_FRAGMENT;
	mark_inode_dirty(inode);
	mutex_unlock(&ufsi->truncate_mutex);
}

static int ufs_truncate(struct inode *inode, loff_t size)
{
	int err = 0;

	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
	     inode->i_ino, (unsigned long long)size,
	     (unsigned long long)i_size_read(inode));

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	err = ufs_alloc_lastblock(inode, size);

	if (err)
		goto out;

	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);

	truncate_setsize(inode, size);

	__ufs_truncate_blocks(inode);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
out:
	UFSD("EXIT: err %d\n", err);
	return err;
}

static void ufs_truncate_blocks(struct inode *inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;
	__ufs_truncate_blocks(inode);
}

int ufs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	unsigned int ia_valid = attr->ia_valid;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
		error = ufs_truncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations ufs_file_inode_operations = {
	.setattr = ufs_setattr,
};