/*
 *  linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 *  from
 *
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <asm/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;
	int ptrs_bits = uspi->s_apbshift;
	const long direct_blocks = UFS_NDADDR,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;


	UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
	}
	return n;
}

typedef struct {
	void *p;
	union {
		__fs32 key32;
		__fs64 key64;
	};
	struct buffer_head *bh;
} Indirect;

static inline int grow_chain32(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs32 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key32 = *(__fs32 *)(to->p = v);
		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

static inline int grow_chain64(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs64 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key64 = *(__fs64 *)(to->p = v);
		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
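 *
 * ufs_frag_map() walks the chain described by the offsets[] array that
 * ufs_block_to_path() fills in: offsets[0] indexes the inode's direct or
 * indirect slots, and each further entry indexes one level of indirect
 * block.  As a purely illustrative example, assuming 2048 address
 * pointers per block (uspi->s_apb == 2048), logical block
 * UFS_NDADDR + 5 decomposes to { UFS_IND_BLOCK, 5 }, i.e. the sixth
 * entry of the single indirect block.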
120 */ 121 122 static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth) 123 { 124 struct ufs_inode_info *ufsi = UFS_I(inode); 125 struct super_block *sb = inode->i_sb; 126 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 127 u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift; 128 int shift = uspi->s_apbshift-uspi->s_fpbshift; 129 Indirect chain[4], *q = chain; 130 unsigned *p; 131 unsigned flags = UFS_SB(sb)->s_flags; 132 u64 res = 0; 133 134 UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n", 135 uspi->s_fpbshift, uspi->s_apbmask, 136 (unsigned long long)mask); 137 138 if (depth == 0) 139 goto no_block; 140 141 again: 142 p = offsets; 143 144 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) 145 goto ufs2; 146 147 if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q)) 148 goto changed; 149 if (!q->key32) 150 goto no_block; 151 while (--depth) { 152 __fs32 *ptr; 153 struct buffer_head *bh; 154 unsigned n = *p++; 155 156 bh = sb_bread(sb, uspi->s_sbbase + 157 fs32_to_cpu(sb, q->key32) + (n>>shift)); 158 if (!bh) 159 goto no_block; 160 ptr = (__fs32 *)bh->b_data + (n & mask); 161 if (!grow_chain32(ufsi, bh, ptr, chain, ++q)) 162 goto changed; 163 if (!q->key32) 164 goto no_block; 165 } 166 res = fs32_to_cpu(sb, q->key32); 167 goto found; 168 169 ufs2: 170 if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q)) 171 goto changed; 172 if (!q->key64) 173 goto no_block; 174 175 while (--depth) { 176 __fs64 *ptr; 177 struct buffer_head *bh; 178 unsigned n = *p++; 179 180 bh = sb_bread(sb, uspi->s_sbbase + 181 fs64_to_cpu(sb, q->key64) + (n>>shift)); 182 if (!bh) 183 goto no_block; 184 ptr = (__fs64 *)bh->b_data + (n & mask); 185 if (!grow_chain64(ufsi, bh, ptr, chain, ++q)) 186 goto changed; 187 if (!q->key64) 188 goto no_block; 189 } 190 res = fs64_to_cpu(sb, q->key64); 191 found: 192 res += uspi->s_sbbase; 193 no_block: 194 while (q > chain) { 195 brelse(q->bh); 196 q--; 197 } 198 return res; 199 200 changed: 201 while (q > chain) { 202 brelse(q->bh); 203 q--; 204 } 205 goto again; 206 } 207 208 /** 209 * ufs_inode_getfrag() - allocate new fragment(s) 210 * @inode: pointer to inode 211 * @fragment: number of `fragment' which hold pointer 212 * to new allocated fragment(s) 213 * @new_fragment: number of new allocated fragment(s) 214 * @required: how many fragment(s) we require 215 * @err: we set it if something wrong 216 * @phys: pointer to where we save physical number of new allocated fragments, 217 * NULL if we allocate not data(indirect blocks for example). 
 * @new: set if we allocate a new block
 * @locked_page: for ufs_new_fragments()
 */
static u64
ufs_inode_getfrag(struct inode *inode, u64 fragment,
		  sector_t new_fragment, unsigned int required, int *err,
		  long *phys, int *new, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned blockoff, lastblockoff;
	u64 tmp, goal, lastfrag, block, lastblock;
	void *p, *p2;

	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, required %u, "
	     "metadata %d\n", inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)new_fragment, required, !phys);

	/* TODO : to be done for write support
	if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;
	 */

	block = ufs_fragstoblks (fragment);
	blockoff = ufs_fragnum (fragment);
	p = ufs_get_direct_data_ptr(uspi, ufsi, block);

	goal = 0;

	tmp = ufs_data_ptr_to_cpu(sb, p);

	lastfrag = ufsi->i_lastfrag;
	if (tmp && fragment < lastfrag)
		goto out;

	lastblock = ufs_fragstoblks (lastfrag);
	lastblockoff = ufs_fragnum (lastfrag);
	/*
	 * We will extend the file into a new block beyond the last
	 * allocated block
	 */
	if (lastblock < block) {
		/*
		 * We must reallocate the last allocated block
		 */
		if (lastblockoff) {
			p2 = ufs_get_direct_data_ptr(uspi, ufsi, lastblock);
			tmp = ufs_new_fragments(inode, p2, lastfrag,
						ufs_data_ptr_to_cpu(sb, p2),
						uspi->s_fpb - lastblockoff,
						err, locked_page);
			if (!tmp)
				return 0;
			lastfrag = ufsi->i_lastfrag;
		}
		tmp = ufs_data_ptr_to_cpu(sb,
					  ufs_get_direct_data_ptr(uspi, ufsi,
								  lastblock));
		if (tmp)
			goal = tmp + uspi->s_fpb;
		tmp = ufs_new_fragments (inode, p, fragment - blockoff,
					 goal, required + blockoff,
					 err,
					 phys != NULL ? locked_page : NULL);
	} else if (lastblock == block) {
		/*
		 * We will extend the last allocated block
		 */
		tmp = ufs_new_fragments(inode, p, fragment -
					(blockoff - lastblockoff),
					ufs_data_ptr_to_cpu(sb, p),
					required + (blockoff - lastblockoff),
					err, phys != NULL ? locked_page : NULL);
	} else /* (lastblock > block) */ {
		/*
		 * We will allocate a new block before the last allocated block
		 */
		if (block) {
			tmp = ufs_data_ptr_to_cpu(sb,
						  ufs_get_direct_data_ptr(uspi, ufsi, block - 1));
			if (tmp)
				goal = tmp + uspi->s_fpb;
		}
		tmp = ufs_new_fragments(inode, p, fragment - blockoff,
					goal, uspi->s_fpb, err,
					phys != NULL ? locked_page : NULL);
	}
	if (!tmp) {
		*err = -ENOSPC;
		return 0;
	}

	if (phys) {
		*err = 0;
		*new = 1;
	}
	inode->i_ctime = CURRENT_TIME_SEC;
	if (IS_SYNC(inode))
		ufs_sync_inode (inode);
	mark_inode_dirty(inode);
out:
	return tmp + uspi->s_sbbase;

	/* This part : To be implemented ....
	   Required only for writing, not required for READ-ONLY.
ufs2:

	u2_block = ufs_fragstoblks(fragment);
	u2_blockoff = ufs_fragnum(fragment);
	p = ufsi->i_u1.u2_i_data + block;
	goal = 0;

repeat2:
	tmp = fs32_to_cpu(sb, *p);
	lastfrag = ufsi->i_lastfrag;

	 */
}

/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @bh: pointer to the block which holds the "pointer" to the newly
 *   allocated block
 * @fragment: number of the `fragment' which holds the pointer
 *   to the newly allocated block
 * @new_fragment: number of the newly allocated fragment
 *   (the block will hold this fragment and also uspi->s_fpb-1)
 * @err: see ufs_inode_getfrag()
 * @phys: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_page: see ufs_inode_getfrag()
 */
static u64
ufs_inode_getblock(struct inode *inode, struct buffer_head *bh,
		   u64 fragment, sector_t new_fragment, int *err,
		   long *phys, int *new, struct page *locked_page)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 tmp = 0, goal, block;
	void *p;

	block = ufs_fragstoblks (fragment);

	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, metadata %d\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)new_fragment, !phys);

	if (!bh)
		goto out;
	if (!buffer_uptodate(bh)) {
		ll_rw_block (READ, 1, &bh);
		wait_on_buffer (bh);
		if (!buffer_uptodate(bh))
			goto out;
	}
	if (uspi->fs_magic == UFS2_MAGIC)
		p = (__fs64 *)bh->b_data + block;
	else
		p = (__fs32 *)bh->b_data + block;

	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	if (block && (uspi->fs_magic == UFS2_MAGIC ?
		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[block-1])) :
		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[block-1]))))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_page);
	if (!tmp)
		goto out;

	if (new)
		*new = 1;

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
out:
	brelse (bh);
	UFSD("EXIT\n");
	if (tmp)
		tmp += uspi->s_sbbase;
	return tmp;
}

/**
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * readpage, writepage and so on
 */

static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
	struct super_block * sb = inode->i_sb;
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi = sbi->s_uspi;
	struct buffer_head * bh;
	int ret, err, new;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
	unsigned long ptr, phys;
	u64 phys64 = 0;
	unsigned frag = fragment & uspi->s_fpbmask;

	if (!create) {
		phys64 = ufs_frag_map(inode, offsets, depth);
		if (phys64) {
			phys64 += frag;
			map_bh(bh_result, sb, phys64);
		}
		return 0;
	}

	/*
	 * This code is entered only while writing ...?
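	 *
	 * The depth returned by ufs_block_to_path() picks the allocation
	 * path below: depth 1 allocates a direct block, depth 2 goes
	 * through one indirect block (get_indirect), depth 3 through the
	 * double indirect chain (get_double), and depth 4 falls through
	 * to the triple indirect case.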
	 */

	err = -EIO;
	new = 0;
	ret = 0;
	bh = NULL;

	mutex_lock(&UFS_I(inode)->truncate_mutex);

	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
	if (!depth)
		goto abort_too_big;

	err = 0;
	ptr = fragment;

	if (depth == 1) {
		phys64 = ufs_inode_getfrag(inode, ptr, fragment, 1, &err, &phys,
					   &new, bh_result->b_page);
		if (phys64) {
			phys64 += frag;
			phys = phys64;
		}
		goto out;
	}
	ptr -= UFS_NDIR_FRAGMENT;
	if (depth == 2) {
		phys64 = ufs_inode_getfrag(inode,
					UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift),
					fragment, uspi->s_fpb, &err, NULL, NULL,
					bh_result->b_page);
		if (phys64) {
			phys64 += (ptr >> uspi->s_apbshift) & uspi->s_fpbmask;
			bh = sb_getblk(sb, phys64);
		} else {
			bh = NULL;
		}
		goto get_indirect;
	}
	ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
	if (depth == 3) {
		phys64 = ufs_inode_getfrag(inode,
					UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift),
					fragment, uspi->s_fpb, &err, NULL, NULL,
					bh_result->b_page);
		if (phys64) {
			phys64 += (ptr >> uspi->s_2apbshift) & uspi->s_fpbmask;
			bh = sb_getblk(sb, phys64);
		} else {
			bh = NULL;
		}
		goto get_double;
	}
	ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
	phys64 = ufs_inode_getfrag(inode,
				UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift),
				fragment, uspi->s_fpb, &err, NULL, NULL,
				bh_result->b_page);
	if (phys64) {
		phys64 += (ptr >> uspi->s_3apbshift) & uspi->s_fpbmask;
		bh = sb_getblk(sb, phys64);
	} else {
		bh = NULL;
	}
	phys64 = ufs_inode_getblock(inode, bh,
				(ptr >> uspi->s_2apbshift) & uspi->s_apbmask,
				fragment, &err, NULL, NULL, NULL);
	if (phys64) {
		phys64 += (ptr >> uspi->s_2apbshift) & uspi->s_fpbmask;
		bh = sb_getblk(sb, phys64);
	} else {
		bh = NULL;
	}
get_double:
	phys64 = ufs_inode_getblock(inode, bh,
				(ptr >> uspi->s_apbshift) & uspi->s_apbmask,
				fragment, &err, NULL, NULL, NULL);
	if (phys64) {
		phys64 += (ptr >> uspi->s_apbshift) & uspi->s_fpbmask;
		bh = sb_getblk(sb, phys64);
	} else {
		bh = NULL;
	}
get_indirect:
	phys64 = ufs_inode_getblock(inode, bh, ptr & uspi->s_apbmask, fragment,
				&err, &phys, &new, bh_result->b_page);
	if (phys64) {
		phys64 += frag;
		phys = phys64;
	}
out:
	if (err)
		goto abort;
	if (new)
		set_buffer_new(bh_result);
	map_bh(bh_result, sb, phys);
abort:
	mutex_unlock(&UFS_I(inode)->truncate_mutex);

	return err;

abort_too_big:
	ufs_warning(sb, "ufs_get_block", "block > big");
	goto abort;
}

static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page,ufs_getfrag_block,wbc);
}

static int ufs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page,ufs_getfrag_block);
}

int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, ufs_getfrag_block);
}

static void ufs_truncate_blocks(struct inode *);

static void ufs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ufs_truncate_blocks(inode);
	}
}

static int ufs_write_begin(struct file *file, struct address_space *mapping,
			loff_t
			pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				ufs_getfrag_block);
	if (unlikely(ret))
		ufs_write_failed(mapping, pos + len);

	return ret;
}

static int ufs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret < len)
		ufs_write_failed(mapping, pos + len);
	return ret;
}

static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,ufs_getfrag_block);
}

const struct address_space_operations ufs_aops = {
	.readpage = ufs_readpage,
	.writepage = ufs_writepage,
	.write_begin = ufs_write_begin,
	.write_end = ufs_write_end,
	.bmap = ufs_bmap
};

static void ufs_set_inode_ops(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ufs_file_inode_operations;
		inode->i_fop = &ufs_file_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ufs_dir_inode_operations;
		inode->i_fop = &ufs_dir_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks) {
			inode->i_op = &ufs_fast_symlink_inode_operations;
			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
		} else {
			inode->i_op = &ufs_symlink_inode_operations;
			inode->i_mapping->a_ops = &ufs_aops;
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}

static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
	if (inode->i_nlink == 0) {
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
		return -1;
	}

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
650 */ 651 i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode)); 652 i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode)); 653 654 inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size); 655 inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec); 656 inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec); 657 inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec); 658 inode->i_mtime.tv_nsec = 0; 659 inode->i_atime.tv_nsec = 0; 660 inode->i_ctime.tv_nsec = 0; 661 inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks); 662 inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen); 663 ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags); 664 ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); 665 ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); 666 667 668 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { 669 memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr, 670 sizeof(ufs_inode->ui_u2.ui_addr)); 671 } else { 672 memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink, 673 sizeof(ufs_inode->ui_u2.ui_symlink) - 1); 674 ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0; 675 } 676 return 0; 677 } 678 679 static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode) 680 { 681 struct ufs_inode_info *ufsi = UFS_I(inode); 682 struct super_block *sb = inode->i_sb; 683 umode_t mode; 684 685 UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino); 686 /* 687 * Copy data to the in-core inode. 688 */ 689 inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode); 690 set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink)); 691 if (inode->i_nlink == 0) { 692 ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); 693 return -1; 694 } 695 696 /* 697 * Linux now has 32-bit uid and gid, so we can support EFT. 
698 */ 699 i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid)); 700 i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid)); 701 702 inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size); 703 inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime); 704 inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime); 705 inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime); 706 inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec); 707 inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec); 708 inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec); 709 inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks); 710 inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen); 711 ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags); 712 /* 713 ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); 714 ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); 715 */ 716 717 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { 718 memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr, 719 sizeof(ufs2_inode->ui_u2.ui_addr)); 720 } else { 721 memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink, 722 sizeof(ufs2_inode->ui_u2.ui_symlink) - 1); 723 ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0; 724 } 725 return 0; 726 } 727 728 struct inode *ufs_iget(struct super_block *sb, unsigned long ino) 729 { 730 struct ufs_inode_info *ufsi; 731 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 732 struct buffer_head * bh; 733 struct inode *inode; 734 int err; 735 736 UFSD("ENTER, ino %lu\n", ino); 737 738 if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) { 739 ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n", 740 ino); 741 return ERR_PTR(-EIO); 742 } 743 744 inode = iget_locked(sb, ino); 745 if (!inode) 746 return ERR_PTR(-ENOMEM); 747 if (!(inode->i_state & I_NEW)) 748 return inode; 749 750 ufsi = UFS_I(inode); 751 752 bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino)); 753 if (!bh) { 754 ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n", 755 inode->i_ino); 756 goto bad_inode; 757 } 758 if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { 759 struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data; 760 761 err = ufs2_read_inode(inode, 762 ufs2_inode + ufs_inotofsbo(inode->i_ino)); 763 } else { 764 struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data; 765 766 err = ufs1_read_inode(inode, 767 ufs_inode + ufs_inotofsbo(inode->i_ino)); 768 } 769 770 if (err) 771 goto bad_inode; 772 inode->i_version++; 773 ufsi->i_lastfrag = 774 (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; 775 ufsi->i_dir_start_lookup = 0; 776 ufsi->i_osync = 0; 777 778 ufs_set_inode_ops(inode); 779 780 brelse(bh); 781 782 UFSD("EXIT\n"); 783 unlock_new_inode(inode); 784 return inode; 785 786 bad_inode: 787 iget_failed(inode); 788 return ERR_PTR(-EIO); 789 } 790 791 static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode) 792 { 793 struct super_block *sb = inode->i_sb; 794 struct ufs_inode_info *ufsi = UFS_I(inode); 795 796 ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode); 797 ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink); 798 799 ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode)); 800 ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode)); 801 802 ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size); 803 ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec); 804 ufs_inode->ui_atime.tv_usec = 0; 805 
	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctime.tv_usec = 0;
	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtime.tv_usec = 0;
	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs_inode));
}

static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	UFSD("ENTER\n");
	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);

	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs2_inode));
	UFSD("EXIT\n");
}

static int ufs_update_inode(struct inode * inode, int do_sync)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
		return -1;
	}

	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
		return -1;
	}
	if (uspi->fs_magic == UFS2_MAGIC) {
		struct ufs2_inode *ufs2_inode =
			(struct ufs2_inode *)bh->b_data;

		ufs2_update_inode(inode,
				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;

		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse (bh);

	UFSD("EXIT\n");
	return 0;
}

int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ufs_sync_inode (struct inode *inode)
{
	return ufs_update_inode (inode, 1);
}

void ufs_evict_inode(struct inode * inode)
{
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode))
		want_delete = 1;

	truncate_inode_pages_final(&inode->i_data);
	if (want_delete) {
		inode->i_size = 0;
		if (inode->i_blocks)
			ufs_truncate_blocks(inode);
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	if (want_delete)
		ufs_free_inode(inode);
}

struct to_free {
	struct inode *inode;
	u64 to;
	unsigned count;
};

static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
{
	if (ctx->count && ctx->to != from) {
		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
		ctx->count = 0;
	}
	ctx->count += count;
	ctx->to = from + count;
}

#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)

static void ufs_trunc_direct(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	void *p;
	u64 frag1, frag2, frag3, frag4, block1, block2;
	struct to_free ctx = {.inode = inode};
	unsigned i, tmp;

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	frag1 = DIRECT_FRAGMENT;
	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
	frag2 = ((frag1 & uspi->s_fpbmask) ?
		 ((frag1 | uspi->s_fpbmask) + 1) : frag1);
	frag3 = frag4 & ~uspi->s_fpbmask;
	block1 = block2 = 0;
	if (frag2 > frag3) {
		frag2 = frag4;
		frag3 = frag4 = 0;
	} else if (frag2 < frag3) {
		block1 = ufs_fragstoblks (frag2);
		block2 = ufs_fragstoblks (frag3);
	}

	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
	     (unsigned long long)frag1, (unsigned long long)frag2,
	     (unsigned long long)block1, (unsigned long long)block2,
	     (unsigned long long)frag3, (unsigned long long)frag4);

	if (frag1 >= frag2)
		goto next1;

	/*
	 * Free first free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic (sb, "ufs_trunc_direct", "internal error");
	frag2 -= frag1;
	frag1 = ufs_fragnum (frag1);

	ufs_free_fragments(inode, tmp + frag1, frag2);

next1:
	/*
	 * Free whole blocks
	 */
	for (i = block1 ; i < block2; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		tmp = ufs_data_ptr_to_cpu(sb, p);
		if (!tmp)
			continue;
		write_seqlock(&ufsi->meta_lock);
		ufs_data_ptr_clear(uspi, p);
		write_sequnlock(&ufsi->meta_lock);

		free_data(&ctx, tmp, uspi->s_fpb);
	}

	free_data(&ctx, 0, 0);

	if (frag3 >= frag4)
		goto next3;

	/*
	 * Free last free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic(sb, "ufs_trunc_direct", "internal error");
	frag4 = ufs_fragnum (frag4);
	write_seqlock(&ufsi->meta_lock);
	ufs_data_ptr_clear(uspi, p);
	write_sequnlock(&ufsi->meta_lock);

	ufs_free_fragments (inode, tmp, frag4);
next3:

	UFSD("EXIT: ino %lu\n", inode->i_ino);
}

static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
	unsigned i;

	if (!ubh)
		return;

	if (--depth) {
		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_full_branch(inode, block, depth);
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_data(&ctx, block, uspi->s_fpb);
		}
		free_data(&ctx, 0, 0);
	}

	ubh_bforget(ubh);
	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
}

static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i;

	if (--depth) {
		for (i = from; i < uspi->s_apb ; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_full_branch(inode, block, depth);
			}
		}
	} else {
		struct to_free ctx =
			{.inode = inode};

		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_data(&ctx, block, uspi->s_fpb);
			}
		}
		free_data(&ctx, 0, 0);
	}
	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
		ubh_sync_block(ubh);
	ubh_brelse(ubh);
}

static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
{
	int err = 0;
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i, end;
	sector_t lastfrag;
	struct page *lastpage;
	struct buffer_head *bh;
	u64 phys64;

	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (!lastfrag)
		goto out;

	lastfrag--;

	lastpage = ufs_get_locked_page(mapping, lastfrag >>
				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
	if (IS_ERR(lastpage)) {
		err = -EIO;
		goto out;
	}

	end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
	bh = page_buffers(lastpage);
	for (i = 0; i < end; ++i)
		bh = bh->b_this_page;


	err = ufs_getfrag_block(inode, lastfrag, bh, 1);

	if (unlikely(err))
		goto out_unlock;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev,
					  bh->b_blocknr);
		/*
		 * we do not zeroize the fragment, because if it is
		 * mapped to a hole it already contains zeroes
		 */
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		set_page_dirty(lastpage);
	}

	if (lastfrag >= UFS_IND_FRAGMENT) {
		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
		phys64 = bh->b_blocknr + 1;
		for (i = 0; i < end; ++i) {
			bh = sb_getblk(sb, i + phys64);
			lock_buffer(bh);
			memset(bh->b_data, 0, sb->s_blocksize);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			unlock_buffer(bh);
			sync_dirty_buffer(bh);
			brelse(bh);
		}
	}
out_unlock:
	ufs_put_locked_page(lastpage);
out:
	return err;
}

static void __ufs_truncate_blocks(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets);
	int depth2;
	unsigned i;
	struct ufs_buffer_head *ubh[3];
	void *p;
	u64 block;

	if (!depth)
		return;

	/* find the last non-zero in offsets[] */
	for (depth2 = depth - 1; depth2; depth2--)
		if (offsets[depth2])
			break;

	mutex_lock(&ufsi->truncate_mutex);
	if (depth == 1) {
		ufs_trunc_direct(inode);
		offsets[0] = UFS_IND_BLOCK;
	} else {
		/* get the blocks that should be partially emptied */
		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]);
		for (i = 0; i < depth2; i++) {
			offsets[i]++;	/* next branch is fully freed */
			block = ufs_data_ptr_to_cpu(sb, p);
			if (!block)
				break;
			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
			if (!ubh[i]) {
				write_seqlock(&ufsi->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&ufsi->meta_lock);
				break;
			}
			p =
			    ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]);
		}
		while (i--)
			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
	}
	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		block = ufs_data_ptr_to_cpu(sb, p);
		if (block) {
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);
			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
		}
	}
	ufsi->i_lastfrag = DIRECT_FRAGMENT;
	mark_inode_dirty(inode);
	mutex_unlock(&ufsi->truncate_mutex);
}

static int ufs_truncate(struct inode *inode, loff_t size)
{
	int err = 0;

	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
	     inode->i_ino, (unsigned long long)size,
	     (unsigned long long)i_size_read(inode));

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	err = ufs_alloc_lastblock(inode, size);

	if (err)
		goto out;

	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);

	truncate_setsize(inode, size);

	__ufs_truncate_blocks(inode);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
out:
	UFSD("EXIT: err %d\n", err);
	return err;
}

static void ufs_truncate_blocks(struct inode *inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;
	__ufs_truncate_blocks(inode);
}

int ufs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	unsigned int ia_valid = attr->ia_valid;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
		error = ufs_truncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations ufs_file_inode_operations = {
	.setattr = ufs_setattr,
};