/*
 *  linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * from
 *
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 *  linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <asm/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

/*
 * ufs_block_to_path - decompose a file block number into a chain of offsets.
 * @inode: inode being mapped
 * @i_block: block number (in filesystem blocks, not fragments)
 * @offsets: filled with up to four offsets: the slot in the inode's block
 *	array, then one slot index per level of indirection
 *
 * Returns the depth of the chain (1 for a direct block, up to 4 for a
 * triple-indirect block), or 0 if @i_block is beyond the maximum file size
 * (a warning is logged in that case and @offsets is left unfilled).
 */
static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;			/* block pointers per indirect block */
	int ptrs_bits = uspi->s_apbshift;	/* log2(ptrs) */
	const long direct_blocks = UFS_NDADDR,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;


	UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
	}
	return n;
}

/*
 * One step of a lookup chain: @p points at the on-disk block pointer (inside
 * the inode or inside @bh's data), key32/key64 is a snapshot of its value
 * taken under the meta_lock seqlock, and @bh is the buffer holding it (NULL
 * for the step rooted in the inode itself).
 */
typedef struct {
	void	*p;
	union {
		__fs32	key32;
		__fs64	key64;
	};
	struct buffer_head *bh;
} Indirect;

/*
 * Append one UFS1 (32-bit pointer) step to the chain [@from..@to].  The
 * snapshot of *@v is taken under ufsi->meta_lock (seqlock) and the whole
 * chain is re-verified against the current on-disk pointers.  Returns
 * non-zero if every key in the chain still matches, 0 if some pointer was
 * changed under us (e.g. by truncate) and the caller must restart.
 */
static inline int grow_chain32(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs32 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key32 = *(__fs32 *)(to->p = v);
		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

/*
 * Same as grow_chain32() but for UFS2's 64-bit on-disk block pointers.
 */
static inline int grow_chain64(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs64 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key64 = *(__fs64 *)(to->p = v);
		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 */

/*
 * Walk the indirection chain described by @offsets (depth @depth, as built
 * by ufs_block_to_path()) and return the fragment's location relative to
 * the start of the filesystem, or 0 for a hole / read failure.  Lockless:
 * each step is validated against ufsi->meta_lock via grow_chain32/64(), and
 * the whole walk restarts from scratch if a pointer changed underneath us.
 */
static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
	int shift = uspi->s_apbshift-uspi->s_fpbshift;
	Indirect chain[4], *q = chain;
	unsigned *p;
	unsigned flags = UFS_SB(sb)->s_flags;
	u64 res = 0;

	UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
		uspi->s_fpbshift, uspi->s_apbmask,
		(unsigned long long)mask);

	if (depth == 0)
		goto no_block;

again:
	p = offsets;

	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;

	/* UFS1: 32-bit on-disk block pointers */
	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
		goto changed;
	if (!q->key32)
		goto no_block;
	while (--depth) {
		__fs32 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		/* n is in fragments; n>>shift selects the block, n&mask the slot */
		bh = sb_bread(sb, uspi->s_sbbase +
				  fs32_to_cpu(sb, q->key32) + (n>>shift));
		if (!bh)
			goto no_block;
		ptr = (__fs32 *)bh->b_data + (n & mask);
		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key32)
			goto no_block;
	}
	res = fs32_to_cpu(sb, q->key32);
	goto found;

ufs2:
	/* UFS2: 64-bit on-disk block pointers */
	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
		goto changed;
	if (!q->key64)
		goto no_block;

	while (--depth) {
		__fs64 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs64_to_cpu(sb, q->key64) + (n>>shift));
		if (!bh)
			goto no_block;
		ptr = (__fs64 *)bh->b_data + (n & mask);
		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key64)
			goto no_block;
	}
	res = fs64_to_cpu(sb, q->key64);
found:
	res += uspi->s_sbbase;
no_block:
	/* drop every buffer we pinned along the chain */
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	return res;

changed:
	/* a pointer changed under us - release the chain and retry */
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	goto again;
}

/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @fragment: number of `fragment' which hold pointer
 *   to new allocated fragment(s)
 * @new_fragment: number of new allocated fragment(s)
 * @required: how many fragment(s) we require
 * @err: we set it if something wrong
 * @phys: pointer to where we save physical number of new allocated fragments,
 *   NULL if we allocate not data(indirect blocks for example).
 * @new: we set it if we allocate new block
 * @locked_page: for ufs_new_fragments()
 */
static struct buffer_head *
ufs_inode_getfrag(struct inode *inode, u64 fragment,
		  sector_t new_fragment, unsigned int required, int *err,
		  long *phys, int *new, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned blockoff, lastblockoff;
	u64 tmp, goal, lastfrag, block, lastblock;
	void *p, *p2;

	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, required %u, "
	     "metadata %d\n", inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)new_fragment, required, !phys);

	/* TODO : to be done for write support
	if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
	     goto ufs2;
	 */

	block = ufs_fragstoblks (fragment);
	blockoff = ufs_fragnum (fragment);
	p = ufs_get_direct_data_ptr(uspi, ufsi, block);

	goal = 0;

	tmp = ufs_data_ptr_to_cpu(sb, p);

	lastfrag = ufsi->i_lastfrag;
	/* already allocated and not at the growing end of the file */
	if (tmp && fragment < lastfrag)
		goto out;

	lastblock = ufs_fragstoblks (lastfrag);
	lastblockoff = ufs_fragnum (lastfrag);
	/*
	 * We will extend file into new block beyond last allocated block
	 */
	if (lastblock < block) {
		/*
		 * We must reallocate last allocated block
		 */
		if
		   (lastblockoff) {
			/* round the partially-used last block up to a full block first */
			p2 = ufs_get_direct_data_ptr(uspi, ufsi, lastblock);
			tmp = ufs_new_fragments(inode, p2, lastfrag,
						ufs_data_ptr_to_cpu(sb, p2),
						uspi->s_fpb - lastblockoff,
						err, locked_page);
			if (!tmp)
				return NULL;
			lastfrag = ufsi->i_lastfrag;
		}
		/* allocation goal: right after the (now full) last block, if known */
		tmp = ufs_data_ptr_to_cpu(sb,
					  ufs_get_direct_data_ptr(uspi, ufsi,
								  lastblock));
		if (tmp)
			goal = tmp + uspi->s_fpb;
		tmp = ufs_new_fragments (inode, p, fragment - blockoff,
					 goal, required + blockoff,
					 err,
					 phys != NULL ? locked_page : NULL);
	} else if (lastblock == block) {
		/*
		 * We will extend last allocated block
		 */
		tmp = ufs_new_fragments(inode, p, fragment -
					(blockoff - lastblockoff),
					ufs_data_ptr_to_cpu(sb, p),
					required +  (blockoff - lastblockoff),
					err, phys != NULL ? locked_page : NULL);
	} else /* (lastblock > block) */ {
		/*
		 * We will allocate new block before last allocated block
		 */
		if (block) {
			/* goal: just after the preceding block, if it exists */
			tmp = ufs_data_ptr_to_cpu(sb,
						 ufs_get_direct_data_ptr(uspi, ufsi, block - 1));
			if (tmp)
				goal = tmp + uspi->s_fpb;
		}
		tmp = ufs_new_fragments(inode, p, fragment - blockoff,
					goal, uspi->s_fpb, err,
					phys != NULL ? locked_page : NULL);
	}
	if (!tmp) {
		*err = -ENOSPC;
		return NULL;
	}

	if (phys) {
		*err = 0;
		*new = 1;
	}
	inode->i_ctime = CURRENT_TIME_SEC;
	if (IS_SYNC(inode))
		ufs_sync_inode (inode);
	mark_inode_dirty(inode);
out:
	/* return either the buffer (metadata) or the physical number (data) */
	tmp += uspi->s_sbbase + blockoff;
	if (!phys) {
		return sb_getblk(sb, tmp);
	} else {
		*phys = tmp;
		return NULL;
	}

	/* This part : To be implemented ....
	Required only for writing, not required for READ-ONLY.
ufs2:

	u2_block = ufs_fragstoblks(fragment);
	u2_blockoff = ufs_fragnum(fragment);
	p = ufsi->i_u1.u2_i_data + block;
	goal = 0;

repeat2:
	tmp = fs32_to_cpu(sb, *p);
	lastfrag = ufsi->i_lastfrag;

	*/
}

/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @bh: pointer to block which hold "pointer" to new allocated block
 * @fragment: number of `fragment' which hold pointer
 *   to new allocated block
 * @new_fragment: number of new allocated fragment
 *  (block will hold this fragment and also uspi->s_fpb-1)
 * @err: see ufs_inode_getfrag()
 * @phys: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_page: see ufs_inode_getfrag()
 */
static struct buffer_head *
ufs_inode_getblock(struct inode *inode, struct buffer_head *bh,
		  u64 fragment, sector_t new_fragment, int *err,
		  long *phys, int *new, struct page *locked_page)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * result;
	unsigned blockoff;
	u64 tmp = 0, goal, block;
	void *p;

	block = ufs_fragstoblks (fragment);
	blockoff = ufs_fragnum (fragment);

	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, metadata %d\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)new_fragment, !phys);

	result = NULL;
	if (!bh)
		goto out;
	/* make sure the indirect block's contents are valid before we look at it */
	if (!buffer_uptodate(bh)) {
		ll_rw_block (READ, 1, &bh);
		wait_on_buffer (bh);
		if (!buffer_uptodate(bh))
			goto out;
	}
	if (uspi->fs_magic == UFS2_MAGIC)
		p = (__fs64 *)bh->b_data + block;
	else
		p = (__fs32 *)bh->b_data + block;

	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;	/* already allocated */

	/* goal: after the previous pointer in this indirect block, if set */
	if (block && (uspi->fs_magic == UFS2_MAGIC ?
		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[block-1])) :
		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[block-1]))))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_page);
	if (!tmp)
		goto out;

	if (new)
		*new = 1;

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
out:
	brelse (bh);
	/* as in ufs_inode_getfrag(): buffer for metadata, number for data */
	if (tmp) {
		tmp += uspi->s_sbbase + blockoff;
		if (phys) {
			*phys = tmp;
		} else {
			result = sb_getblk(sb, tmp);
		}
	}
	UFSD("EXIT\n");
	return result;
}

/**
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * readpage, writepage and so on
 */

static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
	struct super_block * sb = inode->i_sb;
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi = sbi->s_uspi;
	struct buffer_head * bh;
	int ret, err, new;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
	unsigned long ptr,phys;
	u64 phys64 = 0;

	/* read-side lookup: no allocation, just map (holes are left unmapped) */
	if (!create) {
		phys64 = ufs_frag_map(inode, offsets, depth);
		if (phys64) {
			phys64 += fragment & uspi->s_fpbmask;
			map_bh(bh_result, sb, phys64);
		}
		return 0;
	}

	/* This code entered only while writing ....? */

	err = -EIO;
	new = 0;
	ret = 0;
	bh = NULL;

	/* serialize against truncate while we allocate down the chain */
	mutex_lock(&UFS_I(inode)->truncate_mutex);

	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
	if (!depth)
		goto abort_too_big;

	err = 0;
	ptr = fragment;

	/*
	 * ok, these macros clean the logic up a bit and make
	 * it much more readable:
	 */
#define GET_INODE_DATABLOCK(x) \
	ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new,\
			  bh_result->b_page)
#define GET_INODE_PTR(x) \
	ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL,\
			  bh_result->b_page)
#define GET_INDIRECT_DATABLOCK(x) \
	ufs_inode_getblock(inode, bh, x, fragment,	\
			  &err, &phys, &new, bh_result->b_page)
#define GET_INDIRECT_PTR(x) \
	ufs_inode_getblock(inode, bh, x, fragment,	\
			  &err, NULL, NULL, NULL)

	/* allocate level by level, falling through the labels below */
	if (depth == 1) {
		bh = GET_INODE_DATABLOCK(ptr);
		goto out;
	}
	ptr -= UFS_NDIR_FRAGMENT;
	if (depth == 2) {
		bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift));
		goto get_indirect;
	}
	ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
	if (depth == 3) {
		bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift));
		goto get_double;
	}
	ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
	bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift));
	bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask);
get_double:
	bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask);
get_indirect:
	bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask);

#undef GET_INODE_DATABLOCK
#undef GET_INODE_PTR
#undef GET_INDIRECT_DATABLOCK
#undef GET_INDIRECT_PTR

out:
	if (err)
		goto abort;
	if (new)
		set_buffer_new(bh_result);
	map_bh(bh_result, sb, phys);
abort:
	mutex_unlock(&UFS_I(inode)->truncate_mutex);

	return err;

abort_too_big:
	ufs_warning(sb, "ufs_get_block", "block > big");
	goto abort;
}

/* address_space operation: write one page via the generic block helper */
static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page,ufs_getfrag_block,wbc);
}

/* address_space operation: read one page via the generic block helper */
static int ufs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page,ufs_getfrag_block);
}

int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, ufs_getfrag_block);
}

static void ufs_truncate_blocks(struct inode *);

/*
 * Undo a failed/short write: trim page cache and on-disk blocks that were
 * allocated beyond the (unchanged) i_size.
 */
static void ufs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ufs_truncate_blocks(inode);
	}
}

static int ufs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				ufs_getfrag_block);
	if (unlikely(ret))
		ufs_write_failed(mapping, pos + len);

	return ret;
}

static int ufs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	/* generic_write_end() returns bytes copied; short copy means cleanup */
	if (ret < len)
		ufs_write_failed(mapping, pos + len);
	return ret;
}

static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,ufs_getfrag_block);
}

const struct address_space_operations ufs_aops = {
	.readpage = ufs_readpage,
	.writepage = ufs_writepage,
	.write_begin = ufs_write_begin,
	.write_end = ufs_write_end,
	.bmap = ufs_bmap
};

/*
 * Install the inode/file/address-space operations matching the inode's
 * file type.  Fast symlinks (no blocks) keep their target inside the inode.
 */
static void ufs_set_inode_ops(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ufs_file_inode_operations;
		inode->i_fop = &ufs_file_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ufs_dir_inode_operations;
		inode->i_fop = &ufs_dir_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks) {
			inode->i_op = &ufs_fast_symlink_inode_operations;
			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
		} else {
			inode->i_op = &ufs_symlink_inode_operations;
			inode->i_mapping->a_ops = &ufs_aops;
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}

/*
 * Fill the in-core inode from an on-disk UFS1 inode.  Returns 0 on success,
 * -1 if the on-disk inode is invalid (zero link count).
 */
static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
	if (inode->i_nlink == 0) {
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
		return -1;
	}

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));

	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
	/* UFS1 stores second-granularity timestamps only */
	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);


	/* device nodes and regular data use the block array; fast symlinks
	 * keep the target string in the same union */
	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

/*
 * Fill the in-core inode from an on-disk UFS2 inode.  Returns 0 on success,
 * -1 if the on-disk inode is invalid (zero link count).
 */
static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
	if (inode->i_nlink == 0) {
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
		return -1;
	}

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));

	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
	/* UFS2 has 64-bit seconds plus separate nanosecond fields */
	inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
	inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
	inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
	inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
	inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
	inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
	/*
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	*/

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
		       sizeof(ufs2_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

/*
 * ufs_iget - obtain the in-core inode for on-disk inode number @ino.
 * Returns the inode (possibly from cache) or an ERR_PTR on failure.
 */
struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
	struct ufs_inode_info *ufsi;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;
	struct inode *inode;
	int err;

	UFSD("ENTER, ino %lu\n", ino);

	/* reject inode numbers outside the filesystem's valid range */
	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
			    ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ufsi = UFS_I(inode);

	bh = sb_bread(sb,
		      uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
			    inode->i_ino);
		goto bad_inode;
	}
	/* pick the on-disk inode layout by filesystem flavour */
	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		err = ufs2_read_inode(inode,
				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		err = ufs1_read_inode(inode,
				      ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	if (err)
		goto bad_inode;
	inode->i_version++;
	ufsi->i_lastfrag =
		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
	ufsi->i_dir_start_lookup = 0;
	ufsi->i_osync = 0;

	ufs_set_inode_ops(inode);

	brelse(bh);

	UFSD("EXIT\n");
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(-EIO);
}

/*
 * Copy the in-core inode back into an on-disk UFS1 inode image.
 * An unlinked inode is written out as all-zeroes.
 */
static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atime.tv_usec = 0;
	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctime.tv_usec = 0;
	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtime.tv_usec = 0;
	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	/* Sun-specific fields exist only in the EFT uid variant */
	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	}
	else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs_inode));
}

/*
 * Copy the in-core inode back into an on-disk UFS2 inode image.
 * An unlinked inode is written out as all-zeroes.
 */
static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	UFSD("ENTER\n");
	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);

	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs2_inode));
	UFSD("EXIT\n");
}

/*
 * Write the in-core inode to its on-disk location; synchronously if
 * @do_sync.  Returns 0 on success, -1 on a bad inode number or read error.
 */
static int ufs_update_inode(struct inode * inode, int do_sync)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
		return -1;
	}

	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
		return -1;
	}
	if (uspi->fs_magic == UFS2_MAGIC) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		ufs2_update_inode(inode,
				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;

		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse (bh);

	UFSD("EXIT\n");
	return 0;
}

int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ufs_sync_inode (struct inode *inode)
{
	return ufs_update_inode (inode, 1);
}

/*
 * Final disposal of an inode: truncate and free its blocks and on-disk
 * inode when the last link is gone, otherwise just drop the in-core copy.
 */
void ufs_evict_inode(struct inode * inode)
{
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode))
		want_delete = 1;

	truncate_inode_pages_final(&inode->i_data);
	if (want_delete) {
		inode->i_size = 0;
		if (inode->i_blocks)
			ufs_truncate_blocks(inode);
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	if (want_delete)
		ufs_free_inode(inode);
}

/*
 * Accumulator that batches frees of physically contiguous block runs:
 * [to - count, to) is the pending run awaiting ufs_free_blocks().
 */
struct to_free {
	struct inode *inode;
	u64 to;
	unsigned count;
};

/* add @count fragments at @from to the pending run, flushing on a gap */
static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
{
	if (ctx->count && ctx->to != from) {
		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
		ctx->count = 0;
	}
	ctx->count += count;
	ctx->to = from + count;
}

#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)

/*
 * Free the direct blocks/fragments of the inode that lie beyond the new
 * i_size: a possible partial fragment run at the front, whole blocks in
 * the middle, and a trailing partial run.
 */
static void ufs_trunc_direct(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	void *p;
	u64 frag1, frag2, frag3, frag4, block1, block2;
	struct to_free ctx = {.inode = inode};
	unsigned i, tmp;

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	frag1 = DIRECT_FRAGMENT;
	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
	/* round frag1 up to a block boundary if it is mid-block */
	frag2 = ((frag1 & uspi->s_fpbmask) ?
		 ((frag1 | uspi->s_fpbmask) + 1) : frag1);
	frag3 = frag4 & ~uspi->s_fpbmask;
	block1 = block2 = 0;
	if (frag2 > frag3) {
		/* everything to free fits inside a single block */
		frag2 = frag4;
		frag3 = frag4 = 0;
	} else if (frag2 < frag3) {
		block1 = ufs_fragstoblks (frag2);
		block2 = ufs_fragstoblks (frag3);
	}

	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
	     (unsigned long long)frag1, (unsigned long long)frag2,
	     (unsigned long long)block1, (unsigned long long)block2,
	     (unsigned long long)frag3, (unsigned long long)frag4);

	if (frag1 >= frag2)
		goto next1;

	/*
	 * Free first free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp )
		ufs_panic (sb, "ufs_trunc_direct", "internal error");
	frag2 -= frag1;
	frag1 = ufs_fragnum (frag1);

	ufs_free_fragments(inode, tmp + frag1, frag2);

next1:
	/*
	 * Free whole blocks
	 */
	for (i = block1 ; i < block2; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		tmp = ufs_data_ptr_to_cpu(sb, p);
		if (!tmp)
			continue;
		/* clear the pointer under meta_lock so lockless readers see
		 * either the old block or a hole, never garbage */
		write_seqlock(&ufsi->meta_lock);
		ufs_data_ptr_clear(uspi, p);
		write_sequnlock(&ufsi->meta_lock);

		free_data(&ctx, tmp, uspi->s_fpb);
	}

	free_data(&ctx, 0, 0);	/* flush the pending contiguous run */

	if (frag3 >= frag4)
		goto next3;

	/*
	 * Free last free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp )
		ufs_panic(sb, "ufs_truncate_direct", "internal error");
	frag4 = ufs_fragnum (frag4);
	write_seqlock(&ufsi->meta_lock);
	ufs_data_ptr_clear(uspi, p);
	write_sequnlock(&ufsi->meta_lock);

	ufs_free_fragments (inode, tmp, frag4);
 next3:

	UFSD("EXIT: ino %lu\n", inode->i_ino);
}

/*
 * Free an entire indirect branch rooted at @ind_block: recurse through
 * @depth levels, releasing all data runs and finally the indirect block
 * itself.  The pointer to @ind_block must already have been cleared by
 * the caller.
 */
static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
	unsigned i;

	if (!ubh)
		return;

	if (--depth) {
		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_full_branch(inode, block, depth);
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_data(&ctx, block, uspi->s_fpb);
		}
		free_data(&ctx, 0, 0);
	}

	ubh_bforget(ubh);
	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
}

/*
 * Free the tail of a partially-retained indirect block @ubh: entries
 * [from, s_apb) are cleared (under meta_lock) and their branches/data
 * freed; entries before @from are kept.  Consumes the reference on @ubh.
 */
static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i;

	if (--depth) {
		for (i = from; i < uspi->s_apb ; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_full_branch(inode, block, depth);
			}
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_data(&ctx, block, uspi->s_fpb);
			}
		}
		free_data(&ctx, 0, 0);
	}
	if
(IS_SYNC(inode) && ubh_buffer_dirty(ubh)) 1104 ubh_sync_block(ubh); 1105 ubh_brelse(ubh); 1106 } 1107 1108 static int ufs_alloc_lastblock(struct inode *inode, loff_t size) 1109 { 1110 int err = 0; 1111 struct super_block *sb = inode->i_sb; 1112 struct address_space *mapping = inode->i_mapping; 1113 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 1114 unsigned i, end; 1115 sector_t lastfrag; 1116 struct page *lastpage; 1117 struct buffer_head *bh; 1118 u64 phys64; 1119 1120 lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift; 1121 1122 if (!lastfrag) 1123 goto out; 1124 1125 lastfrag--; 1126 1127 lastpage = ufs_get_locked_page(mapping, lastfrag >> 1128 (PAGE_CACHE_SHIFT - inode->i_blkbits)); 1129 if (IS_ERR(lastpage)) { 1130 err = -EIO; 1131 goto out; 1132 } 1133 1134 end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1); 1135 bh = page_buffers(lastpage); 1136 for (i = 0; i < end; ++i) 1137 bh = bh->b_this_page; 1138 1139 1140 err = ufs_getfrag_block(inode, lastfrag, bh, 1); 1141 1142 if (unlikely(err)) 1143 goto out_unlock; 1144 1145 if (buffer_new(bh)) { 1146 clear_buffer_new(bh); 1147 unmap_underlying_metadata(bh->b_bdev, 1148 bh->b_blocknr); 1149 /* 1150 * we do not zeroize fragment, because of 1151 * if it maped to hole, it already contains zeroes 1152 */ 1153 set_buffer_uptodate(bh); 1154 mark_buffer_dirty(bh); 1155 set_page_dirty(lastpage); 1156 } 1157 1158 if (lastfrag >= UFS_IND_FRAGMENT) { 1159 end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1; 1160 phys64 = bh->b_blocknr + 1; 1161 for (i = 0; i < end; ++i) { 1162 bh = sb_getblk(sb, i + phys64); 1163 lock_buffer(bh); 1164 memset(bh->b_data, 0, sb->s_blocksize); 1165 set_buffer_uptodate(bh); 1166 mark_buffer_dirty(bh); 1167 unlock_buffer(bh); 1168 sync_dirty_buffer(bh); 1169 brelse(bh); 1170 } 1171 } 1172 out_unlock: 1173 ufs_put_locked_page(lastpage); 1174 out: 1175 return err; 1176 } 1177 1178 static void __ufs_truncate_blocks(struct inode *inode) 1179 { 1180 struct ufs_inode_info 
*ufsi = UFS_I(inode); 1181 struct super_block *sb = inode->i_sb; 1182 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 1183 unsigned offsets[4]; 1184 int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets); 1185 int depth2; 1186 unsigned i; 1187 struct ufs_buffer_head *ubh[3]; 1188 void *p; 1189 u64 block; 1190 1191 if (!depth) 1192 return; 1193 1194 /* find the last non-zero in offsets[] */ 1195 for (depth2 = depth - 1; depth2; depth2--) 1196 if (offsets[depth2]) 1197 break; 1198 1199 mutex_lock(&ufsi->truncate_mutex); 1200 if (depth == 1) { 1201 ufs_trunc_direct(inode); 1202 offsets[0] = UFS_IND_BLOCK; 1203 } else { 1204 /* get the blocks that should be partially emptied */ 1205 p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]); 1206 for (i = 0; i < depth2; i++) { 1207 offsets[i]++; /* next branch is fully freed */ 1208 block = ufs_data_ptr_to_cpu(sb, p); 1209 if (!block) 1210 break; 1211 ubh[i] = ubh_bread(sb, block, uspi->s_bsize); 1212 if (!ubh[i]) { 1213 write_seqlock(&ufsi->meta_lock); 1214 ufs_data_ptr_clear(uspi, p); 1215 write_sequnlock(&ufsi->meta_lock); 1216 break; 1217 } 1218 p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]); 1219 } 1220 while (i--) 1221 free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1); 1222 } 1223 for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) { 1224 p = ufs_get_direct_data_ptr(uspi, ufsi, i); 1225 block = ufs_data_ptr_to_cpu(sb, p); 1226 if (block) { 1227 write_seqlock(&ufsi->meta_lock); 1228 ufs_data_ptr_clear(uspi, p); 1229 write_sequnlock(&ufsi->meta_lock); 1230 free_full_branch(inode, block, i - UFS_IND_BLOCK + 1); 1231 } 1232 } 1233 ufsi->i_lastfrag = DIRECT_FRAGMENT; 1234 mark_inode_dirty(inode); 1235 mutex_unlock(&ufsi->truncate_mutex); 1236 } 1237 1238 static int ufs_truncate(struct inode *inode, loff_t size) 1239 { 1240 int err = 0; 1241 1242 UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n", 1243 inode->i_ino, (unsigned long long)size, 1244 (unsigned long long)i_size_read(inode)); 1245 1246 
	/* Only regular files, directories and symlinks have block
	 * mappings that can be truncated. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	/* make sure the new last fragment is mapped before shrinking */
	err = ufs_alloc_lastblock(inode, size);

	if (err)
		goto out;

	/* zero the tail of the (possibly partial) last page */
	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);

	truncate_setsize(inode, size);

	__ufs_truncate_blocks(inode);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
out:
	UFSD("EXIT: err %d\n", err);
	return err;
}

/*
 * ufs_truncate_blocks - free the block mappings of @inode without
 * touching i_size.  No-op for inodes that carry no truncatable
 * mappings or are append-only/immutable.
 */
void ufs_truncate_blocks(struct inode *inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;
	__ufs_truncate_blocks(inode);
}

/*
 * ufs_setattr - ->setattr() for UFS inodes.
 *
 * Validates the change, performs a real truncate when ATTR_SIZE
 * actually changes i_size, then copies the remaining attributes in.
 * Returns 0 or a negative errno.
 */
int ufs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	unsigned int ia_valid = attr->ia_valid;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
		error = ufs_truncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations ufs_file_inode_operations = {
	.setattr = ufs_setattr,
};