1 /* 2 * linux/fs/ufs/inode.c 3 * 4 * Copyright (C) 1998 5 * Daniel Pirkl <daniel.pirkl@email.cz> 6 * Charles University, Faculty of Mathematics and Physics 7 * 8 * from 9 * 10 * linux/fs/ext2/inode.c 11 * 12 * Copyright (C) 1992, 1993, 1994, 1995 13 * Remy Card (card@masi.ibp.fr) 14 * Laboratoire MASI - Institut Blaise Pascal 15 * Universite Pierre et Marie Curie (Paris VI) 16 * 17 * from 18 * 19 * linux/fs/minix/inode.c 20 * 21 * Copyright (C) 1991, 1992 Linus Torvalds 22 * 23 * Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993 24 * Big-endian to little-endian byte-swapping/bitmaps by 25 * David S. Miller (davem@caip.rutgers.edu), 1995 26 */ 27 28 #include <asm/uaccess.h> 29 30 #include <linux/errno.h> 31 #include <linux/fs.h> 32 #include <linux/time.h> 33 #include <linux/stat.h> 34 #include <linux/string.h> 35 #include <linux/mm.h> 36 #include <linux/buffer_head.h> 37 #include <linux/writeback.h> 38 39 #include "ufs_fs.h" 40 #include "ufs.h" 41 #include "swab.h" 42 #include "util.h" 43 44 static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4]) 45 { 46 struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi; 47 int ptrs = uspi->s_apb; 48 int ptrs_bits = uspi->s_apbshift; 49 const long direct_blocks = UFS_NDADDR, 50 indirect_blocks = ptrs, 51 double_blocks = (1 << (ptrs_bits * 2)); 52 int n = 0; 53 54 55 UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks); 56 if (i_block < direct_blocks) { 57 offsets[n++] = i_block; 58 } else if ((i_block -= direct_blocks) < indirect_blocks) { 59 offsets[n++] = UFS_IND_BLOCK; 60 offsets[n++] = i_block; 61 } else if ((i_block -= indirect_blocks) < double_blocks) { 62 offsets[n++] = UFS_DIND_BLOCK; 63 offsets[n++] = i_block >> ptrs_bits; 64 offsets[n++] = i_block & (ptrs - 1); 65 } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) { 66 offsets[n++] = UFS_TIND_BLOCK; 67 offsets[n++] = i_block >> (ptrs_bits * 2); 68 offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); 69 offsets[n++] = i_block & (ptrs - 1); 70 } else { 71 ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big"); 72 } 73 return n; 74 } 75 76 typedef struct { 77 void *p; 78 union { 79 __fs32 key32; 80 __fs64 key64; 81 }; 82 struct buffer_head *bh; 83 } Indirect; 84 85 static inline int grow_chain32(struct ufs_inode_info *ufsi, 86 struct buffer_head *bh, __fs32 *v, 87 Indirect *from, Indirect *to) 88 { 89 Indirect *p; 90 unsigned seq; 91 to->bh = bh; 92 do { 93 seq = read_seqbegin(&ufsi->meta_lock); 94 to->key32 = *(__fs32 *)(to->p = v); 95 for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++) 96 ; 97 } while (read_seqretry(&ufsi->meta_lock, seq)); 98 return (p > to); 99 } 100 101 static inline int grow_chain64(struct ufs_inode_info *ufsi, 102 struct buffer_head *bh, __fs64 *v, 103 Indirect *from, Indirect *to) 104 { 105 Indirect *p; 106 unsigned seq; 107 to->bh = bh; 108 do { 109 seq = read_seqbegin(&ufsi->meta_lock); 110 to->key64 = *(__fs64 *)(to->p = v); 111 for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++) 112 ; 113 } while (read_seqretry(&ufsi->meta_lock, seq)); 114 return (p > to); 115 } 116 117 /* 118 * Returns the location of the fragment from 119 * the beginning of the filesystem. 120 */ 121 122 static u64 ufs_frag_map(struct inode *inode, sector_t frag) 123 { 124 struct ufs_inode_info *ufsi = UFS_I(inode); 125 struct super_block *sb = inode->i_sb; 126 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 127 u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift; 128 int shift = uspi->s_apbshift-uspi->s_fpbshift; 129 unsigned offsets[4], *p; 130 Indirect chain[4], *q = chain; 131 int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets); 132 unsigned flags = UFS_SB(sb)->s_flags; 133 u64 res = 0; 134 135 UFSD(": frag = %llu depth = %d\n", (unsigned long long)frag, depth); 136 UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n", 137 uspi->s_fpbshift, uspi->s_apbmask, 138 (unsigned long long)mask); 139 140 if (depth == 0) 141 goto no_block; 142 143 again: 144 p = offsets; 145 146 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) 147 goto ufs2; 148 149 if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q)) 150 goto changed; 151 if (!q->key32) 152 goto no_block; 153 while (--depth) { 154 __fs32 *ptr; 155 struct buffer_head *bh; 156 unsigned n = *p++; 157 158 bh = sb_bread(sb, uspi->s_sbbase + 159 fs32_to_cpu(sb, q->key32) + (n>>shift)); 160 if (!bh) 161 goto no_block; 162 ptr = (__fs32 *)bh->b_data + (n & mask); 163 if (!grow_chain32(ufsi, bh, ptr, chain, ++q)) 164 goto changed; 165 if (!q->key32) 166 goto no_block; 167 } 168 res = fs32_to_cpu(sb, q->key32); 169 goto found; 170 171 ufs2: 172 if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q)) 173 goto changed; 174 if (!q->key64) 175 goto no_block; 176 177 while (--depth) { 178 __fs64 *ptr; 179 struct buffer_head *bh; 180 unsigned n = *p++; 181 182 bh = sb_bread(sb, uspi->s_sbbase + 183 fs64_to_cpu(sb, q->key64) + (n>>shift)); 184 if (!bh) 185 goto no_block; 186 ptr = (__fs64 *)bh->b_data + (n & mask); 187 if (!grow_chain64(ufsi, bh, ptr, chain, ++q)) 188 goto changed; 189 if (!q->key64) 190 goto no_block; 191 } 192 res = fs64_to_cpu(sb, q->key64); 193 found: 194 res += uspi->s_sbbase + (frag & uspi->s_fpbmask); 195 no_block: 196 while (q > chain) { 197 brelse(q->bh); 198 q--; 199 } 200 return res; 201 202 changed: 203 while (q > chain) { 204 brelse(q->bh); 205 q--; 206 } 207 goto again; 208 } 209 210 /** 211 * ufs_inode_getfrag() - allocate new fragment(s) 212 * @inode: pointer to inode 213 * @fragment: number of `fragment' which hold pointer 214 * to new allocated fragment(s) 215 * @new_fragment: number of new allocated fragment(s) 216 * @required: how many fragment(s) we require 217 * @err: we set it if something wrong 218 * @phys: pointer to where we save physical number of new allocated fragments, 219 * NULL if we allocate not data(indirect blocks for example). 220 * @new: we set it if we allocate new block 221 * @locked_page: for ufs_new_fragments() 222 */ 223 static struct buffer_head * 224 ufs_inode_getfrag(struct inode *inode, u64 fragment, 225 sector_t new_fragment, unsigned int required, int *err, 226 long *phys, int *new, struct page *locked_page) 227 { 228 struct ufs_inode_info *ufsi = UFS_I(inode); 229 struct super_block *sb = inode->i_sb; 230 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 231 struct buffer_head * result; 232 unsigned blockoff, lastblockoff; 233 u64 tmp, goal, lastfrag, block, lastblock; 234 void *p, *p2; 235 236 UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, required %u, " 237 "metadata %d\n", inode->i_ino, (unsigned long long)fragment, 238 (unsigned long long)new_fragment, required, !phys); 239 240 /* TODO : to be done for write support 241 if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) 242 goto ufs2; 243 */ 244 245 block = ufs_fragstoblks (fragment); 246 blockoff = ufs_fragnum (fragment); 247 p = ufs_get_direct_data_ptr(uspi, ufsi, block); 248 249 goal = 0; 250 251 tmp = ufs_data_ptr_to_cpu(sb, p); 252 253 lastfrag = ufsi->i_lastfrag; 254 if (tmp && fragment < lastfrag) { 255 if (!phys) { 256 return sb_getblk(sb, uspi->s_sbbase + tmp + blockoff); 257 } else { 258 *phys = uspi->s_sbbase + tmp + blockoff; 259 return NULL; 260 } 261 } 262 263 lastblock = ufs_fragstoblks (lastfrag); 264 lastblockoff = ufs_fragnum (lastfrag); 265 /* 266 * We will extend file into new block beyond last allocated block 267 */ 268 if (lastblock < block) { 269 /* 270 * We must reallocate last allocated block 271 */ 272 if (lastblockoff) { 273 p2 = ufs_get_direct_data_ptr(uspi, ufsi, lastblock); 274 tmp = ufs_new_fragments(inode, p2, lastfrag, 275 ufs_data_ptr_to_cpu(sb, p2), 276 uspi->s_fpb - lastblockoff, 277 err, locked_page); 278 if (!tmp) 279 return NULL; 280 lastfrag = ufsi->i_lastfrag; 281 } 282 tmp = ufs_data_ptr_to_cpu(sb, 283 ufs_get_direct_data_ptr(uspi, ufsi, 284 lastblock)); 285 if (tmp) 286 goal = tmp + uspi->s_fpb; 287 tmp = ufs_new_fragments (inode, p, fragment - blockoff, 288 goal, required + blockoff, 289 err, 290 phys != NULL ? locked_page : NULL); 291 } else if (lastblock == block) { 292 /* 293 * We will extend last allocated block 294 */ 295 tmp = ufs_new_fragments(inode, p, fragment - 296 (blockoff - lastblockoff), 297 ufs_data_ptr_to_cpu(sb, p), 298 required + (blockoff - lastblockoff), 299 err, phys != NULL ? locked_page : NULL); 300 } else /* (lastblock > block) */ { 301 /* 302 * We will allocate new block before last allocated block 303 */ 304 if (block) { 305 tmp = ufs_data_ptr_to_cpu(sb, 306 ufs_get_direct_data_ptr(uspi, ufsi, block - 1)); 307 if (tmp) 308 goal = tmp + uspi->s_fpb; 309 } 310 tmp = ufs_new_fragments(inode, p, fragment - blockoff, 311 goal, uspi->s_fpb, err, 312 phys != NULL ? locked_page : NULL); 313 } 314 if (!tmp) { 315 *err = -ENOSPC; 316 return NULL; 317 } 318 319 if (!phys) { 320 result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff); 321 } else { 322 *phys = uspi->s_sbbase + tmp + blockoff; 323 result = NULL; 324 *err = 0; 325 *new = 1; 326 } 327 328 inode->i_ctime = CURRENT_TIME_SEC; 329 if (IS_SYNC(inode)) 330 ufs_sync_inode (inode); 331 mark_inode_dirty(inode); 332 return result; 333 334 /* This part : To be implemented .... 335 Required only for writing, not required for READ-ONLY. 336 ufs2: 337 338 u2_block = ufs_fragstoblks(fragment); 339 u2_blockoff = ufs_fragnum(fragment); 340 p = ufsi->i_u1.u2_i_data + block; 341 goal = 0; 342 343 repeat2: 344 tmp = fs32_to_cpu(sb, *p); 345 lastfrag = ufsi->i_lastfrag; 346 347 */ 348 } 349 350 /** 351 * ufs_inode_getblock() - allocate new block 352 * @inode: pointer to inode 353 * @bh: pointer to block which hold "pointer" to new allocated block 354 * @fragment: number of `fragment' which hold pointer 355 * to new allocated block 356 * @new_fragment: number of new allocated fragment 357 * (block will hold this fragment and also uspi->s_fpb-1) 358 * @err: see ufs_inode_getfrag() 359 * @phys: see ufs_inode_getfrag() 360 * @new: see ufs_inode_getfrag() 361 * @locked_page: see ufs_inode_getfrag() 362 */ 363 static struct buffer_head * 364 ufs_inode_getblock(struct inode *inode, struct buffer_head *bh, 365 u64 fragment, sector_t new_fragment, int *err, 366 long *phys, int *new, struct page *locked_page) 367 { 368 struct super_block *sb = inode->i_sb; 369 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 370 struct buffer_head * result; 371 unsigned blockoff; 372 u64 tmp, goal, block; 373 void *p; 374 375 block = ufs_fragstoblks (fragment); 376 blockoff = ufs_fragnum (fragment); 377 378 UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, metadata %d\n", 379 inode->i_ino, (unsigned long long)fragment, 380 (unsigned long long)new_fragment, !phys); 381 382 result = NULL; 383 if (!bh) 384 goto out; 385 if (!buffer_uptodate(bh)) { 386 ll_rw_block (READ, 1, &bh); 387 wait_on_buffer (bh); 388 if (!buffer_uptodate(bh)) 389 goto out; 390 } 391 if (uspi->fs_magic == UFS2_MAGIC) 392 p = (__fs64 *)bh->b_data + block; 393 else 394 p = (__fs32 *)bh->b_data + block; 395 396 tmp = ufs_data_ptr_to_cpu(sb, p); 397 if (tmp) { 398 if (!phys) 399 result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff); 400 else 401 *phys = uspi->s_sbbase + tmp + blockoff; 402 goto out; 403 } 404 405 if (block && (uspi->fs_magic == UFS2_MAGIC ? 406 (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[block-1])) : 407 (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[block-1])))) 408 goal = tmp + uspi->s_fpb; 409 else 410 goal = bh->b_blocknr + uspi->s_fpb; 411 tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal, 412 uspi->s_fpb, err, locked_page); 413 if (!tmp) 414 goto out; 415 416 if (!phys) { 417 result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff); 418 } else { 419 *phys = uspi->s_sbbase + tmp + blockoff; 420 *new = 1; 421 } 422 423 mark_buffer_dirty(bh); 424 if (IS_SYNC(inode)) 425 sync_dirty_buffer(bh); 426 inode->i_ctime = CURRENT_TIME_SEC; 427 mark_inode_dirty(inode); 428 out: 429 brelse (bh); 430 UFSD("EXIT\n"); 431 return result; 432 } 433 434 /** 435 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and 436 * readpage, writepage and so on 437 */ 438 439 static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create) 440 { 441 struct super_block * sb = inode->i_sb; 442 struct ufs_sb_info * sbi = UFS_SB(sb); 443 struct ufs_sb_private_info * uspi = sbi->s_uspi; 444 struct buffer_head * bh; 445 int ret, err, new; 446 unsigned long ptr,phys; 447 u64 phys64 = 0; 448 449 if (!create) { 450 phys64 = ufs_frag_map(inode, fragment); 451 UFSD("phys64 = %llu\n", (unsigned long long)phys64); 452 if (phys64) 453 map_bh(bh_result, sb, phys64); 454 return 0; 455 } 456 457 /* This code entered only while writing ....? */ 458 459 err = -EIO; 460 new = 0; 461 ret = 0; 462 bh = NULL; 463 464 mutex_lock(&UFS_I(inode)->truncate_mutex); 465 466 UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment); 467 if (fragment > 468 ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb) 469 << uspi->s_fpbshift)) 470 goto abort_too_big; 471 472 err = 0; 473 ptr = fragment; 474 475 /* 476 * ok, these macros clean the logic up a bit and make 477 * it much more readable: 478 */ 479 #define GET_INODE_DATABLOCK(x) \ 480 ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new,\ 481 bh_result->b_page) 482 #define GET_INODE_PTR(x) \ 483 ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL,\ 484 bh_result->b_page) 485 #define GET_INDIRECT_DATABLOCK(x) \ 486 ufs_inode_getblock(inode, bh, x, fragment, \ 487 &err, &phys, &new, bh_result->b_page) 488 #define GET_INDIRECT_PTR(x) \ 489 ufs_inode_getblock(inode, bh, x, fragment, \ 490 &err, NULL, NULL, NULL) 491 492 if (ptr < UFS_NDIR_FRAGMENT) { 493 bh = GET_INODE_DATABLOCK(ptr); 494 goto out; 495 } 496 ptr -= UFS_NDIR_FRAGMENT; 497 if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) { 498 bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift)); 499 goto get_indirect; 500 } 501 ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift); 502 if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) { 503 bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift)); 504 goto get_double; 505 } 506 ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift); 507 bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift)); 508 bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask); 509 get_double: 510 bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask); 511 get_indirect: 512 bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask); 513 514 #undef GET_INODE_DATABLOCK 515 #undef GET_INODE_PTR 516 #undef GET_INDIRECT_DATABLOCK 517 #undef GET_INDIRECT_PTR 518 519 out: 520 if (err) 521 goto abort; 522 if (new) 523 set_buffer_new(bh_result); 524 map_bh(bh_result, sb, phys); 525 abort: 526 mutex_unlock(&UFS_I(inode)->truncate_mutex); 527 528 return err; 529 530 abort_too_big: 531 ufs_warning(sb, "ufs_get_block", "block > big"); 532 goto abort; 533 } 534 535 static int ufs_writepage(struct page *page, struct writeback_control *wbc) 536 { 537 return block_write_full_page(page,ufs_getfrag_block,wbc); 538 } 539 540 static int ufs_readpage(struct file *file, struct page *page) 541 { 542 return block_read_full_page(page,ufs_getfrag_block); 543 } 544 545 int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len) 546 { 547 return __block_write_begin(page, pos, len, ufs_getfrag_block); 548 } 549 550 static void ufs_truncate_blocks(struct inode *); 551 552 static void ufs_write_failed(struct address_space *mapping, loff_t to) 553 { 554 struct inode *inode = mapping->host; 555 556 if (to > inode->i_size) { 557 truncate_pagecache(inode, inode->i_size); 558 ufs_truncate_blocks(inode); 559 } 560 } 561 562 static int ufs_write_begin(struct file *file, struct address_space *mapping, 563 loff_t pos, unsigned len, unsigned flags, 564 struct page **pagep, void **fsdata) 565 { 566 int ret; 567 568 ret = block_write_begin(mapping, pos, len, flags, pagep, 569 ufs_getfrag_block); 570 if (unlikely(ret)) 571 ufs_write_failed(mapping, pos + len); 572 573 return ret; 574 } 575 576 static int ufs_write_end(struct file *file, struct address_space *mapping, 577 loff_t pos, unsigned len, unsigned copied, 578 struct page *page, void *fsdata) 579 { 580 int ret; 581 582 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); 583 if (ret < len) 584 ufs_write_failed(mapping, pos + len); 585 return ret; 586 } 587 588 static sector_t ufs_bmap(struct address_space *mapping, sector_t block) 589 { 590 return generic_block_bmap(mapping,block,ufs_getfrag_block); 591 } 592 593 const struct address_space_operations ufs_aops = { 594 .readpage = ufs_readpage, 595 .writepage = ufs_writepage, 596 .write_begin = ufs_write_begin, 597 .write_end = ufs_write_end, 598 .bmap = ufs_bmap 599 }; 600 601 static void ufs_set_inode_ops(struct inode *inode) 602 { 603 if (S_ISREG(inode->i_mode)) { 604 inode->i_op = &ufs_file_inode_operations; 605 inode->i_fop = &ufs_file_operations; 606 inode->i_mapping->a_ops = &ufs_aops; 607 } else if (S_ISDIR(inode->i_mode)) { 608 inode->i_op = &ufs_dir_inode_operations; 609 inode->i_fop = &ufs_dir_operations; 610 inode->i_mapping->a_ops = &ufs_aops; 611 } else if (S_ISLNK(inode->i_mode)) { 612 if (!inode->i_blocks) { 613 inode->i_op = &ufs_fast_symlink_inode_operations; 614 inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink; 615 } else { 616 inode->i_op = &ufs_symlink_inode_operations; 617 inode->i_mapping->a_ops = &ufs_aops; 618 } 619 } else 620 init_special_inode(inode, inode->i_mode, 621 ufs_get_inode_dev(inode->i_sb, UFS_I(inode))); 622 } 623 624 static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode) 625 { 626 struct ufs_inode_info *ufsi = UFS_I(inode); 627 struct super_block *sb = inode->i_sb; 628 umode_t mode; 629 630 /* 631 * Copy data to the in-core inode. 632 */ 633 inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode); 634 set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink)); 635 if (inode->i_nlink == 0) { 636 ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); 637 return -1; 638 } 639 640 /* 641 * Linux now has 32-bit uid and gid, so we can support EFT. 642 */ 643 i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode)); 644 i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode)); 645 646 inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size); 647 inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec); 648 inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec); 649 inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec); 650 inode->i_mtime.tv_nsec = 0; 651 inode->i_atime.tv_nsec = 0; 652 inode->i_ctime.tv_nsec = 0; 653 inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks); 654 inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen); 655 ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags); 656 ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); 657 ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); 658 659 660 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { 661 memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr, 662 sizeof(ufs_inode->ui_u2.ui_addr)); 663 } else { 664 memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink, 665 sizeof(ufs_inode->ui_u2.ui_symlink) - 1); 666 ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0; 667 } 668 return 0; 669 } 670 671 static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode) 672 { 673 struct ufs_inode_info *ufsi = UFS_I(inode); 674 struct super_block *sb = inode->i_sb; 675 umode_t mode; 676 677 UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino); 678 /* 679 * Copy data to the in-core inode. 680 */ 681 inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode); 682 set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink)); 683 if (inode->i_nlink == 0) { 684 ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); 685 return -1; 686 } 687 688 /* 689 * Linux now has 32-bit uid and gid, so we can support EFT. 690 */ 691 i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid)); 692 i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid)); 693 694 inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size); 695 inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime); 696 inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime); 697 inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime); 698 inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec); 699 inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec); 700 inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec); 701 inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks); 702 inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen); 703 ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags); 704 /* 705 ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); 706 ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); 707 */ 708 709 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { 710 memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr, 711 sizeof(ufs2_inode->ui_u2.ui_addr)); 712 } else { 713 memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink, 714 sizeof(ufs2_inode->ui_u2.ui_symlink) - 1); 715 ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0; 716 } 717 return 0; 718 } 719 720 struct inode *ufs_iget(struct super_block *sb, unsigned long ino) 721 { 722 struct ufs_inode_info *ufsi; 723 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 724 struct buffer_head * bh; 725 struct inode *inode; 726 int err; 727 728 UFSD("ENTER, ino %lu\n", ino); 729 730 if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) { 731 ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n", 732 ino); 733 return ERR_PTR(-EIO); 734 } 735 736 inode = iget_locked(sb, ino); 737 if (!inode) 738 return ERR_PTR(-ENOMEM); 739 if (!(inode->i_state & I_NEW)) 740 return inode; 741 742 ufsi = UFS_I(inode); 743 744 bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino)); 745 if (!bh) { 746 ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n", 747 inode->i_ino); 748 goto bad_inode; 749 } 750 if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { 751 struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data; 752 753 err = ufs2_read_inode(inode, 754 ufs2_inode + ufs_inotofsbo(inode->i_ino)); 755 } else { 756 struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data; 757 758 err = ufs1_read_inode(inode, 759 ufs_inode + ufs_inotofsbo(inode->i_ino)); 760 } 761 762 if (err) 763 goto bad_inode; 764 inode->i_version++; 765 ufsi->i_lastfrag = 766 (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; 767 ufsi->i_dir_start_lookup = 0; 768 ufsi->i_osync = 0; 769 770 ufs_set_inode_ops(inode); 771 772 brelse(bh); 773 774 UFSD("EXIT\n"); 775 unlock_new_inode(inode); 776 return inode; 777 778 bad_inode: 779 iget_failed(inode); 780 return ERR_PTR(-EIO); 781 } 782 783 static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode) 784 { 785 struct super_block *sb = inode->i_sb; 786 struct ufs_inode_info *ufsi = UFS_I(inode); 787 788 ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode); 789 ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink); 790 791 ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode)); 792 ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode)); 793 794 ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size); 795 ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec); 796 ufs_inode->ui_atime.tv_usec = 0; 797 ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec); 798 ufs_inode->ui_ctime.tv_usec = 0; 799 ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec); 800 ufs_inode->ui_mtime.tv_usec = 0; 801 ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks); 802 ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags); 803 ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation); 804 805 if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) { 806 ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow); 807 ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag); 808 } 809 810 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 811 /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */ 812 ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0]; 813 } else if (inode->i_blocks) { 814 memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data, 815 sizeof(ufs_inode->ui_u2.ui_addr)); 816 } 817 else { 818 memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink, 819 sizeof(ufs_inode->ui_u2.ui_symlink)); 820 } 821 822 if (!inode->i_nlink) 823 memset (ufs_inode, 0, sizeof(struct ufs_inode)); 824 } 825 826 static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode) 827 { 828 struct super_block *sb = inode->i_sb; 829 struct ufs_inode_info *ufsi = UFS_I(inode); 830 831 UFSD("ENTER\n"); 832 ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode); 833 ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink); 834 835 ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode)); 836 ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode)); 837 838 ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size); 839 ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec); 840 ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec); 841 ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec); 842 ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec); 843 ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec); 844 ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec); 845 846 ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks); 847 ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags); 848 ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation); 849 850 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 851 /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */ 852 ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0]; 853 } else if (inode->i_blocks) { 854 memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data, 855 sizeof(ufs_inode->ui_u2.ui_addr)); 856 } else { 857 memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink, 858 sizeof(ufs_inode->ui_u2.ui_symlink)); 859 } 860 861 if (!inode->i_nlink) 862 memset (ufs_inode, 0, sizeof(struct ufs2_inode)); 863 UFSD("EXIT\n"); 864 } 865 866 static int ufs_update_inode(struct inode * inode, int do_sync) 867 { 868 struct super_block *sb = inode->i_sb; 869 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 870 struct buffer_head * bh; 871 872 UFSD("ENTER, ino %lu\n", inode->i_ino); 873 874 if (inode->i_ino < UFS_ROOTINO || 875 inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) { 876 ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino); 877 return -1; 878 } 879 880 bh = sb_bread(sb, ufs_inotofsba(inode->i_ino)); 881 if (!bh) { 882 ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino); 883 return -1; 884 } 885 if (uspi->fs_magic == UFS2_MAGIC) { 886 struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data; 887 888 ufs2_update_inode(inode, 889 ufs2_inode + ufs_inotofsbo(inode->i_ino)); 890 } else { 891 struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data; 892 893 ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino)); 894 } 895 896 mark_buffer_dirty(bh); 897 if (do_sync) 898 sync_dirty_buffer(bh); 899 brelse (bh); 900 901 UFSD("EXIT\n"); 902 return 0; 903 } 904 905 int ufs_write_inode(struct inode *inode, struct writeback_control *wbc) 906 { 907 return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL); 908 } 909 910 int ufs_sync_inode (struct inode *inode) 911 { 912 return ufs_update_inode (inode, 1); 913 } 914 915 void ufs_evict_inode(struct inode * inode) 916 { 917 int want_delete = 0; 918 919 if (!inode->i_nlink && !is_bad_inode(inode)) 920 want_delete = 1; 921 922 truncate_inode_pages_final(&inode->i_data); 923 if (want_delete) { 924 inode->i_size = 0; 925 if (inode->i_blocks) 926 ufs_truncate_blocks(inode); 927 } 928 929 invalidate_inode_buffers(inode); 930 clear_inode(inode); 931 932 if (want_delete) 933 ufs_free_inode(inode); 934 } 935 936 struct to_free { 937 struct inode *inode; 938 u64 to; 939 unsigned count; 940 }; 941 942 static inline void free_data(struct to_free *ctx, u64 from, unsigned count) 943 { 944 if (ctx->count && ctx->to != from) { 945 ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count); 946 ctx->count = 0; 947 } 948 ctx->count += count; 949 ctx->to = from + count; 950 } 951 952 #define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift) 953 #define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift) 954 955 static void ufs_trunc_direct(struct inode *inode) 956 { 957 struct ufs_inode_info *ufsi = UFS_I(inode); 958 struct super_block * sb; 959 struct ufs_sb_private_info * uspi; 960 void *p; 961 u64 frag1, frag2, frag3, frag4, block1, block2; 962 struct to_free ctx = {.inode = inode}; 963 unsigned i, tmp; 964 965 UFSD("ENTER: ino %lu\n", inode->i_ino); 966 967 sb = inode->i_sb; 968 uspi = UFS_SB(sb)->s_uspi; 969 970 frag1 = DIRECT_FRAGMENT; 971 frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag); 972 frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1); 973 frag3 = frag4 & ~uspi->s_fpbmask; 974 block1 = block2 = 0; 975 if (frag2 > frag3) { 976 frag2 = frag4; 977 frag3 = frag4 = 0; 978 } else if (frag2 < frag3) { 979 block1 = ufs_fragstoblks (frag2); 980 block2 = ufs_fragstoblks (frag3); 981 } 982 983 UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu," 984 " frag3 %llu, frag4 %llu\n", inode->i_ino, 985 (unsigned long long)frag1, (unsigned long long)frag2, 986 (unsigned long long)block1, (unsigned long long)block2, 987 (unsigned long long)frag3, (unsigned long long)frag4); 988 989 if (frag1 >= frag2) 990 goto next1; 991 992 /* 993 * Free first free fragments 994 */ 995 p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1)); 996 tmp = ufs_data_ptr_to_cpu(sb, p); 997 if (!tmp ) 998 ufs_panic (sb, "ufs_trunc_direct", "internal error"); 999 frag2 -= frag1; 1000 frag1 = ufs_fragnum (frag1); 1001 1002 ufs_free_fragments(inode, tmp + frag1, frag2); 1003 1004 next1: 1005 /* 1006 * Free whole blocks 1007 */ 1008 for (i = block1 ; i < block2; i++) { 1009 p = ufs_get_direct_data_ptr(uspi, ufsi, i); 1010 tmp = ufs_data_ptr_to_cpu(sb, p); 1011 if (!tmp) 1012 continue; 1013 write_seqlock(&ufsi->meta_lock); 1014 ufs_data_ptr_clear(uspi, p); 1015 write_sequnlock(&ufsi->meta_lock); 1016 1017 free_data(&ctx, tmp, uspi->s_fpb); 1018 } 1019 1020 free_data(&ctx, 0, 0); 1021 1022 if (frag3 >= frag4) 1023 goto next3; 1024 1025 /* 1026 * Free last free fragments 1027 */ 1028 p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3)); 1029 tmp = ufs_data_ptr_to_cpu(sb, p); 1030 if (!tmp ) 1031 ufs_panic(sb, "ufs_truncate_direct", "internal error"); 1032 frag4 = ufs_fragnum (frag4); 1033 write_seqlock(&ufsi->meta_lock); 1034 ufs_data_ptr_clear(uspi, p); 1035 write_sequnlock(&ufsi->meta_lock); 1036 1037 ufs_free_fragments (inode, tmp, frag4); 1038 next3: 1039 1040 UFSD("EXIT: ino %lu\n", inode->i_ino); 1041 } 1042 1043 static void free_full_branch(struct inode *inode, u64 ind_block, int depth) 1044 { 1045 struct super_block *sb = inode->i_sb; 1046 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 1047 struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize); 1048 unsigned i; 1049 1050 if (!ubh) 1051 return; 1052 1053 if (--depth) { 1054 for (i = 0; i < uspi->s_apb; i++) { 1055 void *p = ubh_get_data_ptr(uspi, ubh, i); 1056 u64 block = ufs_data_ptr_to_cpu(sb, p); 1057 if (block) 1058 free_full_branch(inode, block, depth); 1059 } 1060 } else { 1061 struct to_free ctx = {.inode = inode}; 1062 1063 for (i = 0; i < uspi->s_apb; i++) { 1064 void *p = ubh_get_data_ptr(uspi, ubh, i); 1065 u64 block = ufs_data_ptr_to_cpu(sb, p); 1066 if (block) 1067 free_data(&ctx, block, uspi->s_fpb); 1068 } 1069 free_data(&ctx, 0, 0); 1070 } 1071 1072 ubh_bforget(ubh); 1073 ufs_free_blocks(inode, ind_block, uspi->s_fpb); 1074 } 1075 1076 static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth) 1077 { 1078 struct super_block *sb = inode->i_sb; 1079 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 1080 unsigned i; 1081 1082 if (--depth) { 1083 for (i = from; i < uspi->s_apb ; i++) { 1084 void *p = ubh_get_data_ptr(uspi, ubh, i); 1085 u64 block = ufs_data_ptr_to_cpu(sb, p); 1086 if (block) { 1087 write_seqlock(&UFS_I(inode)->meta_lock); 1088 ufs_data_ptr_clear(uspi, p); 1089 write_sequnlock(&UFS_I(inode)->meta_lock); 1090 ubh_mark_buffer_dirty(ubh); 1091 free_full_branch(inode, block, depth); 1092 } 1093 } 1094 } else { 1095 struct to_free ctx = {.inode = inode}; 1096 1097 for (i = from; i < uspi->s_apb; i++) { 1098 void *p = ubh_get_data_ptr(uspi, ubh, i); 1099 u64 block = ufs_data_ptr_to_cpu(sb, p); 1100 if (block) { 1101 write_seqlock(&UFS_I(inode)->meta_lock); 1102 ufs_data_ptr_clear(uspi, p); 1103 write_sequnlock(&UFS_I(inode)->meta_lock); 1104 ubh_mark_buffer_dirty(ubh); 1105 free_data(&ctx, block, uspi->s_fpb); 1106 } 1107 } 1108 free_data(&ctx, 0, 0); 1109 } 1110 if (IS_SYNC(inode) && ubh_buffer_dirty(ubh)) 1111 ubh_sync_block(ubh); 1112 ubh_brelse(ubh); 1113 } 1114 1115 static int ufs_alloc_lastblock(struct inode *inode, loff_t size) 1116 { 1117 int err = 0; 1118 struct super_block *sb = inode->i_sb; 1119 struct address_space *mapping = inode->i_mapping; 1120 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 1121 unsigned i, end; 1122 sector_t lastfrag; 1123 struct page *lastpage; 1124 struct buffer_head *bh; 1125 u64 phys64; 1126 1127 lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift; 1128 1129 if (!lastfrag) 1130 goto out; 1131 1132 lastfrag--; 1133 1134 lastpage = ufs_get_locked_page(mapping, lastfrag >> 1135 (PAGE_CACHE_SHIFT - inode->i_blkbits)); 1136 if (IS_ERR(lastpage)) { 1137 err = -EIO; 1138 goto out; 1139 } 1140 1141 end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1); 1142 bh = page_buffers(lastpage); 1143 for (i = 0; i < end; ++i) 1144 bh = bh->b_this_page; 1145 1146 1147 err = ufs_getfrag_block(inode, lastfrag, bh, 1); 1148 1149 if (unlikely(err)) 1150 goto out_unlock; 1151 1152 if (buffer_new(bh)) { 1153 clear_buffer_new(bh); 1154 unmap_underlying_metadata(bh->b_bdev, 1155 bh->b_blocknr); 1156 /* 1157 * we do not zeroize fragment, because of 1158 * if it maped to hole, it already contains zeroes 1159 */ 1160 set_buffer_uptodate(bh); 1161 mark_buffer_dirty(bh); 1162 set_page_dirty(lastpage); 1163 } 1164 1165 if (lastfrag >= UFS_IND_FRAGMENT) { 1166 end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1; 1167 phys64 = bh->b_blocknr + 1; 1168 for (i = 0; i < end; ++i) { 1169 bh = sb_getblk(sb, i + phys64); 1170 lock_buffer(bh); 1171 memset(bh->b_data, 0, sb->s_blocksize); 1172 set_buffer_uptodate(bh); 1173 mark_buffer_dirty(bh); 1174 unlock_buffer(bh); 1175 sync_dirty_buffer(bh); 1176 brelse(bh); 1177 } 1178 } 1179 out_unlock: 1180 ufs_put_locked_page(lastpage); 1181 out: 1182 return err; 1183 } 1184 1185 static void __ufs_truncate_blocks(struct inode *inode) 1186 { 1187 struct ufs_inode_info *ufsi = UFS_I(inode); 1188 struct super_block *sb = inode->i_sb; 1189 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 1190 unsigned offsets[4]; 1191 int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets); 1192 int depth2; 1193 unsigned i; 1194 struct ufs_buffer_head *ubh[3]; 1195 void *p; 1196 u64 block; 1197 1198 if (!depth) 1199 return; 1200 1201 /* find the last non-zero in offsets[] */ 1202 for (depth2 = depth - 1; depth2; depth2--) 1203 if (offsets[depth2]) 1204 break; 1205 1206 mutex_lock(&ufsi->truncate_mutex); 1207 if (depth == 1) { 1208 ufs_trunc_direct(inode); 1209 offsets[0] = UFS_IND_BLOCK; 1210 } else { 1211 /* get the blocks that should be partially emptied */ 1212 p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]); 1213 for (i = 0; i < depth2; i++) { 1214 offsets[i]++; /* next branch is fully freed */ 1215 block = ufs_data_ptr_to_cpu(sb, p); 1216 if (!block) 1217 break; 1218 ubh[i] = ubh_bread(sb, block, uspi->s_bsize); 1219 if (!ubh[i]) { 1220 write_seqlock(&ufsi->meta_lock); 1221 ufs_data_ptr_clear(uspi, p); 1222 write_sequnlock(&ufsi->meta_lock); 1223 break; 1224 } 1225 p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]); 1226 } 1227 while (i--) 1228 free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1); 1229 } 1230 for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) { 1231 p = ufs_get_direct_data_ptr(uspi, ufsi, i); 1232 block = ufs_data_ptr_to_cpu(sb, p); 1233 if (block) { 1234 write_seqlock(&ufsi->meta_lock); 1235 ufs_data_ptr_clear(uspi, p); 1236 write_sequnlock(&ufsi->meta_lock); 1237 free_full_branch(inode, block, i - UFS_IND_BLOCK + 1); 1238 } 1239 } 1240 ufsi->i_lastfrag = DIRECT_FRAGMENT; 1241 mark_inode_dirty(inode); 1242 mutex_unlock(&ufsi->truncate_mutex); 1243 } 1244 1245 static int ufs_truncate(struct inode *inode, loff_t size) 1246 { 1247 int err = 0; 1248 1249 UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n", 1250 inode->i_ino, (unsigned long long)size, 1251 (unsigned long long)i_size_read(inode)); 1252 1253 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 1254 S_ISLNK(inode->i_mode))) 1255 return -EINVAL; 1256 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 1257 return -EPERM; 1258 1259 err = ufs_alloc_lastblock(inode, size); 1260 1261 if (err) 1262 goto out; 1263 1264 block_truncate_page(inode->i_mapping, size, ufs_getfrag_block); 1265 1266 truncate_setsize(inode, size); 1267 1268 __ufs_truncate_blocks(inode); 1269 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; 1270 mark_inode_dirty(inode); 1271 out: 1272 UFSD("EXIT: err %d\n", err); 1273 return err; 1274 } 1275 1276 void ufs_truncate_blocks(struct inode *inode) 1277 { 1278 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 1279 S_ISLNK(inode->i_mode))) 1280 return; 1281 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 1282 return; 1283 __ufs_truncate_blocks(inode); 1284 } 1285 1286 int ufs_setattr(struct dentry *dentry, struct iattr *attr) 1287 { 1288 struct inode *inode = d_inode(dentry); 1289 unsigned int ia_valid = attr->ia_valid; 1290 int error; 1291 1292 error = inode_change_ok(inode, attr); 1293 if (error) 1294 return error; 1295 1296 if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) { 1297 error = ufs_truncate(inode, attr->ia_size); 1298 if (error) 1299 return error; 1300 } 1301 1302 setattr_copy(inode, attr); 1303 mark_inode_dirty(inode); 1304 return 0; 1305 } 1306 1307 const struct inode_operations ufs_file_inode_operations = { 1308 .setattr = ufs_setattr, 1309 }; 1310