1 /* 2 * linux/fs/ufs/inode.c 3 * 4 * Copyright (C) 1998 5 * Daniel Pirkl <daniel.pirkl@email.cz> 6 * Charles University, Faculty of Mathematics and Physics 7 * 8 * from 9 * 10 * linux/fs/ext2/inode.c 11 * 12 * Copyright (C) 1992, 1993, 1994, 1995 13 * Remy Card (card@masi.ibp.fr) 14 * Laboratoire MASI - Institut Blaise Pascal 15 * Universite Pierre et Marie Curie (Paris VI) 16 * 17 * from 18 * 19 * linux/fs/minix/inode.c 20 * 21 * Copyright (C) 1991, 1992 Linus Torvalds 22 * 23 * Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993 24 * Big-endian to little-endian byte-swapping/bitmaps by 25 * David S. Miller (davem@caip.rutgers.edu), 1995 26 */ 27 28 #include <asm/uaccess.h> 29 #include <asm/system.h> 30 31 #include <linux/errno.h> 32 #include <linux/fs.h> 33 #include <linux/ufs_fs.h> 34 #include <linux/time.h> 35 #include <linux/stat.h> 36 #include <linux/string.h> 37 #include <linux/mm.h> 38 #include <linux/smp_lock.h> 39 #include <linux/buffer_head.h> 40 41 #include "swab.h" 42 #include "util.h" 43 44 static u64 ufs_frag_map(struct inode *inode, sector_t frag); 45 46 static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t offsets[4]) 47 { 48 struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi; 49 int ptrs = uspi->s_apb; 50 int ptrs_bits = uspi->s_apbshift; 51 const long direct_blocks = UFS_NDADDR, 52 indirect_blocks = ptrs, 53 double_blocks = (1 << (ptrs_bits * 2)); 54 int n = 0; 55 56 57 UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks); 58 if (i_block < 0) { 59 ufs_warning(inode->i_sb, "ufs_block_to_path", "block < 0"); 60 } else if (i_block < direct_blocks) { 61 offsets[n++] = i_block; 62 } else if ((i_block -= direct_blocks) < indirect_blocks) { 63 offsets[n++] = UFS_IND_BLOCK; 64 offsets[n++] = i_block; 65 } else if ((i_block -= indirect_blocks) < double_blocks) { 66 offsets[n++] = UFS_DIND_BLOCK; 67 offsets[n++] = i_block >> ptrs_bits; 68 offsets[n++] = i_block & (ptrs - 1); 69 } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) { 70 offsets[n++] = UFS_TIND_BLOCK; 71 offsets[n++] = i_block >> (ptrs_bits * 2); 72 offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); 73 offsets[n++] = i_block & (ptrs - 1); 74 } else { 75 ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big"); 76 } 77 return n; 78 } 79 80 /* 81 * Returns the location of the fragment from 82 * the begining of the filesystem. 83 */ 84 85 static u64 ufs_frag_map(struct inode *inode, sector_t frag) 86 { 87 struct ufs_inode_info *ufsi = UFS_I(inode); 88 struct super_block *sb = inode->i_sb; 89 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 90 u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift; 91 int shift = uspi->s_apbshift-uspi->s_fpbshift; 92 sector_t offsets[4], *p; 93 int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets); 94 u64 ret = 0L; 95 __fs32 block; 96 __fs64 u2_block = 0L; 97 unsigned flags = UFS_SB(sb)->s_flags; 98 u64 temp = 0L; 99 100 UFSD(": frag = %llu depth = %d\n", (unsigned long long)frag, depth); 101 UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n", 102 uspi->s_fpbshift, uspi->s_apbmask, 103 (unsigned long long)mask); 104 105 if (depth == 0) 106 return 0; 107 108 p = offsets; 109 110 lock_kernel(); 111 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) 112 goto ufs2; 113 114 block = ufsi->i_u1.i_data[*p++]; 115 if (!block) 116 goto out; 117 while (--depth) { 118 struct buffer_head *bh; 119 sector_t n = *p++; 120 121 bh = sb_bread(sb, uspi->s_sbbase + fs32_to_cpu(sb, block)+(n>>shift)); 122 if (!bh) 123 goto out; 124 block = ((__fs32 *) bh->b_data)[n & mask]; 125 brelse (bh); 126 if (!block) 127 goto out; 128 } 129 ret = (u64) (uspi->s_sbbase + fs32_to_cpu(sb, block) + (frag & uspi->s_fpbmask)); 130 goto out; 131 ufs2: 132 u2_block = ufsi->i_u1.u2_i_data[*p++]; 133 if (!u2_block) 134 goto out; 135 136 137 while (--depth) { 138 struct buffer_head *bh; 139 sector_t n = *p++; 140 141 142 temp = (u64)(uspi->s_sbbase) + fs64_to_cpu(sb, u2_block); 143 bh = sb_bread(sb, temp +(u64) (n>>shift)); 144 if (!bh) 145 goto out; 146 u2_block = ((__fs64 *)bh->b_data)[n & mask]; 147 brelse(bh); 148 if (!u2_block) 149 goto out; 150 } 151 temp = (u64)uspi->s_sbbase + fs64_to_cpu(sb, u2_block); 152 ret = temp + (u64) (frag & uspi->s_fpbmask); 153 154 out: 155 unlock_kernel(); 156 return ret; 157 } 158 159 static void ufs_clear_frag(struct inode *inode, struct buffer_head *bh) 160 { 161 lock_buffer(bh); 162 memset(bh->b_data, 0, inode->i_sb->s_blocksize); 163 set_buffer_uptodate(bh); 164 mark_buffer_dirty(bh); 165 unlock_buffer(bh); 166 if (IS_SYNC(inode)) 167 sync_dirty_buffer(bh); 168 } 169 170 static struct buffer_head * 171 ufs_clear_frags(struct inode *inode, sector_t beg, 172 unsigned int n, sector_t want) 173 { 174 struct buffer_head *res = NULL, *bh; 175 sector_t end = beg + n; 176 177 for (; beg < end; ++beg) { 178 bh = sb_getblk(inode->i_sb, beg); 179 ufs_clear_frag(inode, bh); 180 if (want != beg) 181 brelse(bh); 182 else 183 res = bh; 184 } 185 BUG_ON(!res); 186 return res; 187 } 188 189 /** 190 * ufs_inode_getfrag() - allocate new fragment(s) 191 * @inode - pointer to inode 192 * @fragment - number of `fragment' which hold pointer 193 * to new allocated fragment(s) 194 * @new_fragment - number of new allocated fragment(s) 195 * @required - how many fragment(s) we require 196 * @err - we set it if something wrong 197 * @phys - pointer to where we save physical number of new allocated fragments, 198 * NULL if we allocate not data(indirect blocks for example). 199 * @new - we set it if we allocate new block 200 * @locked_page - for ufs_new_fragments() 201 */ 202 static struct buffer_head * 203 ufs_inode_getfrag(struct inode *inode, unsigned int fragment, 204 sector_t new_fragment, unsigned int required, int *err, 205 long *phys, int *new, struct page *locked_page) 206 { 207 struct ufs_inode_info *ufsi = UFS_I(inode); 208 struct super_block *sb = inode->i_sb; 209 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 210 struct buffer_head * result; 211 unsigned block, blockoff, lastfrag, lastblock, lastblockoff; 212 unsigned tmp, goal; 213 __fs32 * p, * p2; 214 215 UFSD("ENTER, ino %lu, fragment %u, new_fragment %llu, required %u, " 216 "metadata %d\n", inode->i_ino, fragment, 217 (unsigned long long)new_fragment, required, !phys); 218 219 /* TODO : to be done for write support 220 if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) 221 goto ufs2; 222 */ 223 224 block = ufs_fragstoblks (fragment); 225 blockoff = ufs_fragnum (fragment); 226 p = ufsi->i_u1.i_data + block; 227 goal = 0; 228 229 repeat: 230 tmp = fs32_to_cpu(sb, *p); 231 lastfrag = ufsi->i_lastfrag; 232 if (tmp && fragment < lastfrag) { 233 if (!phys) { 234 result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff); 235 if (tmp == fs32_to_cpu(sb, *p)) { 236 UFSD("EXIT, result %u\n", tmp + blockoff); 237 return result; 238 } 239 brelse (result); 240 goto repeat; 241 } else { 242 *phys = tmp + blockoff; 243 return NULL; 244 } 245 } 246 247 lastblock = ufs_fragstoblks (lastfrag); 248 lastblockoff = ufs_fragnum (lastfrag); 249 /* 250 * We will extend file into new block beyond last allocated block 251 */ 252 if (lastblock < block) { 253 /* 254 * We must reallocate last allocated block 255 */ 256 if (lastblockoff) { 257 p2 = ufsi->i_u1.i_data + lastblock; 258 tmp = ufs_new_fragments (inode, p2, lastfrag, 259 fs32_to_cpu(sb, *p2), uspi->s_fpb - lastblockoff, 260 err, locked_page); 261 if (!tmp) { 262 if (lastfrag != ufsi->i_lastfrag) 263 goto repeat; 264 else 265 return NULL; 266 } 267 lastfrag = ufsi->i_lastfrag; 268 269 } 270 tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]); 271 if (tmp) 272 goal = tmp + uspi->s_fpb; 273 tmp = ufs_new_fragments (inode, p, fragment - blockoff, 274 goal, required + blockoff, 275 err, locked_page); 276 } 277 /* 278 * We will extend last allocated block 279 */ 280 else if (lastblock == block) { 281 tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff), 282 fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff), 283 err, locked_page); 284 } else /* (lastblock > block) */ { 285 /* 286 * We will allocate new block before last allocated block 287 */ 288 if (block) { 289 tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[block-1]); 290 if (tmp) 291 goal = tmp + uspi->s_fpb; 292 } 293 tmp = ufs_new_fragments(inode, p, fragment - blockoff, 294 goal, uspi->s_fpb, err, locked_page); 295 } 296 if (!tmp) { 297 if ((!blockoff && *p) || 298 (blockoff && lastfrag != ufsi->i_lastfrag)) 299 goto repeat; 300 *err = -ENOSPC; 301 return NULL; 302 } 303 304 if (!phys) { 305 result = ufs_clear_frags(inode, tmp, required, tmp + blockoff); 306 } else { 307 *phys = tmp + blockoff; 308 result = NULL; 309 *err = 0; 310 *new = 1; 311 } 312 313 inode->i_ctime = CURRENT_TIME_SEC; 314 if (IS_SYNC(inode)) 315 ufs_sync_inode (inode); 316 mark_inode_dirty(inode); 317 UFSD("EXIT, result %u\n", tmp + blockoff); 318 return result; 319 320 /* This part : To be implemented .... 321 Required only for writing, not required for READ-ONLY. 322 ufs2: 323 324 u2_block = ufs_fragstoblks(fragment); 325 u2_blockoff = ufs_fragnum(fragment); 326 p = ufsi->i_u1.u2_i_data + block; 327 goal = 0; 328 329 repeat2: 330 tmp = fs32_to_cpu(sb, *p); 331 lastfrag = ufsi->i_lastfrag; 332 333 */ 334 } 335 336 /** 337 * ufs_inode_getblock() - allocate new block 338 * @inode - pointer to inode 339 * @bh - pointer to block which hold "pointer" to new allocated block 340 * @fragment - number of `fragment' which hold pointer 341 * to new allocated block 342 * @new_fragment - number of new allocated fragment 343 * (block will hold this fragment and also uspi->s_fpb-1) 344 * @err - see ufs_inode_getfrag() 345 * @phys - see ufs_inode_getfrag() 346 * @new - see ufs_inode_getfrag() 347 * @locked_page - see ufs_inode_getfrag() 348 */ 349 static struct buffer_head * 350 ufs_inode_getblock(struct inode *inode, struct buffer_head *bh, 351 unsigned int fragment, sector_t new_fragment, int *err, 352 long *phys, int *new, struct page *locked_page) 353 { 354 struct super_block *sb = inode->i_sb; 355 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 356 struct buffer_head * result; 357 unsigned tmp, goal, block, blockoff; 358 __fs32 * p; 359 360 block = ufs_fragstoblks (fragment); 361 blockoff = ufs_fragnum (fragment); 362 363 UFSD("ENTER, ino %lu, fragment %u, new_fragment %llu, metadata %d\n", 364 inode->i_ino, fragment, (unsigned long long)new_fragment, !phys); 365 366 result = NULL; 367 if (!bh) 368 goto out; 369 if (!buffer_uptodate(bh)) { 370 ll_rw_block (READ, 1, &bh); 371 wait_on_buffer (bh); 372 if (!buffer_uptodate(bh)) 373 goto out; 374 } 375 376 p = (__fs32 *) bh->b_data + block; 377 repeat: 378 tmp = fs32_to_cpu(sb, *p); 379 if (tmp) { 380 if (!phys) { 381 result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff); 382 if (tmp == fs32_to_cpu(sb, *p)) 383 goto out; 384 brelse (result); 385 goto repeat; 386 } else { 387 *phys = tmp + blockoff; 388 goto out; 389 } 390 } 391 392 if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1]))) 393 goal = tmp + uspi->s_fpb; 394 else 395 goal = bh->b_blocknr + uspi->s_fpb; 396 tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal, 397 uspi->s_fpb, err, locked_page); 398 if (!tmp) { 399 if (fs32_to_cpu(sb, *p)) 400 goto repeat; 401 goto out; 402 } 403 404 405 if (!phys) { 406 result = ufs_clear_frags(inode, tmp, uspi->s_fpb, 407 tmp + blockoff); 408 } else { 409 *phys = tmp + blockoff; 410 *new = 1; 411 } 412 413 mark_buffer_dirty(bh); 414 if (IS_SYNC(inode)) 415 sync_dirty_buffer(bh); 416 inode->i_ctime = CURRENT_TIME_SEC; 417 mark_inode_dirty(inode); 418 UFSD("result %u\n", tmp + blockoff); 419 out: 420 brelse (bh); 421 UFSD("EXIT\n"); 422 return result; 423 } 424 425 /** 426 * ufs_getfrag_bloc() - `get_block_t' function, interface between UFS and 427 * readpage, writepage and so on 428 */ 429 430 int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create) 431 { 432 struct super_block * sb = inode->i_sb; 433 struct ufs_sb_private_info * uspi = UFS_SB(sb)->s_uspi; 434 struct buffer_head * bh; 435 int ret, err, new; 436 unsigned long ptr,phys; 437 u64 phys64 = 0; 438 439 if (!create) { 440 phys64 = ufs_frag_map(inode, fragment); 441 UFSD("phys64 = %llu\n", (unsigned long long)phys64); 442 if (phys64) 443 map_bh(bh_result, sb, phys64); 444 return 0; 445 } 446 447 /* This code entered only while writing ....? */ 448 449 err = -EIO; 450 new = 0; 451 ret = 0; 452 bh = NULL; 453 454 lock_kernel(); 455 456 UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment); 457 if (fragment < 0) 458 goto abort_negative; 459 if (fragment > 460 ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb) 461 << uspi->s_fpbshift)) 462 goto abort_too_big; 463 464 err = 0; 465 ptr = fragment; 466 467 /* 468 * ok, these macros clean the logic up a bit and make 469 * it much more readable: 470 */ 471 #define GET_INODE_DATABLOCK(x) \ 472 ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new, bh_result->b_page) 473 #define GET_INODE_PTR(x) \ 474 ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL, bh_result->b_page) 475 #define GET_INDIRECT_DATABLOCK(x) \ 476 ufs_inode_getblock(inode, bh, x, fragment, \ 477 &err, &phys, &new, bh_result->b_page); 478 #define GET_INDIRECT_PTR(x) \ 479 ufs_inode_getblock(inode, bh, x, fragment, \ 480 &err, NULL, NULL, bh_result->b_page); 481 482 if (ptr < UFS_NDIR_FRAGMENT) { 483 bh = GET_INODE_DATABLOCK(ptr); 484 goto out; 485 } 486 ptr -= UFS_NDIR_FRAGMENT; 487 if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) { 488 bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift)); 489 goto get_indirect; 490 } 491 ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift); 492 if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) { 493 bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift)); 494 goto get_double; 495 } 496 ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift); 497 bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift)); 498 bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask); 499 get_double: 500 bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask); 501 get_indirect: 502 bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask); 503 504 #undef GET_INODE_DATABLOCK 505 #undef GET_INODE_PTR 506 #undef GET_INDIRECT_DATABLOCK 507 #undef GET_INDIRECT_PTR 508 509 out: 510 if (err) 511 goto abort; 512 if (new) 513 set_buffer_new(bh_result); 514 map_bh(bh_result, sb, phys); 515 abort: 516 unlock_kernel(); 517 return err; 518 519 abort_negative: 520 ufs_warning(sb, "ufs_get_block", "block < 0"); 521 goto abort; 522 523 abort_too_big: 524 ufs_warning(sb, "ufs_get_block", "block > big"); 525 goto abort; 526 } 527 528 static struct buffer_head *ufs_getfrag(struct inode *inode, 529 unsigned int fragment, 530 int create, int *err) 531 { 532 struct buffer_head dummy; 533 int error; 534 535 dummy.b_state = 0; 536 dummy.b_blocknr = -1000; 537 error = ufs_getfrag_block(inode, fragment, &dummy, create); 538 *err = error; 539 if (!error && buffer_mapped(&dummy)) { 540 struct buffer_head *bh; 541 bh = sb_getblk(inode->i_sb, dummy.b_blocknr); 542 if (buffer_new(&dummy)) { 543 memset(bh->b_data, 0, inode->i_sb->s_blocksize); 544 set_buffer_uptodate(bh); 545 mark_buffer_dirty(bh); 546 } 547 return bh; 548 } 549 return NULL; 550 } 551 552 struct buffer_head * ufs_bread (struct inode * inode, unsigned fragment, 553 int create, int * err) 554 { 555 struct buffer_head * bh; 556 557 UFSD("ENTER, ino %lu, fragment %u\n", inode->i_ino, fragment); 558 bh = ufs_getfrag (inode, fragment, create, err); 559 if (!bh || buffer_uptodate(bh)) 560 return bh; 561 ll_rw_block (READ, 1, &bh); 562 wait_on_buffer (bh); 563 if (buffer_uptodate(bh)) 564 return bh; 565 brelse (bh); 566 *err = -EIO; 567 return NULL; 568 } 569 570 static int ufs_writepage(struct page *page, struct writeback_control *wbc) 571 { 572 return block_write_full_page(page,ufs_getfrag_block,wbc); 573 } 574 static int ufs_readpage(struct file *file, struct page *page) 575 { 576 return block_read_full_page(page,ufs_getfrag_block); 577 } 578 static int ufs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to) 579 { 580 return block_prepare_write(page,from,to,ufs_getfrag_block); 581 } 582 static sector_t ufs_bmap(struct address_space *mapping, sector_t block) 583 { 584 return generic_block_bmap(mapping,block,ufs_getfrag_block); 585 } 586 const struct address_space_operations ufs_aops = { 587 .readpage = ufs_readpage, 588 .writepage = ufs_writepage, 589 .sync_page = block_sync_page, 590 .prepare_write = ufs_prepare_write, 591 .commit_write = generic_commit_write, 592 .bmap = ufs_bmap 593 }; 594 595 static void ufs_set_inode_ops(struct inode *inode) 596 { 597 if (S_ISREG(inode->i_mode)) { 598 inode->i_op = &ufs_file_inode_operations; 599 inode->i_fop = &ufs_file_operations; 600 inode->i_mapping->a_ops = &ufs_aops; 601 } else if (S_ISDIR(inode->i_mode)) { 602 inode->i_op = &ufs_dir_inode_operations; 603 inode->i_fop = &ufs_dir_operations; 604 inode->i_mapping->a_ops = &ufs_aops; 605 } else if (S_ISLNK(inode->i_mode)) { 606 if (!inode->i_blocks) 607 inode->i_op = &ufs_fast_symlink_inode_operations; 608 else { 609 inode->i_op = &page_symlink_inode_operations; 610 inode->i_mapping->a_ops = &ufs_aops; 611 } 612 } else 613 init_special_inode(inode, inode->i_mode, 614 ufs_get_inode_dev(inode->i_sb, UFS_I(inode))); 615 } 616 617 static void ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode) 618 { 619 struct ufs_inode_info *ufsi = UFS_I(inode); 620 struct super_block *sb = inode->i_sb; 621 mode_t mode; 622 unsigned i; 623 624 /* 625 * Copy data to the in-core inode. 626 */ 627 inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode); 628 inode->i_nlink = fs16_to_cpu(sb, ufs_inode->ui_nlink); 629 if (inode->i_nlink == 0) 630 ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); 631 632 /* 633 * Linux now has 32-bit uid and gid, so we can support EFT. 634 */ 635 inode->i_uid = ufs_get_inode_uid(sb, ufs_inode); 636 inode->i_gid = ufs_get_inode_gid(sb, ufs_inode); 637 638 inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size); 639 inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec); 640 inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec); 641 inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec); 642 inode->i_mtime.tv_nsec = 0; 643 inode->i_atime.tv_nsec = 0; 644 inode->i_ctime.tv_nsec = 0; 645 inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks); 646 ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags); 647 ufsi->i_gen = fs32_to_cpu(sb, ufs_inode->ui_gen); 648 ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); 649 ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); 650 651 652 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { 653 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) 654 ufsi->i_u1.i_data[i] = ufs_inode->ui_u2.ui_addr.ui_db[i]; 655 } else { 656 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) 657 ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i]; 658 } 659 } 660 661 static void ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode) 662 { 663 struct ufs_inode_info *ufsi = UFS_I(inode); 664 struct super_block *sb = inode->i_sb; 665 mode_t mode; 666 unsigned i; 667 668 UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino); 669 /* 670 * Copy data to the in-core inode. 671 */ 672 inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode); 673 inode->i_nlink = fs16_to_cpu(sb, ufs2_inode->ui_nlink); 674 if (inode->i_nlink == 0) 675 ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); 676 677 /* 678 * Linux now has 32-bit uid and gid, so we can support EFT. 679 */ 680 inode->i_uid = fs32_to_cpu(sb, ufs2_inode->ui_uid); 681 inode->i_gid = fs32_to_cpu(sb, ufs2_inode->ui_gid); 682 683 inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size); 684 inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_atime.tv_sec); 685 inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_ctime.tv_sec); 686 inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_mtime.tv_sec); 687 inode->i_mtime.tv_nsec = 0; 688 inode->i_atime.tv_nsec = 0; 689 inode->i_ctime.tv_nsec = 0; 690 inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks); 691 ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags); 692 ufsi->i_gen = fs32_to_cpu(sb, ufs2_inode->ui_gen); 693 /* 694 ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); 695 ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); 696 */ 697 698 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { 699 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) 700 ufsi->i_u1.u2_i_data[i] = 701 ufs2_inode->ui_u2.ui_addr.ui_db[i]; 702 } else { 703 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) 704 ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i]; 705 } 706 } 707 708 void ufs_read_inode(struct inode * inode) 709 { 710 struct ufs_inode_info *ufsi = UFS_I(inode); 711 struct super_block * sb; 712 struct ufs_sb_private_info * uspi; 713 struct buffer_head * bh; 714 715 UFSD("ENTER, ino %lu\n", inode->i_ino); 716 717 sb = inode->i_sb; 718 uspi = UFS_SB(sb)->s_uspi; 719 720 if (inode->i_ino < UFS_ROOTINO || 721 inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) { 722 ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n", 723 inode->i_ino); 724 goto bad_inode; 725 } 726 727 bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino)); 728 if (!bh) { 729 ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n", 730 inode->i_ino); 731 goto bad_inode; 732 } 733 if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { 734 struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data; 735 736 ufs2_read_inode(inode, 737 ufs2_inode + ufs_inotofsbo(inode->i_ino)); 738 } else { 739 struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data; 740 741 ufs1_read_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino)); 742 } 743 744 inode->i_blksize = PAGE_SIZE;/*This is the optimal IO size (for stat)*/ 745 inode->i_version++; 746 ufsi->i_lastfrag = 747 (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; 748 ufsi->i_dir_start_lookup = 0; 749 ufsi->i_osync = 0; 750 751 ufs_set_inode_ops(inode); 752 753 brelse(bh); 754 755 UFSD("EXIT\n"); 756 return; 757 758 bad_inode: 759 make_bad_inode(inode); 760 } 761 762 static int ufs_update_inode(struct inode * inode, int do_sync) 763 { 764 struct ufs_inode_info *ufsi = UFS_I(inode); 765 struct super_block * sb; 766 struct ufs_sb_private_info * uspi; 767 struct buffer_head * bh; 768 struct ufs_inode * ufs_inode; 769 unsigned i; 770 unsigned flags; 771 772 UFSD("ENTER, ino %lu\n", inode->i_ino); 773 774 sb = inode->i_sb; 775 uspi = UFS_SB(sb)->s_uspi; 776 flags = UFS_SB(sb)->s_flags; 777 778 if (inode->i_ino < UFS_ROOTINO || 779 inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) { 780 ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino); 781 return -1; 782 } 783 784 bh = sb_bread(sb, ufs_inotofsba(inode->i_ino)); 785 if (!bh) { 786 ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino); 787 return -1; 788 } 789 ufs_inode = (struct ufs_inode *) (bh->b_data + ufs_inotofsbo(inode->i_ino) * sizeof(struct ufs_inode)); 790 791 ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode); 792 ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink); 793 794 ufs_set_inode_uid(sb, ufs_inode, inode->i_uid); 795 ufs_set_inode_gid(sb, ufs_inode, inode->i_gid); 796 797 ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size); 798 ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec); 799 ufs_inode->ui_atime.tv_usec = 0; 800 ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec); 801 ufs_inode->ui_ctime.tv_usec = 0; 802 ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec); 803 ufs_inode->ui_mtime.tv_usec = 0; 804 ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks); 805 ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags); 806 ufs_inode->ui_gen = cpu_to_fs32(sb, ufsi->i_gen); 807 808 if ((flags & UFS_UID_MASK) == UFS_UID_EFT) { 809 ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow); 810 ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag); 811 } 812 813 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 814 /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */ 815 ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0]; 816 } else if (inode->i_blocks) { 817 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) 818 ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.i_data[i]; 819 } 820 else { 821 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) 822 ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i]; 823 } 824 825 if (!inode->i_nlink) 826 memset (ufs_inode, 0, sizeof(struct ufs_inode)); 827 828 mark_buffer_dirty(bh); 829 if (do_sync) 830 sync_dirty_buffer(bh); 831 brelse (bh); 832 833 UFSD("EXIT\n"); 834 return 0; 835 } 836 837 int ufs_write_inode (struct inode * inode, int wait) 838 { 839 int ret; 840 lock_kernel(); 841 ret = ufs_update_inode (inode, wait); 842 unlock_kernel(); 843 return ret; 844 } 845 846 int ufs_sync_inode (struct inode *inode) 847 { 848 return ufs_update_inode (inode, 1); 849 } 850 851 void ufs_delete_inode (struct inode * inode) 852 { 853 loff_t old_i_size; 854 855 truncate_inode_pages(&inode->i_data, 0); 856 /*UFS_I(inode)->i_dtime = CURRENT_TIME;*/ 857 lock_kernel(); 858 mark_inode_dirty(inode); 859 ufs_update_inode(inode, IS_SYNC(inode)); 860 old_i_size = inode->i_size; 861 inode->i_size = 0; 862 if (inode->i_blocks && ufs_truncate(inode, old_i_size)) 863 ufs_warning(inode->i_sb, __FUNCTION__, "ufs_truncate failed\n"); 864 ufs_free_inode (inode); 865 unlock_kernel(); 866 } 867