/*
 *  linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 *  from
 *
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/ufs_fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>

#include "swab.h"
#include "util.h"

static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;
	int ptrs_bits = uspi->s_apbshift;
	const long direct_blocks = UFS_NDADDR,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;


	UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
	if (i_block < 0) {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
	}
	return n;
}

/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
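 * (In this file it backs the non-create path of ufs_getfrag_block(), i.e.
 * plain read mapping; nothing is allocated here.)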
 */

u64 ufs_frag_map(struct inode *inode, sector_t frag)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
	int shift = uspi->s_apbshift-uspi->s_fpbshift;
	sector_t offsets[4], *p;
	int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets);
	u64 ret = 0L;
	__fs32 block;
	__fs64 u2_block = 0L;
	unsigned flags = UFS_SB(sb)->s_flags;
	u64 temp = 0L;

	UFSD(": frag = %llu  depth = %d\n", (unsigned long long)frag, depth);
	UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",uspi->s_fpbshift,uspi->s_apbmask,mask);

	if (depth == 0)
		return 0;

	p = offsets;

	lock_kernel();
	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;

	block = ufsi->i_u1.i_data[*p++];
	if (!block)
		goto out;
	while (--depth) {
		struct buffer_head *bh;
		sector_t n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase + fs32_to_cpu(sb, block)+(n>>shift));
		if (!bh)
			goto out;
		block = ((__fs32 *) bh->b_data)[n & mask];
		brelse (bh);
		if (!block)
			goto out;
	}
	ret = (u64) (uspi->s_sbbase + fs32_to_cpu(sb, block) + (frag & uspi->s_fpbmask));
	goto out;
ufs2:
	u2_block = ufsi->i_u1.u2_i_data[*p++];
	if (!u2_block)
		goto out;

	while (--depth) {
		struct buffer_head *bh;
		sector_t n = *p++;

		temp = (u64)(uspi->s_sbbase) + fs64_to_cpu(sb, u2_block);
		bh = sb_bread(sb, temp +(u64) (n>>shift));
		if (!bh)
			goto out;
		u2_block = ((__fs64 *)bh->b_data)[n & mask];
		brelse(bh);
		if (!u2_block)
			goto out;
	}
	temp = (u64)uspi->s_sbbase + fs64_to_cpu(sb, u2_block);
	ret = temp + (u64) (frag & uspi->s_fpbmask);

out:
	unlock_kernel();
	return ret;
}

static void ufs_clear_block(struct inode *inode, struct buffer_head *bh)
{
	lock_buffer(bh);
	memset(bh->b_data, 0, inode->i_sb->s_blocksize);
	set_buffer_uptodate(bh);
	mark_buffer_dirty(bh);
	unlock_buffer(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
}

static struct buffer_head *ufs_inode_getfrag(struct inode *inode,
	unsigned int fragment, unsigned int new_fragment,
	unsigned int required, int *err, int metadata,
	long *phys, int *new, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct buffer_head * result;
	unsigned block, blockoff, lastfrag, lastblock, lastblockoff;
	unsigned tmp, goal;
	__fs32 * p, * p2;
	unsigned flags = 0;

	UFSD("ENTER, ino %lu, fragment %u, new_fragment %u, required %u\n",
	     inode->i_ino, fragment, new_fragment, required);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	flags = UFS_SB(sb)->s_flags;
	/* TODO : to be done for write support
	if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;
	*/

	block = ufs_fragstoblks (fragment);
	blockoff = ufs_fragnum (fragment);
	p = ufsi->i_u1.i_data + block;
	goal = 0;

repeat:
	tmp = fs32_to_cpu(sb, *p);
	lastfrag = ufsi->i_lastfrag;
	if (tmp && fragment < lastfrag) {
		if (metadata) {
			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
			if (tmp == fs32_to_cpu(sb, *p)) {
				UFSD("EXIT, result %u\n", tmp + blockoff);
				return result;
			}
			brelse (result);
			goto repeat;
		} else {
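			/*
			 * Already-allocated data block: report the physical
			 * fragment number to the caller; no buffer_head is
			 * returned for data blocks.
			 */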
			*phys = tmp + blockoff;
			return NULL;
		}
	}

	lastblock = ufs_fragstoblks (lastfrag);
	lastblockoff = ufs_fragnum (lastfrag);
	/*
	 * We will extend file into new block beyond last allocated block
	 */
	if (lastblock < block) {
		/*
		 * We must reallocate last allocated block
		 */
		if (lastblockoff) {
			p2 = ufsi->i_u1.i_data + lastblock;
			tmp = ufs_new_fragments (inode, p2, lastfrag,
				fs32_to_cpu(sb, *p2), uspi->s_fpb - lastblockoff,
				err, locked_page);
			if (!tmp) {
				if (lastfrag != ufsi->i_lastfrag)
					goto repeat;
				else
					return NULL;
			}
			lastfrag = ufsi->i_lastfrag;

		}
		goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb;
		tmp = ufs_new_fragments (inode, p, fragment - blockoff,
			goal, required + blockoff,
			err, locked_page);
	}
	/*
	 * We will extend last allocated block
	 */
	else if (lastblock == block) {
		tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff),
			fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff),
			err, locked_page);
	}
	/*
	 * We will allocate new block before last allocated block
	 */
	else /* (lastblock > block) */ {
		if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1])))
			goal = tmp + uspi->s_fpb;
		tmp = ufs_new_fragments(inode, p, fragment - blockoff,
			goal, uspi->s_fpb, err, locked_page);
	}
	if (!tmp) {
		if ((!blockoff && *p) ||
		    (blockoff && lastfrag != ufsi->i_lastfrag))
			goto repeat;
		*err = -ENOSPC;
		return NULL;
	}

	if (metadata) {
		result = sb_getblk(inode->i_sb, tmp + blockoff);
		ufs_clear_block(inode, result);
	} else {
		*phys = tmp + blockoff;
		result = NULL;
		*err = 0;
		*new = 1;
	}

	inode->i_ctime = CURRENT_TIME_SEC;
	if (IS_SYNC(inode))
		ufs_sync_inode (inode);
	mark_inode_dirty(inode);
	UFSD("EXIT, result %u\n", tmp + blockoff);
	return result;

	/* This part : To be implemented ....
	   Required only for writing, not required for READ-ONLY.
ufs2:

	u2_block = ufs_fragstoblks(fragment);
	u2_blockoff = ufs_fragnum(fragment);
	p = ufsi->i_u1.u2_i_data + block;
	goal = 0;

repeat2:
	tmp = fs32_to_cpu(sb, *p);
	lastfrag = ufsi->i_lastfrag;

	*/
}

static struct buffer_head *ufs_block_getfrag(struct inode *inode, struct buffer_head *bh,
	unsigned int fragment, unsigned int new_fragment,
	unsigned int blocksize, int * err, int metadata,
	long *phys, int *new, struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct buffer_head * result;
	unsigned tmp, goal, block, blockoff;
	__fs32 * p;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	block = ufs_fragstoblks (fragment);
	blockoff = ufs_fragnum (fragment);

	UFSD("ENTER, ino %lu, fragment %u, new_fragment %u\n", inode->i_ino, fragment, new_fragment);

	result = NULL;
	if (!bh)
		goto out;
	if (!buffer_uptodate(bh)) {
		ll_rw_block (READ, 1, &bh);
		wait_on_buffer (bh);
		if (!buffer_uptodate(bh))
			goto out;
	}

	p = (__fs32 *) bh->b_data + block;
repeat:
	tmp = fs32_to_cpu(sb, *p);
	if (tmp) {
		if (metadata) {
			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
			if (tmp == fs32_to_cpu(sb, *p))
				goto out;
			brelse (result);
			goto repeat;
		} else {
			*phys = tmp + blockoff;
			goto out;
		}
	}

	if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1]) + uspi->s_fpb))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_page);
	if (!tmp) {
		if (fs32_to_cpu(sb, *p))
			goto repeat;
		goto out;
	}

	if (metadata) {
		result = sb_getblk(sb, tmp + blockoff);
		ufs_clear_block(inode, result);
	} else {
		*phys = tmp + blockoff;
		*new = 1;
	}

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
	UFSD("result %u\n", tmp + blockoff);
out:
	brelse (bh);
	UFSD("EXIT\n");
	return result;
}

/*
 * This function gets the block which contains the fragment.
 */

int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
	struct super_block * sb = inode->i_sb;
	struct ufs_sb_private_info * uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;
	int ret, err, new;
	unsigned long ptr,phys;
	u64 phys64 = 0;

	if (!create) {
		phys64 = ufs_frag_map(inode, fragment);
		UFSD("phys64 = %llu \n",phys64);
		if (phys64)
			map_bh(bh_result, sb, phys64);
		return 0;
	}

	/* This code entered only while writing ....?
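	   (For plain reads the function has already returned above, after
	   mapping the fragment with ufs_frag_map(); everything below runs
	   only when create != 0.)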
	 */

	err = -EIO;
	new = 0;
	ret = 0;
	bh = NULL;

	lock_kernel();

	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
	if (fragment < 0)
		goto abort_negative;
	if (fragment >
	    ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb)
	     << uspi->s_fpbshift))
		goto abort_too_big;

	err = 0;
	ptr = fragment;

	/*
	 * ok, these macros clean the logic up a bit and make
	 * it much more readable:
	 */
#define GET_INODE_DATABLOCK(x) \
	ufs_inode_getfrag(inode, x, fragment, 1, &err, 0, &phys, &new, bh_result->b_page)
#define GET_INODE_PTR(x) \
	ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, 1, NULL, NULL, bh_result->b_page)
#define GET_INDIRECT_DATABLOCK(x) \
	ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
			  &err, 0, &phys, &new, bh_result->b_page);
#define GET_INDIRECT_PTR(x) \
	ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
			  &err, 1, NULL, NULL, bh_result->b_page);

	if (ptr < UFS_NDIR_FRAGMENT) {
		bh = GET_INODE_DATABLOCK(ptr);
		goto out;
	}
	ptr -= UFS_NDIR_FRAGMENT;
	if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) {
		bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift));
		goto get_indirect;
	}
	ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
	if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) {
		bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift));
		goto get_double;
	}
	ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
	bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift));
	bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask);
get_double:
	bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask);
get_indirect:
	bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask);

#undef GET_INODE_DATABLOCK
#undef GET_INODE_PTR
#undef GET_INDIRECT_DATABLOCK
#undef GET_INDIRECT_PTR

out:
	if (err)
		goto abort;
	if (new)
		set_buffer_new(bh_result);
	map_bh(bh_result, sb, phys);
abort:
	unlock_kernel();
	return err;

abort_negative:
	ufs_warning(sb, "ufs_get_block", "block < 0");
	goto abort;

abort_too_big:
	ufs_warning(sb, "ufs_get_block", "block > big");
	goto abort;
}

struct buffer_head *ufs_getfrag(struct inode *inode, unsigned int fragment,
				int create, int *err)
{
	struct buffer_head dummy;
	int error;

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	error = ufs_getfrag_block(inode, fragment, &dummy, create);
	*err = error;
	if (!error && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (buffer_new(&dummy)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
		}
		return bh;
	}
	return NULL;
}

struct buffer_head * ufs_bread (struct inode * inode, unsigned fragment,
				int create, int * err)
{
	struct buffer_head * bh;

	UFSD("ENTER, ino %lu, fragment %u\n", inode->i_ino, fragment);
	bh = ufs_getfrag (inode, fragment, create, err);
	if (!bh || buffer_uptodate(bh))
		return bh;
	ll_rw_block (READ, 1, &bh);
	wait_on_buffer (bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse (bh);
	*err = -EIO;
	return NULL;
}

static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page,ufs_getfrag_block,wbc);
}
static int ufs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page,ufs_getfrag_block);
}
static int ufs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return block_prepare_write(page,from,to,ufs_getfrag_block);
}
static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,ufs_getfrag_block);
}
struct address_space_operations ufs_aops = {
	.readpage = ufs_readpage,
	.writepage = ufs_writepage,
	.sync_page = block_sync_page,
	.prepare_write = ufs_prepare_write,
	.commit_write = generic_commit_write,
	.bmap = ufs_bmap
};

static void ufs_set_inode_ops(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ufs_file_inode_operations;
		inode->i_fop = &ufs_file_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ufs_dir_inode_operations;
		inode->i_fop = &ufs_dir_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks)
			inode->i_op = &ufs_fast_symlink_inode_operations;
		else {
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mapping->a_ops = &ufs_aops;
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}

void ufs_read_inode (struct inode * inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_inode * ufs_inode;
	struct ufs2_inode *ufs2_inode;
	struct buffer_head * bh;
	mode_t mode;
	unsigned i;
	unsigned flags;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	flags = UFS_SB(sb)->s_flags;

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
		goto bad_inode;
	}

	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
		goto bad_inode;
	}
	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2_inode;

	ufs_inode = (struct ufs_inode *) (bh->b_data + sizeof(struct ufs_inode) * ufs_inotofsbo(inode->i_ino));

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
	inode->i_nlink = fs16_to_cpu(sb, ufs_inode->ui_nlink);
	if (inode->i_nlink == 0)
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
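	 * (ufs_get_inode_uid()/ufs_get_inode_gid() are assumed here to pick
	 * the old 16-bit or the 32-bit EFT on-disk field according to this
	 * superblock's UID flavour; noted only as a reading aid.)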
	 */
	inode->i_uid = ufs_get_inode_uid(sb, ufs_inode);
	inode->i_gid = ufs_get_inode_gid(sb, ufs_inode);

	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
	inode->i_blksize = PAGE_SIZE;   /* This is the optimal IO size (for stat) */
	inode->i_version++;
	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
	ufsi->i_gen = fs32_to_cpu(sb, ufs_inode->ui_gen);
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	ufsi->i_lastfrag = (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
			ufsi->i_u1.i_data[i] = ufs_inode->ui_u2.ui_addr.ui_db[i];
	}
	else {
		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
			ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i];
	}
	ufsi->i_osync = 0;

	ufs_set_inode_ops(inode);

	brelse (bh);

	UFSD("EXIT\n");
	return;

bad_inode:
	make_bad_inode(inode);
	return;

ufs2_inode :
	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);

	ufs2_inode = (struct ufs2_inode *)(bh->b_data + sizeof(struct ufs2_inode) * ufs_inotofsbo(inode->i_ino));

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
	inode->i_nlink = fs16_to_cpu(sb, ufs2_inode->ui_nlink);
	if (inode->i_nlink == 0)
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
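	 * (UFS2 stores ui_uid/ui_gid as plain 32-bit fields, so they are
	 * copied straight through below.)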
	 */
	inode->i_uid = fs32_to_cpu(sb, ufs2_inode->ui_uid);
	inode->i_gid = fs32_to_cpu(sb, ufs2_inode->ui_gid);

	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_atime.tv_sec);
	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_ctime.tv_sec);
	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_mtime.tv_sec);
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
	inode->i_blksize = PAGE_SIZE; /*This is the optimal IO size(for stat)*/

	inode->i_version++;
	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
	ufsi->i_gen = fs32_to_cpu(sb, ufs2_inode->ui_gen);
	/*
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	*/
	ufsi->i_lastfrag= (inode->i_size + uspi->s_fsize- 1) >> uspi->s_fshift;

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
			ufsi->i_u1.u2_i_data[i] =
				ufs2_inode->ui_u2.ui_addr.ui_db[i];
	}
	else {
		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
			ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i];
	}
	ufsi->i_osync = 0;

	ufs_set_inode_ops(inode);

	brelse(bh);

	UFSD("EXIT\n");
	return;
}

static int ufs_update_inode(struct inode * inode, int do_sync)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct buffer_head * bh;
	struct ufs_inode * ufs_inode;
	unsigned i;
	unsigned flags;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	flags = UFS_SB(sb)->s_flags;

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
		return -1;
	}

	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
		return -1;
	}
	ufs_inode = (struct ufs_inode *) (bh->b_data + ufs_inotofsbo(inode->i_ino) * sizeof(struct ufs_inode));

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_set_inode_uid(sb, ufs_inode, inode->i_uid);
	ufs_set_inode_gid(sb, ufs_inode, inode->i_gid);

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atime.tv_usec = 0;
	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctime.tv_usec = 0;
	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtime.tv_usec = 0;
	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, ufsi->i_gen);

	if ((flags & UFS_UID_MASK) == UFS_UID_EFT) {
		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
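		/*
		 * Device nodes keep their device number in the first direct
		 * block slot; i_data[0] was read in without byte-swapping,
		 * so it is written back verbatim.
		 */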
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
	} else if (inode->i_blocks) {
		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
			ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.i_data[i];
	}
	else {
		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
			ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i];
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs_inode));

	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse (bh);

	UFSD("EXIT\n");
	return 0;
}

int ufs_write_inode (struct inode * inode, int wait)
{
	int ret;
	lock_kernel();
	ret = ufs_update_inode (inode, wait);
	unlock_kernel();
	return ret;
}

int ufs_sync_inode (struct inode *inode)
{
	return ufs_update_inode (inode, 1);
}

void ufs_delete_inode (struct inode * inode)
{
	truncate_inode_pages(&inode->i_data, 0);
	/*UFS_I(inode)->i_dtime = CURRENT_TIME;*/
	lock_kernel();
	mark_inode_dirty(inode);
	ufs_update_inode(inode, IS_SYNC(inode));
	inode->i_size = 0;
	if (inode->i_blocks)
		ufs_truncate (inode);
	ufs_free_inode (inode);
	unlock_kernel();
}