/*
 * linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * from
 *
 * linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/ufs_fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>

#include "swab.h"
#include "util.h"

#undef UFS_INODE_DEBUG
#undef UFS_INODE_DEBUG_MORE

#ifdef UFS_INODE_DEBUG
#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
#else
#define UFSD(x)
#endif

static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;
	int ptrs_bits = uspi->s_apbshift;
	const long direct_blocks = UFS_NDADDR,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;

	UFSD(("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks));
	if (i_block < 0) {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
	}
	return n;
}

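/*
 * Worked example for ufs_block_to_path() (the numbers are illustrative
 * and assume a common UFS1 layout in which UFS_NDADDR is 12 direct
 * slots and uspi->s_apb is 2048 pointers per indirect block, i.e.
 * ptrs_bits == 11): i_block 5000 is past the 12 direct and the 2048
 * singly-indirect slots, so the function returns depth 3 with
 *   offsets[0] = UFS_DIND_BLOCK
 *   offsets[1] = (5000 - 12 - 2048) >> 11   == 1
 *   offsets[2] = (5000 - 12 - 2048) & 2047  == 892
 */
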
/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 */

u64 ufs_frag_map(struct inode *inode, sector_t frag)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
	int shift = uspi->s_apbshift-uspi->s_fpbshift;
	sector_t offsets[4], *p;
	int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets);
	u64 ret = 0L;
	__fs32 block;
	__fs64 u2_block = 0L;
	unsigned flags = UFS_SB(sb)->s_flags;
	u64 temp = 0L;

	UFSD((": frag = %llu depth = %d\n", (unsigned long long)frag, depth));
	UFSD((": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",uspi->s_fpbshift,uspi->s_apbmask,mask));

	if (depth == 0)
		return 0;

	p = offsets;

	lock_kernel();
	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;

	block = ufsi->i_u1.i_data[*p++];
	if (!block)
		goto out;
	while (--depth) {
		struct buffer_head *bh;
		sector_t n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase + fs32_to_cpu(sb, block)+(n>>shift));
		if (!bh)
			goto out;
		block = ((__fs32 *) bh->b_data)[n & mask];
		brelse (bh);
		if (!block)
			goto out;
	}
	ret = (u64) (uspi->s_sbbase + fs32_to_cpu(sb, block) + (frag & uspi->s_fpbmask));
	goto out;
ufs2:
	u2_block = ufsi->i_u1.u2_i_data[*p++];
	if (!u2_block)
		goto out;

	while (--depth) {
		struct buffer_head *bh;
		sector_t n = *p++;

		temp = (u64)(uspi->s_sbbase) + fs64_to_cpu(sb, u2_block);
		bh = sb_bread(sb, temp +(u64) (n>>shift));
		if (!bh)
			goto out;
		u2_block = ((__fs64 *)bh->b_data)[n & mask];
		brelse(bh);
		if (!u2_block)
			goto out;
	}
	temp = (u64)uspi->s_sbbase + fs64_to_cpu(sb, u2_block);
	ret = temp + (u64) (frag & uspi->s_fpbmask);

out:
	unlock_kernel();
	return ret;
}

static void ufs_clear_block(struct inode *inode, struct buffer_head *bh)
{
	lock_buffer(bh);
	memset(bh->b_data, 0, inode->i_sb->s_blocksize);
	set_buffer_uptodate(bh);
	mark_buffer_dirty(bh);
	unlock_buffer(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
}

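/*
 * ufs_inode_getfrag() allocates (or extends) fragments that are addressed
 * directly from the inode (ufsi->i_u1.i_data[]).  With metadata != 0 it
 * returns a zeroed buffer_head for the new block; otherwise it reports the
 * physical fragment through *phys and sets *new for freshly allocated space.
 */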
static struct buffer_head *ufs_inode_getfrag(struct inode *inode,
	unsigned int fragment, unsigned int new_fragment,
	unsigned int required, int *err, int metadata,
	long *phys, int *new, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct buffer_head * result;
	unsigned block, blockoff, lastfrag, lastblock, lastblockoff;
	unsigned tmp, goal;
	__fs32 * p, * p2;
	unsigned flags = 0;

	UFSD(("ENTER, ino %lu, fragment %u, new_fragment %u, required %u\n",
		inode->i_ino, fragment, new_fragment, required))

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	flags = UFS_SB(sb)->s_flags;
	/* TODO : to be done for write support
	if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;
	 */

	block = ufs_fragstoblks (fragment);
	blockoff = ufs_fragnum (fragment);
	p = ufsi->i_u1.i_data + block;
	goal = 0;

repeat:
	tmp = fs32_to_cpu(sb, *p);
	lastfrag = ufsi->i_lastfrag;
	if (tmp && fragment < lastfrag) {
		if (metadata) {
			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
			if (tmp == fs32_to_cpu(sb, *p)) {
				UFSD(("EXIT, result %u\n", tmp + blockoff))
				return result;
			}
			brelse (result);
			goto repeat;
		} else {
			*phys = tmp + blockoff;
			return NULL;
		}
	}

	lastblock = ufs_fragstoblks (lastfrag);
	lastblockoff = ufs_fragnum (lastfrag);
	/*
	 * We will extend file into new block beyond last allocated block
	 */
	if (lastblock < block) {
		/*
		 * We must reallocate last allocated block
		 */
		if (lastblockoff) {
			p2 = ufsi->i_u1.i_data + lastblock;
			tmp = ufs_new_fragments (inode, p2, lastfrag,
				fs32_to_cpu(sb, *p2), uspi->s_fpb - lastblockoff,
				err, locked_page);
			if (!tmp) {
				if (lastfrag != ufsi->i_lastfrag)
					goto repeat;
				else
					return NULL;
			}
			lastfrag = ufsi->i_lastfrag;

		}
		goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb;
		tmp = ufs_new_fragments (inode, p, fragment - blockoff,
			goal, required + blockoff,
			err, locked_page);
	}
	/*
	 * We will extend last allocated block
	 */
	else if (lastblock == block) {
		tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff),
			fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff),
			err, locked_page);
	}
	/*
	 * We will allocate new block before last allocated block
	 */
	else /* (lastblock > block) */ {
		if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1])))
			goal = tmp + uspi->s_fpb;
		tmp = ufs_new_fragments(inode, p, fragment - blockoff,
			goal, uspi->s_fpb, err, locked_page);
	}
	if (!tmp) {
		if ((!blockoff && *p) ||
		    (blockoff && lastfrag != ufsi->i_lastfrag))
			goto repeat;
		*err = -ENOSPC;
		return NULL;
	}

	if (metadata) {
		result = sb_getblk(inode->i_sb, tmp + blockoff);
		ufs_clear_block(inode, result);
	} else {
		*phys = tmp + blockoff;
		result = NULL;
		*err = 0;
		*new = 1;
	}

	inode->i_ctime = CURRENT_TIME_SEC;
	if (IS_SYNC(inode))
		ufs_sync_inode (inode);
	mark_inode_dirty(inode);
	UFSD(("EXIT, result %u\n", tmp + blockoff))
	return result;

	/* This part : To be implemented ....
	   Required only for writing, not required for READ-ONLY.
ufs2:

	u2_block = ufs_fragstoblks(fragment);
	u2_blockoff = ufs_fragnum(fragment);
	p = ufsi->i_u1.u2_i_data + block;
	goal = 0;

repeat2:
	tmp = fs32_to_cpu(sb, *p);
	lastfrag = ufsi->i_lastfrag;

	*/
}

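/*
 * ufs_block_getfrag() does the same job for fragments addressed through an
 * indirect block: bh holds the indirect block, and the pointer at index
 * "block" within it is read (and allocated with ufs_new_fragments() if it
 * is still empty).  As above, metadata callers get a zeroed buffer_head,
 * data callers get the physical fragment via *phys and *new.
 */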
static struct buffer_head *ufs_block_getfrag(struct inode *inode, struct buffer_head *bh,
	unsigned int fragment, unsigned int new_fragment,
	unsigned int blocksize, int * err, int metadata,
	long *phys, int *new, struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct buffer_head * result;
	unsigned tmp, goal, block, blockoff;
	__fs32 * p;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	block = ufs_fragstoblks (fragment);
	blockoff = ufs_fragnum (fragment);

	UFSD(("ENTER, ino %lu, fragment %u, new_fragment %u\n", inode->i_ino, fragment, new_fragment))

	result = NULL;
	if (!bh)
		goto out;
	if (!buffer_uptodate(bh)) {
		ll_rw_block (READ, 1, &bh);
		wait_on_buffer (bh);
		if (!buffer_uptodate(bh))
			goto out;
	}

	p = (__fs32 *) bh->b_data + block;
repeat:
	tmp = fs32_to_cpu(sb, *p);
	if (tmp) {
		if (metadata) {
			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
			if (tmp == fs32_to_cpu(sb, *p))
				goto out;
			brelse (result);
			goto repeat;
		} else {
			*phys = tmp + blockoff;
			goto out;
		}
	}

	if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1]) + uspi->s_fpb))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_page);
	if (!tmp) {
		if (fs32_to_cpu(sb, *p))
			goto repeat;
		goto out;
	}

	if (metadata) {
		result = sb_getblk(sb, tmp + blockoff);
		ufs_clear_block(inode, result);
	} else {
		*phys = tmp + blockoff;
		*new = 1;
	}

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
	UFSD(("result %u\n", tmp + blockoff));
out:
	brelse (bh);
	UFSD(("EXIT\n"));
	return result;
}

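/*
 * The fragment space handled by ufs_getfrag_block() below is bounded by
 * (UFS_NDADDR + s_apb + s_2apb + s_3apb) blocks, i.e. the direct blocks
 * plus one pointer level per indirection.  The GET_INODE_xxx and
 * GET_INDIRECT_xxx macros resolve one pointer level per call, falling
 * through the get_double/get_indirect labels until the data fragment
 * itself is mapped.
 */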
/*
 * This function gets the block which contains the fragment.
 */

int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
	struct super_block * sb = inode->i_sb;
	struct ufs_sb_private_info * uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;
	int ret, err, new;
	unsigned long ptr,phys;
	u64 phys64 = 0;

	if (!create) {
		phys64 = ufs_frag_map(inode, fragment);
		UFSD(("phys64 = %llu \n",phys64));
		if (phys64)
			map_bh(bh_result, sb, phys64);
		return 0;
	}

	/* This code entered only while writing ....? */

	err = -EIO;
	new = 0;
	ret = 0;
	bh = NULL;

	lock_kernel();

	UFSD(("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment))
	if (fragment < 0)
		goto abort_negative;
	if (fragment >
	    ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb)
	     << uspi->s_fpbshift))
		goto abort_too_big;

	err = 0;
	ptr = fragment;

	/*
	 * ok, these macros clean the logic up a bit and make
	 * it much more readable:
	 */
#define GET_INODE_DATABLOCK(x) \
	ufs_inode_getfrag(inode, x, fragment, 1, &err, 0, &phys, &new, bh_result->b_page)
#define GET_INODE_PTR(x) \
	ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, 1, NULL, NULL, bh_result->b_page)
#define GET_INDIRECT_DATABLOCK(x) \
	ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
			  &err, 0, &phys, &new, bh_result->b_page);
#define GET_INDIRECT_PTR(x) \
	ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
			  &err, 1, NULL, NULL, bh_result->b_page);

	if (ptr < UFS_NDIR_FRAGMENT) {
		bh = GET_INODE_DATABLOCK(ptr);
		goto out;
	}
	ptr -= UFS_NDIR_FRAGMENT;
	if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) {
		bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift));
		goto get_indirect;
	}
	ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
	if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) {
		bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift));
		goto get_double;
	}
	ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
	bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift));
	bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask);
get_double:
	bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask);
get_indirect:
	bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask);

#undef GET_INODE_DATABLOCK
#undef GET_INODE_PTR
#undef GET_INDIRECT_DATABLOCK
#undef GET_INDIRECT_PTR

out:
	if (err)
		goto abort;
	if (new)
		set_buffer_new(bh_result);
	map_bh(bh_result, sb, phys);
abort:
	unlock_kernel();
	return err;

abort_negative:
	ufs_warning(sb, "ufs_get_block", "block < 0");
	goto abort;

abort_too_big:
	ufs_warning(sb, "ufs_get_block", "block > big");
	goto abort;
}

struct buffer_head *ufs_getfrag(struct inode *inode, unsigned int fragment,
				int create, int *err)
{
	struct buffer_head dummy;
	int error;

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	error = ufs_getfrag_block(inode, fragment, &dummy, create);
	*err = error;
	if (!error && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (buffer_new(&dummy)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
		}
		return bh;
	}
	return NULL;
}

struct buffer_head * ufs_bread (struct inode * inode, unsigned fragment,
	int create, int * err)
{
	struct buffer_head * bh;

	UFSD(("ENTER, ino %lu, fragment %u\n", inode->i_ino, fragment))
	bh = ufs_getfrag (inode, fragment, create, err);
	if (!bh || buffer_uptodate(bh))
		return bh;
	ll_rw_block (READ, 1, &bh);
	wait_on_buffer (bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse (bh);
	*err = -EIO;
	return NULL;
}

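/*
 * The address_space operations below simply plug ufs_getfrag_block into
 * the generic buffer-head based page I/O helpers, so reads, writes and
 * bmap all share the block-mapping logic above.
 */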
static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page,ufs_getfrag_block,wbc);
}
static int ufs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page,ufs_getfrag_block);
}
static int ufs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return block_prepare_write(page,from,to,ufs_getfrag_block);
}
static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,ufs_getfrag_block);
}
struct address_space_operations ufs_aops = {
	.readpage = ufs_readpage,
	.writepage = ufs_writepage,
	.sync_page = block_sync_page,
	.prepare_write = ufs_prepare_write,
	.commit_write = generic_commit_write,
	.bmap = ufs_bmap
};

static void ufs_set_inode_ops(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ufs_file_inode_operations;
		inode->i_fop = &ufs_file_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ufs_dir_inode_operations;
		inode->i_fop = &ufs_dir_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks)
			inode->i_op = &ufs_fast_symlink_inode_operations;
		else {
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mapping->a_ops = &ufs_aops;
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}

void ufs_read_inode (struct inode * inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_inode * ufs_inode;
	struct ufs2_inode *ufs2_inode;
	struct buffer_head * bh;
	mode_t mode;
	unsigned i;
	unsigned flags;

	UFSD(("ENTER, ino %lu\n", inode->i_ino))

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	flags = UFS_SB(sb)->s_flags;

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
		goto bad_inode;
	}

	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
		goto bad_inode;
	}
	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2_inode;

	ufs_inode = (struct ufs_inode *) (bh->b_data + sizeof(struct ufs_inode) * ufs_inotofsbo(inode->i_ino));

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
	inode->i_nlink = fs16_to_cpu(sb, ufs_inode->ui_nlink);
	if (inode->i_nlink == 0)
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	inode->i_uid = ufs_get_inode_uid(sb, ufs_inode);
	inode->i_gid = ufs_get_inode_gid(sb, ufs_inode);

	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
	inode->i_blksize = PAGE_SIZE;   /* This is the optimal IO size (for stat) */
	inode->i_version++;
	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
	ufsi->i_gen = fs32_to_cpu(sb, ufs_inode->ui_gen);
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	ufsi->i_lastfrag = (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
			ufsi->i_u1.i_data[i] = ufs_inode->ui_u2.ui_addr.ui_db[i];
	}
	else {
		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
			ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i];
	}
	ufsi->i_osync = 0;

	ufs_set_inode_ops(inode);

	brelse (bh);

	UFSD(("EXIT\n"))
	return;

bad_inode:
	make_bad_inode(inode);
	return;

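	/*
	 * UFS2 on-disk inodes: sizes and block counts are 64-bit and
	 * uid/gid are plain 32-bit fields, so the decode below differs
	 * from the UFS1 path only in field widths and in using the
	 * u2_i_data block-pointer array.
	 */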
ufs2_inode :
	UFSD(("Reading ufs2 inode, ino %lu\n", inode->i_ino))

	ufs2_inode = (struct ufs2_inode *)(bh->b_data + sizeof(struct ufs2_inode) * ufs_inotofsbo(inode->i_ino));

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
	inode->i_nlink = fs16_to_cpu(sb, ufs2_inode->ui_nlink);
	if (inode->i_nlink == 0)
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	inode->i_uid = fs32_to_cpu(sb, ufs2_inode->ui_uid);
	inode->i_gid = fs32_to_cpu(sb, ufs2_inode->ui_gid);

	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_atime.tv_sec);
	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_ctime.tv_sec);
	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_mtime.tv_sec);
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
	inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat) */

	inode->i_version++;
	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
	ufsi->i_gen = fs32_to_cpu(sb, ufs2_inode->ui_gen);
	/*
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	*/
	ufsi->i_lastfrag = (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
			ufsi->i_u1.u2_i_data[i] =
				ufs2_inode->ui_u2.ui_addr.ui_db[i];
	}
	else {
		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
			ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i];
	}
	ufsi->i_osync = 0;

	ufs_set_inode_ops(inode);

	brelse(bh);

	UFSD(("EXIT\n"))
	return;
}

static int ufs_update_inode(struct inode * inode, int do_sync)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct buffer_head * bh;
	struct ufs_inode * ufs_inode;
	unsigned i;
	unsigned flags;

	UFSD(("ENTER, ino %lu\n", inode->i_ino))

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	flags = UFS_SB(sb)->s_flags;

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
		return -1;
	}

	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
		return -1;
	}
	ufs_inode = (struct ufs_inode *) (bh->b_data + ufs_inotofsbo(inode->i_ino) * sizeof(struct ufs_inode));

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_set_inode_uid(sb, ufs_inode, inode->i_uid);
	ufs_set_inode_gid(sb, ufs_inode, inode->i_gid);

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atime.tv_usec = 0;
	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctime.tv_usec = 0;
	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtime.tv_usec = 0;
	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, ufsi->i_gen);

	if ((flags & UFS_UID_MASK) == UFS_UID_EFT) {
		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
	}

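	/*
	 * Write back the block pointers: for device nodes the encoded
	 * device number lives in the first direct-block slot, so only
	 * ui_db[0] is copied; regular files copy the whole pointer
	 * array, and inodes without data blocks (fast symlinks) copy
	 * the inline symlink area instead.
	 */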
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
	} else if (inode->i_blocks) {
		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
			ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.i_data[i];
	}
	else {
		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
			ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i];
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs_inode));

	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse (bh);

	UFSD(("EXIT\n"))
	return 0;
}

int ufs_write_inode (struct inode * inode, int wait)
{
	int ret;
	lock_kernel();
	ret = ufs_update_inode (inode, wait);
	unlock_kernel();
	return ret;
}

int ufs_sync_inode (struct inode *inode)
{
	return ufs_update_inode (inode, 1);
}

void ufs_delete_inode (struct inode * inode)
{
	truncate_inode_pages(&inode->i_data, 0);
	/*UFS_I(inode)->i_dtime = CURRENT_TIME;*/
	lock_kernel();
	mark_inode_dirty(inode);
	ufs_update_inode(inode, IS_SYNC(inode));
	inode->i_size = 0;
	if (inode->i_blocks)
		ufs_truncate (inode);
	ufs_free_inode (inode);
	unlock_kernel();
}