/*
 *  linux/fs/affs/file.c
 *
 *  (c) 1996  Hans-Joachim Widmaier - Rewritten
 *
 *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
 *
 *  (C) 1992  Eric Youngdale Modified for ISO 9660 filesystem.
 *
 *  (C) 1991  Linus Torvalds - minix filesystem
 *
 *  affs regular file handling primitives
 */

#include "affs.h"

#if PAGE_SIZE < 4096
#error PAGE_SIZE must be at least 4096
#endif

static int affs_grow_extcache(struct inode *inode, u32 lc_idx);
static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext);
static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext);
static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
static int affs_file_open(struct inode *inode, struct file *filp);
static int affs_file_release(struct inode *inode, struct file *filp);

struct file_operations affs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_file_read,
	.write		= generic_file_write,
	.mmap		= generic_file_mmap,
	.open		= affs_file_open,
	.release	= affs_file_release,
	.fsync		= file_fsync,
	.sendfile	= generic_file_sendfile,
};

struct inode_operations affs_file_inode_operations = {
	.truncate	= affs_truncate,
	.setattr	= affs_notify_change,
};

static int
affs_file_open(struct inode *inode, struct file *filp)
{
	if (atomic_read(&filp->f_count) != 1)
		return 0;
	pr_debug("AFFS: open(%d)\n", AFFS_I(inode)->i_opencnt);
	AFFS_I(inode)->i_opencnt++;
	return 0;
}

static int
affs_file_release(struct inode *inode, struct file *filp)
{
	if (atomic_read(&filp->f_count) != 0)
		return 0;
	pr_debug("AFFS: release(%d)\n", AFFS_I(inode)->i_opencnt);
	AFFS_I(inode)->i_opencnt--;
	if (!AFFS_I(inode)->i_opencnt)
		affs_free_prealloc(inode);

	return 0;
}

static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 lc_max;
	int i, j, key;

	if (!AFFS_I(inode)->i_lc) {
		char *ptr = (char *)get_zeroed_page(GFP_NOFS);
		if (!ptr)
			return -ENOMEM;
		AFFS_I(inode)->i_lc = (u32 *)ptr;
		AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
	}

	lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

	if (AFFS_I(inode)->i_extcnt > lc_max) {
		u32 lc_shift, lc_mask, tmp, off;

		/* need to recalculate linear cache, start from old size */
		lc_shift = AFFS_I(inode)->i_lc_shift;
		tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
		for (; tmp; tmp >>= 1)
			lc_shift++;
		lc_mask = (1 << lc_shift) - 1;

		/* fix idx and old size to new shift */
		lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
		AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

		/* first shrink old cache to make more space */
		off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
		for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
			AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];

		AFFS_I(inode)->i_lc_shift = lc_shift;
		AFFS_I(inode)->i_lc_mask = lc_mask;
	}

	/* fill cache to the needed index */
	i = AFFS_I(inode)->i_lc_size;
	AFFS_I(inode)->i_lc_size = lc_idx + 1;
	for (; i <= lc_idx; i++) {
		if (!i) {
			AFFS_I(inode)->i_lc[0] = inode->i_ino;
			continue;
		}
		key = AFFS_I(inode)->i_lc[i - 1];
		j = AFFS_I(inode)->i_lc_mask + 1;
		// unlock cache
		for (; j > 0; j--) {
			bh = affs_bread(sb, key);
			if (!bh)
				goto err;
			key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
			affs_brelse(bh);
		}
		// lock cache
		AFFS_I(inode)->i_lc[i] = key;
	}

	return 0;

err:
	// lock cache
	return -EIO;
}
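/*
 * Allocate a new extension block behind @bh, give it a T_LIST/ST_FILE
 * header pointing back at the file, and chain it into the extension
 * list of @bh.  Both blocks get their checksums fixed up; the caller
 * receives the new buffer or an ERR_PTR() on -ENOSPC/-EIO.
 */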
static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh;
	u32 blocknr, tmp;

	blocknr = affs_alloc_block(inode, bh->b_blocknr);
	if (!blocknr)
		return ERR_PTR(-ENOSPC);

	new_bh = affs_getzeroblk(sb, blocknr);
	if (!new_bh) {
		affs_free_block(sb, blocknr);
		return ERR_PTR(-EIO);
	}

	AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
	AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
	AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
	AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
	affs_fix_checksum(sb, new_bh);

	mark_buffer_dirty_inode(new_bh, inode);

	tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
	if (tmp)
		affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
	AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
	affs_adjust_checksum(bh, blocknr - tmp);
	mark_buffer_dirty_inode(bh, inode);

	AFFS_I(inode)->i_extcnt++;
	mark_inode_dirty(inode);

	return new_bh;
}

static inline struct buffer_head *
affs_get_extblock(struct inode *inode, u32 ext)
{
	/* inline the simplest case: same extended block as last time */
	struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
	if (ext == AFFS_I(inode)->i_ext_last)
		atomic_inc(&bh->b_count);
	else
		/* we have to do more (not inlined) */
		bh = affs_get_extblock_slow(inode, ext);

	return bh;
}

static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 ext_key;
	u32 lc_idx, lc_off, ac_idx;
	u32 tmp, idx;

	if (ext == AFFS_I(inode)->i_ext_last + 1) {
		/* read the next extended block from the current one */
		bh = AFFS_I(inode)->i_ext_bh;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		if (ext < AFFS_I(inode)->i_extcnt)
			goto read_ext;
		if (ext > AFFS_I(inode)->i_extcnt)
			BUG();
		bh = affs_alloc_extblock(inode, bh, ext);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

	if (ext == 0) {
		/* we seek back to the file header block */
		ext_key = inode->i_ino;
		goto read_ext;
	}

	if (ext >= AFFS_I(inode)->i_extcnt) {
		struct buffer_head *prev_bh;

		/* allocate a new extended block */
		if (ext > AFFS_I(inode)->i_extcnt)
			BUG();

		/* get previous extended block */
		prev_bh = affs_get_extblock(inode, ext - 1);
		if (IS_ERR(prev_bh))
			return prev_bh;
		bh = affs_alloc_extblock(inode, prev_bh, ext);
		affs_brelse(prev_bh);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

again:
	/* check if there is an extended cache and whether it's large enough */
	lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
	lc_off = ext & AFFS_I(inode)->i_lc_mask;

	if (lc_idx >= AFFS_I(inode)->i_lc_size) {
		int err;

		err = affs_grow_extcache(inode, lc_idx);
		if (err)
			return ERR_PTR(err);
		goto again;
	}

	/* every n'th key we find in the linear cache */
	if (!lc_off) {
		ext_key = AFFS_I(inode)->i_lc[lc_idx];
		goto read_ext;
	}

	/* maybe it's still in the associative cache */
	ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
	if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
		ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
		goto read_ext;
	}

	/* try to find one of the previous extended blocks */
	tmp = ext;
	idx = ac_idx;
	while (--tmp, --lc_off > 0) {
		idx = (idx - 1) & AFFS_AC_MASK;
		if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
			ext_key = AFFS_I(inode)->i_ac[idx].key;
			goto find_ext;
		}
	}

	/* fall back to the linear cache */
	ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
	/* read all extended blocks until we find the one we need */
	//unlock cache
	do {
		bh = affs_bread(sb, ext_key);
		if (!bh)
			goto err_bread;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		affs_brelse(bh);
		tmp++;
	} while (tmp < ext);
	//lock cache

	/* store it in the associative cache */
	// recalculate ac_idx?
	AFFS_I(inode)->i_ac[ac_idx].ext = ext;
	AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
	/* finally read the right extended block */
	//unlock cache
	bh = affs_bread(sb, ext_key);
	if (!bh)
		goto err_bread;
	//lock cache

store_ext:
	/* release old cached extended block and store the new one */
	affs_brelse(AFFS_I(inode)->i_ext_bh);
	AFFS_I(inode)->i_ext_last = ext;
	AFFS_I(inode)->i_ext_bh = bh;
	atomic_inc(&bh->b_count);

	return bh;

err_bread:
	affs_brelse(bh);
	return ERR_PTR(-EIO);
}
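/*
 * Map a logical file block onto a device block for the generic block
 * I/O helpers.  Every extension block maps s_hashsize data blocks, so
 * the extension index is block / s_hashsize and the slot inside it is
 * block % s_hashsize.  As a worked example (assuming a 512-byte block
 * volume, where s_hashsize works out to 72 pointers): file block 100
 * lives in slot 100 % 72 == 28 of extension block 100 / 72 == 1.
 */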
static int
affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *ext_bh;
	u32 ext;

	pr_debug("AFFS: get_block(%u, %lu)\n", (u32)inode->i_ino, (unsigned long)block);

	if (block > (sector_t)0x7fffffffUL)
		BUG();

	if (block >= AFFS_I(inode)->i_blkcnt) {
		if (block > AFFS_I(inode)->i_blkcnt || !create)
			goto err_big;
	} else
		create = 0;

	//lock cache
	affs_lock_ext(inode);

	ext = (u32)block / AFFS_SB(sb)->s_hashsize;
	block -= ext * AFFS_SB(sb)->s_hashsize;
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh))
		goto err_ext;
	map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));

	if (create) {
		u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
		if (!blocknr)
			goto err_alloc;
		set_buffer_new(bh_result);
		AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
		AFFS_I(inode)->i_blkcnt++;

		/* store new block */
		if (bh_result->b_blocknr)
			affs_warning(sb, "get_block", "block already set (%x)", bh_result->b_blocknr);
		AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
		AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
		affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
		bh_result->b_blocknr = blocknr;

		if (!block) {
			/* insert first block into header block */
			u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
			if (tmp)
				affs_warning(sb, "get_block", "first block already set (%d)", tmp);
			AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
			affs_adjust_checksum(ext_bh, blocknr - tmp);
		}
	}

	affs_brelse(ext_bh);
	//unlock cache
	affs_unlock_ext(inode);
	return 0;

err_big:
	affs_error(inode->i_sb, "get_block", "strange block request %lu",
		   (unsigned long)block);
	return -EIO;
err_ext:
	// unlock cache
	affs_unlock_ext(inode);
	return PTR_ERR(ext_bh);
err_alloc:
	brelse(ext_bh);
	clear_buffer_mapped(bh_result);
	bh_result->b_bdev = NULL;
	// unlock cache
	affs_unlock_ext(inode);
	return -ENOSPC;
}
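/*
 * For FFS volumes the data blocks carry no embedded headers, so the
 * generic buffer-head based page helpers can be used directly; they
 * only need affs_get_block() to translate file blocks.
 */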
static int affs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, affs_get_block, wbc);
}

static int affs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, affs_get_block);
}

static int affs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return cont_prepare_write(page, from, to, affs_get_block,
				  &AFFS_I(page->mapping->host)->mmu_private);
}

static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, affs_get_block);
}

struct address_space_operations affs_aops = {
	.readpage	= affs_readpage,
	.writepage	= affs_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= affs_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= _affs_bmap
};

static inline struct buffer_head *
affs_bread_ino(struct inode *inode, int block, int create)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, create);
	if (!err) {
		bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getzeroblk_ino(struct inode *inode, int block)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, 1);
	if (!err) {
		bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getemptyblk_ino(struct inode *inode, int block)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, 1);
	if (!err) {
		bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}

static int
affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	char *data;
	u32 bidx, boff, bsize;
	u32 tmp;

	pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
	if (from > to || to > PAGE_CACHE_SIZE)
		BUG();
	kmap(page);
	data = page_address(page);
	bsize = AFFS_SB(sb)->s_data_blksize;
	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
	bidx = tmp / bsize;
	boff = tmp % bsize;

	while (from < to) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh)) {
			/* don't leak the kmap taken above */
			kunmap(page);
			return PTR_ERR(bh);
		}
		tmp = min(bsize - boff, to - from);
		if (from + tmp > to || tmp > bsize)
			BUG();
		memcpy(data + from, AFFS_DATA(bh) + boff, tmp);
		affs_brelse(bh);
		bidx++;
		from += tmp;
		boff = 0;
	}
	flush_dcache_page(page);
	kunmap(page);
	return 0;
}
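/*
 * Grow an OFS file to @newsize: zero-fill the unused tail of the last
 * (partial) data block, then allocate and chain zeroed T_DATA blocks
 * until the new size is reached, keeping each block's size field,
 * ->next pointer and checksum consistent on the way.
 */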
static int
affs_extent_file_ofs(struct inode *inode, u32 newsize)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	u32 bidx, boff;
	u32 size, bsize;
	u32 tmp;

	pr_debug("AFFS: extent_file(%u, %d)\n", (u32)inode->i_ino, newsize);
	bsize = AFFS_SB(sb)->s_data_blksize;
	bh = NULL;
	size = AFFS_I(inode)->mmu_private;
	bidx = size / bsize;
	boff = size % bsize;
	if (boff) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, newsize - size);
		if (boff + tmp > bsize || tmp > bsize)
			BUG();
		memset(AFFS_DATA(bh) + boff, 0, tmp);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		size += tmp;
		bidx++;
	} else if (bidx) {
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
	}

	while (size < newsize) {
		prev_bh = bh;
		bh = affs_getzeroblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto out;
		tmp = min(bsize, newsize - size);
		if (tmp > bsize)
			BUG();
		AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
		AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
		AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_fix_checksum(sb, bh);
		bh->b_state &= ~(1UL << BH_New);
		mark_buffer_dirty_inode(bh, inode);
		if (prev_bh) {
			u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
			if (tmp)
				affs_warning(sb, "extent_file_ofs", "next block already set for %d (%d)", bidx, tmp);
			AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
			affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
			mark_buffer_dirty_inode(prev_bh, inode);
			affs_brelse(prev_bh);
		}
		size += bsize;
		bidx++;
	}
	affs_brelse(bh);
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return 0;

out:
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return PTR_ERR(bh);
}

static int
affs_readpage_ofs(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	u32 to;
	int err;

	pr_debug("AFFS: read_page(%u, %ld)\n", (u32)inode->i_ino, page->index);
	to = PAGE_CACHE_SIZE;
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
		to = inode->i_size & ~PAGE_CACHE_MASK;
		memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
	}

	err = affs_do_readpage_ofs(file, page, 0, to);
	if (!err)
		SetPageUptodate(page);
	unlock_page(page);
	return err;
}

static int affs_prepare_write_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	u32 size, offset;
	u32 tmp;
	int err = 0;

	pr_debug("AFFS: prepare_write(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
	offset = page->index << PAGE_CACHE_SHIFT;
	if (offset + from > AFFS_I(inode)->mmu_private) {
		err = affs_extent_file_ofs(inode, offset + from);
		if (err)
			return err;
	}
	size = inode->i_size;

	if (PageUptodate(page))
		return 0;

	if (from) {
		err = affs_do_readpage_ofs(file, page, 0, from);
		if (err)
			return err;
	}
	if (to < PAGE_CACHE_SIZE) {
		char *kaddr = kmap_atomic(page, KM_USER0);

		memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		if (size > offset + to) {
			if (size < offset + PAGE_CACHE_SIZE)
				tmp = size & ~PAGE_CACHE_MASK;
			else
				tmp = PAGE_CACHE_SIZE;
			err = affs_do_readpage_ofs(file, page, to, tmp);
		}
	}
	return err;
}
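/*
 * Copy the dirtied page range into OFS data blocks: first top up a
 * leading partial block, then write full blocks, then a trailing
 * partial block.  Freshly allocated blocks get a T_DATA header and are
 * linked to their predecessor; i_size/mmu_private are advanced to the
 * end of what was actually written.
 */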
static int affs_commit_write_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	char *data;
	u32 bidx, boff, bsize;
	u32 tmp;
	int written;

	pr_debug("AFFS: commit_write(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
	bsize = AFFS_SB(sb)->s_data_blksize;
	data = page_address(page);

	bh = NULL;
	written = 0;
	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
	bidx = tmp / bsize;
	boff = tmp % bsize;
	if (boff) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, to - from);
		if (boff + tmp > bsize || tmp > bsize)
			BUG();
		memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	} else if (bidx) {
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
	}
	while (from + bsize <= to) {
		prev_bh = bh;
		bh = affs_getemptyblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto out;
		memcpy(AFFS_DATA(bh), data + from, bsize);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
				if (tmp)
					affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		}
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += bsize;
		from += bsize;
		bidx++;
	}
	if (from < to) {
		prev_bh = bh;
		bh = affs_bread_ino(inode, bidx, 1);
		if (IS_ERR(bh))
			goto out;
		tmp = min(bsize, to - from);
		if (tmp > bsize)
			BUG();
		memcpy(AFFS_DATA(bh), data + from, tmp);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
				if (tmp)
					affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		} else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	}
	SetPageUptodate(page);

done:
	affs_brelse(bh);
	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
	if (tmp > inode->i_size)
		inode->i_size = AFFS_I(inode)->mmu_private = tmp;

	return written;

out:
	/* pick up the error before bh is replaced by the last valid buffer */
	if (!written)
		written = PTR_ERR(bh);
	bh = prev_bh;
	goto done;
}
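/*
 * OFS data blocks contain their own headers, so the generic
 * block_write_full_page()/block_sync_page() helpers cannot be used
 * here; writepage and sync_page therefore remain unimplemented (see
 * the commented-out entries below).
 */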
struct address_space_operations affs_aops_ofs = {
	.readpage	= affs_readpage_ofs,
	//.writepage	= affs_writepage_ofs,
	//.sync_page	= affs_sync_page_ofs,
	.prepare_write	= affs_prepare_write_ofs,
	.commit_write	= affs_commit_write_ofs
};

/* Free any preallocated blocks. */

void
affs_free_prealloc(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	pr_debug("AFFS: free_prealloc(ino=%lu)\n", inode->i_ino);

	while (AFFS_I(inode)->i_pa_cnt) {
		AFFS_I(inode)->i_pa_cnt--;
		affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
	}
}

/* Truncate (or enlarge) a file to the requested size. */
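/*
 * Enlarging is delegated to prepare_write/commit_write through the page
 * cache, which zero-extends the file.  Shrinking clears the tail of the
 * last remaining extension block, then walks and frees all following
 * extension blocks together with the data blocks they map, and finally
 * drops any preallocated blocks.
 */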
void
affs_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 ext, ext_key;
	u32 last_blk, blkcnt, blk;
	u32 size;
	struct buffer_head *ext_bh;
	int i;

	pr_debug("AFFS: truncate(inode=%d, oldsize=%u, newsize=%u)\n",
		 (u32)inode->i_ino, (u32)AFFS_I(inode)->mmu_private, (u32)inode->i_size);

	last_blk = 0;
	ext = 0;
	if (inode->i_size) {
		last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
		ext = last_blk / AFFS_SB(sb)->s_hashsize;
	}

	if (inode->i_size > AFFS_I(inode)->mmu_private) {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		u32 size = inode->i_size - 1;
		int res;

		page = grab_cache_page(mapping, size >> PAGE_CACHE_SHIFT);
		if (!page)
			return;
		size = (size & (PAGE_CACHE_SIZE - 1)) + 1;
		res = mapping->a_ops->prepare_write(NULL, page, size, size);
		if (!res)
			res = mapping->a_ops->commit_write(NULL, page, size, size);
		unlock_page(page);
		page_cache_release(page);
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == AFFS_I(inode)->mmu_private)
		return;

	// lock cache
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh)) {
		affs_warning(sb, "truncate", "unexpected read error for ext block %u (%d)",
			     ext, PTR_ERR(ext_bh));
		return;
	}
	if (AFFS_I(inode)->i_lc) {
		/* clear linear cache */
		i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
		if (AFFS_I(inode)->i_lc_size > i) {
			AFFS_I(inode)->i_lc_size = i;
			for (; i < AFFS_LC_SIZE; i++)
				AFFS_I(inode)->i_lc[i] = 0;
		}
		/* clear associative cache */
		for (i = 0; i < AFFS_AC_SIZE; i++)
			if (AFFS_I(inode)->i_ac[i].ext >= ext)
				AFFS_I(inode)->i_ac[i].ext = 0;
	}
	ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);

	blkcnt = AFFS_I(inode)->i_blkcnt;
	i = 0;
	blk = last_blk;
	if (inode->i_size) {
		i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
		blk++;
	} else
		AFFS_HEAD(ext_bh)->first_data = 0;
	size = AFFS_SB(sb)->s_hashsize;
	if (size > blkcnt - blk + i)
		size = blkcnt - blk + i;
	for (; i < size; i++, blk++) {
		affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		AFFS_BLOCK(sb, ext_bh, i) = 0;
	}
	AFFS_TAIL(sb, ext_bh)->extension = 0;
	affs_fix_checksum(sb, ext_bh);
	mark_buffer_dirty_inode(ext_bh, inode);
	affs_brelse(ext_bh);

	if (inode->i_size) {
		AFFS_I(inode)->i_blkcnt = last_blk + 1;
		AFFS_I(inode)->i_extcnt = ext + 1;
		if (AFFS_SB(sb)->s_flags & SF_OFS) {
			struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
			u32 tmp;
			/* check the buffer we actually read here, not the
			 * extension block that was already released above */
			if (IS_ERR(bh)) {
				affs_warning(sb, "truncate", "unexpected read error for last block %u (%d)",
					     last_blk, PTR_ERR(bh));
				return;
			}
			tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
			AFFS_DATA_HEAD(bh)->next = 0;
			affs_adjust_checksum(bh, -tmp);
			affs_brelse(bh);
		}
	} else {
		AFFS_I(inode)->i_blkcnt = 0;
		AFFS_I(inode)->i_extcnt = 1;
	}
	AFFS_I(inode)->mmu_private = inode->i_size;
	// unlock cache

	while (ext_key) {
		ext_bh = affs_bread(sb, ext_key);
		size = AFFS_SB(sb)->s_hashsize;
		if (size > blkcnt - blk)
			size = blkcnt - blk;
		for (i = 0; i < size; i++, blk++)
			affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		affs_free_block(sb, ext_key);
		ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
		affs_brelse(ext_bh);
	}
	affs_free_prealloc(inode);
}